Mirror of git://sourceware.org/git/lvm2.git (synced 2025-11-24 08:23:49 +03:00)

Compare commits: dev-lvmguy ... dev-agk-fs
103 commits
| SHA1 |
|---|
| 60e50072e5 |
| 3a5561e5ab |
| aedac100f9 |
| 18bbeec825 |
| 9ed11e9191 |
| 05aceaffbd |
| f4b30b0dae |
| 43fb4aa69b |
| 872932a0fb |
| 0b019c5406 |
| ef97360866 |
| 17838e6439 |
| 11589891d7 |
| b6c4b7cfb0 |
| c5b6c9ad44 |
| 6dea1ed5ae |
| e4ef3d04ad |
| 547bdb63e1 |
| 9a50df291a |
| e7ee89d80b |
| 2a5e24580a |
| 191a2517be |
| 1a0d57f895 |
| 9a62767f2d |
| 5d39927f22 |
| 9b23d9bfe4 |
| f350283398 |
| af7c8e7106 |
| ca859b5149 |
| d3bcec5993 |
| 910918d1c2 |
| 6360ba3d2d |
| b7831fc14a |
| 70c1fa3764 |
| 8df3f300ba |
| b76852bf35 |
| 26ca308ba9 |
| 7b0371e74e |
| 83249f3327 |
| 4c89d3794c |
| 10c3d94159 |
| 157948b5a5 |
| c25b95e2ef |
| 51dfbf1fb3 |
| daf1d4cadc |
| fb42874a4f |
| 48778bc503 |
| 62abae1525 |
| eb9586bd3b |
| d6dd700bf7 |
| 7a064303fe |
| 964114950c |
| 1828822bd8 |
| ce1e5b9991 |
| 80a6de616a |
| 21456dcf7f |
| 89661981e8 |
| 4a14617dc4 |
| f9d28f1aec |
| 998151e83e |
| 8d0df0c011 |
| 27384c52cf |
| c41e999488 |
| 4f7631b4ad |
| 5f6bdf707d |
| 84cceaf9b9 |
| 74ba326007 |
| 189fa64793 |
| 3bdc4045c2 |
| d768fbe010 |
| 76f60cc430 |
| 2574d3257a |
| 64a2fad5d6 |
| 34caf83172 |
| f79bd30a8b |
| 1784cc990e |
| 2d74de3f05 |
| 34a8d3c2fd |
| 932db3db53 |
| fe18e5e77a |
| 929cf4b73c |
| 4de0e692db |
| 7d39b4d5e7 |
| 92691e345d |
| c1865b0a86 |
| b499d96215 |
| e2354ea344 |
| ffe3ca26e0 |
| 3fd3c9430d |
| 8ab0725077 |
| 3f4ecaf8c2 |
| da634bfc89 |
| 1eb1869626 |
| aa72caca5a |
| f44e69f9fc |
| 9712995edd |
| 6716f5a2f4 |
| 263050bc07 |
| c1ce371b59 |
| b7ae57c6a6 |
| 21fc35dd1b |
| 7e411b111f |
| f80d373753 |
README (5 changed lines)
@@ -6,11 +6,12 @@ Installation instructions are in INSTALL.

There is no warranty - see COPYING and COPYING.LIB.

Tarballs are available from:
ftp://sourceware.org/pub/lvm2/
ftp://sources.redhat.com/pub/lvm2/

The source code is stored in git:
http://git.fedorahosted.org/git/lvm2.git
git clone git://git.fedorahosted.org/git/lvm2.git
https://sourceware.org/git/?p=lvm2.git
git clone git://sourceware.org/git/lvm2.git

Mailing list for general discussion related to LVM2:
linux-lvm@redhat.com

@@ -1,5 +1,10 @@
Version 2.02.169 -
=====================================
Upstream git moved to https://sourceware.org/git/?p=lvm2
Support conversion of raid type, stripesize and number of disks
Reject writemostly/writebehind in lvchange during resynchronization.
Deactivate active origin first before removal for improved workflow.
Fix regression of accepting options --type and -m with lvresize (2.02.158).
Add lvconvert --swapmetadata, new specific way to swap pool metadata LVs.
Add lvconvert --startpoll, new specific way to start polling conversions.
Add lvconvert --mergethin, new specific way to merge thin snapshots.

@@ -1316,14 +1316,13 @@ int dev_manager_raid_message(struct dev_manager *dm,
return 0;
}

/* These are the supported RAID messages for dm-raid v1.5.0 */
/* These are the supported RAID messages for dm-raid v1.9.0 */
if (strcmp(msg, "idle") &&
strcmp(msg, "frozen") &&
strcmp(msg, "resync") &&
strcmp(msg, "recover") &&
strcmp(msg, "check") &&
strcmp(msg, "repair") &&
strcmp(msg, "reshape")) {
strcmp(msg, "repair")) {
log_error(INTERNAL_ERROR "Unknown RAID message: %s.", msg);
return 0;
}

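The hunk above is just a whitelist: dev_manager_raid_message() rejects any string other than the listed dm-raid messages ("idle", "frozen", "resync", "recover", "check", "repair", plus "reshape" on the branch whose target supports reshaping). A minimal table-driven sketch of the same check; the helper name is hypothetical and does not exist in the tree:

```c
#include <string.h>

/* Hypothetical helper: table-driven form of the strcmp() chain shown above.
 * The message names are exactly the ones listed in the hunk. */
int raid_message_is_supported(const char *msg)
{
	static const char *const supported[] = {
		"idle", "frozen", "resync", "recover", "check", "repair", "reshape",
	};
	size_t i;

	for (i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
		if (!strcmp(msg, supported[i]))
			return 1;	/* message may be passed on to the kernel target */

	return 0;	/* caller logs an internal error, as in the hunk */
}
```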
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*

@@ -1104,6 +1104,19 @@ int lv_raid_healthy(const struct logical_volume *lv)
return 1;
}

/* Helper: check for any sub LVs after a disk removing reshape */
static int _sublvs_remove_after_reshape(const struct logical_volume *lv)
{
uint32_t s;
struct lv_segment *seg = first_seg(lv);

for (s = seg->area_count -1; s; s--)
if (seg_lv(seg, s)->status & LV_REMOVE_AFTER_RESHAPE)
return 1;

return 0;
}

char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_with_info_and_seg_status *lvdm)
{
const struct logical_volume *lv = lvdm->lv;
@@ -1269,6 +1282,8 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
repstr[8] = 'p';
else if (lv_is_raid_type(lv)) {
uint64_t n;
char *sync_action;

if (!activation())
repstr[8] = 'X'; /* Unknown */
else if (!lv_raid_healthy(lv))
@@ -1276,10 +1291,17 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
else if (lv_is_raid(lv)) {
if (lv_raid_mismatch_count(lv, &n) && n)
repstr[8] = 'm'; /* RAID has 'm'ismatches */
else if (lv_raid_sync_action(lv, &sync_action) &&
!strcmp(sync_action, "reshape"))
repstr[8] = 's'; /* LV is re(s)haping */
else if (_sublvs_remove_after_reshape(lv))
repstr[8] = 'R'; /* sub-LV got freed from raid set by reshaping
and has to be 'R'emoved */
} else if (lv->status & LV_WRITEMOSTLY)
repstr[8] = 'w'; /* sub-LV has 'w'ritemostly */
else if (lv->status & LV_REMOVE_AFTER_RESHAPE)
repstr[8] = 'R'; /* sub-LV got 'R'emoved from raid set by reshaping */
repstr[8] = 'R'; /* sub-LV got freed from raid set by reshaping
and has to be 'R'emoved */
} else if (lvdm->seg_status.type == SEG_STATUS_CACHE) {
if (lvdm->seg_status.cache->fail)
repstr[8] = 'F';

@@ -1284,7 +1284,6 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
|
||||
uint32_t areas = (seg->area_count / (seg_is_raid10(seg) ? seg->data_copies : 1)) - seg->segtype->parity_devs;
|
||||
|
||||
/* Caller must ensure exact divisibility */
|
||||
// if (!seg_is_raid10(seg) && (seg_is_striped(seg) || seg_is_striped_raid(seg))) {
|
||||
if (seg_is_striped(seg) || seg_is_striped_raid(seg)) {
|
||||
if (reduction % areas) {
|
||||
log_error("Segment extent reduction %" PRIu32
|
||||
@@ -1296,20 +1295,16 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
|
||||
} else
|
||||
area_reduction = reduction;
|
||||
|
||||
//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
|
||||
for (s = 0; s < seg->area_count; s++)
|
||||
if (!release_and_discard_lv_segment_area(seg, s, area_reduction))
|
||||
return_0;
|
||||
|
||||
//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
|
||||
seg->len -= reduction;
|
||||
//pprintf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
|
||||
|
||||
if (seg_is_raid(seg))
|
||||
seg->area_len = seg->len;
|
||||
else
|
||||
seg->area_len -= area_reduction;
|
||||
//printf("%s[%u] seg->lv=%s seg->len=%u seg->area_len=%u area_reduction=%u\n", __func__, __LINE__, seg->lv ? seg->lv->name : "?", seg->len, seg->area_len, area_reduction);
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -1333,7 +1328,6 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
|
||||
clear_snapshot_merge(lv);
|
||||
}
|
||||
|
||||
//printf("%s[%u] lv=%s is_raid10=%d le_count=%u extents=%u lv->size=%s seg->len=%u seg->area_len=%u seg->reshape_len=%u\n", __func__, __LINE__, lv->name, is_raid10, lv->le_count, extents, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711, seg->reshape_len);
|
||||
dm_list_iterate_back_items(seg, &lv->segments) {
|
||||
if (!count)
|
||||
break;
|
||||
@@ -1390,7 +1384,7 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
|
||||
}
|
||||
|
||||
seg = first_seg(lv);
|
||||
//printf("%s[%u] lv=%s le_count=%u extents=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, extents, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);
|
||||
|
||||
if (is_raid10) {
|
||||
lv->le_count -= extents * data_copies;
|
||||
if (seg)
|
||||
@@ -1399,11 +1393,11 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
|
||||
lv->le_count -= extents;
|
||||
|
||||
lv->size = (uint64_t) lv->le_count * lv->vg->extent_size;
|
||||
//printf("%s[%u] lv=%s le_count=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);
|
||||
if (seg)
|
||||
seg->extents_copied = seg->len;
|
||||
|
||||
if (!delete)
|
||||
return 1;
|
||||
//printf("%s[%u] lv=%s le_count=%u lv->size=%s seg->len=%u seg->area_len=%u\n", __func__, __LINE__, lv->name, lv->le_count, display_size(lv->vg->cmd, lv->size), seg ? seg->len : 4711, seg ? seg->area_len : 4711);
|
||||
|
||||
if (lv == lv->vg->pool_metadata_spare_lv) {
|
||||
lv->status &= ~POOL_METADATA_SPARE;
|
||||
@@ -1513,11 +1507,10 @@ int lv_reduce(struct logical_volume *lv, uint32_t extents)
|
||||
{
|
||||
struct lv_segment *seg = first_seg(lv);
|
||||
|
||||
/* Ensure stipe boundary extents on RAID LVs */
|
||||
/* Ensure stripe boundary extents on RAID LVs */
|
||||
if (lv_is_raid(lv) && extents != lv->le_count)
|
||||
extents =_round_to_stripe_boundary(lv->vg, extents,
|
||||
seg_is_raid1(seg) ? 0 : _raid_stripes_count(seg), 0);
|
||||
|
||||
return _lv_reduce(lv, extents, 1);
|
||||
}
|
||||
|
||||
@@ -1834,7 +1827,7 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
|
||||
dm_list_add(&lv->segments, &seg->list);
|
||||
|
||||
extents = aa[0].len * area_multiple;
|
||||
//printf("%s[%u] le_count=%u extents=%u\n", __func__, __LINE__, lv->le_count, extents);
|
||||
|
||||
if (!_setup_lv_size(lv, lv->le_count + extents))
|
||||
return_0;
|
||||
|
||||
@@ -3951,7 +3944,7 @@ bad:
|
||||
static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
struct logical_volume *lv,
|
||||
uint32_t extents, uint32_t first_area,
|
||||
uint32_t stripes, uint32_t stripe_size)
|
||||
uint32_t mirrors, uint32_t stripes, uint32_t stripe_size)
|
||||
{
|
||||
const struct segment_type *segtype;
|
||||
struct logical_volume *sub_lv, *meta_lv;
|
||||
@@ -3979,7 +3972,7 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
for (fa = first_area, s = 0; s < seg->area_count; s++) {
|
||||
if (is_temporary_mirror_layer(seg_lv(seg, s))) {
|
||||
if (!_lv_extend_layered_lv(ah, seg_lv(seg, s), extents / area_multiple,
|
||||
fa, stripes, stripe_size))
|
||||
fa, mirrors, stripes, stripe_size))
|
||||
return_0;
|
||||
fa += lv_mirror_count(seg_lv(seg, s));
|
||||
continue;
|
||||
@@ -3993,6 +3986,8 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
return 0;
|
||||
}
|
||||
|
||||
last_seg(lv)->data_copies = mirrors;
|
||||
|
||||
/* Extend metadata LVs only on initial creation */
|
||||
if (seg_is_raid_with_meta(seg) && !lv->le_count) {
|
||||
if (!seg->meta_areas) {
|
||||
@@ -4095,7 +4090,6 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
else
|
||||
seg->area_len += extents / area_multiple;
|
||||
|
||||
//pprintf("%s[%u] le_count=%u extents=%u seg->len=%u seg-area_len=%u\n", __func__, __LINE__, lv->le_count, extents, seg->len, seg->area_len);
|
||||
if (!_setup_lv_size(lv, lv->le_count + extents))
|
||||
return_0;
|
||||
|
||||
@@ -4201,7 +4195,7 @@ int lv_extend(struct logical_volume *lv,
|
||||
}
|
||||
|
||||
if (!(r = _lv_extend_layered_lv(ah, lv, new_extents - lv->le_count, 0,
|
||||
stripes, stripe_size)))
|
||||
mirrors, stripes, stripe_size)))
|
||||
goto_out;
|
||||
|
||||
/*
|
||||
@@ -5421,6 +5415,17 @@ int lv_resize(struct logical_volume *lv,
|
||||
if (!_lvresize_check(lv, lp))
|
||||
return_0;
|
||||
|
||||
if (seg->reshape_len) {
|
||||
/* Prevent resizing on out-of-sync reshapable raid */
|
||||
if (!lv_raid_in_sync(lv)) {
|
||||
log_error("Can't resize reshaping LV %s.", display_lvname(lv));
|
||||
return 0;
|
||||
}
|
||||
/* Remove any striped raid reshape space for LV resizing */
|
||||
if (!lv_raid_free_reshape_space(lv))
|
||||
return_0;
|
||||
}
|
||||
|
||||
if (lp->use_policies) {
|
||||
lp->extents = 0;
|
||||
lp->sign = SIGN_PLUS;
|
||||
@@ -5932,6 +5937,7 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
|
||||
int ask_discard;
|
||||
struct lv_list *lvl;
|
||||
struct seg_list *sl;
|
||||
struct lv_segment *seg = first_seg(lv);
|
||||
int is_last_pool = lv_is_pool(lv);
|
||||
|
||||
vg = lv->vg;
|
||||
@@ -6038,6 +6044,13 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
|
||||
is_last_pool = 1;
|
||||
}
|
||||
|
||||
/* Special case removing a striped raid LV with allocated reshape space */
|
||||
if (seg && seg->reshape_len) {
|
||||
if (!(seg->segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
|
||||
return_0;
|
||||
lv->le_count = seg->len = seg->area_len = seg_lv(seg, 0)->le_count * seg->area_count;
|
||||
}
|
||||
|
||||
/* Used cache pool, COW or historical LV cannot be activated */
|
||||
if ((!lv_is_cache_pool(lv) || dm_list_empty(&lv->segs_using_this_lv)) &&
|
||||
!lv_is_cow(lv) && !lv_is_historical(lv) &&
|
||||
@@ -6259,12 +6272,21 @@ int lv_remove_with_dependencies(struct cmd_context *cmd, struct logical_volume *
|
||||
/* Remove snapshot LVs first */
|
||||
if ((force == PROMPT) &&
|
||||
/* Active snapshot already needs to confirm each active LV */
|
||||
!lv_is_active(lv) &&
|
||||
yes_no_prompt("Removing origin %s will also remove %u "
|
||||
"snapshots(s). Proceed? [y/n]: ",
|
||||
lv->name, lv->origin_count) == 'n')
|
||||
(yes_no_prompt("Do you really want to remove%s "
|
||||
"%sorigin logical volume %s with %u snapshot(s)? [y/n]: ",
|
||||
lv_is_active(lv) ? " active" : "",
|
||||
vg_is_clustered(lv->vg) ? "clustered " : "",
|
||||
display_lvname(lv),
|
||||
lv->origin_count) == 'n'))
|
||||
goto no_remove;
|
||||
|
||||
if (!deactivate_lv(cmd, lv)) {
|
||||
stack;
|
||||
goto no_remove;
|
||||
}
|
||||
log_verbose("Removing origin logical volume %s with %u snapshots(s).",
|
||||
display_lvname(lv), lv->origin_count);
|
||||
|
||||
dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
|
||||
if (!lv_remove_with_dependencies(cmd, dm_list_struct_base(snh, struct lv_segment,
|
||||
origin_list)->cow,
|
||||
|
||||
@@ -498,7 +498,6 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
inc_error_count;
}

data_rimage_count = seg->area_count - seg->segtype->parity_devs;
/* FIXME: raid varies seg->area_len? */
if (seg->len != seg->area_len &&

@@ -1212,7 +1212,8 @@ struct logical_volume *first_replicator_dev(const struct logical_volume *lv);
|
||||
int lv_is_raid_with_tracking(const struct logical_volume *lv);
|
||||
uint32_t lv_raid_image_count(const struct logical_volume *lv);
|
||||
int lv_raid_change_image_count(struct logical_volume *lv,
|
||||
uint32_t new_count, const uint32_t region_size,
|
||||
uint32_t new_count,
|
||||
uint32_t new_region_size,
|
||||
struct dm_list *allocate_pvs);
|
||||
int lv_raid_split(struct logical_volume *lv, const char *split_name,
|
||||
uint32_t new_count, struct dm_list *splittable_pvs);
|
||||
@@ -1240,8 +1241,9 @@ uint32_t raid_rimage_extents(const struct segment_type *segtype,
|
||||
uint32_t raid_ensure_min_region_size(const struct logical_volume *lv, uint64_t raid_size, uint32_t region_size);
|
||||
int lv_raid_change_region_size(struct logical_volume *lv,
|
||||
int yes, int force, uint32_t new_region_size);
|
||||
uint32_t lv_raid_data_copies(const struct segment_type *segtype, uint32_t area_count);
|
||||
int lv_raid_in_sync(const struct logical_volume *lv);
|
||||
uint32_t lv_raid_data_copies(const struct segment_type *segtype, uint32_t area_count);
|
||||
int lv_raid_free_reshape_space(const struct logical_volume *lv);
|
||||
/* -- metadata/raid_manip.c */
|
||||
|
||||
/* ++ metadata/cache_manip.c */
|
||||
|
||||
File diff suppressed because it is too large
@@ -286,7 +286,7 @@ struct segment_type *init_unknown_segtype(struct cmd_context *cmd,
#define RAID_FEATURE_RAID0 (1U << 1) /* version 1.7 */
#define RAID_FEATURE_RESHAPING (1U << 2) /* version 1.8 */
#define RAID_FEATURE_RAID4 (1U << 3) /* ! version 1.8 or 1.9.0 */
#define RAID_FEATURE_RESHAPE (1U << 4) /* version 1.10.2 */
#define RAID_FEATURE_RESHAPE (1U << 4) /* version 1.10.1 */

#ifdef RAID_INTERNAL
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);

@@ -58,13 +58,13 @@
|
||||
#define r1__r0m _takeover_from_raid1_to_raid0_meta
|
||||
#define r1__r1 _takeover_from_raid1_to_raid1
|
||||
#define r1__r10 _takeover_from_raid1_to_raid10
|
||||
#define r1__r45 _takeover_from_raid1_to_raid45
|
||||
#define r1__r5 _takeover_from_raid1_to_raid5
|
||||
#define r1__str _takeover_from_raid1_to_striped
|
||||
#define r45_lin _takeover_from_raid45_to_linear
|
||||
#define r45_mir _takeover_from_raid45_to_mirrored
|
||||
#define r45_r0 _takeover_from_raid45_to_raid0
|
||||
#define r45_r0m _takeover_from_raid45_to_raid0_meta
|
||||
#define r45_r1 _takeover_from_raid45_to_raid1
|
||||
#define r5_r1 _takeover_from_raid5_to_raid1
|
||||
#define r45_r54 _takeover_from_raid45_to_raid54
|
||||
#define r45_r6 _takeover_from_raid45_to_raid6
|
||||
#define r45_str _takeover_from_raid45_to_striped
|
||||
@@ -109,8 +109,8 @@ static takeover_fn_t _takeover_fns[][11] = {
|
||||
/* mirror */ { X , X , N , mir_r0, mir_r0m, mir_r1, mir_r45, X , mir_r10, X , X },
|
||||
/* raid0 */ { r0__lin, r0__str, r0__mir, N , r0__r0m, r0__r1, r0__r45, r0__r6, r0__r10, X , X },
|
||||
/* raid0_meta */ { r0m_lin, r0m_str, r0m_mir, r0m_r0, N , r0m_r1, r0m_r45, r0m_r6, r0m_r10, X , X },
|
||||
/* raid1 */ { r1__lin, r1__str, r1__mir, r1__r0, r1__r0m, r1__r1, r1__r45, X , r1__r10, X , X },
|
||||
/* raid4/5 */ { r45_lin, r45_str, r45_mir, r45_r0, r45_r0m, r45_r1, r45_r54, r45_r6, X , X , X },
|
||||
/* raid1 */ { r1__lin, r1__str, r1__mir, r1__r0, r1__r0m, r1__r1, r1__r5, X , r1__r10, X , X },
|
||||
/* raid4/5 */ { r45_lin, r45_str, r45_mir, r45_r0, r45_r0m, r5_r1 , r45_r54, r45_r6, X , X , X },
|
||||
/* raid6 */ { X , r6__str, X , r6__r0, r6__r0m, X , r6__r45, X , X , X , X },
|
||||
/* raid10 */ { r10_lin, r10_str, r10_mir, r10_r0, r10_r0m, r10_r1, X , X , X , X , X },
|
||||
/* raid01 */ // { X , r01_str, X , X , X , X , X , X , r01_r10, r01_r01, X },
|
||||
|
||||
@@ -240,9 +240,9 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
|
||||
int delta_disks = 0, delta_disks_minus = 0, delta_disks_plus = 0, data_offset = 0;
|
||||
uint32_t s;
|
||||
uint64_t flags = 0;
|
||||
uint64_t rebuilds[4];
|
||||
uint64_t writemostly[4];
|
||||
struct dm_tree_node_raid_params params;
|
||||
uint64_t rebuilds[RAID_BITMAP_SIZE];
|
||||
uint64_t writemostly[RAID_BITMAP_SIZE];
|
||||
struct dm_tree_node_raid_params_v2 params;
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
memset(&rebuilds, 0, sizeof(rebuilds));
|
||||
@@ -333,7 +333,7 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
|
||||
params.stripe_size = seg->stripe_size;
|
||||
params.flags = flags;
|
||||
|
||||
if (!dm_tree_node_add_raid_target_with_params(node, len, ¶ms))
|
||||
if (!dm_tree_node_add_raid_target_with_params_v2(node, len, ¶ms))
|
||||
return_0;
|
||||
|
||||
return add_areas_line(dm, seg, node, 0u, seg->area_count);
|
||||
@@ -448,19 +448,32 @@ out:
return r;
}

/* Define raid feature based on the tuple(major, minor, patchlevel) of raid target */
struct raid_feature {
uint32_t maj;
uint32_t min;
uint32_t patchlevel;
unsigned raid_feature;
const char *feature;
};

/* Return true if tuple(@maj, @min, @patchlevel) is greater/equal to @*feature members */
static int _check_feature(const struct raid_feature *feature, uint32_t maj, uint32_t min, uint32_t patchlevel)
{
return (maj > feature->maj) ||
(maj == feature->maj && min >= feature->min) ||
(maj == feature->maj && min == feature->min && patchlevel >= feature->patchlevel);
}

static int _raid_target_present(struct cmd_context *cmd,
const struct lv_segment *seg __attribute__((unused)),
unsigned *attributes)
{
/* List of features with their kernel target version */
static const struct feature {
uint32_t maj;
uint32_t min;
unsigned raid_feature;
const char *feature;
} _features[] = {
{ 1, 3, RAID_FEATURE_RAID10, SEG_TYPE_NAME_RAID10 },
{ 1, 7, RAID_FEATURE_RAID0, SEG_TYPE_NAME_RAID0 },
const struct raid_feature _features[] = {
{ 1, 3, 0, RAID_FEATURE_RAID10, SEG_TYPE_NAME_RAID10 },
{ 1, 7, 0, RAID_FEATURE_RAID0, SEG_TYPE_NAME_RAID0 },
{ 1, 10, 1, RAID_FEATURE_RESHAPE, "reshaping" },
};

static int _raid_checked = 0;
@@ -482,22 +495,24 @@ static int _raid_target_present(struct cmd_context *cmd,
return_0;

for (i = 0; i < DM_ARRAY_SIZE(_features); ++i)
if ((maj > _features[i].maj) ||
(maj == _features[i].maj && min >= _features[i].min))
if (_check_feature(_features + i, maj, min, patchlevel))
_raid_attrs |= _features[i].raid_feature;
else
log_very_verbose("Target raid does not support %s.",
_features[i].feature);

/*
* Seperate check for proper raid4 mapping supported
*
* If we get more of these range checks, avoid them
* altogether by enhancing 'struct raid_feature'
* and _check_feature() to handle them.
*/
if (!(maj == 1 && (min == 8 || (min == 9 && patchlevel == 0))))
_raid_attrs |= RAID_FEATURE_RAID4;
else
log_very_verbose("Target raid does not support %s.",
SEG_TYPE_NAME_RAID4);

if (maj > 1 ||
(maj == 1 && (min > 10 || (min == 10 && patchlevel >= 2))))
_raid_attrs |= RAID_FEATURE_RESHAPE;
}

if (attributes)
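For readers comparing the two hunks: the newer code derives the attribute flags from a table of (major, minor, patchlevel, flag) tuples instead of open-coded version checks. Below is a self-contained sketch of that mechanism; struct raid_feature, _check_feature() and the version/flag triples are copied from the hunk, while main() and the flag values used here are illustrative only (the real RAID_FEATURE_* constants live in segtype.h, and the RAID_FEATURE_RAID10 value is an assumed placeholder):

```c
#include <stdio.h>
#include <stdint.h>

#define RAID_FEATURE_RAID10  (1U << 0)	/* placeholder value for this sketch */
#define RAID_FEATURE_RAID0   (1U << 1)	/* as in segtype.h */
#define RAID_FEATURE_RESHAPE (1U << 4)	/* as in segtype.h */

struct raid_feature {
	uint32_t maj, min, patchlevel;
	unsigned raid_feature;
	const char *feature;
};

/* Same comparison as _check_feature() in the hunk above. */
static int check_feature(const struct raid_feature *f,
			 uint32_t maj, uint32_t min, uint32_t patchlevel)
{
	return (maj > f->maj) ||
	       (maj == f->maj && min >= f->min) ||
	       (maj == f->maj && min == f->min && patchlevel >= f->patchlevel);
}

int main(void)
{
	/* Version/flag tuples from the _features[] table in the hunk. */
	const struct raid_feature features[] = {
		{ 1,  3, 0, RAID_FEATURE_RAID10,  "raid10" },
		{ 1,  7, 0, RAID_FEATURE_RAID0,   "raid0" },
		{ 1, 10, 1, RAID_FEATURE_RESHAPE, "reshaping" },
	};
	uint32_t maj = 1, min = 10, patchlevel = 1;	/* example dm-raid target version */
	unsigned attrs = 0;
	size_t i;

	for (i = 0; i < sizeof(features) / sizeof(features[0]); i++)
		if (check_feature(&features[i], maj, min, patchlevel))
			attrs |= features[i].raid_feature;
		else
			printf("target does not support %s\n", features[i].feature);

	printf("attrs = 0x%x\n", attrs);
	return 0;
}
```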
@@ -69,7 +69,7 @@ FIELD(LVS, lv, BIN, "ActExcl", lvid, 10, lvactiveexclusively, lv_active_exclusiv
|
||||
FIELD(LVS, lv, SNUM, "Maj", major, 0, int32, lv_major, "Persistent major number or -1 if not persistent.", 0)
|
||||
FIELD(LVS, lv, SNUM, "Min", minor, 0, int32, lv_minor, "Persistent minor number or -1 if not persistent.", 0)
|
||||
FIELD(LVS, lv, SIZ, "Rahead", lvid, 0, lvreadahead, lv_read_ahead, "Read ahead setting in current units.", 0)
|
||||
FIELD(LVS, lv, SIZ, "LSize", size, 0, size64, lv_size, "Size of LV in current units.", 0)
|
||||
FIELD(LVS, lv, SIZ, "LSize", lvid, 0, lv_size, lv_size, "Size of LV in current units.", 0)
|
||||
FIELD(LVS, lv, SIZ, "MSize", lvid, 0, lvmetadatasize, lv_metadata_size, "For thin and cache pools, the size of the LV that holds the metadata.", 0)
|
||||
FIELD(LVS, lv, NUM, "#Seg", lvid, 0, lvsegcount, seg_count, "Number of segments in LV.", 0)
|
||||
FIELD(LVS, lv, STR, "Origin", lvid, 0, origin, origin, "For snapshots and thins, the origin device of this LV.", 0)
|
||||
@@ -241,9 +241,16 @@ FIELD(VGS, vg, NUM, "#VMdaCps", cmd, 0, vgmdacopies, vg_mda_copies, "Target numb
|
||||
* SEGS type fields
|
||||
*/
|
||||
FIELD(SEGS, seg, STR, "Type", list, 0, segtype, segtype, "Type of LV segment.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Str", area_count, 0, uint32, stripes, "Number of stripes or mirror legs.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Str", list, 0, seg_stripes, stripes, "Number of stripes or mirror/raid1 legs.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#DStr", list, 0, seg_data_stripes, data_stripes, "Number of data stripes or mirror/raid1 legs.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "RSize", list, 0, seg_reshape_len, reshape_len, "Size of out-of-place reshape space in current units.", 0)
|
||||
FIELD(SEGS, seg, NUM, "RSize", list, 0, seg_reshape_len_le, reshape_len_le, "Size of out-of-place reshape space in logical extents.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Cpy", list, 0, seg_data_copies, data_copies, "Number of data copies.", 0)
|
||||
FIELD(SEGS, seg, NUM, "DOff", list, 0, seg_data_offset, data_offset, "Data offset on each image device.", 0)
|
||||
FIELD(SEGS, seg, NUM, "NOff", list, 0, seg_new_data_offset, new_data_offset, "New data offset after any reshape on each image device.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Par", list, 0, seg_parity_chunks, parity_chunks, "Number of (rotating) parity chunks.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "Stripe", stripe_size, 0, size32, stripe_size, "For stripes, amount of data placed on one device before switching to the next.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "Region", region_size, 0, size32, region_size, "For mirrors, the unit of data copied when synchronising devices.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "Region", region_size, 0, size32, region_size, "For mirrors/raids, the unit of data per leg when synchronizing devices.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "Chunk", list, 0, chunksize, chunk_size, "For snapshots, the unit of data used when tracking changes.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Thins", list, 0, thincount, thin_count, "For thin pools, the number of thin volumes in this pool.", 0)
|
||||
FIELD(SEGS, seg, STR, "Discards", list, 0, discards, discards, "For thin pools, how discards are handled.", 0)
|
||||
@@ -276,4 +283,26 @@ FIELD(PVSEGS, pvseg, NUM, "SSize", len, 0, uint32, pvseg_size, "Number of extent
|
||||
/*
|
||||
* End of PVSEGS type fields
|
||||
*/
|
||||
|
||||
/*
|
||||
* MOUNTINFO type fields
|
||||
*/
|
||||
FIELD(MOUNTINFO, mountinfo, STR, "Mounted on", mountpoint, 0, string, mount_point, "Mount point of filesystem on device.", 0)
|
||||
/*
|
||||
* End of MOUNTINFO type fields
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* FSINFO type fields
|
||||
*/
|
||||
FIELD(FSINFO, fsinfo, SIZ, "FSUsed", fs_used, 0, size64, fs_used, "Space used in mounted filesystem on device.", 0)
|
||||
FIELD(FSINFO, fsinfo, SIZ, "FSSize", fs_size, 0, size64, fs_size, "Size of mounted filesystem on device.", 0)
|
||||
FIELD(FSINFO, fsinfo, SIZ, "FSFree", fs_free, 0, size64, fs_free, "Free space in mounted filesystem on device.", 0)
|
||||
FIELD(FSINFO, fsinfo, SIZ, "FSAvail", fs_avail, 0, size64, fs_avail, "Available space in mounted filesystem on device.", 0)
|
||||
/*
|
||||
* End of FSINFO type fields
|
||||
*/
|
||||
|
||||
|
||||
/* *INDENT-ON* */
|
||||
|
||||
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2010-2013 Red Hat, Inc. All rights reserved.
* Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -446,8 +446,22 @@ GET_VG_NUM_PROPERTY_FN(vg_missing_pv_count, vg_missing_pv_count(vg))
|
||||
/* LVSEG */
|
||||
GET_LVSEG_STR_PROPERTY_FN(segtype, lvseg_segtype_dup(lvseg->lv->vg->vgmem, lvseg))
|
||||
#define _segtype_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(data_copies, lvseg->data_copies)
|
||||
#define _data_copies_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(reshape_len, lvseg->reshape_len)
|
||||
#define _reshape_len_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(reshape_len_le, lvseg->reshape_len)
|
||||
#define _reshape_len_le_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(data_offset, lvseg->data_offset)
|
||||
#define _data_offset_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(new_data_offset, lvseg->data_offset)
|
||||
#define _new_data_offset_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(parity_chunks, lvseg->data_offset)
|
||||
#define _parity_chunks_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(stripes, lvseg->area_count)
|
||||
#define _stripes_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(data_stripes, lvseg->area_count)
|
||||
#define _data_stripes_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(stripe_size, (SECTOR_SIZE * lvseg->stripe_size))
|
||||
#define _stripe_size_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(region_size, (SECTOR_SIZE * lvseg->region_size))
|
||||
@@ -506,6 +520,16 @@ GET_PVSEG_NUM_PROPERTY_FN(pvseg_start, pvseg->pe)
|
||||
GET_PVSEG_NUM_PROPERTY_FN(pvseg_size, (SECTOR_SIZE * pvseg->len))
|
||||
#define _pvseg_size_set prop_not_implemented_set
|
||||
|
||||
#define _mount_point_get prop_not_implemented_get
|
||||
#define _mount_point_set prop_not_implemented_set
|
||||
#define _fs_used_get prop_not_implemented_get
|
||||
#define _fs_used_set prop_not_implemented_set
|
||||
#define _fs_size_get prop_not_implemented_get
|
||||
#define _fs_size_set prop_not_implemented_set
|
||||
#define _fs_free_get prop_not_implemented_get
|
||||
#define _fs_free_set prop_not_implemented_set
|
||||
#define _fs_avail_get prop_not_implemented_get
|
||||
#define _fs_avail_set prop_not_implemented_set
|
||||
|
||||
struct lvm_property_type _properties[] = {
|
||||
#include "columns.h"
|
||||
|
||||
@@ -36,6 +36,8 @@ struct lvm_report_object {
|
||||
struct lv_segment *seg;
|
||||
struct pv_segment *pvseg;
|
||||
struct label *label;
|
||||
struct lvm_mountinfo *mountinfo;
|
||||
struct lvm_fsinfo *fsinfo;
|
||||
};
|
||||
|
||||
static uint32_t log_seqnum = 1;
|
||||
@@ -2296,6 +2298,22 @@ static int _size64_disp(struct dm_report *rh __attribute__((unused)),
|
||||
return _field_set_value(field, repstr, sortval);
|
||||
}
|
||||
|
||||
static int _lv_size_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct logical_volume *lv = (const struct logical_volume *) data;
|
||||
const struct lv_segment *seg = first_seg(lv);
|
||||
uint64_t size = lv->le_count;
|
||||
|
||||
if (!lv_is_raid_image(lv))
|
||||
size -= seg->reshape_len * (seg->area_count > 2 ? seg->area_count : 1);
|
||||
|
||||
size *= lv->vg->extent_size;
|
||||
|
||||
return _size64_disp(rh, mem, field, &size, private);
|
||||
}
|
||||
|
||||
static int _uint32_disp(struct dm_report *rh, struct dm_pool *mem __attribute__((unused)),
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private __attribute__((unused)))
|
||||
@@ -2412,6 +2430,197 @@ static int _segstartpe_disp(struct dm_report *rh,
|
||||
return dm_report_field_uint32(rh, field, &seg->le);
|
||||
}
|
||||
|
||||
/* Hepler: get used stripes = total stripes minux any to remove after reshape */
|
||||
static int _get_seg_used_stripes(const struct lv_segment *seg)
|
||||
{
|
||||
uint32_t s;
|
||||
uint32_t stripes = seg->area_count;
|
||||
|
||||
for (s = seg->area_count - 1; stripes && s; s--) {
|
||||
if (seg_type(seg, s) == AREA_LV &&
|
||||
seg_lv(seg, s)->status & LV_REMOVE_AFTER_RESHAPE)
|
||||
stripes--;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
return stripes;
|
||||
}
|
||||
|
||||
static int _seg_stripes_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = ((const struct lv_segment *) data);
|
||||
|
||||
return dm_report_field_uint32(rh, field, &seg->area_count);
|
||||
}
|
||||
|
||||
/* Report the number of data stripes, which is less than total stripes (e.g. 2 less for raid6) */
|
||||
static int _seg_data_stripes_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = (const struct lv_segment *) data;
|
||||
uint32_t stripes = _get_seg_used_stripes(seg) - seg->segtype->parity_devs;
|
||||
|
||||
/* FIXME: in case of odd numbers of raid10 stripes */
|
||||
if (seg_is_raid10(seg))
|
||||
stripes /= seg->data_copies;
|
||||
|
||||
return dm_report_field_uint32(rh, field, &stripes);
|
||||
}
|
||||
|
||||
/* Helper: return the top-level, reshapable raid LV in case @seg belongs to an raid rimage LV */
|
||||
static struct logical_volume *_lv_for_raid_image_seg(const struct lv_segment *seg, struct dm_pool *mem)
|
||||
{
|
||||
char *lv_name;
|
||||
|
||||
if (seg_is_reshapable_raid(seg))
|
||||
return seg->lv;
|
||||
|
||||
if (seg->lv &&
|
||||
lv_is_raid_image(seg->lv) && !seg->le &&
|
||||
(lv_name = dm_pool_strdup(mem, seg->lv->name))) {
|
||||
char *p = strchr(lv_name, '_');
|
||||
|
||||
if (p) {
|
||||
/* Handle duplicated sub LVs */
|
||||
if (strstr(p, "_dup_"))
|
||||
p = strchr(p + 5, '_');
|
||||
|
||||
if (p) {
|
||||
struct lv_list *lvl;
|
||||
|
||||
*p = '\0';
|
||||
if ((lvl = find_lv_in_vg(seg->lv->vg, lv_name)) &&
|
||||
seg_is_reshapable_raid(first_seg(lvl->lv)))
|
||||
return lvl->lv;
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Helper: return the top-level raid LV in case it is reshapale for @seg or @seg if it is */
|
||||
static const struct lv_segment *_get_reshapable_seg(const struct lv_segment *seg, struct dm_pool *mem)
|
||||
{
|
||||
return _lv_for_raid_image_seg(seg, mem) ? seg : NULL;
|
||||
}
|
||||
|
||||
/* Display segment reshape length in current units */
|
||||
static int _seg_reshape_len_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = _get_reshapable_seg((const struct lv_segment *) data, mem);
|
||||
|
||||
if (seg) {
|
||||
uint32_t reshape_len = seg->reshape_len * seg->area_count * seg->lv->vg->extent_size;
|
||||
|
||||
return _size32_disp(rh, mem, field, &reshape_len, private);
|
||||
}
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
/* Display segment reshape length of in logical extents */
|
||||
static int _seg_reshape_len_le_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = _get_reshapable_seg((const struct lv_segment *) data, mem);
|
||||
|
||||
if (seg) {
|
||||
uint32_t reshape_len = seg->reshape_len* seg->area_count;
|
||||
|
||||
return dm_report_field_uint32(rh, field, &reshape_len);
|
||||
}
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
/* Display segment data copies (e.g. 3 for raid6) */
|
||||
static int _seg_data_copies_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = (const struct lv_segment *) data;
|
||||
|
||||
if (seg->data_copies)
|
||||
return dm_report_field_uint32(rh, field, &seg->data_copies);
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
/* Helper: display segment data offset/new data offset in sectors */
|
||||
static int _segdata_offset(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private, int new_data_offset)
|
||||
{
|
||||
const struct lv_segment *seg = (const struct lv_segment *) data;
|
||||
struct logical_volume *lv;
|
||||
|
||||
if ((lv = _lv_for_raid_image_seg(seg, mem))) {
|
||||
uint64_t data_offset;
|
||||
|
||||
if (lv_raid_data_offset(lv, &data_offset)) {
|
||||
if (new_data_offset && !lv_raid_image_in_sync(seg->lv))
|
||||
data_offset = data_offset ? 0 : seg->reshape_len * lv->vg->extent_size;
|
||||
|
||||
return dm_report_field_uint64(rh, field, &data_offset);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
static int _seg_data_offset_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
return _segdata_offset(rh, mem, field, data, private, 0);
|
||||
}
|
||||
|
||||
static int _seg_new_data_offset_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
return _segdata_offset(rh, mem, field, data, private, 1);
|
||||
}
|
||||
|
||||
static int _seg_parity_chunks_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = (const struct lv_segment *) data;
|
||||
uint32_t parity_chunks = seg->segtype->parity_devs ?: seg->data_copies - 1;
|
||||
|
||||
if (parity_chunks) {
|
||||
uint32_t s, resilient_sub_lvs = 0;
|
||||
|
||||
for (s = 0; s < seg->area_count; s++) {
|
||||
if (seg_type(seg, s) == AREA_LV) {
|
||||
struct lv_segment *seg1 = first_seg(seg_lv(seg, s));
|
||||
|
||||
if (seg1->segtype->parity_devs ||
|
||||
seg1->data_copies > 1)
|
||||
resilient_sub_lvs++;
|
||||
}
|
||||
}
|
||||
|
||||
if (resilient_sub_lvs && resilient_sub_lvs == seg->area_count)
|
||||
parity_chunks++;
|
||||
|
||||
return dm_report_field_uint32(rh, field, &parity_chunks);
|
||||
}
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
static int _segsize_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
@@ -3584,6 +3793,13 @@ static struct volume_group _unknown_vg = {
|
||||
.tags = DM_LIST_HEAD_INIT(_unknown_vg.tags),
|
||||
};
|
||||
|
||||
static struct lvm_mountinfo _unknown_mountinfo = {
|
||||
.mountpoint = ""
|
||||
};
|
||||
|
||||
static struct lvm_fsinfo _unknown_fsinfo = {
|
||||
};
|
||||
|
||||
static void *_obj_get_vg(void *obj)
|
||||
{
|
||||
struct volume_group *vg = ((struct lvm_report_object *)obj)->vg;
|
||||
@@ -3621,6 +3837,16 @@ static void *_obj_get_pvseg(void *obj)
|
||||
return ((struct lvm_report_object *)obj)->pvseg;
|
||||
}
|
||||
|
||||
static void *_obj_get_mountinfo(void *obj)
|
||||
{
|
||||
return ((struct lvm_report_object *)obj)->mountinfo;
|
||||
}
|
||||
|
||||
static void *_obj_get_fsinfo(void *obj)
|
||||
{
|
||||
return ((struct lvm_report_object *)obj)->fsinfo;
|
||||
}
|
||||
|
||||
static void *_obj_get_devtypes(void *obj)
|
||||
{
|
||||
return obj;
|
||||
@@ -3646,6 +3872,8 @@ static const struct dm_report_object_type _report_types[] = {
|
||||
{ LABEL, "Physical Volume Label", "pv_", _obj_get_label },
|
||||
{ SEGS, "Logical Volume Segment", "seg_", _obj_get_seg },
|
||||
{ PVSEGS, "Physical Volume Segment", "pvseg_", _obj_get_pvseg },
|
||||
{ MOUNTINFO, "Mount Point", "mount_", _obj_get_mountinfo },
|
||||
{ FSINFO, "Filesystem", "fs_", _obj_get_fsinfo },
|
||||
{ 0, "", "", NULL },
|
||||
};
|
||||
|
||||
@@ -3678,6 +3906,8 @@ typedef struct volume_group type_vg;
|
||||
typedef struct lv_segment type_seg;
|
||||
typedef struct pv_segment type_pvseg;
|
||||
typedef struct label type_label;
|
||||
typedef struct lvm_mountinfo type_mountinfo;
|
||||
typedef struct lvm_fsinfo type_fsinfo;
|
||||
|
||||
typedef dev_known_type_t type_devtype;
|
||||
|
||||
@@ -3805,7 +4035,8 @@ int report_object(void *handle, int selection_only, const struct volume_group *v
|
||||
const struct logical_volume *lv, const struct physical_volume *pv,
|
||||
const struct lv_segment *seg, const struct pv_segment *pvseg,
|
||||
const struct lv_with_info_and_seg_status *lvdm,
|
||||
const struct label *label)
|
||||
const struct label *label,
|
||||
const struct lvm_mountinfo *mountinfo, const struct lvm_fsinfo *fsinfo)
|
||||
{
|
||||
struct selection_handle *sh = selection_only ? (struct selection_handle *) handle : NULL;
|
||||
struct device dummy_device = { .dev = 0 };
|
||||
@@ -3816,7 +4047,9 @@ int report_object(void *handle, int selection_only, const struct volume_group *v
|
||||
.pv = (struct physical_volume *) pv,
|
||||
.seg = (struct lv_segment *) seg,
|
||||
.pvseg = (struct pv_segment *) pvseg,
|
||||
.label = (struct label *) (label ? : (pv ? pv_label(pv) : NULL))
|
||||
.label = (struct label *) (label ? : (pv ? pv_label(pv) : NULL)),
|
||||
.mountinfo = (struct lvm_mountinfo *) mountinfo ? : &_unknown_mountinfo,
|
||||
.fsinfo = (struct lvm_fsinfo *) fsinfo ? : &_unknown_fsinfo,
|
||||
};
|
||||
|
||||
/* FIXME workaround for pv_label going through cache; remove once struct
|
||||
|
||||
@@ -32,9 +32,22 @@ typedef enum {
|
||||
SEGS = 256,
|
||||
PVSEGS = 512,
|
||||
LABEL = 1024,
|
||||
DEVTYPES = 2048
|
||||
DEVTYPES = 2048,
|
||||
MOUNTINFO = 4096,
|
||||
FSINFO = 8192
|
||||
} report_type_t;
|
||||
|
||||
struct lvm_mountinfo {
|
||||
const char *mountpoint;
|
||||
};
|
||||
|
||||
struct lvm_fsinfo {
|
||||
uint64_t fs_used;
|
||||
uint64_t fs_size;
|
||||
uint64_t fs_free;
|
||||
uint64_t fs_avail;
|
||||
};
|
||||
|
||||
/*
|
||||
* The "struct selection_handle" is used only for selection
|
||||
* of items that should be processed further (not for display!).
|
||||
@@ -104,7 +117,8 @@ int report_object(void *handle, int selection_only, const struct volume_group *v
|
||||
const struct logical_volume *lv, const struct physical_volume *pv,
|
||||
const struct lv_segment *seg, const struct pv_segment *pvseg,
|
||||
const struct lv_with_info_and_seg_status *lvdm,
|
||||
const struct label *label);
|
||||
const struct label *label,
|
||||
const struct lvm_mountinfo *mountinfo, const struct lvm_fsinfo *fsinfo);
|
||||
int report_devtypes(void *handle);
|
||||
int report_cmdlog(void *handle, const char *type, const char *context,
|
||||
const char *object_type_name, const char *object_name,
|
||||
|
||||
@@ -3,3 +3,4 @@ dm_bit_get_prev
dm_stats_update_regions_from_fd
dm_bitset_parse_list
dm_stats_bind_from_fd
dm_tree_node_add_raid_target_with_params_v2

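dm_tree_node_add_raid_target_with_params_v2 is the symbol being exported above; its parameter struct is declared further down in libdevmapper.h. A minimal, hypothetical caller is sketched below — the wrapper name and every value are illustrative, and the bit layout of the rebuilds bitmap (bit i stored in 64-bit word i/64, across RAID_BITMAP_SIZE words) is an assumption based on the comments in that header:

```c
#include <stdint.h>
#include <string.h>
#include <libdevmapper.h>

/* Hypothetical wrapper: load one raid5 segment into a dm tree node using the
 * v2 parameter struct shown later in this page.  Sizes are in 512-byte sectors. */
static int load_raid5_segment(struct dm_tree_node *node, uint64_t size_sectors)
{
	struct dm_tree_node_raid_params_v2 p;
	unsigned rebuild_idx = 2;		/* e.g. the third image needs a rebuild */

	memset(&p, 0, sizeof(p));
	p.raid_type = "raid5_ls";		/* illustrative target name */
	p.stripes = 3;
	p.stripe_size = 128;			/* sectors per stripe chunk */
	p.region_size = 2048;			/* sectors per sync region */
	p.data_copies = 1;
	p.delta_disks = 0;			/* no reshape requested in this call */
	p.data_offset = 0;

	/* One bit per image, spread over RAID_BITMAP_SIZE 64-bit words
	 * (assumed layout: bit i of word i/64). */
	p.rebuilds[rebuild_idx / 64] |= UINT64_C(1) << (rebuild_idx % 64);

	return dm_tree_node_add_raid_target_with_params_v2(node, size_sectors, &p);
}
```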
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2006 Rackable Systems All rights reserved.
|
||||
*
|
||||
* This file is part of the device-mapper userspace tools.
|
||||
@@ -1720,7 +1720,7 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
const char *raid_type,
|
||||
uint32_t region_size,
|
||||
uint32_t stripe_size,
|
||||
uint64_t *rebuilds,
|
||||
uint64_t rebuilds,
|
||||
uint64_t flags);
|
||||
|
||||
/*
|
||||
@@ -1739,6 +1739,11 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
*/
|
||||
#define DM_CACHE_METADATA_MAX_SECTORS DM_THIN_METADATA_MAX_SECTORS
|
||||
|
||||
/*
|
||||
* Define number of elements in rebuild and writemostly arrays
|
||||
* 'of struct dm_tree_node_raid_params'.
|
||||
*/
|
||||
|
||||
struct dm_tree_node_raid_params {
|
||||
const char *raid_type;
|
||||
|
||||
@@ -1747,32 +1752,73 @@ struct dm_tree_node_raid_params {
|
||||
uint32_t region_size;
|
||||
uint32_t stripe_size;
|
||||
|
||||
/*
|
||||
* 'rebuilds' and 'writemostly' are bitfields that signify
|
||||
* which devices in the array are to be rebuilt or marked
|
||||
* writemostly. The kernel supports up to 253 legs.
|
||||
* We limit ourselves by choosing a lower value
|
||||
* for DEFAULT_RAID{1}_MAX_IMAGES in defaults.h.
|
||||
*/
|
||||
uint64_t rebuilds;
|
||||
uint64_t writemostly;
|
||||
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
|
||||
uint32_t sync_daemon_sleep; /* ms (kernel default = 5sec) */
|
||||
uint32_t max_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t stripe_cache; /* sectors */
|
||||
|
||||
uint64_t flags; /* [no]sync */
|
||||
uint32_t reserved2;
|
||||
};
|
||||
|
||||
/*
|
||||
* Version 2 of above node raid params struct to keeep API compatibility.
|
||||
*
|
||||
* Extended for more than 64 legs (max 253 in the MD kernel runtime!),
|
||||
* delta_disks for disk add/remove reshaping,
|
||||
* data_offset for out-of-place reshaping
|
||||
* and data_copies for odd number of raid10 legs.
|
||||
*/
|
||||
#define RAID_BITMAP_SIZE 4 /* 4 * 64 bit elements in rebuilds/writemostly arrays */
|
||||
struct dm_tree_node_raid_params_v2 {
|
||||
const char *raid_type;
|
||||
|
||||
uint32_t stripes;
|
||||
uint32_t mirrors;
|
||||
uint32_t region_size;
|
||||
uint32_t stripe_size;
|
||||
|
||||
int delta_disks; /* +/- number of disks to add/remove (reshaping) */
|
||||
int data_offset; /* data offset to set (out-of-place reshaping) */
|
||||
|
||||
/*
|
||||
* 'rebuilds' and 'writemostly' are bitfields that signify
|
||||
* which devices in the array are to be rebuilt or marked
|
||||
* writemostly. By choosing a 'uint64_t', we limit ourself
|
||||
* to RAID arrays with 64 devices.
|
||||
* writemostly. The kernel supports up to 253 legs.
|
||||
* We limit ourselvs by choosing a lower value
|
||||
* for DEFAULT_RAID_MAX_IMAGES.
|
||||
*/
|
||||
uint64_t rebuilds[4];
|
||||
uint64_t writemostly[4];
|
||||
uint64_t rebuilds[RAID_BITMAP_SIZE];
|
||||
uint64_t writemostly[RAID_BITMAP_SIZE];
|
||||
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
|
||||
uint32_t data_copies; /* RAID # of data copies */
|
||||
uint32_t sync_daemon_sleep; /* ms (kernel default = 5sec) */
|
||||
uint32_t max_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t data_copies; /* RAID # of data copies */
|
||||
uint32_t stripe_cache; /* sectors */
|
||||
|
||||
uint64_t flags; /* [no]sync */
|
||||
uint64_t reserved2;
|
||||
};
|
||||
|
||||
int dm_tree_node_add_raid_target_with_params(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const struct dm_tree_node_raid_params *p);
|
||||
|
||||
/* Version 2 API function taking dm_tree_node_raid_params_v2 for aforementioned extensions. */
|
||||
int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const struct dm_tree_node_raid_params_v2 *p);
|
||||
|
||||
/* Cache feature_flags */
|
||||
#define DM_CACHE_FEATURE_WRITEBACK 0x00000001
|
||||
#define DM_CACHE_FEATURE_WRITETHROUGH 0x00000002
|
||||
|
||||
@@ -23,8 +23,6 @@
#define DEV_NAME(dmt) (dmt->mangled_dev_name ? : dmt->dev_name)
#define DEV_UUID(DMT) (dmt->mangled_uuid ? : dmt->uuid)

#define RAID_BITMAP_SIZE 4

int mangle_string(const char *str, const char *str_name, size_t len,
char *buf, size_t buf_len, dm_string_mangling_t mode);

@@ -3314,14 +3314,13 @@ int dm_tree_node_add_raid_target_with_params(struct dm_tree_node *node,
|
||||
seg->region_size = p->region_size;
|
||||
seg->stripe_size = p->stripe_size;
|
||||
seg->area_count = 0;
|
||||
seg->delta_disks = p->delta_disks;
|
||||
seg->data_offset = p->data_offset;
|
||||
memcpy(seg->rebuilds, p->rebuilds, sizeof(seg->rebuilds));
|
||||
memcpy(seg->writemostly, p->writemostly, sizeof(seg->writemostly));
|
||||
memset(seg->rebuilds, 0, sizeof(seg->rebuilds));
|
||||
seg->rebuilds[0] = p->rebuilds;
|
||||
memset(seg->writemostly, 0, sizeof(seg->writemostly));
|
||||
seg->writemostly[0] = p->writemostly;
|
||||
seg->writebehind = p->writebehind;
|
||||
seg->min_recovery_rate = p->min_recovery_rate;
|
||||
seg->max_recovery_rate = p->max_recovery_rate;
|
||||
seg->data_copies = p->data_copies;
|
||||
seg->flags = p->flags;
|
||||
|
||||
return 1;
|
||||
@@ -3332,21 +3331,61 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
const char *raid_type,
|
||||
uint32_t region_size,
|
||||
uint32_t stripe_size,
|
||||
uint64_t *rebuilds,
|
||||
uint64_t rebuilds,
|
||||
uint64_t flags)
|
||||
{
|
||||
struct dm_tree_node_raid_params params = {
|
||||
.raid_type = raid_type,
|
||||
.region_size = region_size,
|
||||
.stripe_size = stripe_size,
|
||||
.rebuilds = rebuilds,
|
||||
.flags = flags
|
||||
};
|
||||
|
||||
memcpy(params.rebuilds, rebuilds, sizeof(params.rebuilds));
|
||||
|
||||
return dm_tree_node_add_raid_target_with_params(node, size, ¶ms);
|
||||
}
|
||||
|
||||
/*
|
||||
* Version 2 of dm_tree_node_add_raid_target() allowing for:
|
||||
*
|
||||
* - maximum 253 legs in a raid set (MD kernel limitation)
|
||||
* - delta_disks for disk add/remove reshaping
|
||||
* - data_offset for out-of-place reshaping
|
||||
* - data_copies to cope witth odd numbers of raid10 disks
|
||||
*/
|
||||
int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const struct dm_tree_node_raid_params_v2 *p)
|
||||
{
|
||||
unsigned i;
|
||||
struct load_segment *seg = NULL;
|
||||
|
||||
for (i = 0; i < DM_ARRAY_SIZE(_dm_segtypes) && !seg; ++i)
|
||||
if (!strcmp(p->raid_type, _dm_segtypes[i].target))
|
||||
if (!(seg = _add_segment(node,
|
||||
_dm_segtypes[i].type, size)))
|
||||
return_0;
|
||||
if (!seg) {
|
||||
log_error("Unsupported raid type %s.", p->raid_type);
|
||||
return 0;
|
||||
}
|
||||
|
||||
seg->region_size = p->region_size;
|
||||
seg->stripe_size = p->stripe_size;
|
||||
seg->area_count = 0;
|
||||
seg->delta_disks = p->delta_disks;
|
||||
seg->data_offset = p->data_offset;
|
||||
memcpy(seg->rebuilds, p->rebuilds, sizeof(seg->rebuilds));
|
||||
memcpy(seg->writemostly, p->writemostly, sizeof(seg->writemostly));
|
||||
seg->writebehind = p->writebehind;
|
||||
seg->data_copies = p->data_copies;
|
||||
seg->min_recovery_rate = p->min_recovery_rate;
|
||||
seg->max_recovery_rate = p->max_recovery_rate;
|
||||
seg->flags = p->flags;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int dm_tree_node_add_cache_target(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
uint64_t feature_flags, /* DM_CACHE_FEATURE_* */
|
||||
|
||||
@@ -144,12 +144,12 @@ Makefile: Makefile.in

man-generator:
$(CC) -DMAN_PAGE_GENERATOR -I$(top_builddir)/tools $(CFLAGS) $(top_srcdir)/tools/command.c -o $@
- ./man-generator lvmconfig > test.gen
- ./man-generator --primary lvmconfig > test.gen
if [ ! -s test.gen ] ; then cp genfiles/*.gen $(top_builddir)/man; fi;

$(MAN8GEN): man-generator
echo "Generating $@" ;
if [ ! -e $@.gen ]; then ./man-generator $(basename $@) $(top_srcdir)/man/$@.des > $@.gen; fi
if [ ! -e $@.gen ]; then ./man-generator --primary $(basename $@) $(top_srcdir)/man/$@.des > $@.gen; ./man-generator --secondary $(basename $@) >> $@.gen; fi
if [ -f $(top_srcdir)/man/$@.end ]; then cat $(top_srcdir)/man/$@.end >> $@.gen; fi;
cat $(top_srcdir)/man/see_also.end >> $@.gen
$(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $@.gen > $@

@@ -27,6 +27,39 @@ A command run on a visible LV sometimes operates on a sub LV rather than
the specified LV. In other cases, a sub LV must be specified directly on
the command line.

Striped raid types are
.B raid0/raid0_meta
,
.B raid5
(an alias for raid5_ls),
.B raid6
(an alias for raid6_zr) and
.B raid10
(an alias for raid10_near).

As opposed to mirroring, raid5 and raid6 stripe data and calculate parity
blocks. The parity blocks can be used for data block recovery in case devices
fail. A maximum number of one device in a raid5 LV may fail and two in case
of raid6. Striped raid types typically rotate the parity blocks for performance
reasons thus avoiding contention on a single device. Layouts of raid5 rotating
parity blocks can be one of left-asymmetric (raid5_la), left-symmetric (raid5_ls
with alias raid5), right-asymmetric (raid5_ra), right-symmetric (raid5_rs) and raid5_n,
which doesn't rotate parity blocks. Any \"_n\" layouts allow for conversion between
raid levels (raid5_n -> raid6 or raid5_n -> striped/raid0/raid0_meta).
raid6 layouts are zero-restart (raid6_zr with alias raid6), next-restart (raid6_nr),
next-continue (raid6_nc). Additionally, special raid6 layouts for raid level conversions
between raid5 and raid6 are raid6_ls_6, raid6_rs_6, raid6_la_6 and raid6_ra_6. Those
correspond to their raid5 counterparts (e.g. raid5_rs can be directly converted to raid6_rs_6
and vice-versa).
raid10 (an alias for raid10_near) is currently limited to one data copy and even number of
sub LVs. This is a mirror group layout thus a single sub LV may fail per mirror group
without data loss.
Striped raid types support converting the layout, their stripesize
and their number of stripes.

The striped raid types combined with raid1 allow for conversion from linear -> striped/raid0/raid0_meta
and vice-versa by e.g. linear <-> raid1 <-> raid5_n (then adding stripes) <-> striped/raid0/raid0_meta.

Sub LVs can be displayed with the command
.B lvs -a

@@ -28,9 +28,9 @@ to improve performance.

.SS Usage notes

In the usage section below, \fB--size\fP \fINumber\fP can be replaced
in each case with \fB--extents\fP \fINumberExtents\fP. Also see both
descriptions the options section.
In the usage section below, \fB--size\fP \fISize\fP can be replaced
with \fB--extents\fP \fINumber\fP. See both descriptions
the options section.

In the usage section below, \fB--name\fP is omitted from the required
options, even though it is typically used. When the name is not

@@ -1,5 +1,12 @@
lvextend extends the size of an LV. This requires allocating logical
extents from the VG's free physical extents. A copy\-on\-write snapshot LV
can also be extended to provide more space to hold COW blocks. Use
\fBlvconvert\fP(8) to change the number of data images in a RAID or
extents from the VG's free physical extents. If the extension adds a new
LV segment, the new segment will use the existing segment type of the LV.

Extending a copy\-on\-write snapshot LV adds space for COW blocks.

Use \fBlvconvert\fP(8) to change the number of data images in a RAID or
mirrored LV.

In the usage section below, \fB--size\fP \fISize\fP can be replaced
with \fB--extents\fP \fINumber\fP. See both descriptions
the options section.

man/lvm.8.in (108 changed lines)
@@ -484,48 +484,70 @@ directly.
|
||||
.SH SEE ALSO
|
||||
.
|
||||
.nh
|
||||
.BR lvm.conf (5),
|
||||
.BR lvmcache (7),
|
||||
.BR lvmreport(7),
|
||||
.BR lvmthin (7),
|
||||
.BR clvmd (8),
|
||||
.BR lvm (8)
|
||||
.BR lvm.conf (5)
|
||||
.BR lvmconfig (8)
|
||||
|
||||
.BR pvchange (8)
|
||||
.BR pvck (8)
|
||||
.BR pvcreate (8)
|
||||
.BR pvdisplay (8)
|
||||
.BR pvmove (8)
|
||||
.BR pvremove (8)
|
||||
.BR pvresize (8)
|
||||
.BR pvs (8)
|
||||
.BR pvscan (8)
|
||||
|
||||
.BR vgcfgbackup (8)
|
||||
.BR vgcfgrestore (8)
|
||||
.BR vgchange (8)
|
||||
.BR vgck (8)
|
||||
.BR vgcreate (8)
|
||||
.BR vgconvert (8)
|
||||
.BR vgdisplay (8)
|
||||
.BR vgexport (8)
|
||||
.BR vgextend (8)
|
||||
.BR vgimport (8)
|
||||
.BR vgimportclone (8)
|
||||
.BR vgmerge (8)
|
||||
.BR vgmknodes (8)
|
||||
.BR vgreduce (8)
|
||||
.BR vgremove (8)
|
||||
.BR vgrename (8)
|
||||
.BR vgs (8)
|
||||
.BR vgscan (8)
|
||||
.BR vgsplit (8)
|
||||
|
||||
.BR lvcreate (8)
|
||||
.BR lvchange (8)
|
||||
.BR lvconvert (8)
|
||||
.BR lvdisplay (8)
|
||||
.BR lvextend (8)
|
||||
.BR lvreduce (8)
|
||||
.BR lvremove (8)
|
||||
.BR lvrename (8)
|
||||
.BR lvresize (8)
|
||||
.BR lvs (8)
|
||||
.BR lvscan (8)
|
||||
|
||||
.BR lvm2-activation-generator (8)
|
||||
.BR blkdeactivate (8)
|
||||
.BR lvmdump (8)
|
||||
|
||||
.BR dmeventd (8)
|
||||
.BR lvmetad (8)
|
||||
.BR lvmpolld (8)
|
||||
.BR lvmlockd (8)
|
||||
.BR lvmlockctl (8)
|
||||
.BR clvmd (8)
|
||||
.BR cmirrord (8)
|
||||
.BR lvmdbusd (8)
|
||||
|
||||
.BR lvmsystemid (7)
|
||||
.BR lvmreport (7)
|
||||
.BR lvmraid (7)
|
||||
.BR lvmthin (7)
|
||||
.BR lvmcache (7)
|
||||
|
||||
.BR dmsetup (8),
|
||||
.BR lvchange (8),
|
||||
.BR lvcreate (8),
|
||||
.BR lvdisplay (8),
|
||||
.BR lvextend (8),
|
||||
.BR lvmchange (8),
|
||||
.BR lvmconfig (8),
|
||||
.BR lvmdiskscan (8),
|
||||
.BR lvreduce (8),
|
||||
.BR lvremove (8),
|
||||
.BR lvrename (8),
|
||||
.BR lvresize (8),
|
||||
.BR lvs (8),
|
||||
.BR lvscan (8),
|
||||
.BR pvchange (8),
|
||||
.BR pvck (8),
|
||||
.BR pvcreate (8),
|
||||
.BR pvdisplay (8),
|
||||
.BR pvmove (8),
|
||||
.BR pvremove (8),
|
||||
.BR pvs (8),
|
||||
.BR pvscan (8),
|
||||
.BR vgcfgbackup (8),
|
||||
.BR vgchange (8),
|
||||
.BR vgck (8),
|
||||
.BR vgconvert (8),
|
||||
.BR vgcreate (8),
|
||||
.BR vgdisplay (8),
|
||||
.BR vgextend (8),
|
||||
.BR vgimport (8),
|
||||
.BR vgimportclone (8),
|
||||
.BR vgmerge (8),
|
||||
.BR vgmknodes (8),
|
||||
.BR vgreduce (8),
|
||||
.BR vgremove (8),
|
||||
.BR vgrename (8),
|
||||
.BR vgs (8),
|
||||
.BR vgscan (8),
|
||||
.BR vgsplit (8),
|
||||
.BR readline (3)
|
||||
|
||||
@@ -19,6 +19,11 @@ LVM RAID uses both Device Mapper (DM) and Multiple Device (MD) drivers
|
||||
from the Linux kernel. DM is used to create and manage visible LVM
|
||||
devices, and MD is used to place data on physical devices.
|
||||
|
||||
LVM creates hidden LVs (dm devices) layered between the visible LV and
|
||||
physical devices. LVs in those middle layers are called sub LVs.
|
||||
For LVM raid, a sub LV pair to store data and metadata (raid superblock
|
||||
and bitmap) is created per raid image/leg (see lvs command examples below).
|
||||
|
||||
.SH Create a RAID LV
|
||||
|
||||
To create a RAID LV, use lvcreate and specify an LV type.
|
||||
@@ -77,7 +82,7 @@ data that is written to one device before moving to the next.
|
||||
|
||||
Also called mirroring, raid1 uses multiple devices to duplicate LV data.
|
||||
The LV data remains available if all but one of the devices fail.
|
||||
The minimum number of devices required is 2.
|
||||
The minimum number of devices (i.e. sub LV pairs) required is 2.
|
||||
|
||||
.B lvcreate \-\-type raid1
|
||||
[\fB\-\-mirrors\fP \fINumber\fP]
|
||||
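A minimal sketch (placeholder names) creating a raid1 LV with one original and one mirror image:
.nf
# lvcreate --type raid1 --mirrors 1 -L 1G -n my_lv vg
.fi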
@@ -98,8 +103,8 @@ original and one mirror image.
|
||||
|
||||
\&
|
||||
|
||||
raid4 is a form of striping that uses an extra device dedicated to storing
|
||||
parity blocks. The LV data remains available if one device fails. The
|
||||
raid4 is a form of striping that uses an extra, first device dedicated to
|
||||
storing parity blocks. The LV data remains available if one device fails. The
|
||||
parity is used to recalculate data that is lost from a single device. The
|
||||
minimum number of devices required is 3.
|
||||
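For example (placeholder names), a raid4 LV with two data stripes plus the dedicated parity device, i.e. the minimum of three devices:
.nf
# lvcreate --type raid4 --stripes 2 -L 1G -n my_lv vg
.fi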
|
||||
@@ -131,10 +136,10 @@ stored on the same device.
|
||||
\&
|
||||
|
||||
raid5 is a form of striping that uses an extra device for storing parity
|
||||
blocks. LV data and parity blocks are stored on each device. The LV data
|
||||
remains available if one device fails. The parity is used to recalculate
|
||||
data that is lost from a single device. The minimum number of devices
|
||||
required is 3.
|
||||
blocks. LV data and parity blocks are stored on each device, typically in
|
||||
a rotating pattern for performance reasons. The LV data remains available
|
||||
if one device fails. The parity is used to recalculate data that is lost
|
||||
from a single device. The minimum number of devices required is 3.
|
||||
|
||||
.B lvcreate \-\-type raid5
|
||||
[\fB\-\-stripes\fP \fINumber\fP \fB\-\-stripesize\fP \fISize\fP]
|
||||
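For example (placeholder names), a raid5 LV with three data stripes, so four devices in total:
.nf
# lvcreate --type raid5 --stripes 3 --stripesize 64k -L 1G -n my_lv vg
.fi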
@@ -167,7 +172,8 @@ parity 0 with data restart.) See \fBRAID5 variants\fP below.
|
||||
\&
|
||||
|
||||
raid6 is a form of striping like raid5, but uses two extra devices for
|
||||
parity blocks. LV data and parity blocks are stored on each device. The
|
||||
parity blocks. LV data and parity blocks are stored on each device, typically
|
||||
in a rotating pattern for performance reasons. The
|
||||
LV data remains available if up to two devices fail. The parity is used
|
||||
to recalculate data that is lost from one or two devices. The minimum
|
||||
number of devices required is 5.
|
||||
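For example (placeholder names), a raid6 LV with three data stripes, which uses the minimum of five devices:
.nf
# lvcreate --type raid6 --stripes 3 -L 1G -n my_lv vg
.fi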
@@ -919,7 +925,6 @@ Convert the linear LV to raid1 with three images
|
||||
# lvconvert --type raid1 --mirrors 2 vg/my_lv
|
||||
.fi
|
||||
|
||||
.ig
|
||||
4. Converting an LV from \fBstriped\fP (with 4 stripes) to \fBraid6_nc\fP.
|
||||
|
||||
.nf
|
||||
@@ -927,9 +932,9 @@ Start with a striped LV:
|
||||
|
||||
# lvcreate --stripes 4 -L64M -n my_lv vg
|
||||
|
||||
Convert the striped LV to raid6_nc:
|
||||
Convert the striped LV to raid6_n_6:
|
||||
|
||||
# lvconvert --type raid6_nc vg/my_lv
|
||||
# lvconvert --type raid6 vg/my_lv
|
||||
|
||||
# lvs -a -o lv_name,segtype,sync_percent,data_copies
|
||||
LV Type Cpy%Sync #Cpy
|
||||
@@ -954,14 +959,12 @@ existing stripe devices. It then creates 2 additional MetaLV/DataLV pairs
|
||||
|
||||
If rotating data/parity is required, such as with raid6_nr, it must be
|
||||
done by reshaping (see below).
|
||||
..
|
||||
|
||||
|
||||
.SH RAID Reshaping
|
||||
|
||||
RAID reshaping is changing attributes of a RAID LV while keeping the same
|
||||
RAID level, i.e. changes that do not involve changing the number of
|
||||
devices. This includes changing RAID layout, stripe size, or number of
|
||||
RAID level. This includes changing RAID layout, stripe size, or number of
|
||||
stripes.
|
||||
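As an illustrative sketch (my_lv is a placeholder, and reshape support as exercised by the test scripts further below is assumed), each command keeps the RAID level while changing one attribute:
.nf
# lvconvert --stripes 5 vg/my_lv
# lvconvert --stripesize 128k vg/my_lv
# lvconvert --type raid5_rs vg/my_lv
.fi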
|
||||
When changing the RAID layout or stripe size, no new SubLVs (MetaLVs or
|
||||
@@ -975,15 +978,12 @@ partially updated and corrupted. Instead, an existing stripe is quiesced,
|
||||
read, changed in layout, and the new stripe written to free space. Once
|
||||
that is done, the new stripe is unquiesced and used.)
|
||||
|
||||
(The reshaping features are planned for a future release.)
|
||||
|
||||
.ig
|
||||
.SS Examples
|
||||
|
||||
1. Converting raid6_n_6 to raid6_nr with rotating data/parity.
|
||||
|
||||
This conversion naturally follows a previous conversion from striped to
|
||||
raid6_n_6 (shown above). It completes the transition to a more
|
||||
This conversion naturally follows a previous conversion from striped/raid0
|
||||
to raid6_n_6 (shown above). It completes the transition to a more
|
||||
traditional RAID6.
|
||||
|
||||
.nf
|
||||
@@ -1029,15 +1029,13 @@ traditional RAID6.
|
||||
The DataLVs are larger (additional segment in each) which provides space
|
||||
for out-of-place reshaping. The result is:
|
||||
|
||||
FIXME: did the lv name change from my_lv to r?
|
||||
.br
|
||||
FIXME: should we change device names in the example to sda,sdb,sdc?
|
||||
.br
|
||||
FIXME: include -o devices or seg_pe_ranges above also?
|
||||
|
||||
.nf
|
||||
# lvs -a -o lv_name,segtype,seg_pe_ranges,dataoffset
|
||||
LV Type PE Ranges data
|
||||
LV Type PE Ranges Doff
|
||||
r raid6_nr r_rimage_0:0-32 \\
|
||||
r_rimage_1:0-32 \\
|
||||
r_rimage_2:0-32 \\
|
||||
@@ -1093,19 +1091,15 @@ RAID5 right asymmetric
|
||||
\[bu]
|
||||
Rotating parity 0 with data continuation
|
||||
|
||||
.ig
|
||||
raid5_n
|
||||
.br
|
||||
\[bu]
|
||||
RAID5 striping
|
||||
RAID5 parity n
|
||||
.br
|
||||
\[bu]
|
||||
Same layout as raid4 with a dedicated parity N with striped data.
|
||||
.br
|
||||
Dedicated parity device n used for striped/raid0 conversions
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
..
|
||||
Used for RAID Takeover
|
||||
|
||||
.SH RAID6 Variants
|
||||
|
||||
@@ -1144,7 +1138,24 @@ RAID6 N continue
|
||||
\[bu]
|
||||
Rotating parity N with data continuation
|
||||
|
||||
.ig
|
||||
raid6_n_6
|
||||
.br
|
||||
\[bu]
|
||||
RAID6 last parity devices
|
||||
.br
|
||||
\[bu]
|
||||
Dedicated last parity devices used for striped/raid0 conversions
|
||||
\[bu]
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_{ls,rs,la,ra}_6
|
||||
.br
|
||||
\[bu]
|
||||
RAID6 last parity device
|
||||
.br
|
||||
\[bu]
|
||||
Dedicated last parity device used for conversions from/to raid5_{ls,rs,la,ra}
|
||||
|
||||
raid6_n_6
|
||||
.br
|
||||
\[bu]
|
||||
@@ -1154,8 +1165,7 @@ RAID6 N continue
|
||||
Fixed P-Syndrome N-1 and Q-Syndrome N with striped data
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_ls_6
|
||||
.br
|
||||
@@ -1166,8 +1176,7 @@ RAID6 N continue
|
||||
Same as raid5_ls for N-1 disks with fixed Q-Syndrome N
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_la_6
|
||||
.br
|
||||
@@ -1178,8 +1187,7 @@ RAID6 N continue
|
||||
Same as raid5_la for N-1 disks with fixed Q-Syndrome N
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_rs_6
|
||||
.br
|
||||
@@ -1190,8 +1198,7 @@ RAID6 N continue
|
||||
Same as raid5_rs for N-1 disks with fixed Q-Syndrome N
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_ra_6
|
||||
.br
|
||||
@@ -1202,9 +1209,7 @@ RAID6 N continue
|
||||
Same as raid5_ra for N-1 disks with fixed Q-Syndrome N
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
..
|
||||
Used for RAID Takeover
|
||||
|
||||
|
||||
.ig
|
||||
|
||||
@@ -12,3 +12,8 @@ system.
|
||||
Sizes will be rounded if necessary. For example, the LV size must be an
|
||||
exact number of extents, and the size of a striped segment must be a
|
||||
multiple of the number of stripes.
|
||||
|
||||
In the usage section below, \fB--size\fP \fISize\fP can be replaced
|
||||
with \fB--extents\fP \fINumber\fP. See both descriptions
|
||||
in the options section.
|
||||
|
||||
|
||||
@@ -1,2 +1,7 @@
|
||||
lvresize resizes an LV in the same way as lvextend and lvreduce. See
|
||||
\fBlvextend\fP(8) and \fBlvreduce\fP(8) for more information.
|
||||
|
||||
In the usage section below, \fB--size\fP \fISize\fP can be replaced
|
||||
with \fB--extents\fP \fINumber\fP. See both descriptions
|
||||
in the options section.
|
||||
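For example (illustrative only), either form below sets the LV to the same size when the VG uses 4m extents; reducing an LV below the size of its filesystem requires shrinking the filesystem first:
.nf
# lvresize --size 2g vg/my_lv
# lvresize --extents 512 vg/my_lv
.fi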
|
||||
|
||||
@@ -56,6 +56,7 @@ Inconsistencies are detected by initiating a "check" on a RAID logical volume.
|
||||
(The scrubbing operations, "check" and "repair", can be performed on a RAID
|
||||
logical volume via the 'lvchange' command.) (w)ritemostly signifies the
|
||||
devices in a RAID 1 logical volume that have been marked write-mostly.
|
||||
(R)emove after reshape signifies freed striped raid images to be removed.
|
||||
.IP
|
||||
Related to Thin pool Logical Volumes: (F)ailed, out of (D)ata space,
|
||||
(M)etadata read only.
|
||||
|
||||
@@ -198,6 +198,9 @@ class TestDbusService(unittest.TestCase):
|
||||
self.objs[MANAGER_INT][0].Manager.PvCreate(
|
||||
dbus.String(device), dbus.Int32(g_tmo), EOD)
|
||||
)
|
||||
|
||||
self._validate_lookup(device, pv_path)
|
||||
|
||||
self.assertTrue(pv_path is not None and len(pv_path) > 0)
|
||||
return pv_path
|
||||
|
||||
@@ -229,6 +232,7 @@ class TestDbusService(unittest.TestCase):
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
self.assertTrue(vg_path is not None and len(vg_path) > 0)
|
||||
return ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
@@ -263,6 +267,9 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def _create_raid5_thin_pool(self, vg=None):
|
||||
|
||||
meta_name = "meta_r5"
|
||||
data_name = "data_r5"
|
||||
|
||||
if not vg:
|
||||
pv_paths = []
|
||||
for pp in self.objs[PV_INT]:
|
||||
@@ -272,7 +279,7 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
lv_meta_path = self.handle_return(
|
||||
vg.LvCreateRaid(
|
||||
dbus.String("meta_r5"),
|
||||
dbus.String(meta_name),
|
||||
dbus.String("raid5"),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.UInt32(0),
|
||||
@@ -280,10 +287,11 @@ class TestDbusService(unittest.TestCase):
|
||||
dbus.Int32(g_tmo),
|
||||
EOD)
|
||||
)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, meta_name), lv_meta_path)
|
||||
|
||||
lv_data_path = self.handle_return(
|
||||
vg.LvCreateRaid(
|
||||
dbus.String("data_r5"),
|
||||
dbus.String(data_name),
|
||||
dbus.String("raid5"),
|
||||
dbus.UInt64(mib(16)),
|
||||
dbus.UInt32(0),
|
||||
@@ -292,6 +300,8 @@ class TestDbusService(unittest.TestCase):
|
||||
EOD)
|
||||
)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, data_name), lv_data_path)
|
||||
|
||||
thin_pool_path = self.handle_return(
|
||||
vg.CreateThinPool(
|
||||
dbus.ObjectPath(lv_meta_path),
|
||||
@@ -339,7 +349,13 @@ class TestDbusService(unittest.TestCase):
|
||||
self.assertTrue(cached_thin_pool_object.ThinPool.MetaDataLv != '/')
|
||||
|
||||
def _lookup(self, lvm_id):
|
||||
return self.objs[MANAGER_INT][0].Manager.LookUpByLvmId(lvm_id)
|
||||
return self.objs[MANAGER_INT][0].\
|
||||
Manager.LookUpByLvmId(dbus.String(lvm_id))
|
||||
|
||||
def _validate_lookup(self, lvm_name, object_path):
|
||||
t = self._lookup(lvm_name)
|
||||
self.assertTrue(
|
||||
object_path == t, "%s != %s for %s" % (object_path, t, lvm_name))
|
||||
|
||||
def test_lookup_by_lvm_id(self):
|
||||
# For the moment lets just lookup what we know about which is PVs
|
||||
@@ -392,10 +408,8 @@ class TestDbusService(unittest.TestCase):
|
||||
def test_vg_rename(self):
|
||||
vg = self._vg_create().Vg
|
||||
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
|
||||
# Do a vg lookup
|
||||
path = mgr.LookUpByLvmId(dbus.String(vg.Name))
|
||||
path = self._lookup(vg.Name)
|
||||
|
||||
vg_name_start = vg.Name
|
||||
|
||||
@@ -406,7 +420,7 @@ class TestDbusService(unittest.TestCase):
|
||||
for i in range(0, 5):
|
||||
lv_t = self._create_lv(size=mib(4), vg=vg)
|
||||
full_name = "%s/%s" % (vg_name_start, lv_t.LvCommon.Name)
|
||||
lv_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lv_path = self._lookup(full_name)
|
||||
self.assertTrue(lv_path == lv_t.object_path)
|
||||
|
||||
new_name = 'renamed_' + vg.Name
|
||||
@@ -417,7 +431,7 @@ class TestDbusService(unittest.TestCase):
|
||||
self._check_consistency()
|
||||
|
||||
# Do a vg lookup
|
||||
path = mgr.LookUpByLvmId(dbus.String(new_name))
|
||||
path = self._lookup(new_name)
|
||||
self.assertTrue(path != '/', "%s" % (path))
|
||||
self.assertTrue(prev_path == path, "%s != %s" % (prev_path, path))
|
||||
|
||||
@@ -435,14 +449,12 @@ class TestDbusService(unittest.TestCase):
|
||||
lv_proxy.Vg == vg.object_path, "%s != %s" %
|
||||
(lv_proxy.Vg, vg.object_path))
|
||||
full_name = "%s/%s" % (new_name, lv_proxy.Name)
|
||||
lv_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lv_path = self._lookup(full_name)
|
||||
self.assertTrue(
|
||||
lv_path == lv_proxy.object_path, "%s != %s" %
|
||||
(lv_path, lv_proxy.object_path))
|
||||
|
||||
def _verify_hidden_lookups(self, lv_common_object, vgname):
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
|
||||
hidden_lv_paths = lv_common_object.HiddenLvs
|
||||
|
||||
for h in hidden_lv_paths:
|
||||
@@ -454,7 +466,7 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
full_name = "%s/%s" % (vgname, h_lv.Name)
|
||||
# print("Hidden check %s" % (full_name))
|
||||
lookup_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lookup_path = self._lookup(full_name)
|
||||
self.assertTrue(lookup_path != '/')
|
||||
self.assertTrue(lookup_path == h_lv.object_path)
|
||||
|
||||
@@ -462,7 +474,7 @@ class TestDbusService(unittest.TestCase):
|
||||
full_name = "%s/%s" % (vgname, h_lv.Name[1:-1])
|
||||
# print("Hidden check %s" % (full_name))
|
||||
|
||||
lookup_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lookup_path = self._lookup(full_name)
|
||||
self.assertTrue(lookup_path != '/')
|
||||
self.assertTrue(lookup_path == h_lv.object_path)
|
||||
|
||||
@@ -471,7 +483,6 @@ class TestDbusService(unittest.TestCase):
|
||||
(vg, thin_pool) = self._create_raid5_thin_pool()
|
||||
|
||||
vg_name_start = vg.Name
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
|
||||
# noinspection PyTypeChecker
|
||||
self._verify_hidden_lookups(thin_pool.LvCommon, vg_name_start)
|
||||
@@ -486,11 +497,14 @@ class TestDbusService(unittest.TestCase):
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
|
||||
self._validate_lookup(
|
||||
"%s/%s" % (vg_name_start, lv_name), thin_lv_path)
|
||||
|
||||
self.assertTrue(thin_lv_path != '/')
|
||||
|
||||
full_name = "%s/%s" % (vg_name_start, lv_name)
|
||||
|
||||
lookup_lv_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lookup_lv_path = self._lookup(full_name)
|
||||
self.assertTrue(
|
||||
thin_lv_path == lookup_lv_path,
|
||||
"%s != %s" % (thin_lv_path, lookup_lv_path))
|
||||
@@ -518,7 +532,7 @@ class TestDbusService(unittest.TestCase):
|
||||
(lv_proxy.Vg, vg.object_path))
|
||||
full_name = "%s/%s" % (new_name, lv_proxy.Name)
|
||||
# print('Full Name %s' % (full_name))
|
||||
lv_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lv_path = self._lookup(full_name)
|
||||
self.assertTrue(
|
||||
lv_path == lv_proxy.object_path, "%s != %s" %
|
||||
(lv_path, lv_proxy.object_path))
|
||||
@@ -543,75 +557,88 @@ class TestDbusService(unittest.TestCase):
|
||||
return lv
|
||||
|
||||
def test_lv_create(self):
|
||||
lv_name = lv_n()
|
||||
vg = self._vg_create().Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreate,
|
||||
(dbus.String(lv_n()), dbus.UInt64(mib(4)),
|
||||
(dbus.String(lv_name), dbus.UInt64(mib(4)),
|
||||
dbus.Array([], signature='(ott)'), dbus.Int32(g_tmo),
|
||||
EOD), vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_create_job(self):
|
||||
|
||||
lv_name = lv_n()
|
||||
vg = self._vg_create().Vg
|
||||
(object_path, job_path) = vg.LvCreate(
|
||||
dbus.String(lv_n()), dbus.UInt64(mib(4)),
|
||||
dbus.String(lv_name), dbus.UInt64(mib(4)),
|
||||
dbus.Array([], signature='(ott)'), dbus.Int32(0),
|
||||
EOD)
|
||||
|
||||
self.assertTrue(object_path == '/')
|
||||
self.assertTrue(job_path != '/')
|
||||
object_path = self._wait_for_job(job_path)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), object_path)
|
||||
self.assertTrue(object_path != '/')
|
||||
|
||||
def test_lv_create_linear(self):
|
||||
|
||||
lv_name = lv_n()
|
||||
vg = self._vg_create().Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateLinear,
|
||||
(dbus.String(lv_n()), dbus.UInt64(mib(4)), dbus.Boolean(False),
|
||||
(dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo), EOD),
|
||||
vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_create_striped(self):
|
||||
lv_name = lv_n()
|
||||
pv_paths = []
|
||||
for pp in self.objs[PV_INT]:
|
||||
pv_paths.append(pp.object_path)
|
||||
|
||||
vg = self._vg_create(pv_paths).Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateStriped,
|
||||
(dbus.String(lv_n()), dbus.UInt64(mib(4)),
|
||||
(dbus.String(lv_name), dbus.UInt64(mib(4)),
|
||||
dbus.UInt32(2), dbus.UInt32(8), dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo), EOD),
|
||||
vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_create_mirror(self):
|
||||
lv_name = lv_n()
|
||||
pv_paths = []
|
||||
for pp in self.objs[PV_INT]:
|
||||
pv_paths.append(pp.object_path)
|
||||
|
||||
vg = self._vg_create(pv_paths).Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateMirror,
|
||||
(dbus.String(lv_n()), dbus.UInt64(mib(4)), dbus.UInt32(2),
|
||||
(dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.UInt32(2),
|
||||
dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_create_raid(self):
|
||||
lv_name = lv_n()
|
||||
pv_paths = []
|
||||
for pp in self.objs[PV_INT]:
|
||||
pv_paths.append(pp.object_path)
|
||||
|
||||
vg = self._vg_create(pv_paths).Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateRaid,
|
||||
(dbus.String(lv_n()), dbus.String('raid5'), dbus.UInt64(mib(16)),
|
||||
(dbus.String(lv_name), dbus.String('raid5'), dbus.UInt64(mib(16)),
|
||||
dbus.UInt32(2), dbus.UInt32(8), dbus.Int32(g_tmo),
|
||||
EOD),
|
||||
vg,
|
||||
LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def _create_lv(self, thinpool=False, size=None, vg=None):
|
||||
|
||||
lv_name = lv_n()
|
||||
interfaces = list(LV_BASE_INT)
|
||||
|
||||
if thinpool:
|
||||
@@ -627,12 +654,15 @@ class TestDbusService(unittest.TestCase):
|
||||
if size is None:
|
||||
size = mib(4)
|
||||
|
||||
return self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateLinear,
|
||||
(dbus.String(lv_n()), dbus.UInt64(size),
|
||||
(dbus.String(lv_name), dbus.UInt64(size),
|
||||
dbus.Boolean(thinpool), dbus.Int32(g_tmo), EOD),
|
||||
vg, interfaces)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
return lv
|
||||
|
||||
def test_lv_create_rounding(self):
|
||||
self._create_lv(size=(mib(2) + 13))
|
||||
|
||||
@@ -643,7 +673,7 @@ class TestDbusService(unittest.TestCase):
|
||||
# Rename a regular LV
|
||||
lv = self._create_lv()
|
||||
|
||||
path = self.objs[MANAGER_INT][0].Manager.LookUpByLvmId(lv.LvCommon.Name)
|
||||
path = self._lookup(lv.LvCommon.Name)
|
||||
prev_path = path
|
||||
|
||||
new_name = 'renamed_' + lv.LvCommon.Name
|
||||
@@ -651,8 +681,7 @@ class TestDbusService(unittest.TestCase):
|
||||
self.handle_return(lv.Lv.Rename(dbus.String(new_name),
|
||||
dbus.Int32(g_tmo), EOD))
|
||||
|
||||
path = self.objs[MANAGER_INT][0].Manager.LookUpByLvmId(
|
||||
dbus.String(new_name))
|
||||
path = self._lookup(new_name)
|
||||
|
||||
self._check_consistency()
|
||||
self.assertTrue(prev_path == path, "%s != %s" % (prev_path, path))
|
||||
@@ -677,26 +706,32 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
# This returns a LV with the LV interface, need to get a proxy for
|
||||
# thinpool interface too
|
||||
tp = self._create_lv(True)
|
||||
vg = self._vg_create().Vg
|
||||
tp = self._create_lv(thinpool=True, vg=vg)
|
||||
|
||||
lv_name = lv_n('_thin_lv')
|
||||
|
||||
thin_path = self.handle_return(
|
||||
tp.ThinPool.LvCreate(
|
||||
dbus.String(lv_n('_thin_lv')),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(8)),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD)
|
||||
)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), thin_path)
|
||||
|
||||
lv = ClientProxy(self.bus, thin_path,
|
||||
interfaces=(LV_COMMON_INT, LV_INT))
|
||||
|
||||
re_named = 'rename_test' + lv.LvCommon.Name
|
||||
rc = self.handle_return(
|
||||
lv.Lv.Rename(
|
||||
dbus.String('rename_test' + lv.LvCommon.Name),
|
||||
dbus.String(re_named),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD)
|
||||
)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, re_named), thin_path)
|
||||
self.assertTrue(rc == '/')
|
||||
self._check_consistency()
|
||||
|
||||
@@ -748,18 +783,18 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def test_lv_create_pv_specific(self):
|
||||
vg = self._vg_create().Vg
|
||||
|
||||
lv_name = lv_n()
|
||||
pv = vg.Pvs
|
||||
|
||||
pvp = ClientProxy(self.bus, pv[0], interfaces=(PV_INT,))
|
||||
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreate, (
|
||||
dbus.String(lv_n()),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.Array([[pvp.object_path, 0, (pvp.Pv.PeCount - 1)]],
|
||||
signature='(ott)'),
|
||||
dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_resize(self):
|
||||
|
||||
@@ -930,7 +965,8 @@ class TestDbusService(unittest.TestCase):
|
||||
self.assertTrue(vg_path == '/')
|
||||
self.assertTrue(vg_job and len(vg_job) > 0)
|
||||
|
||||
self._wait_for_job(vg_job)
|
||||
vg_path = self._wait_for_job(vg_job)
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
|
||||
def _test_expired_timer(self, num_lvs):
|
||||
rc = False
|
||||
@@ -945,17 +981,20 @@ class TestDbusService(unittest.TestCase):
|
||||
vg_proxy = self._vg_create(pv_paths)
|
||||
|
||||
for i in range(0, num_lvs):
|
||||
|
||||
lv_name = lv_n()
|
||||
vg_proxy.update()
|
||||
if vg_proxy.Vg.FreeCount > 0:
|
||||
job = self.handle_return(
|
||||
lv_path = self.handle_return(
|
||||
vg_proxy.Vg.LvCreateLinear(
|
||||
dbus.String(lv_n()),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self.assertTrue(job != '/')
|
||||
self.assertTrue(lv_path != '/')
|
||||
self._validate_lookup(
|
||||
"%s/%s" % (vg_proxy.Vg.Name, lv_name), lv_path)
|
||||
|
||||
else:
|
||||
# We ran out of space, test will probably fail
|
||||
break
|
||||
@@ -1064,15 +1103,18 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def test_lv_tags(self):
|
||||
vg = self._vg_create().Vg
|
||||
lv_name = lv_n()
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateLinear,
|
||||
(dbus.String(lv_n()),
|
||||
(dbus.String(lv_name),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD),
|
||||
vg, LV_BASE_INT)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
t = ['Testing', 'tags']
|
||||
|
||||
self.handle_return(
|
||||
@@ -1148,15 +1190,18 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def test_vg_activate_deactivate(self):
|
||||
vg = self._vg_create().Vg
|
||||
self._test_lv_create(
|
||||
lv_name = lv_n()
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateLinear, (
|
||||
dbus.String(lv_n()),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD),
|
||||
vg, LV_BASE_INT)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
vg.update()
|
||||
|
||||
rc = self.handle_return(
|
||||
@@ -1361,15 +1406,19 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def test_snapshot_merge_thin(self):
|
||||
# Create a thin LV, snapshot it and merge it
|
||||
tp = self._create_lv(True)
|
||||
vg = self._vg_create().Vg
|
||||
tp = self._create_lv(thinpool=True, vg=vg)
|
||||
lv_name = lv_n('_thin_lv')
|
||||
|
||||
thin_path = self.handle_return(
|
||||
tp.ThinPool.LvCreate(
|
||||
dbus.String(lv_n('_thin_lv')),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(10)),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), thin_path)
|
||||
|
||||
lv_p = ClientProxy(self.bus, thin_path,
|
||||
interfaces=(LV_INT, LV_COMMON_INT))
|
||||
|
||||
@@ -1512,12 +1561,14 @@ class TestDbusService(unittest.TestCase):
|
||||
EOD))
|
||||
|
||||
# Create a VG and try to create LVs with different bad names
|
||||
vg_name = vg_n()
|
||||
vg_path = self.handle_return(
|
||||
mgr.VgCreate(
|
||||
dbus.String(vg_n()),
|
||||
dbus.String(vg_name),
|
||||
dbus.Array(pv_paths, 'o'),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
|
||||
vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
@@ -1563,13 +1614,16 @@ class TestDbusService(unittest.TestCase):
|
||||
def test_invalid_tags(self):
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
pv_paths = [self.objs[PV_INT][0].object_path]
|
||||
vg_name = vg_n()
|
||||
|
||||
vg_path = self.handle_return(
|
||||
mgr.VgCreate(
|
||||
dbus.String(vg_n()),
|
||||
dbus.String(vg_name),
|
||||
dbus.Array(pv_paths, 'o'),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
|
||||
vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
for c in self._invalid_tag_characters():
|
||||
@@ -1591,13 +1645,15 @@ class TestDbusService(unittest.TestCase):
|
||||
def test_tag_names(self):
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
pv_paths = [self.objs[PV_INT][0].object_path]
|
||||
vg_name = vg_n()
|
||||
|
||||
vg_path = self.handle_return(
|
||||
mgr.VgCreate(
|
||||
dbus.String(vg_n()),
|
||||
dbus.String(vg_name),
|
||||
dbus.Array(pv_paths, 'o'),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
for i in range(1, 64):
|
||||
@@ -1622,13 +1678,15 @@ class TestDbusService(unittest.TestCase):
|
||||
def test_tag_regression(self):
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
pv_paths = [self.objs[PV_INT][0].object_path]
|
||||
vg_name = vg_n()
|
||||
|
||||
vg_path = self.handle_return(
|
||||
mgr.VgCreate(
|
||||
dbus.String(vg_n()),
|
||||
dbus.String(vg_name),
|
||||
dbus.Array(pv_paths, 'o'),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
tag = '--h/K.6g0A4FOEatf3+k_nI/Yp&L_u2oy-=j649x:+dUcYWPEo6.IWT0c'
|
||||
|
||||
@@ -866,7 +866,7 @@ common_dev_() {
|
||||
else
|
||||
test -z "${offsets[@]}" && offsets="0:"
|
||||
fi ;;
|
||||
error) offsets=${@:3}
|
||||
error|zero) offsets=${@:3}
|
||||
test -z "${offsets[@]}" && offsets="0:" ;;
|
||||
esac
|
||||
|
||||
@@ -893,8 +893,8 @@ common_dev_() {
|
||||
case "$tgtype" in
|
||||
delay)
|
||||
echo "$from $len delay $pvdev $(($pos + $offset)) $read_ms $pvdev $(($pos + $offset)) $write_ms" ;;
|
||||
error)
|
||||
echo "$from $len error" ;;
|
||||
error|zero)
|
||||
echo "$from $len $tgtype" ;;
|
||||
esac
|
||||
pos=$(($pos + $len))
|
||||
done > "$name.devtable"
|
||||
@@ -1013,12 +1013,23 @@ restore_from_devtable() {
|
||||
#
|
||||
# Convert device to device with errors
|
||||
# Takes the list of pairs of error segment from:len
|
||||
# Original device table is replace with multiple lines
|
||||
# Combination with zero or delay is unsupported
|
||||
# Original device table is replaced with multiple lines
|
||||
# i.e. error_dev "$dev1" 8:32 96:8
|
||||
error_dev() {
|
||||
common_dev_ error "$@"
|
||||
}
|
||||
|
||||
#
|
||||
# Convert existing device to a device with zero segments
|
||||
# Takes the list of pairs of zero segment from:len
|
||||
# Combination with error or delay is unsupported
|
||||
# Original device table is replaced with multiple lines
|
||||
# i.e. zero_dev "$dev1" 8:32 96:8
|
||||
zero_dev() {
|
||||
common_dev_ zero "$@"
|
||||
}
|
||||
|
||||
backup_dev() {
|
||||
local dev
|
||||
|
||||
|
||||
@@ -188,7 +188,7 @@ run_syncaction_check() {
|
||||
# 'lvs' should show results
|
||||
lvchange --syncaction check $vg/$lv
|
||||
aux wait_for_sync $vg $lv
|
||||
check lv_attr_bit health $vg/$lv "-"
|
||||
check lv_attr_bit health $vg/$lv "-" || check lv_attr_bit health $vg/$lv "m"
|
||||
check lv_field $vg/$lv raid_mismatch_count "0"
|
||||
}
|
||||
|
||||
|
||||
@@ -21,14 +21,14 @@ aux prepare_vg 4
|
||||
|
||||
for d in $dev1 $dev2 $dev3 $dev4
|
||||
do
|
||||
aux delay_dev $d 1
|
||||
aux delay_dev $d 1 1
|
||||
done
|
||||
|
||||
#
|
||||
# Test writemostly prohibited on resyncrhonizing raid1
|
||||
# Test writemostly prohibited on resynchronizing raid1
|
||||
#
|
||||
|
||||
# Create 4-way striped LV
|
||||
# Create 4-way raid1 LV
|
||||
lvcreate -aey --ty raid1 -m 3 -L 32M -n $lv1 $vg
|
||||
not lvchange -y --writemostly $dev1 $vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid1"
|
||||
|
||||
68
test/shell/lvconvert-raid-reshape-linear_to_striped.sh
Normal file
@@ -0,0 +1,68 @@
|
||||
#!/bin/sh
|
||||
# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
|
||||
#
|
||||
# This copyrighted material is made available to anyone wishing to use,
|
||||
# modify, copy, or redistribute it subject to the terms and conditions
|
||||
# of the GNU General Public License v.2.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
SKIP_WITH_LVMLOCKD=1
|
||||
SKIP_WITH_LVMPOLLD=1
|
||||
|
||||
. lib/inittest
|
||||
|
||||
which mkfs.ext4 || skip
|
||||
aux have_raid 1 10 2 || skip
|
||||
|
||||
aux prepare_vg 5
|
||||
|
||||
#
|
||||
# Test single step linear -> striped conversion
|
||||
#
|
||||
|
||||
# Create linear LV
|
||||
lvcreate -aey -L 16M -n $lv1 $vg
|
||||
check lv_field $vg/$lv1 segtype "linear"
|
||||
check lv_field $vg/$lv1 stripes 1
|
||||
check lv_field $vg/$lv1 data_stripes 1
|
||||
echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert linear -> raid1
|
||||
lvconvert -y -m 1 $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid1"
|
||||
check lv_field $vg/$lv1 stripes 2
|
||||
check lv_field $vg/$lv1 data_stripes 2
|
||||
check lv_field $vg/$lv1 regionsize "512.00k"
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert raid1 -> raid5_n
|
||||
lvconvert -y --ty raid5_n $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_field $vg/$lv1 stripes 2
|
||||
check lv_field $vg/$lv1 data_stripes 1
|
||||
check lv_field $vg/$lv1 stripesize "64.00k"
|
||||
check lv_field $vg/$lv1 regionsize "512.00k"
|
||||
|
||||
# Convert raid5_n adding stripes
|
||||
lvconvert -y --stripes 4 $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 4
|
||||
check lv_first_seg_field $vg/$lv1 stripes 5
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "512.00k"
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert raid5_n -> striped
|
||||
lvconvert -y --type striped $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
vgremove -ff $vg
|
||||
106
test/shell/lvconvert-raid-reshape-striped_to_linear.sh
Normal file
@@ -0,0 +1,106 @@
|
||||
#!/bin/sh
|
||||
# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
|
||||
#
|
||||
# This copyrighted material is made available to anyone wishing to use,
|
||||
# modify, copy, or redistribute it subject to the terms and conditions
|
||||
# of the GNU General Public License v.2.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
SKIP_WITH_LVMLOCKD=1
|
||||
SKIP_WITH_LVMPOLLD=1
|
||||
|
||||
. lib/inittest
|
||||
|
||||
which mkfs.ext4 || skip
|
||||
aux have_raid 1 10 2 || skip
|
||||
|
||||
aux prepare_vg 5
|
||||
|
||||
#
|
||||
# Test single step striped -> linear conversion
|
||||
#
|
||||
|
||||
# Create 4-way striped LV
|
||||
lvcreate -aey -i 4 -I 32k -L 16M -n $lv1 $vg
|
||||
check lv_field $vg/$lv1 segtype "striped"
|
||||
check lv_field $vg/$lv1 data_stripes 4
|
||||
check lv_field $vg/$lv1 stripes 4
|
||||
check lv_field $vg/$lv1 stripesize "32.00k"
|
||||
check lv_field $vg/$lv1 reshape_len ""
|
||||
echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert striped -> raid5(_n)
|
||||
lvconvert -y --ty raid5 -R 128k $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_field $vg/$lv1 data_stripes 4
|
||||
check lv_field $vg/$lv1 stripes 5
|
||||
check lv_field $vg/$lv1 stripesize "32.00k"
|
||||
check lv_field $vg/$lv1 regionsize "128.00k"
|
||||
check lv_field $vg/$lv1 reshape_len ""
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Extend raid5_n LV by factor 4 to keep its size once converted to linear
|
||||
lvresize -y -L 64 $vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_field $vg/$lv1 data_stripes 4
|
||||
check lv_field $vg/$lv1 stripes 5
|
||||
check lv_field $vg/$lv1 stripesize "32.00k"
|
||||
check lv_field $vg/$lv1 regionsize "128.00k"
|
||||
check lv_field $vg/$lv1 reshape_len ""
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert raid5_n LV to 1 stripe (2 legs total),
|
||||
# 64k stripesize and 1024k regionsize
|
||||
# FIXME: "--type" superfluous (cli fix needed)
|
||||
lvconvert -y -f --ty raid5_n --stripes 1 -I 64k -R 1024k $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 1
|
||||
check lv_first_seg_field $vg/$lv1 stripes 5
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "1.00m"
|
||||
check lv_first_seg_field $vg/$lv1 reshape_len 10
|
||||
# for slv in {0..4}
|
||||
# do
|
||||
# check lv_first_seg_field $vg/${lv1}_rimage_${slv} reshape_len 2
|
||||
# done
|
||||
aux wait_for_sync $vg $lv1 1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Remove the now freed legs
|
||||
lvconvert --stripes 1 $vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 1
|
||||
check lv_first_seg_field $vg/$lv1 stripes 2
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "1.00m"
|
||||
check lv_first_seg_field $vg/$lv1 reshape_len 4
|
||||
|
||||
# Convert raid5_n to raid1
|
||||
lvconvert -y --type raid1 $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid1"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 2
|
||||
check lv_first_seg_field $vg/$lv1 stripes 2
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "0"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "1.00m"
|
||||
check lv_first_seg_field $vg/$lv1 reshape_len ""
|
||||
|
||||
# Convert raid1 -> linear
|
||||
lvconvert -y --type linear $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "linear"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 1
|
||||
check lv_first_seg_field $vg/$lv1 stripes 1
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "0"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "0"
|
||||
check lv_first_seg_field $vg/$lv1 reshape_len ""
|
||||
|
||||
vgremove -ff $vg
|
||||
@@ -17,7 +17,9 @@ SKIP_WITH_LVMPOLLD=1
|
||||
which mkfs.ext4 || skip
|
||||
aux have_raid 1 10 2 || skip
|
||||
|
||||
aux prepare_vg 64
|
||||
aux prepare_pvs 65 64
|
||||
|
||||
vgcreate -s 1M $vg $(cat DEVICES)
|
||||
|
||||
function _lvcreate
|
||||
{
|
||||
@@ -30,6 +32,7 @@ function _lvcreate
|
||||
|
||||
lvcreate -y -aey --type $level -i $req_stripes -L $size -n $lv $vg
|
||||
check lv_first_seg_field $vg/$lv segtype "$level"
|
||||
check lv_first_seg_field $vg/$lv datastripes $req_stripes
|
||||
check lv_first_seg_field $vg/$lv stripes $stripes
|
||||
mkfs.ext4 "$DM_DEV_DIR/$vg/$lv"
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv"
|
||||
@@ -39,10 +42,11 @@ function _lvconvert
|
||||
{
|
||||
local req_level=$1
|
||||
local level=$2
|
||||
local stripes=$3
|
||||
local vg=$4
|
||||
local lv=$5
|
||||
local region_size=$6
|
||||
local data_stripes=$3
|
||||
local stripes=$4
|
||||
local vg=$5
|
||||
local lv=$6
|
||||
local region_size=$7
|
||||
local wait_and_check=1
|
||||
local R=""
|
||||
|
||||
@@ -53,6 +57,7 @@ function _lvconvert
|
||||
lvconvert -y --ty $req_level $R $vg/$lv
|
||||
[ $? -ne 0 ] && return $?
|
||||
check lv_first_seg_field $vg/$lv segtype "$level"
|
||||
check lv_first_seg_field $vg/$lv data_stripes $data_stripes
|
||||
check lv_first_seg_field $vg/$lv stripes $stripes
|
||||
[ -n "$region_size" ] && check lv_field $vg/$lv regionsize $region_size
|
||||
if [ "$wait_and_check" -eq 1 ]
|
||||
@@ -67,6 +72,8 @@ function _reshape_layout
|
||||
{
|
||||
local type=$1
|
||||
shift
|
||||
local data_stripes=$1
|
||||
shift
|
||||
local stripes=$1
|
||||
shift
|
||||
local vg=$1
|
||||
@@ -80,6 +87,7 @@ function _reshape_layout
|
||||
|
||||
lvconvert -vvvv -y --ty $type $opts $vg/$lv
|
||||
check lv_first_seg_field $vg/$lv segtype "$type"
|
||||
check lv_first_seg_field $vg/$lv data_stripes $data_stripes
|
||||
check lv_first_seg_field $vg/$lv stripes $stripes
|
||||
aux wait_for_sync $vg $lv $ignore_a_chars
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv"
|
||||
@@ -99,101 +107,101 @@ check lv_first_seg_field $vg/$lv1 segtype "raid5_ls"
|
||||
aux wait_for_sync $vg $lv1
|
||||
|
||||
# Reshape it to 256K stripe size
|
||||
_reshape_layout raid5_ls 4 $vg $lv1 --stripesize 256K
|
||||
_reshape_layout raid5_ls 3 4 $vg $lv1 --stripesize 256K
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "256.00k"
|
||||
|
||||
# Convert raid5(_n) -> striped
|
||||
not _lvconvert striped striped 3 $vg $lv1 512k
|
||||
_reshape_layout raid5_n 4 $vg $lv1
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
not _lvconvert striped striped 3 3 $vg $lv1 512k
|
||||
_reshape_layout raid5_n 3 4 $vg $lv1
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid5_n
|
||||
_lvconvert raid5_n raid5_n 4 $vg $lv1 "" 1
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1 "" 1
|
||||
|
||||
# Convert raid5_n -> raid5_ls
|
||||
_reshape_layout raid5_ls 4 $vg $lv1
|
||||
_reshape_layout raid5_ls 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_ls to 5 stripes
|
||||
_reshape_layout raid5_ls 6 $vg $lv1 --stripes 5
|
||||
_reshape_layout raid5_ls 5 6 $vg $lv1 --stripes 5
|
||||
|
||||
# Convert raid5_ls back to 3 stripes
|
||||
_reshape_layout raid5_ls 6 $vg $lv1 --stripes 3 --force
|
||||
_reshape_layout raid5_ls 4 $vg $lv1 --stripes 3
|
||||
_reshape_layout raid5_ls 3 6 $vg $lv1 --stripes 3 --force
|
||||
_reshape_layout raid5_ls 3 4 $vg $lv1 --stripes 3
|
||||
|
||||
# Convert raid5_ls to 7 stripes
|
||||
_reshape_layout raid5_ls 8 $vg $lv1 --stripes 7
|
||||
_reshape_layout raid5_ls 7 8 $vg $lv1 --stripes 7
|
||||
|
||||
# Convert raid5_ls to 9 stripes
|
||||
_reshape_layout raid5_ls 10 $vg $lv1 --stripes 9
|
||||
_reshape_layout raid5_ls 9 10 $vg $lv1 --stripes 9
|
||||
|
||||
# Convert raid5_ls to 14 stripes
|
||||
_reshape_layout raid5_ls 15 $vg $lv1 --stripes 14
|
||||
_reshape_layout raid5_ls 14 15 $vg $lv1 --stripes 14
|
||||
|
||||
# Convert raid5_ls to 63 stripes
|
||||
_reshape_layout raid5_ls 64 $vg $lv1 --stripes 63
|
||||
_reshape_layout raid5_ls 63 64 $vg $lv1 --stripes 63
|
||||
|
||||
# Convert raid5_ls back to 27 stripes
|
||||
_reshape_layout raid5_ls 64 $vg $lv1 --stripes 27 --force
|
||||
_reshape_layout raid5_ls 28 $vg $lv1 --stripes 27
|
||||
_reshape_layout raid5_ls 27 64 $vg $lv1 --stripes 27 --force
|
||||
_reshape_layout raid5_ls 27 28 $vg $lv1 --stripes 27
|
||||
|
||||
# Convert raid5_ls back to 4 stripes
|
||||
_reshape_layout raid5_ls 28 $vg $lv1 --stripes 4 --force
|
||||
_reshape_layout raid5_ls 5 $vg $lv1 --stripes 4
|
||||
_reshape_layout raid5_ls 4 28 $vg $lv1 --stripes 4 --force
|
||||
_reshape_layout raid5_ls 4 5 $vg $lv1 --stripes 4
|
||||
|
||||
# Convert raid5_ls back to 3 stripes
|
||||
_reshape_layout raid5_ls 5 $vg $lv1 --stripes 3 --force
|
||||
_reshape_layout raid5_ls 4 $vg $lv1 --stripes 3
|
||||
_reshape_layout raid5_ls 3 5 $vg $lv1 --stripes 3 --force
|
||||
_reshape_layout raid5_ls 3 4 $vg $lv1 --stripes 3
|
||||
|
||||
# Convert raid5_ls -> raid5_rs
|
||||
_reshape_layout raid5_rs 4 $vg $lv1
|
||||
_reshape_layout raid5_rs 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_rs -> raid5_la
|
||||
_reshape_layout raid5_la 4 $vg $lv1
|
||||
_reshape_layout raid5_la 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_la -> raid5_ra
|
||||
_reshape_layout raid5_ra 4 $vg $lv1
|
||||
_reshape_layout raid5_ra 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_ra -> raid6_ra_6
|
||||
_lvconvert raid6_ra_6 raid6_ra_6 5 $vg $lv1 "4.00m" 1
|
||||
_lvconvert raid6_ra_6 raid6_ra_6 3 5 $vg $lv1 "4.00m" 1
|
||||
|
||||
# Convert raid5_la -> raid6(_zr)
|
||||
_reshape_layout raid6 5 $vg $lv1
|
||||
_reshape_layout raid6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6(_zr) -> raid6_nc
|
||||
_reshape_layout raid6_nc 5 $vg $lv1
|
||||
_reshape_layout raid6_nc 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6(_nc) -> raid6_nr
|
||||
_reshape_layout raid6_nr 5 $vg $lv1
|
||||
_reshape_layout raid6_nr 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_nr) -> raid6_rs_6
|
||||
_reshape_layout raid6_rs_6 5 $vg $lv1
|
||||
_reshape_layout raid6_rs_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_rs_6 to 5 stripes
|
||||
_reshape_layout raid6_rs_6 7 $vg $lv1 --stripes 5
|
||||
_reshape_layout raid6_rs_6 5 7 $vg $lv1 --stripes 5
|
||||
|
||||
# Convert raid6_rs_6 to 4 stripes
|
||||
_reshape_layout raid6_rs_6 7 $vg $lv1 --stripes 4 --force
|
||||
_reshape_layout raid6_rs_6 6 $vg $lv1 --stripes 4
|
||||
_reshape_layout raid6_rs_6 4 7 $vg $lv1 --stripes 4 --force
|
||||
_reshape_layout raid6_rs_6 4 6 $vg $lv1 --stripes 4
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "256.00k"
|
||||
|
||||
# Convert raid6_rs_6 to raid6_n_6
|
||||
_reshape_layout raid6_n_6 6 $vg $lv1
|
||||
_reshape_layout raid6_n_6 4 6 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> striped
|
||||
_lvconvert striped striped 4 $vg $lv1
|
||||
_lvconvert striped striped 4 4 $vg $lv1
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "256.00k"
|
||||
|
||||
# Convert striped -> raid10(_near)
|
||||
_lvconvert raid10 raid10 8 $vg $lv1
|
||||
_lvconvert raid10 raid10 4 8 $vg $lv1
|
||||
|
||||
# Convert raid10 to 10 stripes and 64K stripesize
|
||||
# FIXME: change once we support odd numbers of raid10 stripes
|
||||
not _reshape_layout raid10 9 $vg $lv1 --stripes 9 --stripesize 64K
|
||||
_reshape_layout raid10 10 $vg $lv1 --stripes 10 --stripesize 64K
|
||||
not _reshape_layout raid10 4 9 $vg $lv1 --stripes 9 --stripesize 64K
|
||||
_reshape_layout raid10 5 10 $vg $lv1 --stripes 10 --stripesize 64K
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
|
||||
# Convert raid6_n_6 -> striped
|
||||
_lvconvert striped striped 5 $vg $lv1
|
||||
_lvconvert striped striped 5 5 $vg $lv1
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
|
||||
vgremove -ff $vg
|
||||
|
||||
@@ -33,6 +33,7 @@ function _lvcreate
|
||||
|
||||
lvcreate -y -aey --type $level -i $req_stripes -L $size -n $lv $vg
|
||||
check lv_field $vg/$lv segtype "$level"
|
||||
check lv_field $vg/$lv data_stripes $req_stripes
|
||||
check lv_field $vg/$lv stripes $stripes
|
||||
mkfs.ext4 "$DM_DEV_DIR/$vg/$lv"
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv"
|
||||
@@ -42,10 +43,11 @@ function _lvconvert
|
||||
{
|
||||
local req_level=$1
|
||||
local level=$2
|
||||
local stripes=$3
|
||||
local vg=$4
|
||||
local lv=$5
|
||||
local region_size=$6
|
||||
local data_stripes=$3
|
||||
local stripes=$4
|
||||
local vg=$5
|
||||
local lv=$6
|
||||
local region_size=$7
|
||||
local wait_and_check=1
|
||||
local R=""
|
||||
|
||||
@@ -56,6 +58,7 @@ function _lvconvert
|
||||
lvconvert -y --ty $req_level $R $vg/$lv
|
||||
[ $? -ne 0 ] && return $?
|
||||
check lv_field $vg/$lv segtype "$level"
|
||||
check lv_field $vg/$lv data_stripes $data_stripes
|
||||
check lv_field $vg/$lv stripes $stripes
|
||||
if [ "$wait_and_check" -eq 1 ]
|
||||
then
|
||||
@@ -70,19 +73,19 @@ function _invalid_raid5_conversions
|
||||
local vg=$1
|
||||
local lv=$2
|
||||
|
||||
not _lvconvert striped 4 $vg $lv1
|
||||
not _lvconvert raid0 raid0 4 $vg $lv1
|
||||
not _lvconvert raid0_meta raid0_meta 4 $vg $lv1
|
||||
not _lvconvert raid4 raid4 5 $vg $lv1
|
||||
not _lvconvert raid5_ls raid5_ls 5 $vg $lv1
|
||||
not _lvconvert raid5_rs raid5_rs 5 $vg $lv1
|
||||
not _lvconvert raid5_la raid5_la 5 $vg $lv1
|
||||
not _lvconvert raid5_ra raid5_ra 5 $vg $lv1
|
||||
not _lvconvert raid6_zr raid6_zr 6 $vg $lv1
|
||||
not _lvconvert raid6_nr raid6_nr 6 $vg $lv1
|
||||
not _lvconvert raid6_nc raid6_nc 6 $vg $lv1
|
||||
not _lvconvert raid6_n_6 raid6_n_6 6 $vg $lv1
|
||||
not _lvconvert raid6 raid6_n_6 6 $vg $lv1
|
||||
not _lvconvert striped 4 4 $vg $lv1
|
||||
not _lvconvert raid0 raid0 4 4 $vg $lv1
|
||||
not _lvconvert raid0_meta raid0_meta 4 4 $vg $lv1
|
||||
not _lvconvert raid4 raid4 4 5 $vg $lv1
|
||||
not _lvconvert raid5_ls raid5_ls 4 5 $vg $lv1
|
||||
not _lvconvert raid5_rs raid5_rs 4 5 $vg $lv1
|
||||
not _lvconvert raid5_la raid5_la 4 5 $vg $lv1
|
||||
not _lvconvert raid5_ra raid5_ra 4 5 $vg $lv1
|
||||
not _lvconvert raid6_zr raid6_zr 4 6 $vg $lv1
|
||||
not _lvconvert raid6_nr raid6_nr 4 6 $vg $lv1
|
||||
not _lvconvert raid6_nc raid6_nc 4 6 $vg $lv1
|
||||
not _lvconvert raid6_n_6 raid6_n_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6 raid6_n_6 4 6 $vg $lv1
|
||||
}
|
||||
|
||||
# Delay 1st leg so that rebuilding status characters
|
||||
@@ -162,116 +165,116 @@ _lvcreate raid4 3 4 8M $vg $lv1
|
||||
aux wait_for_sync $vg $lv1
|
||||
|
||||
# Convert raid4 -> striped
|
||||
not _lvconvert striped striped 3 $vg $lv1 512k
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
not _lvconvert striped striped 3 3 $vg $lv1 512k
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1 64k
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1 64k
|
||||
check lv_field $vg/$lv1 regionsize "64.00k"
|
||||
|
||||
# Convert raid4 -> raid5_n
|
||||
_lvconvert raid5 raid5_n 4 $vg $lv1 128k
|
||||
_lvconvert raid5 raid5_n 3 4 $vg $lv1 128k
|
||||
check lv_field $vg/$lv1 regionsize "128.00k"
|
||||
|
||||
# Convert raid5_n -> striped
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid5_n
|
||||
_lvconvert raid5_n raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_n -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> raid0
|
||||
_lvconvert raid0 raid0 3 $vg $lv1
|
||||
_lvconvert raid0 raid0 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0 -> raid5_n
|
||||
_lvconvert raid5_n raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_n -> raid0_meta
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid5_n
|
||||
_lvconvert raid5 raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5 raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> raid0_meta
|
||||
not _lvconvert raid0_meta raid0_meta 3 $vg $lv1 256k
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
not _lvconvert raid0_meta raid0_meta 3 3 $vg $lv1 256k
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> raid0
|
||||
_lvconvert raid0 raid0 3 $vg $lv1
|
||||
_lvconvert raid0 raid0 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0 -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> striped
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid6_n_6
|
||||
_lvconvert raid6_n_6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> striped
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid6_n_6
|
||||
_lvconvert raid6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> raid5_n
|
||||
_lvconvert raid5_n raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_n -> raid6_n_6
|
||||
_lvconvert raid6_n_6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> raid6_n_6
|
||||
_lvconvert raid6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> raid0
|
||||
_lvconvert raid0 raid0 3 $vg $lv1
|
||||
_lvconvert raid0 raid0 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0 -> raid6_n_6
|
||||
_lvconvert raid6_n_6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> raid0_meta
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid6_n_6
|
||||
_lvconvert raid6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> striped
|
||||
not _lvconvert striped striped 3 $vg $lv1 128k
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
not _lvconvert striped striped 3 3 $vg $lv1 128k
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid10
|
||||
_lvconvert raid10 raid10 6 $vg $lv1
|
||||
_lvconvert raid10 raid10 3 6 $vg $lv1
|
||||
|
||||
# Convert raid10 -> raid0
|
||||
not _lvconvert raid0 raid0 3 $vg $lv1 64k
|
||||
_lvconvert raid0 raid0 3 $vg $lv1
|
||||
not _lvconvert raid0 raid0 3 3 $vg $lv1 64k
|
||||
_lvconvert raid0 raid0 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0 -> raid10
|
||||
_lvconvert raid10 raid10 6 $vg $lv1
|
||||
_lvconvert raid10 raid10 3 6 $vg $lv1
|
||||
|
||||
# Convert raid10 -> raid0_meta
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid5
|
||||
_lvconvert raid5_n raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_n -> raid0_meta
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid10
|
||||
_lvconvert raid10 raid10 6 $vg $lv1
|
||||
_lvconvert raid10 raid10 3 6 $vg $lv1
|
||||
|
||||
# Convert raid10 -> striped
|
||||
not _lvconvert striped striped 3 $vg $lv1 256k
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
not _lvconvert striped striped 3 3 $vg $lv1 256k
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Clean up
|
||||
lvremove -y $vg
|
||||
@@ -280,51 +283,51 @@ lvremove -y $vg
_lvcreate raid5 4 5 8M $vg $lv1
aux wait_for_sync $vg $lv1
_invalid_raid5_conversions $vg $lv1
not _lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
not _lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
_lvconvert raid6_ls_6 raid6_ls_6 6 $vg $lv1
_lvconvert raid5_ls raid5_ls 5 $vg $lv1
not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
_lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1
_lvconvert raid5_ls raid5_ls 4 5 $vg $lv1
lvremove -y $vg

_lvcreate raid5_ls 4 5 8M $vg $lv1
aux wait_for_sync $vg $lv1
_invalid_raid5_conversions $vg $lv1
not _lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
not _lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
_lvconvert raid6_ls_6 raid6_ls_6 6 $vg $lv1
_lvconvert raid5_ls raid5_ls 5 $vg $lv1
not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
_lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1
_lvconvert raid5_ls raid5_ls 4 5 $vg $lv1
lvremove -y $vg

_lvcreate raid5_rs 4 5 8M $vg $lv1
aux wait_for_sync $vg $lv1
_invalid_raid5_conversions $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
not _lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
_lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
_lvconvert raid5_rs raid5_rs 5 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
_lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
_lvconvert raid5_rs raid5_rs 4 5 $vg $lv1
lvremove -y $vg

_lvcreate raid5_la 4 5 8M $vg $lv1
aux wait_for_sync $vg $lv1
_invalid_raid5_conversions $vg $lv1
not _lvconvert raid6_ls_6 raid6_ls_6 6 $vg $lv1
not _lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
_lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
_lvconvert raid5_la raid5_la 5 $vg $lv1
not _lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1
not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
_lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
_lvconvert raid5_la raid5_la 4 5 $vg $lv1
lvremove -y $vg

_lvcreate raid5_ra 4 5 8M $vg $lv1
aux wait_for_sync $vg $lv1
_invalid_raid5_conversions $vg $lv1
not _lvconvert raid6_ls_6 raid6_ls_6 6 $vg $lv1
not _lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
not _lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
_lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
_lvconvert raid5_ra raid5_ra 5 $vg $lv1
not _lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1
not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
_lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
_lvconvert raid5_ra raid5_ra 4 5 $vg $lv1
lvremove -y $vg

else
@@ -33,9 +33,9 @@ test_pvmove_resume() {
# next LV on same VG and differetnt PV (we want to test 2 pvmoves per VG)
lvcreate -an -Zn -l30 -n $lv2 $vg "$dev3"

aux delay_dev "$dev4" 0 250
aux delay_dev "$dev4" 0 250 $(get first_extent_sector "$dev4"):
test -e HAVE_DM_DELAY || { lvremove -f $vg; return 0; }
aux delay_dev "$dev5" 0 250
aux delay_dev "$dev5" 0 250 $(get first_extent_sector "$dev5"):

pvmove -i5 "$dev1" "$dev4" &
PVMOVE=$!
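# Note: _lvcreate, _lvconvert and _invalid_raid5_conversions above are helpers
# local to this test script. As a rough, assumed sketch only (not the helper's
# actual definition), a call such as "_lvconvert striped striped 3 $vg $lv1 128k"
# presumably boils down to an lvconvert invocation of this shape:
lvconvert --yes --type striped --stripes 3 --stripesize 128k $vg/$lv1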
88	tools/args.h
@@ -109,10 +109,10 @@ arg(cachemode_ARG, '\0', "cachemode", cachemode_VAL, 0, 0,
"been stored in both the cache pool and on the origin LV.\n"
"While writethrough may be slower for writes, it is more\n"
"resilient if something should happen to a device associated with the\n"
"cache pool LV. With writethrough, all reads are served\n"
"cache pool LV. With \\fBpassthrough\\fP, all reads are served\n"
"from the origin LV (all reads miss the cache) and all writes are\n"
"forwarded to the origin LV; additionally, write hits cause cache\n"
"block invalidates. See lvmcache(7) for more information.\n")
"block invalidates. See \\fBlvmcache\\fP(7) for more information.\n")

arg(cachepool_ARG, '\0', "cachepool", lv_VAL, 0, 0,
"The name of a cache pool LV.\n")
@@ -414,8 +414,15 @@ arg(pooldatasize_ARG, '\0', "pooldatasize", sizemb_VAL, 0, 0, NULL)
arg(poolmetadata_ARG, '\0', "poolmetadata", lv_VAL, 0, 0,
"The name of a an LV to use for storing pool metadata.\n")

arg(poolmetadatasize_ARG, '\0', "poolmetadatasize", sizemb_VAL, 0, 0,
"The size of the pool metadata LV created by the command.\n")
arg(poolmetadatasize_ARG, '\0', "poolmetadatasize", ssizemb_VAL, 0, 0,
"#lvcreate\n"
"#lvconvert\n"
"Specifies the size of the new pool metadata LV.\n"
"#lvresize\n"
"#lvextend\n"
"Specifies the new size of the pool metadata LV.\n"
"The plus prefix \\fB+\\fP can be used, in which case\n"
"the value is added to the current size.\n")

arg(poolmetadataspare_ARG, '\0', "poolmetadataspare", bool_VAL, 0, 0,
"Enable or disable the automatic creation and management of a\n"
@@ -646,7 +653,16 @@ arg(trustcache_ARG, '\0', "trustcache", 0, 0, 0,
"Avoids certain device scanning during command processing. Do not use.\n")

arg(type_ARG, '\0', "type", segtype_VAL, 0, 0,
"Specifies an LV type, or \"segment type\".\n")
"Specifies an LV type, or \"segment type\".\n"
"See usage definitions for specific ways to use these types.\n"
"For more information about redundancy and performance (\\fBraid\\fP<N>, \\fBmirror\\fP, \\fBstriped\\fP, \\fBlinear\\fP) see \\fBlvmraid\\fP(7).\n"
"For thin provisioning (\\fBthin\\fP, \\fBthin-pool\\fP) see \\fBlvmthin\\fP(7).\n"
"For performance caching (\\fBcache\\fP, \\fBcache-pool\\fP) see \\fBlvmcache\\fP(7).\n"
"For copy-on-write snapshots (\\fBsnapshot\\fP) see usage definitions.\n"
"Several commands omit an explicit type option because the type\n"
"is inferred from other options or shortcuts\n"
"(e.g. --stripes, --mirrors, --snapshot, --virtualsize, --thin, --cache).\n"
"Use inferred types with care because it can lead to unexpected results.\n")

arg(unbuffered_ARG, '\0', "unbuffered", 0, 0, 0,
"Produce output immediately without sorting or aligning the columns properly.\n")
@@ -684,7 +700,7 @@ arg(unquoted_ARG, '\0', "unquoted", 0, 0, 0,
"pairs are not quoted.\n")

arg(usepolicies_ARG, '\0', "usepolicies", 0, 0, 0,
"Perform an operation according to the policy configured in lvm.conf.\n"
"Perform an operation according to the policy configured in lvm.conf\n"
"or a profile.\n")

arg(validate_ARG, '\0', "validate", 0, 0, 0,
@@ -798,8 +814,8 @@ arg(activate_ARG, 'a', "activate", activation_VAL, 0, 0,
"if the list is set but empty, no LVs match.\n"
"Autoactivation should be used during system boot to make it possible\n"
"to select which LVs should be automatically activated by the system.\n"
"See lvmlockd(8) for more information about activation options for shared VGs.\n"
"See clvmd(8) for more information about activation options for clustered VGs.\n"
"See lvmlockd(8) for more information about activation options \\fBey\\fP and \\fBsy\\fP for shared VGs.\n"
"See clvmd(8) for more information about activation options \\fBey\\fP, \\fBsy\\fP, \\fBly\\fP and \\fBln\\fP for clustered VGs.\n"
"#lvcreate\n"
"Controls the active state of the new LV.\n"
"\\fBy\\fP makes the LV active, or available.\n"
@@ -958,15 +974,15 @@ arg(stripes_ARG, 'i', "stripes", number_VAL, 0, 0,
"Specifies the number of stripes in a striped LV. This is the number of\n"
"PVs (devices) that a striped LV is spread across. Data that\n"
"appears sequential in the LV is spread across multiple devices in units of\n"
"the stripe size (see --stripesize). This does not apply to\n"
"existing allocated space, only newly allocated space can be striped.\n"
"the stripe size (see --stripesize). This does not change existing\n"
"allocated space, but only applies to space being allocated by the command.\n"
"When creating a RAID 4/5/6 LV, this number does not include the extra\n"
"devices that are required for parity. The largest number depends on\n"
"the RAID type (raid0: 64, raid10: 32, raid4/5: 63, raid6: 62.)\n"
"When unspecified, the default depends on the RAID type\n"
"the RAID type (raid0: 64, raid10: 32, raid4/5: 63, raid6: 62), and\n"
"when unspecified, the default depends on the RAID type\n"
"(raid0: 2, raid10: 4, raid4/5: 3, raid6: 5.)\n"
"When unspecified, to stripe across all PVs of the VG,\n"
"set lvm.conf allocation/raid_stripe_all_devices=1.\n")
"To stripe a new raid LV across all PVs by default,\n"
"see lvm.conf allocation/raid_stripe_all_devices.\n")

arg(stripesize_ARG, 'I', "stripesize", sizekb_VAL, 0, 0,
"The amount of data that is written to one device before\n"
@@ -978,7 +994,7 @@ arg(logicalvolume_ARG, 'l', "logicalvolume", uint32_VAL, 0, 0,
arg(maxlogicalvolumes_ARG, 'l', "maxlogicalvolumes", uint32_VAL, 0, 0,
"Sets the maximum number of LVs allowed in a VG.\n")

arg(extents_ARG, 'l', "extents", numsignedper_VAL, 0, 0,
arg(extents_ARG, 'l', "extents", extents_VAL, 0, 0,
"#lvcreate\n"
"Specifies the size of the new LV in logical extents.\n"
"The --size and --extents options are alternate methods of specifying size.\n"
@@ -1013,10 +1029,9 @@ arg(extents_ARG, 'l', "extents", numsignedper_VAL, 0, 0,
"When expressed as a percentage, the size defines an upper limit for the\n"
"number of logical extents in the new LV. The precise number of logical\n"
"extents in the new LV is not determined until the command has completed.\n"
"The plus prefix \\fB+\\fP can be used, in which case\n"
"the value is added to the current size,\n"
"or the minus prefix \\fB-\\fP can be used, in which case\n"
"the value is subtracted from the current size.\n")
"The plus \\fB+\\fP or minus \\fB-\\fP prefix can be used, in which case\n"
"the value is not an absolute size, but is an amount added or subtracted\n"
"relative to the current size.\n")

arg(list_ARG, 'l', "list", 0, 0, 0,
"#lvmconfig\n"
@@ -1033,18 +1048,20 @@ arg(list_ARG, 'l', "list", 0, 0, 0,
arg(lvmpartition_ARG, 'l', "lvmpartition", 0, 0, 0,
"Only report PVs.\n")

arg(size_ARG, 'L', "size", sizemb_VAL, 0, 0,
/*
* FIXME: for lvcreate, size only accepts absolute values, no +|-,
* for lvresize, size can relative +|-, for lvreduce, size
* can be relative -, and for lvextend, size can be relative +.
* Should we define separate val enums for each of those cases,
* and at the start of the command, change the val type for
* size_ARG? The same for extents_ARG.
*/
arg(size_ARG, 'L', "size", ssizemb_VAL, 0, 0,
"#lvcreate\n"
"Specifies the size of the new LV.\n"
"The --size and --extents options are alternate methods of specifying size.\n"
"The total number of physical extents used will be\n"
"greater when redundant data is needed for RAID levels.\n"
"A suffix can be chosen from: \\fBbBsSkKmMgGtTpPeE\\fP.\n"
"All units are base two values, regardless of letter capitalization:\n"
"b|B is bytes, s|S is sectors of 512 bytes,\n"
"k|K is kilobytes, m|M is megabytes,\n"
"g|G is gigabytes, t|T is terabytes,\n"
"p|P is petabytes, e|E is exabytes.\n"
"#lvreduce\n"
"#lvextend\n"
"#lvresize\n"
@@ -1052,12 +1069,6 @@ arg(size_ARG, 'L', "size", sizemb_VAL, 0, 0,
"The --size and --extents options are alternate methods of specifying size.\n"
"The total number of physical extents used will be\n"
"greater when redundant data is needed for RAID levels.\n"
"A suffix can be chosen from: \\fBbBsSkKmMgGtTpPeE\\fP.\n"
"All units are base two values, regardless of letter capitalization:\n"
"b|B is bytes, s|S is sectors of 512 bytes,\n"
"k|K is kilobytes, m|M is megabytes,\n"
"g|G is gigabytes, t|T is terabytes,\n"
"p|P is petabytes, e|E is exabytes.\n"
"The plus prefix \\fB+\\fP can be used, in which case\n"
"the value is added to the current size,\n"
"or the minus prefix \\fB-\\fP can be used, in which case\n"
@@ -1095,7 +1106,7 @@ arg(maps_ARG, 'm', "maps", 0, 0, 0,

/* FIXME: should the unused mirrors option be removed from lvextend? */

arg(mirrors_ARG, 'm', "mirrors", numsigned_VAL, 0, 0,
arg(mirrors_ARG, 'm', "mirrors", snumber_VAL, 0, 0,
"#lvcreate\n"
"Specifies the number of mirror images in addition to the original LV\n"
"image, e.g. --mirrors 1 means there are two images of the data, the\n"
@@ -1221,7 +1232,9 @@ arg(resizefs_ARG, 'r', "resizefs", 0, 0, 0,
arg(reset_ARG, 'R', "reset", 0, 0, 0, NULL)

arg(regionsize_ARG, 'R', "regionsize", regionsize_VAL, 0, 0,
"Size of each raid or mirror synchronization region.\n")
"Size of each raid or mirror synchronization region.\n"
"lvm.conf activation/raid_region_size can be used to\n"
"configure a default.\n")

arg(physicalextentsize_ARG, 's', "physicalextentsize", sizemb_VAL, 0, 0,
"#vgcreate\n"
@@ -1286,9 +1299,10 @@ arg(stdin_ARG, 's', "stdin", 0, 0, 0, NULL)

arg(select_ARG, 'S', "select", string_VAL, ARG_GROUPABLE, 0,
"Select objects for processing and reporting based on specified criteria.\n"
"The criteria syntax is described in lvmreport(7) under Selection.\n"
"For reporting commands, display rows that match the criteria.\n"
"All rows can be displayed with an additional \"selected\" field (-o selected)\n"
"The criteria syntax is described by \\fB--select help\\fP and \\fBlvmreport\\fP(7).\n"
"For reporting commands, one row is displayed for each object matching the criteria.\n"
"See \\fB--options help\\fP for selectable object fields.\n"
"Rows can be displayed with an additional \"selected\" field (-o selected)\n"
"showing 1 if the row matches the selection and 0 otherwise.\n"
"For non-reporting commands which process LVM entities, the selection is\n"
"used to choose items to process.\n")
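# The sizemb_VAL -> ssizemb_VAL switches above are what permit a signed,
# relative size where the option text now mentions the plus/minus prefix.
# A minimal usage sketch with hypothetical VG/LV names:
lvextend --size +1G vg0/lv0                 # add 1 GiB to the current size
lvreduce --size -512M vg0/lv0               # subtract 512 MiB from the current size
lvextend --poolmetadatasize +128M vg0/pool0 # grow the thin pool metadata LV by 128 MiB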
@@ -190,20 +190,6 @@ OO_ALL: --commandprofile String, --config String, --debug,
--driverloaded Bool, --help, --longhelp, --profile String, --quiet,
--verbose, --version, --yes, --test

#
# This list only applies to printing the usage text.
# These common options are displayed once at the end of
# a given command's usage. This is done to avoid excessive
# repetition of common options, which may obscure the more
# interesting and relevant parts of a common prototype.
# This definition is *only* used when generating the command
# usage strings, and is the basis for the division between
# the "usage" and "usage_common" strings. This OO defn does
# not relate to which optional opts are accepted by commands,
# which is defined by the OO line.
#
OO_USAGE_COMMON: OO_ALL, --force, --noudevsync

#
# options for pvs, lvs, vgs, fullreport
#
@@ -321,9 +307,12 @@ RULE: all not LV_thinpool LV_cachepool
OO_LVCONVERT_RAID: --mirrors SNumber, --stripes_long Number,
--stripesize SizeKB, --regionsize RegionSize, --interval Number

OO_LVCONVERT_POOL: --poolmetadata LV, --poolmetadatasize SizeMB,
OO_LVCONVERT_POOL: --poolmetadata LV, --poolmetadatasize SSizeMB,
--poolmetadataspare Bool, --readahead Readahead, --chunksize SizeKB

OO_LVCONVERT_CACHE: --cachemode CacheMode, --cachepolicy String,
--cachesettings String, --zero Bool

OO_LVCONVERT: --alloc Alloc, --background, --force, --noudevsync

---

@@ -349,23 +338,21 @@ lvconvert --type mirror LV
OO: OO_LVCONVERT_RAID, OO_LVCONVERT, --mirrorlog MirrorLog
OP: PV ...
ID: lvconvert_raid_types
DESC: Convert LV to type mirror (also see type raid1).
DESC: Convert LV to type mirror (also see type raid1),
DESC: (also see lvconvert --mirrors).
RULE: all not lv_is_locked lv_is_pvmove
FLAGS: SECONDARY_SYNTAX

# When LV is already raid, this changes the raid layout
# (changing layout of raid0 and raid1 not allowed.)

lvconvert --type raid LV
OO: OO_LVCONVERT_RAID, OO_LVCONVERT
OP: PV ...
ID: lvconvert_raid_types
DESC: Convert LV to raid.
DESC: Convert LV to raid or change raid layout.
RULE: all not lv_is_locked lv_is_pvmove

lvconvert --type raid LV_raid
OO: OO_LVCONVERT_RAID, OO_LVCONVERT
ID: lvconvert_raid_types
DESC: Convert raid LV to different layout algorithm.
RULE: all not lv_is_locked lv_is_pvmove
RULE: all not LV_raid0 LV_raid1

lvconvert --mirrors SNumber LV
OO: OO_LVCONVERT_RAID, OO_LVCONVERT, --mirrorlog MirrorLog
OP: PV ...
@@ -373,8 +360,8 @@ ID: lvconvert_raid_types
DESC: Convert LV to raid1 or mirror, or change number of mirror images.
RULE: all not lv_is_locked lv_is_pvmove

lvconvert --stripes_long SNumber LV_raid
OO: OO_LVCONVERT_RAID, OO_LVCONVERT
lvconvert --stripes_long Number LV_raid
OO: OO_LVCONVERT, --interval Number, --regionsize RegionSize, --stripesize SizeKB
OP: PV ...
ID: lvconvert_raid_types
DESC: Convert raid LV to change number of stripe images.
@@ -382,7 +369,7 @@ RULE: all not lv_is_locked lv_is_pvmove
RULE: all not LV_raid0 LV_raid1

lvconvert --stripesize SizeKB LV_raid
OO: OO_LVCONVERT_RAID, OO_LVCONVERT
OO: OO_LVCONVERT, --interval Number, --regionsize RegionSize
ID: lvconvert_raid_types
DESC: Convert raid LV to change the stripe size.
RULE: all not lv_is_locked lv_is_pvmove
@@ -394,13 +381,7 @@ ID: lvconvert_change_region_size
DESC: Change the region size of an LV.
RULE: all not lv_is_locked lv_is_pvmove
RULE: all not LV_raid0

lvconvert LV_mirror_raid
OO: OO_LVCONVERT
ID: lvconvert_raid_types
DESC: Remove out-of-place reshape space
RULE: all not lv_is_locked lv_is_pvmove
RULE: all not LV_raid0 LV_raid1
FLAGS: SECONDARY_SYNTAX

---

@@ -433,6 +414,7 @@ OP: PV ...
ID: lvconvert_change_mirrorlog
DESC: Change the type of mirror log used by a mirror LV.
RULE: all not lv_is_locked lv_is_pvmove
FLAGS: SECONDARY_SYNTAX

---

@@ -450,8 +432,8 @@ RULE: all not lv_is_locked
lvconvert --thin --thinpool LV LV_linear_striped_raid_cache
OO: --type thin, --originname LV_new, --zero Bool, OO_LVCONVERT_POOL, OO_LVCONVERT
ID: lvconvert_to_thin_with_external
DESC: Convert LV to a thin LV, using the original LV as an external origin.
DESC: (variant, infers --type thin).
DESC: Convert LV to a thin LV, using the original LV as an external origin
DESC: (infers --type thin).
FLAGS: SECONDARY_SYNTAX
RULE: all and lv_is_visible
RULE: all not lv_is_locked
@@ -459,20 +441,18 @@ RULE: all not lv_is_locked
---

lvconvert --type cache --cachepool LV LV_linear_striped_raid_thinpool
OO: --cache, --cachemode CacheMode, --cachepolicy String,
--cachesettings String, --zero Bool, OO_LVCONVERT_POOL, OO_LVCONVERT
OO: --cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
ID: lvconvert_to_cache_vol
DESC: Convert LV to type cache.
RULE: all and lv_is_visible

# alternate form of lvconvert --type cache
lvconvert --cache --cachepool LV LV_linear_striped_raid_thinpool
OO: --type cache, --cachemode CacheMode, --cachepolicy String,
--cachesettings String, --zero Bool, OO_LVCONVERT_POOL, OO_LVCONVERT
OO: --type cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
ID: lvconvert_to_cache_vol
DESC: Convert LV to type cache (variant, infers --type cache).
FLAGS: SECONDARY_SYNTAX
DESC: Convert LV to type cache (infers --type cache).
RULE: all and lv_is_visible
FLAGS: SECONDARY_SYNTAX

---

@@ -519,8 +499,7 @@ FLAGS: PREVIOUS_SYNTAX
---

lvconvert --type cache-pool LV_linear_striped_raid
OO: OO_LVCONVERT_POOL, OO_LVCONVERT,
--cachemode CacheMode, --cachepolicy String, --cachesettings String
OO: OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
OP: PV ...
ID: lvconvert_to_cachepool
DESC: Convert LV to type cache-pool.
@@ -548,8 +527,7 @@ DESC: Convert LV to type cache-pool.
# of creating a pool or swapping metadata should be used.

lvconvert --cachepool LV_linear_striped_raid_cachepool
OO: --type cache-pool, OO_LVCONVERT_POOL, OO_LVCONVERT,
--cachemode CacheMode, --cachepolicy String, --cachesettings String
OO: --type cache-pool, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
OP: PV ...
ID: lvconvert_to_cachepool_or_swap_metadata
DESC: Convert LV to type cache-pool (variant, use --type cache-pool).
@@ -569,6 +547,7 @@ lvconvert --uncache LV_cache_thinpool
OO: OO_LVCONVERT
ID: lvconvert_split_and_remove_cachepool
DESC: Separate and delete the cache pool from a cache LV.
FLAGS: SECONDARY_SYNTAX

---

@@ -576,6 +555,7 @@ lvconvert --swapmetadata --poolmetadata LV LV_thinpool_cachepool
OO: --chunksize SizeKB, OO_LVCONVERT
ID: lvconvert_swap_pool_metadata
DESC: Swap metadata LV in a thin pool or cache pool (for repair only).
FLAGS: SECONDARY_SYNTAX

---

@@ -623,6 +603,7 @@ OO: OO_LVCONVERT
ID: lvconvert_split_cow_snapshot
DESC: Separate a COW snapshot from its origin LV.
RULE: all not lv_is_locked lv_is_pvmove lv_is_origin lv_is_external_origin lv_is_merging_cow
FLAGS: SECONDARY_SYNTAX

---

@@ -640,9 +621,9 @@ OO: --snapshot, --chunksize SizeKB, --zero Bool, OO_LVCONVERT
ID: lvconvert_combine_split_snapshot
DESC: Combine a former COW snapshot (second arg) with a former
DESC: origin LV (first arg) to reverse a splitsnapshot command.
FLAGS: SECONDARY_SYNTAX
RULE: all not lv_is_locked lv_is_pvmove
RULE: all and lv_is_visible
FLAGS: SECONDARY_SYNTAX

lvconvert --snapshot LV LV_linear
OO: --type snapshot, --chunksize SizeKB, --zero Bool, OO_LVCONVERT
@@ -651,6 +632,7 @@ DESC: Combine a former COW snapshot (second arg) with a former
DESC: origin LV (first arg) to reverse a splitsnapshot command.
RULE: all not lv_is_locked lv_is_pvmove
RULE: all and lv_is_visible
FLAGS: SECONDARY_SYNTAX

---

@@ -683,7 +665,7 @@ lvconvert --replace PV LV_raid
OO: OO_LVCONVERT
OP: PV ...
ID: lvconvert_replace_pv
DESC: Replace specific PV(s) in a raid* LV with another PV.
DESC: Replace specific PV(s) in a raid LV with another PV.
RULE: all not lv_is_locked lv_is_pvmove

---
@@ -691,7 +673,7 @@ RULE: all not lv_is_locked lv_is_pvmove
# This command just (re)starts the polling process on the LV
# to continue a previous conversion.

lvconvert --startpoll LV_mirror
lvconvert --startpoll LV_mirror_raid
OO: OO_LVCONVERT
ID: lvconvert_start_poll
DESC: Poll LV to continue conversion.
@@ -699,10 +681,10 @@ RULE: all and lv_is_converting

# alternate form of lvconvert --startpoll, this is only kept
# for compat since this was how it used to be done.
lvconvert LV_mirror
lvconvert LV_mirror_raid
OO: OO_LVCONVERT
ID: lvconvert_start_poll
DESC: Poll LV to continue conversion.
DESC: Poll LV to continue conversion (also see --startpoll).
RULE: all and lv_is_converting
FLAGS: SECONDARY_SYNTAX

@@ -717,9 +699,10 @@ OO_LVCREATE: --addtag Tag, --alloc Alloc, --autobackup Bool, --activate Active,
--reportformat ReportFmt, --setactivationskip Bool, --wipesignatures Bool,
--zero Bool

OO_LVCREATE_CACHE: --cachemode CacheMode, --cachepolicy String, --cachesettings String
OO_LVCREATE_CACHE: --cachemode CacheMode, --cachepolicy String, --cachesettings String,
--chunksize SizeKB

OO_LVCREATE_POOL: --poolmetadatasize SizeMB, --poolmetadataspare Bool, --chunksize SizeKB
OO_LVCREATE_POOL: --poolmetadatasize SSizeMB, --poolmetadataspare Bool, --chunksize SizeKB

OO_LVCREATE_THIN: --discards Discards, --errorwhenfull Bool

@@ -728,7 +711,7 @@ OO_LVCREATE_RAID: --mirrors SNumber, --stripes Number, --stripesize SizeKB,

---

lvcreate --type error --size SizeMB VG
lvcreate --type error --size SSizeMB VG
OO: OO_LVCREATE
ID: lvcreate_error_vol
DESC: Create an LV that returns errors when used.
@@ -736,7 +719,7 @@ FLAGS: SECONDARY_SYNTAX

---

lvcreate --type zero --size SizeMB VG
lvcreate --type zero --size SSizeMB VG
OO: OO_LVCREATE
ID: lvcreate_zero_vol
DESC: Create an LV that returns zeros when read.
@@ -744,7 +727,7 @@ FLAGS: SECONDARY_SYNTAX

---

lvcreate --type linear --size SizeMB VG
lvcreate --type linear --size SSizeMB VG
OO: OO_LVCREATE
OP: PV ...
IO: --mirrors 0, --stripes 1
@@ -752,27 +735,23 @@ ID: lvcreate_linear
DESC: Create a linear LV.
FLAGS: SECONDARY_SYNTAX

# This is the one place we mention the optional --name
# because it's the most common case and may be confusing
# to people to not see the name parameter.

lvcreate --size SizeMB VG
lvcreate --size SSizeMB VG
OO: --type linear, OO_LVCREATE
OP: PV ...
IO: --mirrors 0, --stripes 1
ID: lvcreate_linear
DESC: Create a linear LV (default --type linear).
DESC: When --name is omitted, the name is generated.
DESC: Create a linear LV.

---

lvcreate --type striped --size SizeMB VG
lvcreate --type striped --size SSizeMB VG
OO: --stripes Number, --stripesize SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_striped
DESC: Create a striped LV.
DESC: Create a striped LV (also see lvcreate --stripes).
FLAGS: SECONDARY_SYNTAX

lvcreate --stripes Number --size SizeMB VG
lvcreate --stripes Number --size SSizeMB VG
OO: --type striped, --stripesize SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_striped
@@ -780,72 +759,73 @@ DESC: Create a striped LV (infers --type striped).

---

lvcreate --type mirror --size SizeMB VG
lvcreate --type mirror --size SSizeMB VG
OO: --mirrors SNumber, --mirrorlog MirrorLog, --regionsize RegionSize, --stripes Number, OO_LVCREATE
OP: PV ...
ID: lvcreate_mirror
DESC: Create a mirror LV (also see --type raid1).
FLAGS: SECONDARY_SYNTAX

# alternate form of lvcreate --type raid1|mirror
lvcreate --mirrors SNumber --size SizeMB VG
lvcreate --mirrors SNumber --size SSizeMB VG
OO: --type raid1, --type mirror, --mirrorlog MirrorLog, --stripes Number, OO_LVCREATE_RAID, OO_LVCREATE
OP: PV ...
ID: lvcreate_mirror_or_raid1
DESC: Create a raid1 or mirror LV (variant, infers --type raid1|mirror).
DESC: Create a raid1 or mirror LV (infers --type raid1|mirror).

---

lvcreate --type raid --size SizeMB VG
lvcreate --type raid --size SSizeMB VG
OO: OO_LVCREATE_RAID, OO_LVCREATE
OP: PV ...
ID: lvcreate_raid_any
DESC: Create a raid LV (a specific raid level must be used, e.g. raid1.)
DESC: Create a raid LV (a specific raid level must be used, e.g. raid1).

---

# FIXME: the LV created by these commands actually has type linear or striped,
# The LV created by these commands actually has type linear or striped,
# not snapshot as specified by the command. If LVs never have type
# snapshot, perhaps "snapshot" should not be considered an LV type, but
# another new LV property?
#
# This is the one case where the --type variant is the unpreferred,
# secondary syntax, because the LV type is not actually "snapshot".

# alternate form of lvcreate --snapshot
lvcreate --type snapshot --size SizeMB LV
lvcreate --type snapshot --size SSizeMB LV
OO: --snapshot, --stripes Number, --stripesize SizeKB,
--chunksize SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_cow_snapshot
DESC: Create a COW snapshot LV from an origin LV.
DESC: Create a COW snapshot LV of an origin LV
DESC: (also see --snapshot).
FLAGS: SECONDARY_SYNTAX

lvcreate --snapshot --size SizeMB LV
lvcreate --snapshot --size SSizeMB LV
OO: --type snapshot, --stripes Number, --stripesize SizeKB,
--chunksize SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_cow_snapshot
DESC: Create a COW snapshot LV from an origin LV.
DESC: Create a COW snapshot LV of an origin LV.

---

# alternate form of lvcreate --snapshot
lvcreate --type snapshot --size SizeMB --virtualsize SizeMB VG
lvcreate --type snapshot --size SSizeMB --virtualsize SizeMB VG
OO: --snapshot, --chunksize SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_cow_snapshot_with_virtual_origin
DESC: Create a sparse COW snapshot LV of a virtual origin LV
DESC: (also see --snapshot).
FLAGS: SECONDARY_SYNTAX

lvcreate --snapshot --size SSizeMB --virtualsize SizeMB VG
OO: --type snapshot, --chunksize SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_cow_snapshot_with_virtual_origin
DESC: Create a sparse COW snapshot LV of a virtual origin LV.
FLAGS: SECONDARY_SYNTAX

lvcreate --snapshot --size SizeMB --virtualsize SizeMB VG
OO: --type snapshot, --chunksize SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_cow_snapshot_with_virtual_origin
DESC: Create a sparse COW snapshot LV of a virtual origin LV.

---

lvcreate --type thin-pool --size SizeMB VG
lvcreate --type thin-pool --size SSizeMB VG
OO: --thinpool LV_new, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -854,24 +834,24 @@ ID: lvcreate_thinpool
DESC: Create a thin pool.

# alternate form of lvcreate --type thin-pool
lvcreate --thin --size SizeMB VG
lvcreate --thin --size SSizeMB VG
OO: --type thin-pool, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
IO: --mirrors 0
ID: lvcreate_thinpool
DESC: Create a thin pool (variant, infers --type thin-pool).
DESC: Create a thin pool (infers --type thin-pool).
FLAGS: SECONDARY_SYNTAX

# alternate form of lvcreate --type thin-pool
lvcreate --size SizeMB --thinpool LV_new VG
lvcreate --size SSizeMB --thinpool LV_new VG
OO: --thin, --type thin-pool, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
IO: --mirrors 0
ID: lvcreate_thinpool
DESC: Create a thin pool named by the --thinpool arg
DESC: (variant, infers --type thin-pool).
DESC: (infers --type thin-pool).
FLAGS: SECONDARY_SYNTAX

---
@@ -881,14 +861,14 @@ FLAGS: SECONDARY_SYNTAX
# still needs to be listed as an optional addition to
# --type cache-pool.

lvcreate --type cache-pool --size SizeMB VG
lvcreate --type cache-pool --size SSizeMB VG
OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE
OP: PV ...
ID: lvcreate_cachepool
DESC: Create a cache pool.

# alternate form of lvcreate --type cache-pool
lvcreate --type cache-pool --size SizeMB --cachepool LV_new VG
lvcreate --type cache-pool --size SSizeMB --cachepool LV_new VG
OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE
OP: PV ...
ID: lvcreate_cachepool
@@ -903,6 +883,7 @@ OO: --thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE
IO: --mirrors 0
ID: lvcreate_thin_vol
DESC: Create a thin LV in a thin pool.
FLAGS: SECONDARY_SYNTAX

# alternate form of lvcreate --type thin
lvcreate --type thin --virtualsize SizeMB LV_thinpool
@@ -921,8 +902,7 @@ lvcreate --virtualsize SizeMB --thinpool LV_thinpool VG
OO: --type thin, --thin, OO_LVCREATE_THIN, OO_LVCREATE
IO: --mirrors 0
ID: lvcreate_thin_vol
DESC: Create a thin LV in a thin pool (variant, infers --type thin).
FLAGS: SECONDARY_SYNTAX
DESC: Create a thin LV in a thin pool (infers --type thin).

# alternate form of lvcreate --type thin
lvcreate --virtualsize SizeMB LV_thinpool
@@ -941,6 +921,7 @@ OO: --thin, OO_LVCREATE_THIN, OO_LVCREATE
IO: --mirrors 0
ID: lvcreate_thin_snapshot
DESC: Create a thin LV that is a snapshot of an existing thin LV.
FLAGS: SECONDARY_SYNTAX

# alternate form of lvcreate --type thin
lvcreate --thin LV_thin
@@ -972,6 +953,7 @@ IO: --mirrors 0
ID: lvcreate_thin_snapshot_of_external
DESC: Create a thin LV that is a snapshot of an external origin LV
DESC: (infers --type thin).
FLAGS: SECONDARY_SYNTAX

---

@@ -991,7 +973,7 @@ DESC: (infers --type thin).
# definition. Note that when LV_new is used in arg pos 1,
# it needs to include a VG name, i.e. VG/LV_new

lvcreate --type thin --virtualsize SizeMB --size SizeMB --thinpool LV_new
lvcreate --type thin --virtualsize SizeMB --size SSizeMB --thinpool LV_new
OO: --thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1001,7 +983,7 @@ DESC: Create a thin LV, first creating a thin pool for it,
DESC: where the new thin pool is named by the --thinpool arg.

# alternate form of lvcreate --type thin
lvcreate --thin --virtualsize SizeMB --size SizeMB --thinpool LV_new
lvcreate --thin --virtualsize SizeMB --size SSizeMB --thinpool LV_new
OO: --type thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1013,7 +995,7 @@ DESC: (variant, infers --type thin).
FLAGS: SECONDARY_SYNTAX

# alternate form of lvcreate --type thin
lvcreate --type thin --virtualsize SizeMB --size SizeMB LV_new|VG
lvcreate --type thin --virtualsize SizeMB --size SSizeMB LV_new|VG
OO: --thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1026,7 +1008,7 @@ DESC: arg is a VG name.
FLAGS: SECONDARY_SYNTAX

# alternate form of lvcreate --type thin
lvcreate --thin --virtualsize SizeMB --size SizeMB LV_new|VG
lvcreate --thin --virtualsize SizeMB --size SSizeMB LV_new|VG
OO: --type thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1040,7 +1022,7 @@ FLAGS: SECONDARY_SYNTAX

---

lvcreate --size SizeMB --virtualsize SizeMB VG
lvcreate --size SSizeMB --virtualsize SizeMB VG
OO: --type thin, --type snapshot, --thin, --snapshot, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1060,7 +1042,7 @@ FLAGS: SECONDARY_SYNTAX
# but here it applies to creating the new origin that
# is used to create the cache LV

lvcreate --type cache --size SizeMB --cachepool LV_cachepool VG
lvcreate --type cache --size SSizeMB --cachepool LV_cachepool VG
OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1070,7 +1052,7 @@ DESC: then combining it with the existing cache pool named
DESC: by the --cachepool arg.

# alternate form of lvcreate --type cache
lvcreate --size SizeMB --cachepool LV_cachepool VG
lvcreate --size SSizeMB --cachepool LV_cachepool VG
OO: --type cache, --cache, OO_LVCREATE_CACHE, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1081,7 +1063,7 @@ DESC: by the --cachepool arg (variant, infers --type cache).
FLAGS: SECONDARY_SYNTAX

# alternate form of lvcreate --type cache
lvcreate --type cache --size SizeMB LV_cachepool
lvcreate --type cache --size SSizeMB LV_cachepool
OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1100,7 +1082,7 @@ FLAGS: SECONDARY_SYNTAX
# an already complicated command above.
#
# # alternate form for lvcreate_cache_vol_with_new_origin
# lvcreate --cache --size SizeMB LV_cachepool
# lvcreate --cache --size SSizeMB LV_cachepool
# OO: --type cache, --cache, OO_LVCREATE_CACHE, OO_LVCREATE, --stripes Number, --stripesize SizeKB
# OP: PV ...
# ID: lvcreate_cache_vol_with_new_origin
@@ -1112,7 +1094,7 @@ FLAGS: SECONDARY_SYNTAX
# 2. If LV is not a cachepool, then it's a disguised lvconvert.
#
# # FIXME: this should be done by lvconvert, and this command removed
# lvcreate --type cache --size SizeMB LV
# lvcreate --type cache --size SSizeMB LV
# OO: OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE
# OP: PV ...
# ID: lvcreate_convert_to_cache_vol_with_cachepool
@@ -1129,7 +1111,7 @@ FLAGS: SECONDARY_SYNTAX

# def1: alternate form of lvcreate --type cache, or
# def2: it should be done by lvconvert.
lvcreate --cache --size SizeMB LV
lvcreate --cache --size SSizeMB LV
OO: OO_LVCREATE_CACHE, OO_LVCREATE_POOL, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
@@ -1163,10 +1145,10 @@ ID: lvdisplay_general

# --extents is not specified; it's an automatic alternative for --size

lvextend --size SizeMB LV
lvextend --size SSizeMB LV
OO: --alloc Alloc, --autobackup Bool, --force, --mirrors SNumber,
--nofsck, --nosync, --noudevsync, --reportformat ReportFmt, --resizefs,
--stripes Number, --stripesize SizeKB, --poolmetadatasize SizeMB,
--stripes Number, --stripesize SizeKB, --poolmetadatasize SSizeMB,
--type SegType
OP: PV ...
ID: lvextend_by_size
@@ -1179,9 +1161,8 @@ OO: --alloc Alloc, --autobackup Bool, --force, --mirrors SNumber,
--type SegType
ID: lvextend_by_pv
DESC: Extend an LV by specified PV extents.
FLAGS: SECONDARY_SYNTAX

lvextend --poolmetadatasize SizeMB LV_thinpool
lvextend --poolmetadatasize SSizeMB LV_thinpool
OO: --alloc Alloc, --autobackup Bool, --force, --mirrors SNumber,
--nofsck, --nosync, --noudevsync,
--reportformat ReportFmt, --stripes Number, --stripesize SizeKB,
@@ -1208,7 +1189,7 @@ ID: lvmconfig_general

---

lvreduce --size SizeMB LV
lvreduce --size SSizeMB LV
OO: --autobackup Bool, --force, --nofsck, --noudevsync,
--reportformat ReportFmt, --resizefs
ID: lvreduce_general
@@ -1236,10 +1217,10 @@ ID: lvrename_lv_lv
# value can be checked to match the existing type; using it doesn't
# currently enable any different behavior.

lvresize --size SizeMB LV
lvresize --size SSizeMB LV
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync, --reportformat ReportFmt, --resizefs,
--stripes Number, --stripesize SizeKB, --poolmetadatasize SizeMB,
--stripes Number, --stripesize SizeKB, --poolmetadatasize SSizeMB,
--type SegType
OP: PV ...
ID: lvresize_by_size
@@ -1252,9 +1233,8 @@ OO: --alloc Alloc, --autobackup Bool, --force,
--type SegType
ID: lvresize_by_pv
DESC: Resize an LV by specified PV extents.
FLAGS: SECONDARY_SYNTAX

lvresize --poolmetadatasize SizeMB LV_thinpool
lvresize --poolmetadatasize SSizeMB LV_thinpool
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync,
--reportformat ReportFmt, --stripes Number, --stripesize SizeKB,
@@ -1530,7 +1510,6 @@ vgexport --all
OO: OO_VGEXPORT
ID: vgexport_all
DESC: Export all VGs.
FLAGS: SECONDARY_SYNTAX

---

@@ -1654,14 +1633,12 @@ config
OO: OO_CONFIG
OP: String ...
ID: lvmconfig_general
FLAGS: SECONDARY_SYNTAX

# use lvmconfig
dumpconfig
OO: OO_CONFIG
OP: String ...
ID: lvmconfig_general
FLAGS: SECONDARY_SYNTAX

devtypes
OO: --aligned, --binary, --nameprefixes, --noheadings,
@@ -1695,7 +1672,6 @@ ID: version_general
# deprecated
pvdata
ID: pvdata_general
FLAGS: SECONDARY_SYNTAX

segtypes
ID: segtypes_general
@@ -1709,22 +1685,18 @@ ID: tags_general
# deprecated
lvmchange
ID: lvmchange_general
FLAGS: SECONDARY_SYNTAX

# deprecated
lvmdiskscan
OO: --lvmpartition, --readonly
ID: lvmdiskscan_general
FLAGS: SECONDARY_SYNTAX

# deprecated
lvmsadc
ID: lvmsadc_general
FLAGS: SECONDARY_SYNTAX

# deprecated
lvmsar
OO: --full, --stdin
ID: lvmsar_general
FLAGS: SECONDARY_SYNTAX
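# As a quick illustration of the specific lvconvert syntaxes defined above
# (hypothetical VG/LV names):
lvconvert --startpoll vg0/lv_mirror                          # resume polling an unfinished conversion
lvconvert --swapmetadata --poolmetadata vg0/new_meta vg0/pool0  # swap a pool's metadata LV (repair workflow)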
957	tools/command.c
File diff suppressed because it is too large
@@ -213,7 +213,11 @@ struct command {

int define_commands(char *run_name);
int command_id_to_enum(const char *str);
void print_usage(struct command *cmd);
void print_usage_common(struct command_name *cname, struct command *cmd);
void print_usage(struct command *cmd, int longhelp, int desc_first);
void print_usage_common_cmd(struct command_name *cname, struct command *cmd);
void print_usage_common_lvm(struct command_name *cname, struct command *cmd);
void print_usage_notes(struct command_name *cname, struct command *cmd);
void factor_common_options(void);
int command_has_alternate_extents(const char *name);

#endif

@@ -740,12 +740,12 @@ static int _lvchange_writemostly(struct logical_volume *lv)
struct lv_segment *raid_seg = first_seg(lv);

/*
* Prohibit on synchronization.
* Prohibit writebehind and writebehind during synchronization.
*
* FIXME: we can do better once we can distingush between
* an initial sync after a linear -> raid1 upconversion
* and any later additions of legs, requested resyncs
* via lvchange or leg repairs/replacements.
* an initial sync after a linear -> raid1 upconversion
* and any later additions of legs, requested resyncs
* via lvchange or leg repairs/replacements.
*/
if (!lv_raid_in_sync(lv)) {
log_error("Unable to change write%s on %s while it is not in-sync.",

@@ -1401,6 +1401,8 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
lp->stripes = 0;
if (!arg_is_set(cmd, type_ARG))
lp->segtype = NULL;
if (!arg_is_set(cmd, regionsize_ARG))
lp->region_size = 0;

if (!lv_raid_convert(lv, lp->segtype,
lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,

@@ -3666,8 +3668,9 @@ static int _lvconvert_combine_split_snapshot_single(struct cmd_context *cmd,
int lvconvert_combine_split_snapshot_cmd(struct cmd_context *cmd, int argc, char **argv)
{
const char *vgname = NULL;
const char *lvname1;
const char *lvname2;
const char *lvname1_orig;
const char *lvname2_orig;
const char *lvname1_split;
char *vglv;
int vglv_sz;

@@ -3685,20 +3688,25 @@ int lvconvert_combine_split_snapshot_cmd(struct cmd_context *cmd, int argc, char
* This is the only instance in all commands.
*/

lvname1 = cmd->position_argv[0];
lvname2 = cmd->position_argv[1];
lvname1_orig = cmd->position_argv[0];
lvname2_orig = cmd->position_argv[1];

if (strstr("/", lvname1) && !strstr("/", lvname2) && !getenv("LVM_VG_NAME")) {
if (!validate_lvname_param(cmd, &vgname, &lvname1))
if (strchr(lvname1_orig, '/') && !strchr(lvname2_orig, '/') && !getenv("LVM_VG_NAME")) {
if (!(lvname1_split = dm_pool_strdup(cmd->mem, lvname1_orig)))
return_ECMD_FAILED;

vglv_sz = strlen(vgname) + strlen(lvname2) + 2;
if (!validate_lvname_param(cmd, &vgname, &lvname1_split))
return_ECMD_FAILED;

vglv_sz = strlen(vgname) + strlen(lvname2_orig) + 2;
if (!(vglv = dm_pool_alloc(cmd->mem, vglv_sz)) ||
dm_snprintf(vglv, vglv_sz, "%s/%s", vgname, lvname2) < 0) {
dm_snprintf(vglv, vglv_sz, "%s/%s", vgname, lvname2_orig) < 0) {
log_error("vg/lv string alloc failed.");
return_ECMD_FAILED;
}

/* vglv is now vgname/lvname2 and replaces lvname2_orig */

cmd->position_argv[1] = vglv;
}
@@ -629,19 +629,41 @@ static int _size_arg(struct cmd_context *cmd __attribute__((unused)),
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* negative not accepted */
|
||||
int size_kb_arg(struct cmd_context *cmd, struct arg_values *av)
|
||||
{
|
||||
if (!_size_arg(cmd, av, 2, 0))
|
||||
return 0;
|
||||
|
||||
if (av->sign == SIGN_MINUS) {
|
||||
log_error("Size may not be negative.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int ssize_kb_arg(struct cmd_context *cmd, struct arg_values *av)
|
||||
{
|
||||
return _size_arg(cmd, av, 2, 0);
|
||||
}
|
||||
|
||||
int size_mb_arg(struct cmd_context *cmd, struct arg_values *av)
|
||||
{
|
||||
return _size_arg(cmd, av, 2048, 0);
|
||||
if (!_size_arg(cmd, av, 2048, 0))
|
||||
return 0;
|
||||
|
||||
if (av->sign == SIGN_MINUS) {
|
||||
log_error("Size may not be negative.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int size_mb_arg_with_percent(struct cmd_context *cmd, struct arg_values *av)
|
||||
int ssize_mb_arg(struct cmd_context *cmd, struct arg_values *av)
|
||||
{
|
||||
return _size_arg(cmd, av, 2048, 1);
|
||||
return _size_arg(cmd, av, 2048, 0);
|
||||
}
|
||||
|
||||
int int_arg(struct cmd_context *cmd __attribute__((unused)), struct arg_values *av)
|
||||
@@ -672,8 +694,8 @@ int int_arg_with_sign(struct cmd_context *cmd __attribute__((unused)), struct ar
|
||||
return 1;
|
||||
}
|
||||
|
||||
int int_arg_with_sign_and_percent(struct cmd_context *cmd __attribute__((unused)),
|
||||
struct arg_values *av)
|
||||
int extents_arg(struct cmd_context *cmd __attribute__((unused)),
|
||||
struct arg_values *av)
|
||||
{
|
||||
char *ptr;
|
||||
|
||||
@@ -1253,13 +1275,9 @@ static int _command_required_opt_matches(struct cmd_context *cmd, int ci, int ro
|
||||
* For some commands, --size and --extents are interchangable,
|
||||
* but command[] definitions use only --size.
|
||||
*/
|
||||
if ((opt_enum == size_ARG) && arg_is_set(cmd, extents_ARG)) {
|
||||
if (!strcmp(commands[ci].name, "lvcreate") ||
|
||||
!strcmp(commands[ci].name, "lvresize") ||
|
||||
!strcmp(commands[ci].name, "lvextend") ||
|
||||
!strcmp(commands[ci].name, "lvreduce"))
|
||||
goto check_val;
|
||||
}
|
||||
if ((opt_enum == size_ARG) && arg_is_set(cmd, extents_ARG) &&
|
||||
command_has_alternate_extents(commands[ci].name))
|
||||
goto check_val;
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -1560,11 +1578,10 @@ static struct command *_find_command(struct cmd_context *cmd, const char *path,
|
||||
|
||||
if (!best_required) {
|
||||
/* cmd did not have all the required opt/pos args of any command */
|
||||
log_error("Failed to find a matching command definition.");
|
||||
log_error("Run '%s --help' for more information.", name);
|
||||
log_error("Incorrect syntax. Run '%s --help' for more information.", name);
|
||||
if (close_ro) {
|
||||
log_warn("Closest command usage is:");
|
||||
print_usage(&_cmdline.commands[close_i]);
|
||||
log_warn("Nearest similar command has syntax:");
|
||||
print_usage(&_cmdline.commands[close_i], 0, 0);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@@ -1677,43 +1694,11 @@ static void _short_usage(const char *name)
|
||||
log_error("Run `%s --help' for more information.", name);
|
||||
}
|
||||
|
||||
static void _usage_notes(void)
|
||||
{
|
||||
/*
|
||||
* Excluding commonly understood syntax style like the meanings of:
|
||||
* [ ] for optional, ... for repeatable, | for one of the following,
|
||||
* -- for an option name, lower case strings and digits for literals.
|
||||
*/
|
||||
log_print("Usage notes:\n"
|
||||
". Variable parameters are: Number, String, PV, VG, LV, Tag.\n"
|
||||
". Select indicates that a required positional parameter can\n"
|
||||
" be omitted if the --select option is used.\n"
|
||||
". --size Number can be replaced with --extents NumberExtents.\n"
|
||||
". When --name is omitted from lvcreate, a new LV name is\n"
|
||||
" generated with the \"lvol\" prefix and a unique numeric suffix.\n"
|
||||
". The required VG parameter in lvcreate may be omitted when\n"
|
||||
" the VG name is included in another option, e.g. --name VG/LV.\n"
|
||||
". For required options listed in parentheses, e.g. (--A, --B),\n"
|
||||
" any one is required, after which the others are optional.\n"
|
||||
". The _new suffix indicates the VG or LV must not yet exist.\n"
|
||||
". LV followed by _<type> indicates that an LV of the given type\n"
|
||||
" is required. (raid represents any raid<N> type.)\n"
|
||||
". Input units are always treated as base two values, regardless of\n"
|
||||
" unit capitalization, e.g. 'k' and 'K' both refer to 1024.\n"
|
||||
". The default input unit is specified by letter, followed by |unit\n"
|
||||
" which represents other possible input units: bBsSkKmMgGtTpPeE.\n"
|
||||
". Output units can be specified with the --units option, for which\n"
|
||||
" lower/upper case letters refer to base 2/10 values.\n"
|
||||
" formats that are recognized, e.g. for compatibility.\n"
|
||||
". See man pages for short option equivalents of long option names,\n"
|
||||
" and for more detailed descriptions of variable parameters.\n"
|
||||
" \n");
|
||||
}
|
||||
|
||||
static int _usage(const char *name, int longhelp)
|
||||
{
|
||||
struct command_name *cname = find_command_name(name);
|
||||
struct command *cmd;
|
||||
int show_full = longhelp;
|
||||
int i;
|
||||
|
||||
if (!cname) {
|
||||
@@ -1721,8 +1706,19 @@ static int _usage(const char *name, int longhelp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Looks at all variants of each command name and figures out
|
||||
* which options are common to all variants (for compact output)
|
||||
*/
|
||||
factor_common_options();
|
||||
|
||||
log_print("%s - %s\n", name, cname->desc);
|
||||
|
||||
/* Reduce the default output when there are several variants. */
|
||||
|
||||
if (cname->variants < 3)
|
||||
show_full = 1;
|
||||
|
||||
for (i = 0; i < COMMAND_COUNT; i++) {
|
||||
if (strcmp(_cmdline.commands[i].name, name))
|
||||
continue;
|
||||
@@ -1730,18 +1726,23 @@ static int _usage(const char *name, int longhelp)
|
||||
if (_cmdline.commands[i].cmd_flags & CMD_FLAG_PREVIOUS_SYNTAX)
|
||||
continue;
|
||||
|
||||
if ((_cmdline.commands[i].cmd_flags & CMD_FLAG_SECONDARY_SYNTAX) && !longhelp)
|
||||
if ((_cmdline.commands[i].cmd_flags & CMD_FLAG_SECONDARY_SYNTAX) && !show_full)
|
||||
continue;
|
||||
|
||||
print_usage(&_cmdline.commands[i]);
|
||||
print_usage(&_cmdline.commands[i], show_full, 1);
|
||||
cmd = &_cmdline.commands[i];
|
||||
}
|
||||
|
||||
/* Common options are printed once for all variants of a command name. */
|
||||
print_usage_common(cname, cmd);
|
||||
if (show_full) {
|
||||
print_usage_common_cmd(cname, cmd);
|
||||
print_usage_common_lvm(cname, cmd);
|
||||
}
|
||||
|
||||
if (longhelp)
|
||||
_usage_notes();
|
||||
print_usage_notes(cname, cmd);
|
||||
else
|
||||
log_print("Use --longhelp to show all options and advanced commands.");
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -1753,7 +1754,7 @@ static void _usage_all(void)
|
||||
for (i = 0; i < MAX_COMMAND_NAMES; i++) {
|
||||
if (!command_names[i].name)
|
||||
break;
|
||||
_usage(command_names[i].name, 0);
|
||||
_usage(command_names[i].name, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2040,7 +2041,12 @@ int version(struct cmd_context *cmd __attribute__((unused)),
|
||||
return ECMD_PROCESSED;
}

static void _get_output_settings(struct cmd_context *cmd)
static void _reset_current_settings_to_default(struct cmd_context *cmd)
{
	cmd->current_settings = cmd->default_settings;
}

static void _get_current_output_settings_from_args(struct cmd_context *cmd)
{
	if (arg_is_set(cmd, debug_ARG))
		cmd->current_settings.debug = _LOG_FATAL + (arg_count(cmd, debug_ARG) - 1);
@@ -2055,7 +2061,7 @@ static void _get_output_settings(struct cmd_context *cmd)
	}
}

static void _apply_output_settings(struct cmd_context *cmd)
static void _apply_current_output_settings(struct cmd_context *cmd)
{
	init_debug(cmd->current_settings.debug);
	init_debug_classes_logged(cmd->default_settings.debug_classes);
@@ -2063,10 +2069,12 @@ static void _apply_output_settings(struct cmd_context *cmd)
	init_silent(cmd->current_settings.silent);
}

static int _get_settings(struct cmd_context *cmd)
static int _get_current_settings(struct cmd_context *cmd)
{
	const char *activation_mode;

	_get_current_output_settings_from_args(cmd);

	if (arg_is_set(cmd, test_ARG))
		cmd->current_settings.test = arg_is_set(cmd, test_ARG);

@@ -2239,8 +2247,10 @@ int help(struct cmd_context *cmd __attribute__((unused)), int argc, char **argv)
	return ret;
}

static void _apply_settings(struct cmd_context *cmd)
static void _apply_current_settings(struct cmd_context *cmd)
{
	_apply_current_output_settings(cmd);

	init_test(cmd->current_settings.test);
	init_full_scan_done(0);
	init_mirror_in_sync(0);
@@ -2564,13 +2574,12 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
	}

	/*
	 * log_debug() can be enabled now that we know the settings
	 * from the command. Previous calls to log_debug() will
	 * do nothing.
	 * Now we have the command line args, set up any known output logging
	 * options immediately.
	 */
	cmd->current_settings = cmd->default_settings;
	_get_output_settings(cmd);
	_apply_output_settings(cmd);
	_reset_current_settings_to_default(cmd);
	_get_current_output_settings_from_args(cmd);
	_apply_current_output_settings(cmd);

	log_debug("Parsing: %s", cmd->cmd_line);

@@ -2631,9 +2640,17 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
	if (arg_is_set(cmd, readonly_ARG))
		cmd->metadata_read_only = 1;

	if ((ret = _get_settings(cmd)))
	/*
	 * Now that all configs, profiles and command lines args are available,
	 * freshly calculate and apply all settings. Specific command line
	 * options take precedence over config files (which include --config as
	 * that is treated like a config file).
	 */
	_reset_current_settings_to_default(cmd);
	if ((ret = _get_current_settings(cmd)))
		goto_out;
	_apply_settings(cmd);
	_apply_current_settings(cmd);

	if (cmd->degraded_activation)
		log_debug("DEGRADED MODE. Incomplete RAID LVs will be processed.");

@@ -2784,8 +2801,13 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)

	log_debug("Completed: %s", cmd->cmd_line);

	cmd->current_settings = cmd->default_settings;
	_apply_settings(cmd);
	/*
	 * Reset all settings back to the persistent defaults that
	 * ignore everything supplied on the command line of the
	 * completed command.
	 */
	_reset_current_settings_to_default(cmd);
	_apply_current_settings(cmd);

	/*
	 * free off any memory the command used.
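
Editorial note: taken together, these lvm_run_command() hunks give each command a three-stage settings lifecycle: reset to the persistent defaults, overlay the command line, apply; then reset and apply again once the command completes. The toy program below sketches only that pattern; the struct and function names in it are invented stand-ins, not lvm2 code.

/* Editorial toy model of the per-command settings lifecycle described
 * above.  All names here are invented; this is not lvm2 source. */
#include <stdio.h>

struct settings { int debug; int test; };

struct ctx {
	struct settings default_settings;	/* from config files       */
	struct settings current_settings;	/* defaults + command line */
};

static void reset_current_settings_to_default(struct ctx *c)
{
	c->current_settings = c->default_settings;
}

static void get_current_settings_from_args(struct ctx *c, int debug_arg, int test_arg)
{
	/* command line options override the config-derived defaults */
	if (debug_arg)
		c->current_settings.debug = debug_arg;
	if (test_arg)
		c->current_settings.test = 1;
}

static void apply_current_settings(const struct ctx *c)
{
	printf("debug=%d test=%d\n", c->current_settings.debug, c->current_settings.test);
}

int main(void)
{
	struct ctx c = { .default_settings = { .debug = 0, .test = 0 } };

	/* before the command runs: defaults + this command's args */
	reset_current_settings_to_default(&c);
	get_current_settings_from_args(&c, 2 /* -dd */, 1 /* --test */);
	apply_current_settings(&c);

	/* ... command runs ... */

	/* after the command: back to the persistent defaults */
	reset_current_settings_to_default(&c);
	apply_current_settings(&c);
	return 0;
}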
139
tools/reporter.c
@@ -17,6 +17,8 @@

#include "report.h"

#include <sys/vfs.h>

typedef enum {
	REPORT_IDX_NULL = -1,
	REPORT_IDX_SINGLE,
@@ -79,7 +81,7 @@ static int _vgs_single(struct cmd_context *cmd __attribute__((unused)),
	struct selection_handle *sh = handle->selection_handle;

	if (!report_object(sh ? : handle->custom_handle, sh != NULL,
			   vg, NULL, NULL, NULL, NULL, NULL, NULL))
			   vg, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
		return_ECMD_FAILED;

	check_current_backup(vg);
@@ -177,7 +179,7 @@ static int _do_lvs_with_info_and_status_single(struct cmd_context *cmd,
	}

	if (!report_object(sh ? : handle->custom_handle, sh != NULL,
			   lv->vg, lv, NULL, NULL, NULL, &status, NULL))
			   lv->vg, lv, NULL, NULL, NULL, &status, NULL, NULL, NULL))
		goto out;

	r = ECMD_PROCESSED;
@@ -239,7 +241,7 @@ static int _do_segs_with_info_and_status_single(struct cmd_context *cmd,
	}

	if (!report_object(sh ? : handle->custom_handle, sh != NULL,
			   seg->lv->vg, seg->lv, NULL, seg, NULL, &status, NULL))
			   seg->lv->vg, seg->lv, NULL, seg, NULL, &status, NULL, NULL, NULL))
		goto_out;

	r = ECMD_PROCESSED;
@@ -367,7 +369,7 @@ static int _do_pvsegs_sub_single(struct cmd_context *cmd,
	if (!report_object(sh ? : handle->custom_handle, sh != NULL,
			   vg, seg ? seg->lv : &_free_logical_volume,
			   pvseg->pv, seg ? : &_free_lv_segment, pvseg,
			   &status, pv_label(pvseg->pv))) {
			   &status, pv_label(pvseg->pv), NULL, NULL)) {
		ret = ECMD_FAILED;
		goto_out;
	}
@@ -443,17 +445,101 @@ static int _pvsegs_with_lv_info_and_status_single(struct cmd_context *cmd,
	return process_each_segment_in_pv(cmd, vg, pv, handle, _pvsegs_with_lv_info_and_status_sub_single);
}

struct mountinfo_s { // FIXME
	unsigned maj; //FIXME
	unsigned min; //FIXME
	const char *mountpoint;
};

static int _get_mountpoint(char *buffer, unsigned major, unsigned minor,
			   char *target, void *cb_data)
{
	struct mountinfo_s *data = cb_data;

	if ((major == data->maj) && (minor == data->min))
		data->mountpoint = dm_strdup(target); // FIXME error/pool

	return 1;
}

static int _populate_mount_info(struct physical_volume *pv, struct lvm_mountinfo *mountinfo)
{
	struct mountinfo_s data = {
		.maj = MAJOR(pv->dev->dev),
		.min = MINOR(pv->dev->dev),
	};

	if (!dm_mountinfo_read(_get_mountpoint, &data))
		return 0;

	if (data.mountpoint)
		mountinfo->mountpoint = data.mountpoint;
	else
		mountinfo->mountpoint = "";

	return 1;
}

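For context, dm_mountinfo_read() is the libdevmapper helper the new code relies on: it scans the system's mountinfo entries and invokes the callback once per entry with that entry's major:minor and mount target. A minimal standalone sketch of the same lookup follows. It assumes the callback signature exactly as _get_mountpoint() uses it above; the 253:0 device number is only an example, and the program must be linked with -ldevmapper.

/* Editorial sketch (not from the patch): find where a block device with a
 * given major:minor is mounted via dm_mountinfo_read().
 * Build with: cc mountpoint_demo.c -ldevmapper */
#include <stdio.h>
#include <limits.h>
#include <libdevmapper.h>

struct lookup {
	unsigned maj, min;
	char mountpoint[PATH_MAX];
};

static int _match_devno(char *line, unsigned maj, unsigned min,
			char *target, void *cb_data)
{
	struct lookup *l = cb_data;

	if (maj == l->maj && min == l->min)
		snprintf(l->mountpoint, sizeof(l->mountpoint), "%s", target);

	return 1;	/* nonzero: keep scanning the remaining entries */
}

int main(void)
{
	struct lookup l = { .maj = 253, .min = 0, .mountpoint = "" };	/* e.g. dm-0 */

	if (!dm_mountinfo_read(_match_devno, &l)) {
		fprintf(stderr, "dm_mountinfo_read failed\n");
		return 1;
	}

	printf("%u:%u -> %s\n", l.maj, l.min,
	       l.mountpoint[0] ? l.mountpoint : "(not mounted)");
	return 0;
}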
static int _populate_fs_info(const char *mountpoint, struct lvm_fsinfo *fsinfo)
{
	struct statfs buf;

	if (statfs(mountpoint, &buf)) {
		log_sys_error("statfs", mountpoint);
		return 0;
	}

	fsinfo->fs_size = (buf.f_blocks * buf.f_bsize) >> SECTOR_SHIFT;
	fsinfo->fs_free = (buf.f_bfree * buf.f_bsize) >> SECTOR_SHIFT;
	fsinfo->fs_avail = (buf.f_bavail * buf.f_bsize) >> SECTOR_SHIFT;
	fsinfo->fs_used = ((buf.f_blocks - buf.f_bfree) * buf.f_bsize) >> SECTOR_SHIFT;

	return 1;
}

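_populate_fs_info() above converts statfs(2) block counts into 512-byte sectors; SECTOR_SHIFT is 9 in lvm2. The self-contained sketch below performs the same arithmetic on an arbitrary mount point, assuming only Linux statfs(2); it is an editorial illustration, not part of the patch.

/* Editorial sketch: report a filesystem's size/free/avail/used in
 * 512-byte sectors, mirroring the arithmetic in _populate_fs_info(). */
#include <stdio.h>
#include <stdint.h>
#include <sys/vfs.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in lvm2 */

int main(int argc, char **argv)
{
	struct statfs buf;
	const char *mountpoint = argc > 1 ? argv[1] : "/";

	if (statfs(mountpoint, &buf)) {
		perror("statfs");
		return 1;
	}

	uint64_t bsize    = buf.f_bsize;
	uint64_t fs_size  = (buf.f_blocks * bsize) >> SECTOR_SHIFT;
	uint64_t fs_free  = (buf.f_bfree  * bsize) >> SECTOR_SHIFT;
	uint64_t fs_avail = (buf.f_bavail * bsize) >> SECTOR_SHIFT;
	uint64_t fs_used  = ((buf.f_blocks - buf.f_bfree) * bsize) >> SECTOR_SHIFT;

	printf("%s: size=%llu free=%llu avail=%llu used=%llu (sectors)\n",
	       mountpoint,
	       (unsigned long long) fs_size, (unsigned long long) fs_free,
	       (unsigned long long) fs_avail, (unsigned long long) fs_used);
	return 0;
}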
static int _do_pvs_with_mount_and_fs_info_single(struct cmd_context *cmd, struct volume_group *vg,
						 struct physical_volume *pv,
						 struct processing_handle *handle,
						 int do_mount_info, int do_fs_info)
{
	struct selection_handle *sh = handle->selection_handle;
	struct lvm_mountinfo mountinfo;
	struct lvm_fsinfo fsinfo;

	if (do_mount_info)
		if (!_populate_mount_info(pv, &mountinfo))
			return_0;

	if (do_fs_info && *mountinfo.mountpoint)
		if (!_populate_fs_info(mountinfo.mountpoint, &fsinfo))
			return_0;

	if (!report_object(sh ? : handle->custom_handle, sh != NULL,
			   vg, NULL, pv, NULL, NULL, NULL, NULL, do_mount_info ? &mountinfo : NULL, do_fs_info && *mountinfo.mountpoint ? &fsinfo : NULL))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}

static int _pvs_single(struct cmd_context *cmd, struct volume_group *vg,
		       struct physical_volume *pv,
		       struct processing_handle *handle)
{
	struct selection_handle *sh = handle->selection_handle;
	return _do_pvs_with_mount_and_fs_info_single(cmd, vg, pv, handle, 0, 0);
}

	if (!report_object(sh ? : handle->custom_handle, sh != NULL,
			   vg, NULL, pv, NULL, NULL, NULL, NULL))
		return_ECMD_FAILED;
static int _pvs_with_mount_info_single(struct cmd_context *cmd, struct volume_group *vg,
				       struct physical_volume *pv,
				       struct processing_handle *handle)
{
	return _do_pvs_with_mount_and_fs_info_single(cmd, vg, pv, handle, 1, 0);
}

	return ECMD_PROCESSED;
static int _pvs_with_fs_info_single(struct cmd_context *cmd, struct volume_group *vg,
				    struct physical_volume *pv,
				    struct processing_handle *handle)
{
	return _do_pvs_with_mount_and_fs_info_single(cmd, vg, pv, handle, 1, 1);
}

static int _label_single(struct cmd_context *cmd, struct label *label,
@@ -462,7 +548,7 @@ static int _label_single(struct cmd_context *cmd, struct label *label,
	struct selection_handle *sh = handle->selection_handle;

	if (!report_object(sh ? : handle->custom_handle, sh != NULL,
			   NULL, NULL, NULL, NULL, NULL, NULL, label))
			   NULL, NULL, NULL, NULL, NULL, NULL, label, NULL, NULL))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
@@ -487,6 +573,8 @@ static int _get_final_report_type(struct report_args *args,
				  report_type_t report_type,
				  int *lv_info_needed,
				  int *lv_segment_status_needed,
				  int *mountinfo_needed,
				  int *fsinfo_needed,
				  report_type_t *final_report_type)
{
	/* Do we need to acquire LV device info in addition? */
@@ -498,8 +586,16 @@ static int _get_final_report_type(struct report_args *args,
	/* Ensure options selected are compatible */
	if (report_type & SEGS)
		report_type |= LVS;

	if (report_type & PVSEGS)
		report_type |= PVS;

	if (report_type & FSINFO)
		report_type |= MOUNTINFO;

	if (report_type & MOUNTINFO)
		report_type |= PVS; // FIXME Temporarily drive fs and mount from pvs

	if ((report_type & (LVS | LVSINFO | LVSSTATUS | LVSINFOSTATUS)) &&
	    (report_type & (PVS | LABEL)) && !(single_args->args_are_pvs || (args->full_report_vg && single_args->report_type == PVSEGS))) {
		log_error("Can't report LV and PV fields at the same time in %sreport type \"%s\"%s%s.",
@@ -509,6 +605,12 @@ static int _get_final_report_type(struct report_args *args,
		return 0;
	}

	/* Do we need to acquire mount point information? */
	*mountinfo_needed = (report_type & MOUNTINFO) ? 1 : 0;

	/* Do we need to acquire mounted filesystem information? */
	*fsinfo_needed = (report_type & FSINFO) ? 1 : 0;

	/* Change report type if fields specified makes this necessary */
	if (report_type & FULL)
		report_type = FULL;
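
The implication chain added to _get_final_report_type() above means a request for filesystem fields silently widens the report: FSINFO pulls in MOUNTINFO, and MOUNTINFO, per the FIXME, is currently driven from the PV report and so pulls in PVS. A toy illustration follows; the flag values are stand-ins, not lvm2's real report_type_t flags.

/* Editorial toy: the FSINFO -> MOUNTINFO -> PVS widening, with made-up
 * flag values used purely for illustration. */
#include <stdio.h>

enum { PVS = 1u << 0, MOUNTINFO = 1u << 1, FSINFO = 1u << 2 };

int main(void)
{
	unsigned report_type = FSINFO;	/* only filesystem fields requested */

	if (report_type & FSINFO)
		report_type |= MOUNTINFO;
	if (report_type & MOUNTINFO)
		report_type |= PVS;

	printf("0x%x\n", report_type);	/* prints 0x7: PVS|MOUNTINFO|FSINFO */
	return 0;
}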
@@ -603,7 +705,7 @@ static int _report_all_in_lv(struct cmd_context *cmd, struct processing_handle *

static int _report_all_in_pv(struct cmd_context *cmd, struct processing_handle *handle,
			     struct physical_volume *pv, report_type_t type,
			     int do_lv_info, int do_lv_seg_status)
			     int do_lv_info, int do_lv_seg_status, int do_mount_info, int do_fs_info)
{
	int r = 0;

@@ -635,7 +737,7 @@ int report_for_selection(struct cmd_context *cmd,
	struct selection_handle *sh = parent_handle->selection_handle;
	struct report_args args = {0};
	struct single_report_args *single_args = &args.single_args[REPORT_IDX_SINGLE];
	int do_lv_info, do_lv_seg_status;
	int do_lv_info, do_lv_seg_status, do_mount_info, do_fs_info;
	struct processing_handle *handle;
	int r = 0;

@@ -645,6 +747,7 @@ int report_for_selection(struct cmd_context *cmd,
	if (!_get_final_report_type(&args, single_args,
				    single_args->report_type,
				    &do_lv_info, &do_lv_seg_status,
				    &do_mount_info, &do_fs_info,
				    &sh->report_type))
		return_0;

@@ -688,7 +791,7 @@ int report_for_selection(struct cmd_context *cmd,
		r = _report_all_in_vg(cmd, handle, vg, sh->report_type, do_lv_info, do_lv_seg_status);
		break;
	case PVS:
		r = _report_all_in_pv(cmd, handle, pv, sh->report_type, do_lv_info, do_lv_seg_status);
		r = _report_all_in_pv(cmd, handle, pv, sh->report_type, do_lv_info, do_lv_seg_status, do_mount_info, do_fs_info);
		break;
	default:
		log_error(INTERNAL_ERROR "report_for_selection: incorrect report type");
@@ -1079,6 +1182,7 @@ static int _do_report(struct cmd_context *cmd, struct processing_handle *handle,
	int lock_global = 0;
	int lv_info_needed;
	int lv_segment_status_needed;
	int do_mount_info, do_fs_info;
	int report_in_group = 0;
	int r = ECMD_FAILED;

@@ -1091,7 +1195,9 @@ static int _do_report(struct cmd_context *cmd, struct processing_handle *handle,
	handle->custom_handle = report_handle;

	if (!_get_final_report_type(args, single_args, report_type, &lv_info_needed,
				    &lv_segment_status_needed, &report_type))
				    &lv_segment_status_needed,
				    &do_mount_info, &do_fs_info,
				    &report_type))
		goto_out;

	if (!(args->log_only && (single_args->report_type != CMDLOG))) {
@@ -1151,7 +1257,10 @@ static int _do_report(struct cmd_context *cmd, struct processing_handle *handle,
		if (single_args->args_are_pvs)
			r = process_each_pv(cmd, args->argc, args->argv, NULL,
					    arg_is_set(cmd, all_ARG), 0,
					    handle, &_pvs_single);
					    handle,
					    do_fs_info ? &_pvs_with_fs_info_single :
					    do_mount_info ? &_pvs_with_mount_info_single :
					    &_pvs_single);
		else
			r = process_each_vg(cmd, args->argc, args->argv, NULL, NULL,
					    0, 0, handle, &_pvs_in_vg);

@@ -1305,7 +1305,6 @@ static int _validate_stripe_params(struct cmd_context *cmd, const struct segment
		return 0;
	}

// printf("%s[%u] *stripe_size=%u\n", __func__, __LINE__, *stripe_size);
	return 1;
}

@@ -1325,7 +1324,6 @@ int get_stripe_params(struct cmd_context *cmd, const struct segment_type *segtyp
	*stripes_supplied = arg_is_set(cmd, stripes_long_ARG) ? : arg_is_set(cmd, stripes_ARG);

	*stripe_size = arg_uint_value(cmd, stripesize_ARG, 0);
// printf("%s[%u] *stripe_size=%u\n", __func__, __LINE__, *stripe_size);
	*stripe_size_supplied = arg_is_set(cmd, stripesize_ARG);
	if (*stripe_size) {
		if (arg_sign_value(cmd, stripesize_ARG, SIGN_NONE) == SIGN_MINUS) {
@@ -1340,7 +1338,6 @@ int get_stripe_params(struct cmd_context *cmd, const struct segment_type *segtyp
		}
	}

// printf("%s[%u] *stripe_size=%u\n", __func__, __LINE__, *stripe_size);
	return _validate_stripe_params(cmd, segtype, stripes, stripe_size);
}

@@ -5758,4 +5755,3 @@ bad:
out:
	return 0;
}

@@ -183,12 +183,13 @@ int cachemode_arg(struct cmd_context *cmd, struct arg_values *av);
int discards_arg(struct cmd_context *cmd, struct arg_values *av);
int mirrorlog_arg(struct cmd_context *cmd, struct arg_values *av);
int size_kb_arg(struct cmd_context *cmd, struct arg_values *av);
int ssize_kb_arg(struct cmd_context *cmd, struct arg_values *av);
int size_mb_arg(struct cmd_context *cmd, struct arg_values *av);
int size_mb_arg_with_percent(struct cmd_context *cmd, struct arg_values *av);
int ssize_mb_arg(struct cmd_context *cmd, struct arg_values *av);
int int_arg(struct cmd_context *cmd, struct arg_values *av);
int uint32_arg(struct cmd_context *cmd, struct arg_values *av);
int int_arg_with_sign(struct cmd_context *cmd, struct arg_values *av);
int int_arg_with_sign_and_percent(struct cmd_context *cmd, struct arg_values *av);
int extents_arg(struct cmd_context *cmd, struct arg_values *av);
int major_arg(struct cmd_context *cmd, struct arg_values *av);
int minor_arg(struct cmd_context *cmd, struct arg_values *av);
int string_arg(struct cmd_context *cmd, struct arg_values *av);

32
tools/vals.h
@@ -79,14 +79,14 @@
 * options included in the usage text below that should
 * be removed? Should "lvm1" be removed?
 *
 * For Number args that take optional units, a full usage
 * could be "Number[bBsSkKmMgGtTpPeE]" (with implied |),
 * but repeating this full specification produces cluttered
 * output, and doesn't indicate which unit is the default.
 * "Number[units]" would be cleaner, as would a subset of
 * common units, e.g. "Number[kmg...]", but neither helps
 * with default. "Number[k|unit]" and "Number[m|unit]" show
 * the default, and "unit" indicates that other units
 * Size is a Number that takes an optional unit.
 * A full usage could be "Size[b|B|s|S|k|K|m|M|g|G|t|T|p|P|e|E]"
 * but repeating this full specification produces long and
 * cluttered output, and doesn't indicate which unit is the default.
 * "Size[Units]" would be cleaner, as would a subset of
 * common units, e.g. "Size[kmg...]", but neither helps
 * with default. "Size[k|UNIT]" and "Size[m|UNIT]" show
 * the default, and "UNIT" indicates that other units
 * are possible without listing them all. This also
 * suggests using the preferred lower case letters, because
 * --size and other option args treat upper/lower letters
@@ -112,21 +112,23 @@ val(tag_VAL, tag_arg, "Tag", NULL)
val(select_VAL, NULL, "Select", NULL) /* used only for command defs */
val(activationmode_VAL, string_arg, "ActivationMode", "partial|degraded|complete")
val(activation_VAL, activation_arg, "Active", "y|n|ay")
val(cachemode_VAL, cachemode_arg, "CacheMode", "writethrough|writeback")
val(cachemode_VAL, cachemode_arg, "CacheMode", "writethrough|writeback|passthrough")
val(discards_VAL, discards_arg, "Discards", "passdown|nopassdown|ignore")
val(mirrorlog_VAL, mirrorlog_arg, "MirrorLog", "core|disk")
val(sizekb_VAL, size_kb_arg, "SizeKB", "Number[k|unit]")
val(sizemb_VAL, size_mb_arg, "SizeMB", "Number[m|unit]")
val(regionsize_VAL, regionsize_arg, "RegionSize", "Number[m|unit]")
val(numsigned_VAL, int_arg_with_sign, "SNumber", "[+|-]Number")
val(numsignedper_VAL, int_arg_with_sign_and_percent, "SNumberP", "[+|-]Number[%VG|%PVS|%FREE]")
val(sizekb_VAL, size_kb_arg, "SizeKB", "Size[k|UNIT]")
val(sizemb_VAL, size_mb_arg, "SizeMB", "Size[m|UNIT]")
val(ssizekb_VAL, ssize_kb_arg, "SSizeKB", "[+|-]Size[k|UNIT]")
val(ssizemb_VAL, ssize_mb_arg, "SSizeMB", "[+|-]Size[m|UNIT]")
val(regionsize_VAL, regionsize_arg, "RegionSize", "Size[m|UNIT]")
val(snumber_VAL, int_arg_with_sign, "SNumber", "[+|-]Number")
val(extents_VAL, extents_arg, "Extents", "[+|-]Number[PERCENT]")
val(permission_VAL, permission_arg, "Permission", "rw|r")
val(metadatatype_VAL, metadatatype_arg, "MetadataType", "lvm2|lvm1")
val(units_VAL, string_arg, "Units", "r|R|h|H|b|B|s|S|k|K|m|M|g|G|t|T|p|P|e|E")
val(segtype_VAL, segtype_arg, "SegType", "linear|striped|snapshot|mirror|raid|thin|cache|thin-pool|cache-pool")
val(alloc_VAL, alloc_arg, "Alloc", "contiguous|cling|cling_by_tags|normal|anywhere|inherit")
val(locktype_VAL, locktype_arg, "LockType", "sanlock|dlm|none")
val(readahead_VAL, readahead_arg, "Readahead", "auto|none|NumberSectors")
val(readahead_VAL, readahead_arg, "Readahead", "auto|none|Number")
val(vgmetadatacopies_VAL, vgmetadatacopies_arg, "MetadataCopiesVG", "all|unmanaged|Number")
val(pvmetadatacopies_VAL, pvmetadatacopies_arg, "MetadataCopiesPV", "0|1|2")
val(metadatacopies_VAL, metadatacopies_arg, "unused", "unused")