mirror of
git://sourceware.org/git/lvm2.git
synced 2025-11-22 00:23:51 +03:00
Compare commits
89 Commits
sourceware
...
dev-agk-fs
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
60e50072e5 | ||
|
|
3a5561e5ab | ||
|
|
aedac100f9 | ||
|
|
18bbeec825 | ||
|
|
9ed11e9191 | ||
|
|
05aceaffbd | ||
|
|
f4b30b0dae | ||
|
|
43fb4aa69b | ||
|
|
872932a0fb | ||
|
|
0b019c5406 | ||
|
|
ef97360866 | ||
|
|
17838e6439 | ||
|
|
11589891d7 | ||
|
|
b6c4b7cfb0 | ||
|
|
c5b6c9ad44 | ||
|
|
6dea1ed5ae | ||
|
|
e4ef3d04ad | ||
|
|
547bdb63e1 | ||
|
|
9a50df291a | ||
|
|
e7ee89d80b | ||
|
|
2a5e24580a | ||
|
|
191a2517be | ||
|
|
1a0d57f895 | ||
|
|
9a62767f2d | ||
|
|
5d39927f22 | ||
|
|
9b23d9bfe4 | ||
|
|
f350283398 | ||
|
|
af7c8e7106 | ||
|
|
ca859b5149 | ||
|
|
d3bcec5993 | ||
|
|
910918d1c2 | ||
|
|
6360ba3d2d | ||
|
|
b7831fc14a | ||
|
|
70c1fa3764 | ||
|
|
8df3f300ba | ||
|
|
b76852bf35 | ||
|
|
26ca308ba9 | ||
|
|
7b0371e74e | ||
|
|
83249f3327 | ||
|
|
4c89d3794c | ||
|
|
10c3d94159 | ||
|
|
157948b5a5 | ||
|
|
c25b95e2ef | ||
|
|
51dfbf1fb3 | ||
|
|
daf1d4cadc | ||
|
|
fb42874a4f | ||
|
|
48778bc503 | ||
|
|
62abae1525 | ||
|
|
eb9586bd3b | ||
|
|
d6dd700bf7 | ||
|
|
7a064303fe | ||
|
|
964114950c | ||
|
|
1828822bd8 | ||
|
|
ce1e5b9991 | ||
|
|
80a6de616a | ||
|
|
21456dcf7f | ||
|
|
89661981e8 | ||
|
|
4a14617dc4 | ||
|
|
f9d28f1aec | ||
|
|
998151e83e | ||
|
|
8d0df0c011 | ||
|
|
27384c52cf | ||
|
|
c41e999488 | ||
|
|
4f7631b4ad | ||
|
|
5f6bdf707d | ||
|
|
84cceaf9b9 | ||
|
|
74ba326007 | ||
|
|
189fa64793 | ||
|
|
3bdc4045c2 | ||
|
|
d768fbe010 | ||
|
|
76f60cc430 | ||
|
|
2574d3257a | ||
|
|
64a2fad5d6 | ||
|
|
34caf83172 | ||
|
|
f79bd30a8b | ||
|
|
1784cc990e | ||
|
|
2d74de3f05 | ||
|
|
34a8d3c2fd | ||
|
|
932db3db53 | ||
|
|
fe18e5e77a | ||
|
|
929cf4b73c | ||
|
|
4de0e692db | ||
|
|
7d39b4d5e7 | ||
|
|
92691e345d | ||
|
|
c1865b0a86 | ||
|
|
b499d96215 | ||
|
|
e2354ea344 | ||
|
|
ffe3ca26e0 | ||
|
|
3fd3c9430d |
5
README
5
README
@@ -6,11 +6,12 @@ Installation instructions are in INSTALL.
|
||||
There is no warranty - see COPYING and COPYING.LIB.
|
||||
|
||||
Tarballs are available from:
|
||||
ftp://sourceware.org/pub/lvm2/
|
||||
ftp://sources.redhat.com/pub/lvm2/
|
||||
|
||||
The source code is stored in git:
|
||||
http://git.fedorahosted.org/git/lvm2.git
|
||||
git clone git://git.fedorahosted.org/git/lvm2.git
|
||||
https://sourceware.org/git/?p=lvm2.git
|
||||
git clone git://sourceware.org/git/lvm2.git
|
||||
|
||||
Mailing list for general discussion related to LVM2:
|
||||
linux-lvm@redhat.com
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
Version 2.02.169 -
|
||||
=====================================
|
||||
Upstream git moved to https://sourceware.org/git/?p=lvm2
|
||||
Support conversion of raid type, stripesize and number of disks
|
||||
Reject writemostly/writebehind in lvchange during resynchronization.
|
||||
Deactivate active origin first before removal for improved workflow.
|
||||
Fix regression of accepting options --type and -m with lvresize (2.02.158).
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -272,10 +272,18 @@ int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt)
|
||||
{
|
||||
return 0;
|
||||
@@ -984,6 +992,30 @@ int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent)
|
||||
return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
|
||||
}
|
||||
|
||||
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset)
|
||||
{
|
||||
int r;
|
||||
struct dev_manager *dm;
|
||||
struct dm_status_raid *status;
|
||||
|
||||
if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
|
||||
return 0;
|
||||
|
||||
log_debug_activation("Checking raid data offset and dev sectors for LV %s/%s",
|
||||
lv->vg->name, lv->name);
|
||||
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
|
||||
return_0;
|
||||
|
||||
if (!(r = dev_manager_raid_status(dm, lv, &status)))
|
||||
stack;
|
||||
|
||||
*data_offset = status->data_offset;
|
||||
|
||||
dev_manager_destroy(dm);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
|
||||
{
|
||||
int r;
|
||||
@@ -1013,6 +1045,32 @@ int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
|
||||
return r;
|
||||
}
|
||||
|
||||
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt)
|
||||
{
|
||||
struct dev_manager *dm;
|
||||
struct dm_status_raid *status;
|
||||
|
||||
*dev_cnt = 0;
|
||||
|
||||
if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
|
||||
return 0;
|
||||
|
||||
log_debug_activation("Checking raid device count for LV %s/%s",
|
||||
lv->vg->name, lv->name);
|
||||
if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
|
||||
return_0;
|
||||
|
||||
if (!dev_manager_raid_status(dm, lv, &status)) {
|
||||
dev_manager_destroy(dm);
|
||||
return_0;
|
||||
}
|
||||
*dev_cnt = status->dev_count;
|
||||
|
||||
dev_manager_destroy(dm);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt)
|
||||
{
|
||||
struct dev_manager *dm;
|
||||
|
||||
@@ -168,6 +168,8 @@ int lv_snapshot_percent(const struct logical_volume *lv, dm_percent_t *percent);
|
||||
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
|
||||
int wait, dm_percent_t *percent, uint32_t *event_nr);
|
||||
int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent);
|
||||
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt);
|
||||
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset);
|
||||
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health);
|
||||
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt);
|
||||
int lv_raid_sync_action(const struct logical_volume *lv, char **sync_action);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -214,6 +214,14 @@ typedef enum {
|
||||
STATUS, /* DM_DEVICE_STATUS ioctl */
|
||||
} info_type_t;
|
||||
|
||||
/* Return length of segment depending on type and reshape_len */
|
||||
static uint32_t _seg_len(const struct lv_segment *seg)
|
||||
{
|
||||
uint32_t reshape_len = seg_is_raid(seg) ? ((seg->area_count - seg->segtype->parity_devs) * seg->reshape_len) : 0;
|
||||
|
||||
return seg->len - reshape_len;
|
||||
}
|
||||
|
||||
static int _info_run(const char *dlid, struct dm_info *dminfo,
|
||||
uint32_t *read_ahead,
|
||||
struct lv_seg_status *seg_status,
|
||||
@@ -250,7 +258,7 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
|
||||
if (seg_status && dminfo->exists) {
|
||||
start = length = seg_status->seg->lv->vg->extent_size;
|
||||
start *= seg_status->seg->le;
|
||||
length *= seg_status->seg->len;
|
||||
length *= _seg_len(seg_status->seg);
|
||||
|
||||
do {
|
||||
target = dm_get_next_target(dmt, target, &target_start,
|
||||
@@ -1308,14 +1316,13 @@ int dev_manager_raid_message(struct dev_manager *dm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* These are the supported RAID messages for dm-raid v1.5.0 */
|
||||
/* These are the supported RAID messages for dm-raid v1.9.0 */
|
||||
if (strcmp(msg, "idle") &&
|
||||
strcmp(msg, "frozen") &&
|
||||
strcmp(msg, "resync") &&
|
||||
strcmp(msg, "recover") &&
|
||||
strcmp(msg, "check") &&
|
||||
strcmp(msg, "repair") &&
|
||||
strcmp(msg, "reshape")) {
|
||||
strcmp(msg, "repair")) {
|
||||
log_error(INTERNAL_ERROR "Unknown RAID message: %s.", msg);
|
||||
return 0;
|
||||
}
|
||||
@@ -2214,7 +2221,7 @@ static char *_add_error_or_zero_device(struct dev_manager *dm, struct dm_tree *d
|
||||
struct lv_segment *seg_i;
|
||||
struct dm_info info;
|
||||
int segno = -1, i = 0;
|
||||
uint64_t size = (uint64_t) seg->len * seg->lv->vg->extent_size;
|
||||
uint64_t size = (uint64_t) _seg_len(seg) * seg->lv->vg->extent_size;
|
||||
|
||||
dm_list_iterate_items(seg_i, &seg->lv->segments) {
|
||||
if (seg == seg_i) {
|
||||
@@ -2500,7 +2507,7 @@ static int _add_target_to_dtree(struct dev_manager *dm,
|
||||
return seg->segtype->ops->add_target_line(dm, dm->mem, dm->cmd,
|
||||
&dm->target_state, seg,
|
||||
laopts, dnode,
|
||||
extent_size * seg->len,
|
||||
extent_size * _seg_len(seg),
|
||||
&dm->pvmove_mirror_count);
|
||||
}
|
||||
|
||||
@@ -2693,7 +2700,7 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
|
||||
/* Replace target and all its used devs with error mapping */
|
||||
log_debug_activation("Using error for pending delete %s.",
|
||||
display_lvname(seg->lv));
|
||||
if (!dm_tree_node_add_error_target(dnode, (uint64_t)seg->lv->vg->extent_size * seg->len))
|
||||
if (!dm_tree_node_add_error_target(dnode, (uint64_t)seg->lv->vg->extent_size * _seg_len(seg)))
|
||||
return_0;
|
||||
} else if (!_add_target_to_dtree(dm, dnode, seg, laopts))
|
||||
return_0;
|
||||
@@ -3165,7 +3172,6 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
|
||||
log_error(INTERNAL_ERROR "_tree_action: Action %u not supported.", action);
|
||||
goto out;
|
||||
}
|
||||
|
||||
r = 1;
|
||||
|
||||
out:
|
||||
|
||||
@@ -71,7 +71,7 @@
|
||||
* FIXME: Increase these to 64 and further to the MD maximum
|
||||
* once the SubLVs split and name shift got enhanced
|
||||
*/
|
||||
#define DEFAULT_RAID1_MAX_IMAGES 10
|
||||
#define DEFAULT_RAID1_MAX_IMAGES 64
|
||||
#define DEFAULT_RAID_MAX_IMAGES 64
|
||||
#define DEFAULT_ALLOCATION_STRIPE_ALL_DEVICES 0 /* Don't stripe across all devices if not -i/--stripes given */
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -225,8 +225,8 @@ static int _read_linear(struct cmd_context *cmd, struct lv_map *lvm)
|
||||
while (le < lvm->lv->le_count) {
|
||||
len = _area_length(lvm, le);
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lvm->lv, le, len, 0, 0,
|
||||
NULL, 1, len, 0, 0, 0, NULL))) {
|
||||
if (!(seg = alloc_lv_segment(segtype, lvm->lv, le, len, 0, 0, 0,
|
||||
NULL, 1, len, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Failed to allocate linear segment.");
|
||||
return 0;
|
||||
}
|
||||
@@ -297,10 +297,10 @@ static int _read_stripes(struct cmd_context *cmd, struct lv_map *lvm)
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lvm->lv,
|
||||
lvm->stripes * first_area_le,
|
||||
lvm->stripes * area_len,
|
||||
lvm->stripes * area_len, 0,
|
||||
0, lvm->stripe_size, NULL,
|
||||
lvm->stripes,
|
||||
area_len, 0, 0, 0, NULL))) {
|
||||
area_len, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Failed to allocate striped segment.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -192,9 +192,9 @@ static int _add_stripe_seg(struct dm_pool *mem,
|
||||
return_0;
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, *le_cur,
|
||||
area_len * usp->num_devs, 0,
|
||||
area_len * usp->num_devs, 0, 0,
|
||||
usp->striping, NULL, usp->num_devs,
|
||||
area_len, 0, 0, 0, NULL))) {
|
||||
area_len, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Unable to allocate striped lv_segment structure");
|
||||
return 0;
|
||||
}
|
||||
@@ -232,8 +232,8 @@ static int _add_linear_seg(struct dm_pool *mem,
|
||||
area_len = (usp->devs[j].blocks) / POOL_PE_SIZE;
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, *le_cur,
|
||||
area_len, 0, usp->striping,
|
||||
NULL, 1, area_len,
|
||||
area_len, 0, 0, usp->striping,
|
||||
NULL, 1, area_len, 0,
|
||||
POOL_PE_SIZE, 0, 0, NULL))) {
|
||||
log_error("Unable to allocate linear lv_segment "
|
||||
"structure");
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -583,8 +583,10 @@ static int _print_segment(struct formatter *f, struct volume_group *vg,
|
||||
outf(f, "start_extent = %u", seg->le);
|
||||
outsize(f, (uint64_t) seg->len * vg->extent_size,
|
||||
"extent_count = %u", seg->len);
|
||||
|
||||
outnl(f);
|
||||
if (seg->reshape_len)
|
||||
outsize(f, (uint64_t) seg->reshape_len * vg->extent_size,
|
||||
"reshape_count = %u", seg->reshape_len);
|
||||
outf(f, "type = \"%s\"", seg->segtype->name);
|
||||
|
||||
if (!_out_list(f, &seg->tags, "tags"))
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -61,6 +61,9 @@ static const struct flag _lv_flags[] = {
|
||||
{LOCKED, "LOCKED", STATUS_FLAG},
|
||||
{LV_NOTSYNCED, "NOTSYNCED", STATUS_FLAG},
|
||||
{LV_REBUILD, "REBUILD", STATUS_FLAG},
|
||||
{LV_RESHAPE_DELTA_DISKS_PLUS, "RESHAPE_DELTA_DISKS_PLUS", STATUS_FLAG},
|
||||
{LV_RESHAPE_DELTA_DISKS_MINUS, "RESHAPE_DELTA_DISKS_MINUS", STATUS_FLAG},
|
||||
{LV_REMOVE_AFTER_RESHAPE, "REMOVE_AFTER_RESHAPE", STATUS_FLAG},
|
||||
{LV_WRITEMOSTLY, "WRITEMOSTLY", STATUS_FLAG},
|
||||
{LV_ACTIVATION_SKIP, "ACTIVATION_SKIP", COMPATIBLE_FLAG},
|
||||
{LV_ERROR_WHEN_FULL, "ERROR_WHEN_FULL", COMPATIBLE_FLAG},
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -354,7 +354,7 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
|
||||
struct lv_segment *seg;
|
||||
const struct dm_config_node *sn_child = sn->child;
|
||||
const struct dm_config_value *cv;
|
||||
uint32_t start_extent, extent_count;
|
||||
uint32_t area_extents, start_extent, extent_count, reshape_count, data_copies;
|
||||
struct segment_type *segtype;
|
||||
const char *segtype_str;
|
||||
|
||||
@@ -375,6 +375,12 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!_read_int32(sn_child, "reshape_count", &reshape_count))
|
||||
reshape_count = 0;
|
||||
|
||||
if (!_read_int32(sn_child, "data_copies", &data_copies))
|
||||
data_copies = 1;
|
||||
|
||||
segtype_str = SEG_TYPE_NAME_STRIPED;
|
||||
|
||||
if (!dm_config_get_str(sn_child, "type", &segtype_str)) {
|
||||
@@ -389,9 +395,11 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
|
||||
!segtype->ops->text_import_area_count(sn_child, &area_count))
|
||||
return_0;
|
||||
|
||||
area_extents = segtype->parity_devs ?
|
||||
raid_rimage_extents(segtype, extent_count, area_count - segtype->parity_devs, data_copies) : extent_count;
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, start_extent,
|
||||
extent_count, 0, 0, NULL, area_count,
|
||||
extent_count, 0, 0, 0, NULL))) {
|
||||
extent_count, reshape_count, 0, 0, NULL, area_count,
|
||||
area_extents, data_copies, 0, 0, 0, NULL))) {
|
||||
log_error("Segment allocation failed");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -1104,6 +1104,19 @@ int lv_raid_healthy(const struct logical_volume *lv)
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Helper: check for any sub LVs after a disk removing reshape */
|
||||
static int _sublvs_remove_after_reshape(const struct logical_volume *lv)
|
||||
{
|
||||
uint32_t s;
|
||||
struct lv_segment *seg = first_seg(lv);
|
||||
|
||||
for (s = seg->area_count -1; s; s--)
|
||||
if (seg_lv(seg, s)->status & LV_REMOVE_AFTER_RESHAPE)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_with_info_and_seg_status *lvdm)
|
||||
{
|
||||
const struct logical_volume *lv = lvdm->lv;
|
||||
@@ -1269,6 +1282,8 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
|
||||
repstr[8] = 'p';
|
||||
else if (lv_is_raid_type(lv)) {
|
||||
uint64_t n;
|
||||
char *sync_action;
|
||||
|
||||
if (!activation())
|
||||
repstr[8] = 'X'; /* Unknown */
|
||||
else if (!lv_raid_healthy(lv))
|
||||
@@ -1276,8 +1291,17 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
|
||||
else if (lv_is_raid(lv)) {
|
||||
if (lv_raid_mismatch_count(lv, &n) && n)
|
||||
repstr[8] = 'm'; /* RAID has 'm'ismatches */
|
||||
else if (lv_raid_sync_action(lv, &sync_action) &&
|
||||
!strcmp(sync_action, "reshape"))
|
||||
repstr[8] = 's'; /* LV is re(s)haping */
|
||||
else if (_sublvs_remove_after_reshape(lv))
|
||||
repstr[8] = 'R'; /* sub-LV got freed from raid set by reshaping
|
||||
and has to be 'R'emoved */
|
||||
} else if (lv->status & LV_WRITEMOSTLY)
|
||||
repstr[8] = 'w'; /* sub-LV has 'w'ritemostly */
|
||||
else if (lv->status & LV_REMOVE_AFTER_RESHAPE)
|
||||
repstr[8] = 'R'; /* sub-LV got freed from raid set by reshaping
|
||||
and has to be 'R'emoved */
|
||||
} else if (lvdm->seg_status.type == SEG_STATUS_CACHE) {
|
||||
if (lvdm->seg_status.cache->fail)
|
||||
repstr[8] = 'F';
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -21,11 +21,13 @@
|
||||
struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
|
||||
struct logical_volume *lv,
|
||||
uint32_t le, uint32_t len,
|
||||
uint32_t reshape_len,
|
||||
uint64_t status,
|
||||
uint32_t stripe_size,
|
||||
struct logical_volume *log_lv,
|
||||
uint32_t area_count,
|
||||
uint32_t area_len,
|
||||
uint32_t data_copies,
|
||||
uint32_t chunk_size,
|
||||
uint32_t region_size,
|
||||
uint32_t extents_copied,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -912,11 +912,13 @@ static uint32_t _round_to_stripe_boundary(struct volume_group *vg, uint32_t exte
|
||||
struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
|
||||
struct logical_volume *lv,
|
||||
uint32_t le, uint32_t len,
|
||||
uint32_t reshape_len,
|
||||
uint64_t status,
|
||||
uint32_t stripe_size,
|
||||
struct logical_volume *log_lv,
|
||||
uint32_t area_count,
|
||||
uint32_t area_len,
|
||||
uint32_t data_copies,
|
||||
uint32_t chunk_size,
|
||||
uint32_t region_size,
|
||||
uint32_t extents_copied,
|
||||
@@ -950,10 +952,12 @@ struct lv_segment *alloc_lv_segment(const struct segment_type *segtype,
|
||||
seg->lv = lv;
|
||||
seg->le = le;
|
||||
seg->len = len;
|
||||
seg->reshape_len = reshape_len;
|
||||
seg->status = status;
|
||||
seg->stripe_size = stripe_size;
|
||||
seg->area_count = area_count;
|
||||
seg->area_len = area_len;
|
||||
seg->data_copies = data_copies ? : lv_raid_data_copies(segtype, area_count);
|
||||
seg->chunk_size = chunk_size;
|
||||
seg->region_size = region_size;
|
||||
seg->extents_copied = extents_copied;
|
||||
@@ -1047,11 +1051,10 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
|
||||
if (lv_is_raid_image(lv)) {
|
||||
/* Calculate the amount of extents to reduce per rmate/rimage LV */
|
||||
uint32_t rimage_extents;
|
||||
struct lv_segment *seg1 = first_seg(lv);
|
||||
|
||||
/* FIXME: avoid extra seg_is_*() conditonals */
|
||||
area_reduction =_round_to_stripe_boundary(lv->vg, area_reduction,
|
||||
(seg_is_raid1(seg) || seg_is_any_raid0(seg)) ? 0 : _raid_stripes_count(seg), 0);
|
||||
rimage_extents = raid_rimage_extents(seg->segtype, area_reduction, seg_is_any_raid0(seg) ? 0 : _raid_stripes_count(seg),
|
||||
/* FIXME: avoid extra seg_is_*() conditionals here */
|
||||
rimage_extents = raid_rimage_extents(seg1->segtype, area_reduction, seg_is_any_raid0(seg) ? 0 : _raid_stripes_count(seg),
|
||||
seg_is_raid10(seg) ? 1 :_raid_data_copies(seg));
|
||||
if (!rimage_extents)
|
||||
return 0;
|
||||
@@ -1258,7 +1261,7 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
|
||||
* the 'stripes' argument will always need to
|
||||
* be given.
|
||||
*/
|
||||
if (!strcmp(segtype->name, _lv_type_names[LV_TYPE_RAID10])) {
|
||||
if (segtype_is_raid10(segtype)) {
|
||||
if (!stripes)
|
||||
return area_count / 2;
|
||||
return stripes;
|
||||
@@ -1278,16 +1281,17 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
|
||||
static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
|
||||
{
|
||||
uint32_t area_reduction, s;
|
||||
uint32_t areas = (seg->area_count / (seg_is_raid10(seg) ? seg->data_copies : 1)) - seg->segtype->parity_devs;
|
||||
|
||||
/* Caller must ensure exact divisibility */
|
||||
if (seg_is_striped(seg)) {
|
||||
if (reduction % seg->area_count) {
|
||||
if (seg_is_striped(seg) || seg_is_striped_raid(seg)) {
|
||||
if (reduction % areas) {
|
||||
log_error("Segment extent reduction %" PRIu32
|
||||
" not divisible by #stripes %" PRIu32,
|
||||
reduction, seg->area_count);
|
||||
return 0;
|
||||
}
|
||||
area_reduction = (reduction / seg->area_count);
|
||||
area_reduction = reduction / areas;
|
||||
} else
|
||||
area_reduction = reduction;
|
||||
|
||||
@@ -1296,7 +1300,11 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
|
||||
return_0;
|
||||
|
||||
seg->len -= reduction;
|
||||
seg->area_len -= area_reduction;
|
||||
|
||||
if (seg_is_raid(seg))
|
||||
seg->area_len = seg->len;
|
||||
else
|
||||
seg->area_len -= area_reduction;
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -1306,11 +1314,13 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
|
||||
*/
|
||||
static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
|
||||
{
|
||||
struct lv_segment *seg;
|
||||
struct lv_segment *seg = first_seg(lv);;
|
||||
uint32_t count = extents;
|
||||
uint32_t reduction;
|
||||
struct logical_volume *pool_lv;
|
||||
struct logical_volume *external_lv = NULL;
|
||||
int is_raid10 = seg_is_any_raid10(seg) && seg->reshape_len;
|
||||
uint32_t data_copies = seg->data_copies;
|
||||
|
||||
if (lv_is_merging_origin(lv)) {
|
||||
log_debug_metadata("Dropping snapshot merge of %s to removed origin %s.",
|
||||
@@ -1373,8 +1383,18 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
|
||||
count -= reduction;
|
||||
}
|
||||
|
||||
lv->le_count -= extents;
|
||||
seg = first_seg(lv);
|
||||
|
||||
if (is_raid10) {
|
||||
lv->le_count -= extents * data_copies;
|
||||
if (seg)
|
||||
seg->len = seg->area_len = lv->le_count;
|
||||
} else
|
||||
lv->le_count -= extents;
|
||||
|
||||
lv->size = (uint64_t) lv->le_count * lv->vg->extent_size;
|
||||
if (seg)
|
||||
seg->extents_copied = seg->len;
|
||||
|
||||
if (!delete)
|
||||
return 1;
|
||||
@@ -1487,11 +1507,10 @@ int lv_reduce(struct logical_volume *lv, uint32_t extents)
|
||||
{
|
||||
struct lv_segment *seg = first_seg(lv);
|
||||
|
||||
/* Ensure stipe boundary extents on RAID LVs */
|
||||
/* Ensure stripe boundary extents on RAID LVs */
|
||||
if (lv_is_raid(lv) && extents != lv->le_count)
|
||||
extents =_round_to_stripe_boundary(lv->vg, extents,
|
||||
seg_is_raid1(seg) ? 0 : _raid_stripes_count(seg), 0);
|
||||
|
||||
return _lv_reduce(lv, extents, 1);
|
||||
}
|
||||
|
||||
@@ -1793,10 +1812,10 @@ static int _setup_alloced_segment(struct logical_volume *lv, uint64_t status,
|
||||
area_multiple = _calc_area_multiple(segtype, area_count, 0);
|
||||
extents = aa[0].len * area_multiple;
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents,
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents, 0,
|
||||
status, stripe_size, NULL,
|
||||
area_count,
|
||||
aa[0].len, 0u, region_size, 0u, NULL))) {
|
||||
aa[0].len, 0, 0u, region_size, 0u, NULL))) {
|
||||
log_error("Couldn't allocate new LV segment.");
|
||||
return 0;
|
||||
}
|
||||
@@ -3234,9 +3253,9 @@ int lv_add_virtual_segment(struct logical_volume *lv, uint64_t status,
|
||||
seg->area_len += extents;
|
||||
seg->len += extents;
|
||||
} else {
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents,
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents, 0,
|
||||
status, 0, NULL, 0,
|
||||
extents, 0, 0, 0, NULL))) {
|
||||
extents, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Couldn't allocate new %s segment.", segtype->name);
|
||||
return 0;
|
||||
}
|
||||
@@ -3562,10 +3581,10 @@ static struct lv_segment *_convert_seg_to_mirror(struct lv_segment *seg,
|
||||
}
|
||||
|
||||
if (!(newseg = alloc_lv_segment(get_segtype_from_string(seg->lv->vg->cmd, SEG_TYPE_NAME_MIRROR),
|
||||
seg->lv, seg->le, seg->len,
|
||||
seg->lv, seg->le, seg->len, 0,
|
||||
seg->status, seg->stripe_size,
|
||||
log_lv,
|
||||
seg->area_count, seg->area_len,
|
||||
seg->area_count, seg->area_len, 0,
|
||||
seg->chunk_size, region_size,
|
||||
seg->extents_copied, NULL))) {
|
||||
log_error("Couldn't allocate converted LV segment.");
|
||||
@@ -3667,8 +3686,8 @@ int lv_add_segmented_mirror_image(struct alloc_handle *ah,
|
||||
}
|
||||
|
||||
if (!(new_seg = alloc_lv_segment(segtype, copy_lv,
|
||||
seg->le, seg->len, PVMOVE, 0,
|
||||
NULL, 1, seg->len,
|
||||
seg->le, seg->len, 0, PVMOVE, 0,
|
||||
NULL, 1, seg->len, 0,
|
||||
0, 0, 0, NULL)))
|
||||
return_0;
|
||||
|
||||
@@ -3863,9 +3882,9 @@ static int _lv_insert_empty_sublvs(struct logical_volume *lv,
|
||||
/*
|
||||
* First, create our top-level segment for our top-level LV
|
||||
*/
|
||||
if (!(mapseg = alloc_lv_segment(segtype, lv, 0, 0, lv->status,
|
||||
if (!(mapseg = alloc_lv_segment(segtype, lv, 0, 0, 0, lv->status,
|
||||
stripe_size, NULL,
|
||||
devices, 0, 0, region_size, 0, NULL))) {
|
||||
devices, 0, 0, 0, region_size, 0, NULL))) {
|
||||
log_error("Failed to create mapping segment for %s.",
|
||||
display_lvname(lv));
|
||||
return 0;
|
||||
@@ -3925,7 +3944,7 @@ bad:
|
||||
static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
struct logical_volume *lv,
|
||||
uint32_t extents, uint32_t first_area,
|
||||
uint32_t stripes, uint32_t stripe_size)
|
||||
uint32_t mirrors, uint32_t stripes, uint32_t stripe_size)
|
||||
{
|
||||
const struct segment_type *segtype;
|
||||
struct logical_volume *sub_lv, *meta_lv;
|
||||
@@ -3953,7 +3972,7 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
for (fa = first_area, s = 0; s < seg->area_count; s++) {
|
||||
if (is_temporary_mirror_layer(seg_lv(seg, s))) {
|
||||
if (!_lv_extend_layered_lv(ah, seg_lv(seg, s), extents / area_multiple,
|
||||
fa, stripes, stripe_size))
|
||||
fa, mirrors, stripes, stripe_size))
|
||||
return_0;
|
||||
fa += lv_mirror_count(seg_lv(seg, s));
|
||||
continue;
|
||||
@@ -3967,6 +3986,8 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
return 0;
|
||||
}
|
||||
|
||||
last_seg(lv)->data_copies = mirrors;
|
||||
|
||||
/* Extend metadata LVs only on initial creation */
|
||||
if (seg_is_raid_with_meta(seg) && !lv->le_count) {
|
||||
if (!seg->meta_areas) {
|
||||
@@ -4063,8 +4084,11 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
|
||||
lv_set_hidden(seg_metalv(seg, s));
|
||||
}
|
||||
|
||||
seg->area_len += extents / area_multiple;
|
||||
seg->len += extents;
|
||||
if (seg_is_raid(seg))
|
||||
seg->area_len = seg->len;
|
||||
else
|
||||
seg->area_len += extents / area_multiple;
|
||||
|
||||
if (!_setup_lv_size(lv, lv->le_count + extents))
|
||||
return_0;
|
||||
@@ -4171,7 +4195,7 @@ int lv_extend(struct logical_volume *lv,
|
||||
}
|
||||
|
||||
if (!(r = _lv_extend_layered_lv(ah, lv, new_extents - lv->le_count, 0,
|
||||
stripes, stripe_size)))
|
||||
mirrors, stripes, stripe_size)))
|
||||
goto_out;
|
||||
|
||||
/*
|
||||
@@ -5391,6 +5415,17 @@ int lv_resize(struct logical_volume *lv,
|
||||
if (!_lvresize_check(lv, lp))
|
||||
return_0;
|
||||
|
||||
if (seg->reshape_len) {
|
||||
/* Prevent resizing on out-of-sync reshapable raid */
|
||||
if (!lv_raid_in_sync(lv)) {
|
||||
log_error("Can't resize reshaping LV %s.", display_lvname(lv));
|
||||
return 0;
|
||||
}
|
||||
/* Remove any striped raid reshape space for LV resizing */
|
||||
if (!lv_raid_free_reshape_space(lv))
|
||||
return_0;
|
||||
}
|
||||
|
||||
if (lp->use_policies) {
|
||||
lp->extents = 0;
|
||||
lp->sign = SIGN_PLUS;
|
||||
@@ -5902,6 +5937,7 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
|
||||
int ask_discard;
|
||||
struct lv_list *lvl;
|
||||
struct seg_list *sl;
|
||||
struct lv_segment *seg = first_seg(lv);
|
||||
int is_last_pool = lv_is_pool(lv);
|
||||
|
||||
vg = lv->vg;
|
||||
@@ -6008,6 +6044,13 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
|
||||
is_last_pool = 1;
|
||||
}
|
||||
|
||||
/* Special case removing a striped raid LV with allocated reshape space */
|
||||
if (seg && seg->reshape_len) {
|
||||
if (!(seg->segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
|
||||
return_0;
|
||||
lv->le_count = seg->len = seg->area_len = seg_lv(seg, 0)->le_count * seg->area_count;
|
||||
}
|
||||
|
||||
/* Used cache pool, COW or historical LV cannot be activated */
|
||||
if ((!lv_is_cache_pool(lv) || dm_list_empty(&lv->segs_using_this_lv)) &&
|
||||
!lv_is_cow(lv) && !lv_is_historical(lv) &&
|
||||
@@ -6309,7 +6352,6 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
|
||||
|
||||
log_very_verbose("Updating logical volume %s on disk(s)%s.",
|
||||
display_lvname(lock_lv), origin_only ? " (origin only)": "");
|
||||
|
||||
if (!vg_write(vg))
|
||||
return_0;
|
||||
|
||||
@@ -6776,8 +6818,8 @@ struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
|
||||
return_NULL;
|
||||
|
||||
/* allocate a new linear segment */
|
||||
if (!(mapseg = alloc_lv_segment(segtype, lv_where, 0, layer_lv->le_count,
|
||||
status, 0, NULL, 1, layer_lv->le_count,
|
||||
if (!(mapseg = alloc_lv_segment(segtype, lv_where, 0, layer_lv->le_count, 0,
|
||||
status, 0, NULL, 1, layer_lv->le_count, 0,
|
||||
0, 0, 0, NULL)))
|
||||
return_NULL;
|
||||
|
||||
@@ -6833,8 +6875,8 @@ static int _extend_layer_lv_for_segment(struct logical_volume *layer_lv,
|
||||
|
||||
/* allocate a new segment */
|
||||
if (!(mapseg = alloc_lv_segment(segtype, layer_lv, layer_lv->le_count,
|
||||
seg->area_len, status, 0,
|
||||
NULL, 1, seg->area_len, 0, 0, 0, seg)))
|
||||
seg->area_len, 0, status, 0,
|
||||
NULL, 1, seg->area_len, 0, 0, 0, 0, seg)))
|
||||
return_0;
|
||||
|
||||
/* map the new segment to the original underlying are */
|
||||
|
||||
@@ -236,7 +236,7 @@ static void _check_raid_seg(struct lv_segment *seg, int *error_count)
|
||||
if (!seg->areas)
|
||||
raid_seg_error("zero areas");
|
||||
|
||||
if (seg->extents_copied > seg->area_len)
|
||||
if (seg->extents_copied > seg->len)
|
||||
raid_seg_error_val("extents_copied too large", seg->extents_copied);
|
||||
|
||||
/* Default < 10, change once raid1 split shift and rename SubLVs works! */
|
||||
@@ -475,7 +475,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
|
||||
struct lv_segment *seg, *seg2;
|
||||
uint32_t le = 0;
|
||||
unsigned seg_count = 0, seg_found, external_lv_found = 0;
|
||||
uint32_t area_multiplier, s;
|
||||
uint32_t data_rimage_count, s;
|
||||
struct seg_list *sl;
|
||||
struct glv_list *glvl;
|
||||
int error_count = 0;
|
||||
@@ -498,13 +498,13 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
|
||||
inc_error_count;
|
||||
}
|
||||
|
||||
area_multiplier = segtype_is_striped(seg->segtype) ?
|
||||
seg->area_count : 1;
|
||||
|
||||
if (seg->area_len * area_multiplier != seg->len) {
|
||||
log_error("LV %s: segment %u has inconsistent "
|
||||
"area_len %u",
|
||||
lv->name, seg_count, seg->area_len);
|
||||
data_rimage_count = seg->area_count - seg->segtype->parity_devs;
|
||||
/* FIXME: raid varies seg->area_len? */
|
||||
if (seg->len != seg->area_len &&
|
||||
seg->len != seg->area_len * data_rimage_count) {
|
||||
log_error("LV %s: segment %u with len=%u "
|
||||
" has inconsistent area_len %u",
|
||||
lv->name, seg_count, seg->len, seg->area_len);
|
||||
inc_error_count;
|
||||
}
|
||||
|
||||
@@ -766,10 +766,10 @@ static int _lv_split_segment(struct logical_volume *lv, struct lv_segment *seg,
|
||||
|
||||
/* Clone the existing segment */
|
||||
if (!(split_seg = alloc_lv_segment(seg->segtype,
|
||||
seg->lv, seg->le, seg->len,
|
||||
seg->lv, seg->le, seg->len, seg->reshape_len,
|
||||
seg->status, seg->stripe_size,
|
||||
seg->log_lv,
|
||||
seg->area_count, seg->area_len,
|
||||
seg->area_count, seg->area_len, seg->data_copies,
|
||||
seg->chunk_size, seg->region_size,
|
||||
seg->extents_copied, seg->pvmove_source_seg))) {
|
||||
log_error("Couldn't allocate cloned LV segment.");
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -137,7 +137,11 @@
|
||||
e.g. to prohibit allocation of a RAID image
|
||||
on a PV already holing an image of the RAID set */
|
||||
#define LOCKD_SANLOCK_LV UINT64_C(0x0080000000000000) /* LV - Internal use only */
|
||||
/* Next unused flag: UINT64_C(0x0100000000000000) */
|
||||
#define LV_RESHAPE_DELTA_DISKS_PLUS UINT64_C(0x0100000000000000) /* LV reshape flag delta disks plus image(s) */
|
||||
#define LV_RESHAPE_DELTA_DISKS_MINUS UINT64_C(0x0200000000000000) /* LV reshape flag delta disks minus image(s) */
|
||||
|
||||
#define LV_REMOVE_AFTER_RESHAPE UINT64_C(0x0400000000000000) /* LV needs to be removed after a shrinking reshape */
|
||||
/* Next unused flag: UINT64_C(0x0800000000000000) */
|
||||
|
||||
/* Format features flags */
|
||||
#define FMT_SEGMENTS 0x00000001U /* Arbitrary segment params? */
|
||||
@@ -446,6 +450,7 @@ struct lv_segment {
|
||||
const struct segment_type *segtype;
|
||||
uint32_t le;
|
||||
uint32_t len;
|
||||
uint32_t reshape_len; /* For RAID: user hidden additional out of place reshaping length off area_len and len */
|
||||
|
||||
uint64_t status;
|
||||
|
||||
@@ -454,6 +459,7 @@ struct lv_segment {
|
||||
uint32_t writebehind; /* For RAID (RAID1 only) */
|
||||
uint32_t min_recovery_rate; /* For RAID */
|
||||
uint32_t max_recovery_rate; /* For RAID */
|
||||
uint32_t data_offset; /* For RAID: data offset in sectors on each data component image */
|
||||
uint32_t area_count;
|
||||
uint32_t area_len;
|
||||
uint32_t chunk_size; /* For snapshots/thin_pool. In sectors. */
|
||||
@@ -464,6 +470,7 @@ struct lv_segment {
|
||||
struct logical_volume *cow;
|
||||
struct dm_list origin_list;
|
||||
uint32_t region_size; /* For mirrors, replicators - in sectors */
|
||||
uint32_t data_copies; /* For RAID: number of data copies (e.g. 3 for RAID 6 */
|
||||
uint32_t extents_copied;/* Number of extents synced for raids/mirrors */
|
||||
struct logical_volume *log_lv;
|
||||
struct lv_segment *pvmove_source_seg;
|
||||
@@ -1205,7 +1212,9 @@ struct logical_volume *first_replicator_dev(const struct logical_volume *lv);
|
||||
int lv_is_raid_with_tracking(const struct logical_volume *lv);
|
||||
uint32_t lv_raid_image_count(const struct logical_volume *lv);
|
||||
int lv_raid_change_image_count(struct logical_volume *lv,
|
||||
uint32_t new_count, struct dm_list *allocate_pvs);
|
||||
uint32_t new_count,
|
||||
uint32_t new_region_size,
|
||||
struct dm_list *allocate_pvs);
|
||||
int lv_raid_split(struct logical_volume *lv, const char *split_name,
|
||||
uint32_t new_count, struct dm_list *splittable_pvs);
|
||||
int lv_raid_split_and_track(struct logical_volume *lv,
|
||||
@@ -1233,6 +1242,8 @@ uint32_t raid_ensure_min_region_size(const struct logical_volume *lv, uint64_t r
|
||||
int lv_raid_change_region_size(struct logical_volume *lv,
|
||||
int yes, int force, uint32_t new_region_size);
|
||||
int lv_raid_in_sync(const struct logical_volume *lv);
|
||||
uint32_t lv_raid_data_copies(const struct segment_type *segtype, uint32_t area_count);
|
||||
int lv_raid_free_reshape_space(const struct logical_volume *lv);
|
||||
/* -- metadata/raid_manip.c */
|
||||
|
||||
/* ++ metadata/cache_manip.c */
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -43,7 +43,8 @@ struct segment_type *get_segtype_from_flag(struct cmd_context *cmd, uint64_t fla
|
||||
{
|
||||
struct segment_type *segtype;
|
||||
|
||||
dm_list_iterate_items(segtype, &cmd->segtypes)
|
||||
/* Iterate backwards to provide aliases; e.g. raid5 instead of raid5_ls */
|
||||
dm_list_iterate_back_items(segtype, &cmd->segtypes)
|
||||
if (flag & segtype->flags)
|
||||
return segtype;
|
||||
|
||||
|
||||
@@ -140,7 +140,11 @@ struct dev_manager;
|
||||
#define segtype_is_any_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
|
||||
#define segtype_is_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
|
||||
#define segtype_is_raid10_near(segtype) segtype_is_raid10(segtype)
|
||||
/* FIXME: once raid10_offset supported */
|
||||
#define segtype_is_raid10_offset(segtype) 0 // ((segtype)->flags & SEG_RAID10_OFFSET ? 1 : 0)
|
||||
#define segtype_is_raid_with_meta(segtype) (segtype_is_raid(segtype) && !segtype_is_raid0(segtype))
|
||||
#define segtype_is_striped_raid(segtype) (segtype_is_raid(segtype) && !segtype_is_raid1(segtype))
|
||||
#define segtype_is_reshapable_raid(segtype) ((segtype_is_striped_raid(segtype) && !segtype_is_any_raid0(segtype)) || segtype_is_raid10_near(segtype) || segtype_is_raid10_offset(segtype))
|
||||
#define segtype_is_snapshot(segtype) ((segtype)->flags & SEG_SNAPSHOT ? 1 : 0)
|
||||
#define segtype_is_striped(segtype) ((segtype)->flags & SEG_AREAS_STRIPED ? 1 : 0)
|
||||
#define segtype_is_thin(segtype) ((segtype)->flags & (SEG_THIN_POOL|SEG_THIN_VOLUME) ? 1 : 0)
|
||||
@@ -190,6 +194,8 @@ struct dev_manager;
|
||||
#define seg_is_raid10(seg) segtype_is_raid10((seg)->segtype)
|
||||
#define seg_is_raid10_near(seg) segtype_is_raid10_near((seg)->segtype)
|
||||
#define seg_is_raid_with_meta(seg) segtype_is_raid_with_meta((seg)->segtype)
|
||||
#define seg_is_striped_raid(seg) segtype_is_striped_raid((seg)->segtype)
|
||||
#define seg_is_reshapable_raid(seg) segtype_is_reshapable_raid((seg)->segtype)
|
||||
#define seg_is_replicator(seg) ((seg)->segtype->flags & SEG_REPLICATOR ? 1 : 0)
|
||||
#define seg_is_replicator_dev(seg) ((seg)->segtype->flags & SEG_REPLICATOR_DEV ? 1 : 0)
|
||||
#define seg_is_snapshot(seg) segtype_is_snapshot((seg)->segtype)
|
||||
@@ -280,6 +286,7 @@ struct segment_type *init_unknown_segtype(struct cmd_context *cmd,
|
||||
#define RAID_FEATURE_RAID0 (1U << 1) /* version 1.7 */
|
||||
#define RAID_FEATURE_RESHAPING (1U << 2) /* version 1.8 */
|
||||
#define RAID_FEATURE_RAID4 (1U << 3) /* ! version 1.8 or 1.9.0 */
|
||||
#define RAID_FEATURE_RESHAPE (1U << 4) /* version 1.10.1 */
|
||||
|
||||
#ifdef RAID_INTERNAL
|
||||
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
|
||||
|
||||
@@ -238,8 +238,8 @@ static struct lv_segment *_alloc_snapshot_seg(struct logical_volume *lv)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, 0, lv->le_count, 0, 0,
|
||||
NULL, 0, lv->le_count, 0, 0, 0, NULL))) {
|
||||
if (!(seg = alloc_lv_segment(segtype, lv, 0, lv->le_count, 0, 0, 0,
|
||||
NULL, 0, lv->le_count, 0, 0, 0, 0, NULL))) {
|
||||
log_error("Couldn't allocate new snapshot segment.");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -58,13 +58,13 @@
|
||||
#define r1__r0m _takeover_from_raid1_to_raid0_meta
|
||||
#define r1__r1 _takeover_from_raid1_to_raid1
|
||||
#define r1__r10 _takeover_from_raid1_to_raid10
|
||||
#define r1__r45 _takeover_from_raid1_to_raid45
|
||||
#define r1__r5 _takeover_from_raid1_to_raid5
|
||||
#define r1__str _takeover_from_raid1_to_striped
|
||||
#define r45_lin _takeover_from_raid45_to_linear
|
||||
#define r45_mir _takeover_from_raid45_to_mirrored
|
||||
#define r45_r0 _takeover_from_raid45_to_raid0
|
||||
#define r45_r0m _takeover_from_raid45_to_raid0_meta
|
||||
#define r45_r1 _takeover_from_raid45_to_raid1
|
||||
#define r5_r1 _takeover_from_raid5_to_raid1
|
||||
#define r45_r54 _takeover_from_raid45_to_raid54
|
||||
#define r45_r6 _takeover_from_raid45_to_raid6
|
||||
#define r45_str _takeover_from_raid45_to_striped
|
||||
@@ -109,8 +109,8 @@ static takeover_fn_t _takeover_fns[][11] = {
|
||||
/* mirror */ { X , X , N , mir_r0, mir_r0m, mir_r1, mir_r45, X , mir_r10, X , X },
|
||||
/* raid0 */ { r0__lin, r0__str, r0__mir, N , r0__r0m, r0__r1, r0__r45, r0__r6, r0__r10, X , X },
|
||||
/* raid0_meta */ { r0m_lin, r0m_str, r0m_mir, r0m_r0, N , r0m_r1, r0m_r45, r0m_r6, r0m_r10, X , X },
|
||||
/* raid1 */ { r1__lin, r1__str, r1__mir, r1__r0, r1__r0m, r1__r1, r1__r45, X , r1__r10, X , X },
|
||||
/* raid4/5 */ { r45_lin, r45_str, r45_mir, r45_r0, r45_r0m, r45_r1, r45_r54, r45_r6, X , X , X },
|
||||
/* raid1 */ { r1__lin, r1__str, r1__mir, r1__r0, r1__r0m, r1__r1, r1__r5, X , r1__r10, X , X },
|
||||
/* raid4/5 */ { r45_lin, r45_str, r45_mir, r45_r0, r45_r0m, r5_r1 , r45_r54, r45_r6, X , X , X },
|
||||
/* raid6 */ { X , r6__str, X , r6__r0, r6__r0m, X , r6__r45, X , X , X , X },
|
||||
/* raid10 */ { r10_lin, r10_str, r10_mir, r10_r0, r10_r0m, r10_r1, X , X , X , X , X },
|
||||
/* raid01 */ // { X , r01_str, X , X , X , X , X , X , r01_r10, r01_r01, X },
|
||||
|
||||
165
lib/raid/raid.c
165
lib/raid/raid.c
@@ -137,6 +137,7 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
} raid_attr_import[] = {
|
||||
{ "region_size", &seg->region_size },
|
||||
{ "stripe_size", &seg->stripe_size },
|
||||
{ "data_copies", &seg->data_copies },
|
||||
{ "writebehind", &seg->writebehind },
|
||||
{ "min_recovery_rate", &seg->min_recovery_rate },
|
||||
{ "max_recovery_rate", &seg->max_recovery_rate },
|
||||
@@ -146,6 +147,10 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
for (i = 0; i < DM_ARRAY_SIZE(raid_attr_import); i++, aip++) {
|
||||
if (dm_config_has_node(sn, aip->name)) {
|
||||
if (!dm_config_get_uint32(sn, aip->name, aip->var)) {
|
||||
if (!strcmp(aip->name, "data_copies")) {
|
||||
*aip->var = 0;
|
||||
continue;
|
||||
}
|
||||
log_error("Couldn't read '%s' for segment %s of logical volume %s.",
|
||||
aip->name, dm_config_parent_name(sn), seg->lv->name);
|
||||
return 0;
|
||||
@@ -165,6 +170,9 @@ static int _raid_text_import(struct lv_segment *seg,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (seg->data_copies < 2)
|
||||
seg->data_copies = lv_raid_data_copies(seg->segtype, seg->area_count);
|
||||
|
||||
if (seg_is_any_raid0(seg))
|
||||
seg->area_len /= seg->area_count;
|
||||
|
||||
@@ -183,18 +191,31 @@ static int _raid_text_export_raid0(const struct lv_segment *seg, struct formatte
|
||||
|
||||
static int _raid_text_export_raid(const struct lv_segment *seg, struct formatter *f)
|
||||
{
|
||||
outf(f, "device_count = %u", seg->area_count);
|
||||
int raid0 = seg_is_any_raid0(seg);
|
||||
|
||||
if (raid0)
|
||||
outfc(f, (seg->area_count == 1) ? "# linear" : NULL,
|
||||
"stripe_count = %u", seg->area_count);
|
||||
|
||||
else {
|
||||
outf(f, "device_count = %u", seg->area_count);
|
||||
if (seg_is_any_raid10(seg) && seg->data_copies > 0)
|
||||
outf(f, "data_copies = %" PRIu32, seg->data_copies);
|
||||
if (seg->region_size)
|
||||
outf(f, "region_size = %" PRIu32, seg->region_size);
|
||||
}
|
||||
|
||||
if (seg->stripe_size)
|
||||
outf(f, "stripe_size = %" PRIu32, seg->stripe_size);
|
||||
if (seg->region_size)
|
||||
outf(f, "region_size = %" PRIu32, seg->region_size);
|
||||
if (seg->writebehind)
|
||||
outf(f, "writebehind = %" PRIu32, seg->writebehind);
|
||||
if (seg->min_recovery_rate)
|
||||
outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);
|
||||
if (seg->max_recovery_rate)
|
||||
outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);
|
||||
|
||||
if (!raid0) {
|
||||
if (seg_is_raid1(seg) && seg->writebehind)
|
||||
outf(f, "writebehind = %" PRIu32, seg->writebehind);
|
||||
if (seg->min_recovery_rate)
|
||||
outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);
|
||||
if (seg->max_recovery_rate)
|
||||
outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);
|
||||
}
|
||||
|
||||
return out_areas(f, seg, "raid");
|
||||
}
|
||||
@@ -216,14 +237,16 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
|
||||
struct dm_tree_node *node, uint64_t len,
|
||||
uint32_t *pvmove_mirror_count __attribute__((unused)))
|
||||
{
|
||||
int delta_disks = 0, delta_disks_minus = 0, delta_disks_plus = 0, data_offset = 0;
|
||||
uint32_t s;
|
||||
uint64_t flags = 0;
|
||||
uint64_t rebuilds = 0;
|
||||
uint64_t writemostly = 0;
|
||||
struct dm_tree_node_raid_params params;
|
||||
int raid0 = seg_is_any_raid0(seg);
|
||||
uint64_t rebuilds[RAID_BITMAP_SIZE];
|
||||
uint64_t writemostly[RAID_BITMAP_SIZE];
|
||||
struct dm_tree_node_raid_params_v2 params;
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
memset(&rebuilds, 0, sizeof(rebuilds));
|
||||
memset(&writemostly, 0, sizeof(writemostly));
|
||||
|
||||
if (!seg->area_count) {
|
||||
log_error(INTERNAL_ERROR "_raid_add_target_line called "
|
||||
@@ -232,64 +255,85 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
|
||||
}
|
||||
|
||||
/*
|
||||
* 64 device restriction imposed by kernel as well. It is
|
||||
* not strictly a userspace limitation.
|
||||
* 253 device restriction imposed by kernel due to MD and dm-raid bitfield limitation in superblock.
|
||||
* It is not strictly a userspace limitation.
|
||||
*/
|
||||
if (seg->area_count > 64) {
|
||||
log_error("Unable to handle more than 64 devices in a "
|
||||
"single RAID array");
|
||||
if (seg->area_count > DEFAULT_RAID_MAX_IMAGES) {
|
||||
log_error("Unable to handle more than %u devices in a "
|
||||
"single RAID array", DEFAULT_RAID_MAX_IMAGES);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!raid0) {
|
||||
if (!seg_is_any_raid0(seg)) {
|
||||
if (!seg->region_size) {
|
||||
log_error("Missing region size for mirror segment.");
|
||||
log_error("Missing region size for raid segment in %s.",
|
||||
seg_lv(seg, 0)->name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (s = 0; s < seg->area_count; s++)
|
||||
if (seg_lv(seg, s)->status & LV_REBUILD)
|
||||
rebuilds |= 1ULL << s;
|
||||
for (s = 0; s < seg->area_count; s++) {
|
||||
uint64_t status = seg_lv(seg, s)->status;
|
||||
|
||||
for (s = 0; s < seg->area_count; s++)
|
||||
if (seg_lv(seg, s)->status & LV_WRITEMOSTLY)
|
||||
writemostly |= 1ULL << s;
|
||||
if (status & LV_REBUILD)
|
||||
rebuilds[s/64] |= 1ULL << (s%64);
|
||||
|
||||
if (status & LV_RESHAPE_DELTA_DISKS_PLUS) {
|
||||
delta_disks++;
|
||||
delta_disks_plus++;
|
||||
} else if (status & LV_RESHAPE_DELTA_DISKS_MINUS) {
|
||||
delta_disks--;
|
||||
delta_disks_minus++;
|
||||
}
|
||||
|
||||
if (delta_disks_plus && delta_disks_minus) {
|
||||
log_error(INTERNAL_ERROR "Invalid request for delta disks minus and delta disks plus!");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (status & LV_WRITEMOSTLY)
|
||||
writemostly[s/64] |= 1ULL << (s%64);
|
||||
}
|
||||
|
||||
data_offset = seg->data_offset;
|
||||
|
||||
if (mirror_in_sync())
|
||||
flags = DM_NOSYNC;
|
||||
}
|
||||
|
||||
params.raid_type = lvseg_name(seg);
|
||||
params.stripe_size = seg->stripe_size;
|
||||
params.flags = flags;
|
||||
|
||||
if (raid0) {
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count;
|
||||
} else if (seg->segtype->parity_devs) {
|
||||
if (seg->segtype->parity_devs) {
|
||||
/* RAID 4/5/6 */
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count - seg->segtype->parity_devs;
|
||||
} else if (seg_is_raid10(seg)) {
|
||||
/* RAID 10 only supports 2 mirrors now */
|
||||
params.mirrors = 2;
|
||||
params.stripes = seg->area_count / 2;
|
||||
} else if (seg_is_any_raid0(seg)) {
|
||||
params.mirrors = 1;
|
||||
params.stripes = seg->area_count;
|
||||
} else if (seg_is_any_raid10(seg)) {
|
||||
params.data_copies = seg->data_copies;
|
||||
params.stripes = seg->area_count;
|
||||
} else {
|
||||
/* RAID 1 */
|
||||
params.mirrors = seg->area_count;
|
||||
params.mirrors = seg->data_copies;
|
||||
params.stripes = 1;
|
||||
params.writebehind = seg->writebehind;
|
||||
memcpy(params.writemostly, writemostly, sizeof(params.writemostly));
|
||||
}
|
||||
|
||||
if (!raid0) {
|
||||
/* RAID 0 doesn't have a bitmap, thus no region_size, rebuilds etc. */
|
||||
if (!seg_is_any_raid0(seg)) {
|
||||
params.region_size = seg->region_size;
|
||||
params.rebuilds = rebuilds;
|
||||
params.writemostly = writemostly;
|
||||
memcpy(params.rebuilds, rebuilds, sizeof(params.rebuilds));
|
||||
params.min_recovery_rate = seg->min_recovery_rate;
|
||||
params.max_recovery_rate = seg->max_recovery_rate;
|
||||
params.delta_disks = delta_disks;
|
||||
params.data_offset = data_offset;
|
||||
}
|
||||
|
||||
if (!dm_tree_node_add_raid_target_with_params(node, len, ¶ms))
|
||||
params.stripe_size = seg->stripe_size;
|
||||
params.flags = flags;
|
||||
|
||||
if (!dm_tree_node_add_raid_target_with_params_v2(node, len, ¶ms))
|
||||
return_0;
|
||||
|
||||
return add_areas_line(dm, seg, node, 0u, seg->area_count);
|
||||
@@ -404,19 +448,32 @@ out:
|
||||
return r;
|
||||
}
|
||||
|
||||
/* Define raid feature based on the tuple(major, minor, patchlevel) of raid target */
|
||||
struct raid_feature {
|
||||
uint32_t maj;
|
||||
uint32_t min;
|
||||
uint32_t patchlevel;
|
||||
unsigned raid_feature;
|
||||
const char *feature;
|
||||
};
|
||||
|
||||
/* Return true if tuple(@maj, @min, @patchlevel) is greater/equal to @*feature members */
|
||||
static int _check_feature(const struct raid_feature *feature, uint32_t maj, uint32_t min, uint32_t patchlevel)
|
||||
{
|
||||
return (maj > feature->maj) ||
|
||||
(maj == feature->maj && min >= feature->min) ||
|
||||
(maj == feature->maj && min == feature->min && patchlevel >= feature->patchlevel);
|
||||
}
|
||||
|
||||
static int _raid_target_present(struct cmd_context *cmd,
|
||||
const struct lv_segment *seg __attribute__((unused)),
|
||||
unsigned *attributes)
|
||||
{
|
||||
/* List of features with their kernel target version */
|
||||
static const struct feature {
|
||||
uint32_t maj;
|
||||
uint32_t min;
|
||||
unsigned raid_feature;
|
||||
const char *feature;
|
||||
} _features[] = {
|
||||
{ 1, 3, RAID_FEATURE_RAID10, SEG_TYPE_NAME_RAID10 },
|
||||
{ 1, 7, RAID_FEATURE_RAID0, SEG_TYPE_NAME_RAID0 },
|
||||
const struct raid_feature _features[] = {
|
||||
{ 1, 3, 0, RAID_FEATURE_RAID10, SEG_TYPE_NAME_RAID10 },
|
||||
{ 1, 7, 0, RAID_FEATURE_RAID0, SEG_TYPE_NAME_RAID0 },
|
||||
{ 1, 10, 1, RAID_FEATURE_RESHAPE, "reshaping" },
|
||||
};
|
||||
|
||||
static int _raid_checked = 0;
|
||||
@@ -438,13 +495,19 @@ static int _raid_target_present(struct cmd_context *cmd,
|
||||
return_0;
|
||||
|
||||
for (i = 0; i < DM_ARRAY_SIZE(_features); ++i)
|
||||
if ((maj > _features[i].maj) ||
|
||||
(maj == _features[i].maj && min >= _features[i].min))
|
||||
if (_check_feature(_features + i, maj, min, patchlevel))
|
||||
_raid_attrs |= _features[i].raid_feature;
|
||||
else
|
||||
log_very_verbose("Target raid does not support %s.",
|
||||
_features[i].feature);
|
||||
|
||||
/*
|
||||
* Seperate check for proper raid4 mapping supported
|
||||
*
|
||||
* If we get more of these range checks, avoid them
|
||||
* altogether by enhancing 'struct raid_feature'
|
||||
* and _check_feature() to handle them.
|
||||
*/
|
||||
if (!(maj == 1 && (min == 8 || (min == 9 && patchlevel == 0))))
|
||||
_raid_attrs |= RAID_FEATURE_RAID4;
|
||||
else
|
||||
|
||||
@@ -69,7 +69,7 @@ FIELD(LVS, lv, BIN, "ActExcl", lvid, 10, lvactiveexclusively, lv_active_exclusiv
|
||||
FIELD(LVS, lv, SNUM, "Maj", major, 0, int32, lv_major, "Persistent major number or -1 if not persistent.", 0)
|
||||
FIELD(LVS, lv, SNUM, "Min", minor, 0, int32, lv_minor, "Persistent minor number or -1 if not persistent.", 0)
|
||||
FIELD(LVS, lv, SIZ, "Rahead", lvid, 0, lvreadahead, lv_read_ahead, "Read ahead setting in current units.", 0)
|
||||
FIELD(LVS, lv, SIZ, "LSize", size, 0, size64, lv_size, "Size of LV in current units.", 0)
|
||||
FIELD(LVS, lv, SIZ, "LSize", lvid, 0, lv_size, lv_size, "Size of LV in current units.", 0)
|
||||
FIELD(LVS, lv, SIZ, "MSize", lvid, 0, lvmetadatasize, lv_metadata_size, "For thin and cache pools, the size of the LV that holds the metadata.", 0)
|
||||
FIELD(LVS, lv, NUM, "#Seg", lvid, 0, lvsegcount, seg_count, "Number of segments in LV.", 0)
|
||||
FIELD(LVS, lv, STR, "Origin", lvid, 0, origin, origin, "For snapshots and thins, the origin device of this LV.", 0)
|
||||
@@ -241,9 +241,16 @@ FIELD(VGS, vg, NUM, "#VMdaCps", cmd, 0, vgmdacopies, vg_mda_copies, "Target numb
|
||||
* SEGS type fields
|
||||
*/
|
||||
FIELD(SEGS, seg, STR, "Type", list, 0, segtype, segtype, "Type of LV segment.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Str", area_count, 0, uint32, stripes, "Number of stripes or mirror legs.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Str", list, 0, seg_stripes, stripes, "Number of stripes or mirror/raid1 legs.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#DStr", list, 0, seg_data_stripes, data_stripes, "Number of data stripes or mirror/raid1 legs.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "RSize", list, 0, seg_reshape_len, reshape_len, "Size of out-of-place reshape space in current units.", 0)
|
||||
FIELD(SEGS, seg, NUM, "RSize", list, 0, seg_reshape_len_le, reshape_len_le, "Size of out-of-place reshape space in logical extents.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Cpy", list, 0, seg_data_copies, data_copies, "Number of data copies.", 0)
|
||||
FIELD(SEGS, seg, NUM, "DOff", list, 0, seg_data_offset, data_offset, "Data offset on each image device.", 0)
|
||||
FIELD(SEGS, seg, NUM, "NOff", list, 0, seg_new_data_offset, new_data_offset, "New data offset after any reshape on each image device.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Par", list, 0, seg_parity_chunks, parity_chunks, "Number of (rotating) parity chunks.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "Stripe", stripe_size, 0, size32, stripe_size, "For stripes, amount of data placed on one device before switching to the next.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "Region", region_size, 0, size32, region_size, "For mirrors, the unit of data copied when synchronising devices.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "Region", region_size, 0, size32, region_size, "For mirrors/raids, the unit of data per leg when synchronizing devices.", 0)
|
||||
FIELD(SEGS, seg, SIZ, "Chunk", list, 0, chunksize, chunk_size, "For snapshots, the unit of data used when tracking changes.", 0)
|
||||
FIELD(SEGS, seg, NUM, "#Thins", list, 0, thincount, thin_count, "For thin pools, the number of thin volumes in this pool.", 0)
|
||||
FIELD(SEGS, seg, STR, "Discards", list, 0, discards, discards, "For thin pools, how discards are handled.", 0)
|
||||
@@ -276,4 +283,26 @@ FIELD(PVSEGS, pvseg, NUM, "SSize", len, 0, uint32, pvseg_size, "Number of extent
|
||||
/*
|
||||
* End of PVSEGS type fields
|
||||
*/
|
||||
|
||||
/*
|
||||
* MOUNTINFO type fields
|
||||
*/
|
||||
FIELD(MOUNTINFO, mountinfo, STR, "Mounted on", mountpoint, 0, string, mount_point, "Mount point of filesystem on device.", 0)
|
||||
/*
|
||||
* End of MOUNTINFO type fields
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* FSINFO type fields
|
||||
*/
|
||||
FIELD(FSINFO, fsinfo, SIZ, "FSUsed", fs_used, 0, size64, fs_used, "Space used in mounted filesystem on device.", 0)
|
||||
FIELD(FSINFO, fsinfo, SIZ, "FSSize", fs_size, 0, size64, fs_size, "Size of mounted filesystem on device.", 0)
|
||||
FIELD(FSINFO, fsinfo, SIZ, "FSFree", fs_free, 0, size64, fs_free, "Free space in mounted filesystem on device.", 0)
|
||||
FIELD(FSINFO, fsinfo, SIZ, "FSAvail", fs_avail, 0, size64, fs_avail, "Available space in mounted filesystem on device.", 0)
|
||||
/*
|
||||
* End of FSINFO type fields
|
||||
*/
|
||||
|
||||
|
||||
/* *INDENT-ON* */
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2010-2013 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This file is part of LVM2.
|
||||
*
|
||||
@@ -446,8 +446,22 @@ GET_VG_NUM_PROPERTY_FN(vg_missing_pv_count, vg_missing_pv_count(vg))
|
||||
/* LVSEG */
|
||||
GET_LVSEG_STR_PROPERTY_FN(segtype, lvseg_segtype_dup(lvseg->lv->vg->vgmem, lvseg))
|
||||
#define _segtype_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(data_copies, lvseg->data_copies)
|
||||
#define _data_copies_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(reshape_len, lvseg->reshape_len)
|
||||
#define _reshape_len_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(reshape_len_le, lvseg->reshape_len)
|
||||
#define _reshape_len_le_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(data_offset, lvseg->data_offset)
|
||||
#define _data_offset_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(new_data_offset, lvseg->data_offset)
|
||||
#define _new_data_offset_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(parity_chunks, lvseg->data_offset)
|
||||
#define _parity_chunks_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(stripes, lvseg->area_count)
|
||||
#define _stripes_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(data_stripes, lvseg->area_count)
|
||||
#define _data_stripes_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(stripe_size, (SECTOR_SIZE * lvseg->stripe_size))
|
||||
#define _stripe_size_set prop_not_implemented_set
|
||||
GET_LVSEG_NUM_PROPERTY_FN(region_size, (SECTOR_SIZE * lvseg->region_size))
|
||||
@@ -506,6 +520,16 @@ GET_PVSEG_NUM_PROPERTY_FN(pvseg_start, pvseg->pe)
|
||||
GET_PVSEG_NUM_PROPERTY_FN(pvseg_size, (SECTOR_SIZE * pvseg->len))
|
||||
#define _pvseg_size_set prop_not_implemented_set
|
||||
|
||||
#define _mount_point_get prop_not_implemented_get
|
||||
#define _mount_point_set prop_not_implemented_set
|
||||
#define _fs_used_get prop_not_implemented_get
|
||||
#define _fs_used_set prop_not_implemented_set
|
||||
#define _fs_size_get prop_not_implemented_get
|
||||
#define _fs_size_set prop_not_implemented_set
|
||||
#define _fs_free_get prop_not_implemented_get
|
||||
#define _fs_free_set prop_not_implemented_set
|
||||
#define _fs_avail_get prop_not_implemented_get
|
||||
#define _fs_avail_set prop_not_implemented_set
|
||||
|
||||
struct lvm_property_type _properties[] = {
|
||||
#include "columns.h"
|
||||
|
||||
@@ -36,6 +36,8 @@ struct lvm_report_object {
|
||||
struct lv_segment *seg;
|
||||
struct pv_segment *pvseg;
|
||||
struct label *label;
|
||||
struct lvm_mountinfo *mountinfo;
|
||||
struct lvm_fsinfo *fsinfo;
|
||||
};
|
||||
|
||||
static uint32_t log_seqnum = 1;
|
||||
@@ -2296,6 +2298,22 @@ static int _size64_disp(struct dm_report *rh __attribute__((unused)),
|
||||
return _field_set_value(field, repstr, sortval);
|
||||
}
|
||||
|
||||
static int _lv_size_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct logical_volume *lv = (const struct logical_volume *) data;
|
||||
const struct lv_segment *seg = first_seg(lv);
|
||||
uint64_t size = lv->le_count;
|
||||
|
||||
if (!lv_is_raid_image(lv))
|
||||
size -= seg->reshape_len * (seg->area_count > 2 ? seg->area_count : 1);
|
||||
|
||||
size *= lv->vg->extent_size;
|
||||
|
||||
return _size64_disp(rh, mem, field, &size, private);
|
||||
}
|
||||
|
||||
static int _uint32_disp(struct dm_report *rh, struct dm_pool *mem __attribute__((unused)),
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private __attribute__((unused)))
|
||||
@@ -2412,6 +2430,197 @@ static int _segstartpe_disp(struct dm_report *rh,
|
||||
return dm_report_field_uint32(rh, field, &seg->le);
|
||||
}
|
||||
|
||||
/* Hepler: get used stripes = total stripes minux any to remove after reshape */
|
||||
static int _get_seg_used_stripes(const struct lv_segment *seg)
|
||||
{
|
||||
uint32_t s;
|
||||
uint32_t stripes = seg->area_count;
|
||||
|
||||
for (s = seg->area_count - 1; stripes && s; s--) {
|
||||
if (seg_type(seg, s) == AREA_LV &&
|
||||
seg_lv(seg, s)->status & LV_REMOVE_AFTER_RESHAPE)
|
||||
stripes--;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
return stripes;
|
||||
}
|
||||
|
||||
static int _seg_stripes_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = ((const struct lv_segment *) data);
|
||||
|
||||
return dm_report_field_uint32(rh, field, &seg->area_count);
|
||||
}
|
||||
|
||||
/* Report the number of data stripes, which is less than total stripes (e.g. 2 less for raid6) */
|
||||
static int _seg_data_stripes_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = (const struct lv_segment *) data;
|
||||
uint32_t stripes = _get_seg_used_stripes(seg) - seg->segtype->parity_devs;
|
||||
|
||||
/* FIXME: in case of odd numbers of raid10 stripes */
|
||||
if (seg_is_raid10(seg))
|
||||
stripes /= seg->data_copies;
|
||||
|
||||
return dm_report_field_uint32(rh, field, &stripes);
|
||||
}
|
||||
|
||||
/* Helper: return the top-level, reshapable raid LV in case @seg belongs to an raid rimage LV */
|
||||
static struct logical_volume *_lv_for_raid_image_seg(const struct lv_segment *seg, struct dm_pool *mem)
|
||||
{
|
||||
char *lv_name;
|
||||
|
||||
if (seg_is_reshapable_raid(seg))
|
||||
return seg->lv;
|
||||
|
||||
if (seg->lv &&
|
||||
lv_is_raid_image(seg->lv) && !seg->le &&
|
||||
(lv_name = dm_pool_strdup(mem, seg->lv->name))) {
|
||||
char *p = strchr(lv_name, '_');
|
||||
|
||||
if (p) {
|
||||
/* Handle duplicated sub LVs */
|
||||
if (strstr(p, "_dup_"))
|
||||
p = strchr(p + 5, '_');
|
||||
|
||||
if (p) {
|
||||
struct lv_list *lvl;
|
||||
|
||||
*p = '\0';
|
||||
if ((lvl = find_lv_in_vg(seg->lv->vg, lv_name)) &&
|
||||
seg_is_reshapable_raid(first_seg(lvl->lv)))
|
||||
return lvl->lv;
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Helper: return the top-level raid LV in case it is reshapale for @seg or @seg if it is */
|
||||
static const struct lv_segment *_get_reshapable_seg(const struct lv_segment *seg, struct dm_pool *mem)
|
||||
{
|
||||
return _lv_for_raid_image_seg(seg, mem) ? seg : NULL;
|
||||
}
|
||||
|
||||
/* Display segment reshape length in current units */
|
||||
static int _seg_reshape_len_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = _get_reshapable_seg((const struct lv_segment *) data, mem);
|
||||
|
||||
if (seg) {
|
||||
uint32_t reshape_len = seg->reshape_len * seg->area_count * seg->lv->vg->extent_size;
|
||||
|
||||
return _size32_disp(rh, mem, field, &reshape_len, private);
|
||||
}
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
/* Display segment reshape length of in logical extents */
|
||||
static int _seg_reshape_len_le_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = _get_reshapable_seg((const struct lv_segment *) data, mem);
|
||||
|
||||
if (seg) {
|
||||
uint32_t reshape_len = seg->reshape_len* seg->area_count;
|
||||
|
||||
return dm_report_field_uint32(rh, field, &reshape_len);
|
||||
}
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
/* Display segment data copies (e.g. 3 for raid6) */
|
||||
static int _seg_data_copies_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = (const struct lv_segment *) data;
|
||||
|
||||
if (seg->data_copies)
|
||||
return dm_report_field_uint32(rh, field, &seg->data_copies);
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
/* Helper: display segment data offset/new data offset in sectors */
|
||||
static int _segdata_offset(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private, int new_data_offset)
|
||||
{
|
||||
const struct lv_segment *seg = (const struct lv_segment *) data;
|
||||
struct logical_volume *lv;
|
||||
|
||||
if ((lv = _lv_for_raid_image_seg(seg, mem))) {
|
||||
uint64_t data_offset;
|
||||
|
||||
if (lv_raid_data_offset(lv, &data_offset)) {
|
||||
if (new_data_offset && !lv_raid_image_in_sync(seg->lv))
|
||||
data_offset = data_offset ? 0 : seg->reshape_len * lv->vg->extent_size;
|
||||
|
||||
return dm_report_field_uint64(rh, field, &data_offset);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
static int _seg_data_offset_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
return _segdata_offset(rh, mem, field, data, private, 0);
|
||||
}
|
||||
|
||||
static int _seg_new_data_offset_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
return _segdata_offset(rh, mem, field, data, private, 1);
|
||||
}
|
||||
|
||||
static int _seg_parity_chunks_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
{
|
||||
const struct lv_segment *seg = (const struct lv_segment *) data;
|
||||
uint32_t parity_chunks = seg->segtype->parity_devs ?: seg->data_copies - 1;
|
||||
|
||||
if (parity_chunks) {
|
||||
uint32_t s, resilient_sub_lvs = 0;
|
||||
|
||||
for (s = 0; s < seg->area_count; s++) {
|
||||
if (seg_type(seg, s) == AREA_LV) {
|
||||
struct lv_segment *seg1 = first_seg(seg_lv(seg, s));
|
||||
|
||||
if (seg1->segtype->parity_devs ||
|
||||
seg1->data_copies > 1)
|
||||
resilient_sub_lvs++;
|
||||
}
|
||||
}
|
||||
|
||||
if (resilient_sub_lvs && resilient_sub_lvs == seg->area_count)
|
||||
parity_chunks++;
|
||||
|
||||
return dm_report_field_uint32(rh, field, &parity_chunks);
|
||||
}
|
||||
|
||||
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
|
||||
}
|
||||
|
||||
static int _segsize_disp(struct dm_report *rh, struct dm_pool *mem,
|
||||
struct dm_report_field *field,
|
||||
const void *data, void *private)
|
||||
@@ -3584,6 +3793,13 @@ static struct volume_group _unknown_vg = {
|
||||
.tags = DM_LIST_HEAD_INIT(_unknown_vg.tags),
|
||||
};
|
||||
|
||||
static struct lvm_mountinfo _unknown_mountinfo = {
|
||||
.mountpoint = ""
|
||||
};
|
||||
|
||||
static struct lvm_fsinfo _unknown_fsinfo = {
|
||||
};
|
||||
|
||||
static void *_obj_get_vg(void *obj)
|
||||
{
|
||||
struct volume_group *vg = ((struct lvm_report_object *)obj)->vg;
|
||||
@@ -3621,6 +3837,16 @@ static void *_obj_get_pvseg(void *obj)
|
||||
return ((struct lvm_report_object *)obj)->pvseg;
|
||||
}
|
||||
|
||||
static void *_obj_get_mountinfo(void *obj)
|
||||
{
|
||||
return ((struct lvm_report_object *)obj)->mountinfo;
|
||||
}
|
||||
|
||||
static void *_obj_get_fsinfo(void *obj)
|
||||
{
|
||||
return ((struct lvm_report_object *)obj)->fsinfo;
|
||||
}
|
||||
|
||||
static void *_obj_get_devtypes(void *obj)
|
||||
{
|
||||
return obj;
|
||||
@@ -3646,6 +3872,8 @@ static const struct dm_report_object_type _report_types[] = {
|
||||
{ LABEL, "Physical Volume Label", "pv_", _obj_get_label },
|
||||
{ SEGS, "Logical Volume Segment", "seg_", _obj_get_seg },
|
||||
{ PVSEGS, "Physical Volume Segment", "pvseg_", _obj_get_pvseg },
|
||||
{ MOUNTINFO, "Mount Point", "mount_", _obj_get_mountinfo },
|
||||
{ FSINFO, "Filesystem", "fs_", _obj_get_fsinfo },
|
||||
{ 0, "", "", NULL },
|
||||
};
|
||||
|
||||
@@ -3678,6 +3906,8 @@ typedef struct volume_group type_vg;
|
||||
typedef struct lv_segment type_seg;
|
||||
typedef struct pv_segment type_pvseg;
|
||||
typedef struct label type_label;
|
||||
typedef struct lvm_mountinfo type_mountinfo;
|
||||
typedef struct lvm_fsinfo type_fsinfo;
|
||||
|
||||
typedef dev_known_type_t type_devtype;
|
||||
|
||||
@@ -3805,7 +4035,8 @@ int report_object(void *handle, int selection_only, const struct volume_group *v
|
||||
const struct logical_volume *lv, const struct physical_volume *pv,
|
||||
const struct lv_segment *seg, const struct pv_segment *pvseg,
|
||||
const struct lv_with_info_and_seg_status *lvdm,
|
||||
const struct label *label)
|
||||
const struct label *label,
|
||||
const struct lvm_mountinfo *mountinfo, const struct lvm_fsinfo *fsinfo)
|
||||
{
|
||||
struct selection_handle *sh = selection_only ? (struct selection_handle *) handle : NULL;
|
||||
struct device dummy_device = { .dev = 0 };
|
||||
@@ -3816,7 +4047,9 @@ int report_object(void *handle, int selection_only, const struct volume_group *v
|
||||
.pv = (struct physical_volume *) pv,
|
||||
.seg = (struct lv_segment *) seg,
|
||||
.pvseg = (struct pv_segment *) pvseg,
|
||||
.label = (struct label *) (label ? : (pv ? pv_label(pv) : NULL))
|
||||
.label = (struct label *) (label ? : (pv ? pv_label(pv) : NULL)),
|
||||
.mountinfo = (struct lvm_mountinfo *) mountinfo ? : &_unknown_mountinfo,
|
||||
.fsinfo = (struct lvm_fsinfo *) fsinfo ? : &_unknown_fsinfo,
|
||||
};
|
||||
|
||||
/* FIXME workaround for pv_label going through cache; remove once struct
|
||||
|
||||
@@ -32,9 +32,22 @@ typedef enum {
|
||||
SEGS = 256,
|
||||
PVSEGS = 512,
|
||||
LABEL = 1024,
|
||||
DEVTYPES = 2048
|
||||
DEVTYPES = 2048,
|
||||
MOUNTINFO = 4096,
|
||||
FSINFO = 8192
|
||||
} report_type_t;
|
||||
|
||||
struct lvm_mountinfo {
|
||||
const char *mountpoint;
|
||||
};
|
||||
|
||||
struct lvm_fsinfo {
|
||||
uint64_t fs_used;
|
||||
uint64_t fs_size;
|
||||
uint64_t fs_free;
|
||||
uint64_t fs_avail;
|
||||
};
|
||||
|
||||
/*
|
||||
* The "struct selection_handle" is used only for selection
|
||||
* of items that should be processed further (not for display!).
|
||||
@@ -104,7 +117,8 @@ int report_object(void *handle, int selection_only, const struct volume_group *v
|
||||
const struct logical_volume *lv, const struct physical_volume *pv,
|
||||
const struct lv_segment *seg, const struct pv_segment *pvseg,
|
||||
const struct lv_with_info_and_seg_status *lvdm,
|
||||
const struct label *label);
|
||||
const struct label *label,
|
||||
const struct lvm_mountinfo *mountinfo, const struct lvm_fsinfo *fsinfo);
|
||||
int report_devtypes(void *handle);
|
||||
int report_cmdlog(void *handle, const char *type, const char *context,
|
||||
const char *object_type_name, const char *object_name,
|
||||
|
||||
@@ -3,3 +3,4 @@ dm_bit_get_prev
|
||||
dm_stats_update_regions_from_fd
|
||||
dm_bitset_parse_list
|
||||
dm_stats_bind_from_fd
|
||||
dm_tree_node_add_raid_target_with_params_v2
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
|
||||
* Copyright (C) 2006 Rackable Systems All rights reserved.
|
||||
*
|
||||
* This file is part of the device-mapper userspace tools.
|
||||
@@ -331,6 +331,7 @@ struct dm_status_raid {
|
||||
char *dev_health;
|
||||
/* idle, frozen, resync, recover, check, repair */
|
||||
char *sync_action;
|
||||
uint64_t data_offset; /* RAID out-of-place reshaping */
|
||||
};
|
||||
|
||||
int dm_get_status_raid(struct dm_pool *mem, const char *params,
|
||||
@@ -1738,6 +1739,11 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
*/
|
||||
#define DM_CACHE_METADATA_MAX_SECTORS DM_THIN_METADATA_MAX_SECTORS
|
||||
|
||||
/*
|
||||
* Define number of elements in rebuild and writemostly arrays
|
||||
* 'of struct dm_tree_node_raid_params'.
|
||||
*/
|
||||
|
||||
struct dm_tree_node_raid_params {
|
||||
const char *raid_type;
|
||||
|
||||
@@ -1749,25 +1755,70 @@ struct dm_tree_node_raid_params {
|
||||
/*
|
||||
* 'rebuilds' and 'writemostly' are bitfields that signify
|
||||
* which devices in the array are to be rebuilt or marked
|
||||
* writemostly. By choosing a 'uint64_t', we limit ourself
|
||||
* to RAID arrays with 64 devices.
|
||||
* writemostly. The kernel supports up to 253 legs.
|
||||
* We limit ourselves by choosing a lower value
|
||||
* for DEFAULT_RAID{1}_MAX_IMAGES in defaults.h.
|
||||
*/
|
||||
uint64_t rebuilds;
|
||||
uint64_t writemostly;
|
||||
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
|
||||
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
|
||||
uint32_t sync_daemon_sleep; /* ms (kernel default = 5sec) */
|
||||
uint32_t max_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t stripe_cache; /* sectors */
|
||||
|
||||
uint64_t flags; /* [no]sync */
|
||||
uint32_t reserved2;
|
||||
};
|
||||
|
||||
/*
|
||||
* Version 2 of above node raid params struct to keeep API compatibility.
|
||||
*
|
||||
* Extended for more than 64 legs (max 253 in the MD kernel runtime!),
|
||||
* delta_disks for disk add/remove reshaping,
|
||||
* data_offset for out-of-place reshaping
|
||||
* and data_copies for odd number of raid10 legs.
|
||||
*/
|
||||
#define RAID_BITMAP_SIZE 4 /* 4 * 64 bit elements in rebuilds/writemostly arrays */
|
||||
struct dm_tree_node_raid_params_v2 {
|
||||
const char *raid_type;
|
||||
|
||||
uint32_t stripes;
|
||||
uint32_t mirrors;
|
||||
uint32_t region_size;
|
||||
uint32_t stripe_size;
|
||||
|
||||
int delta_disks; /* +/- number of disks to add/remove (reshaping) */
|
||||
int data_offset; /* data offset to set (out-of-place reshaping) */
|
||||
|
||||
/*
|
||||
* 'rebuilds' and 'writemostly' are bitfields that signify
|
||||
* which devices in the array are to be rebuilt or marked
|
||||
* writemostly. The kernel supports up to 253 legs.
|
||||
* We limit ourselvs by choosing a lower value
|
||||
* for DEFAULT_RAID_MAX_IMAGES.
|
||||
*/
|
||||
uint64_t rebuilds[RAID_BITMAP_SIZE];
|
||||
uint64_t writemostly[RAID_BITMAP_SIZE];
|
||||
uint32_t writebehind; /* I/Os (kernel default COUNTER_MAX / 2) */
|
||||
uint32_t data_copies; /* RAID # of data copies */
|
||||
uint32_t sync_daemon_sleep; /* ms (kernel default = 5sec) */
|
||||
uint32_t max_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* kB/sec/disk */
|
||||
uint32_t stripe_cache; /* sectors */
|
||||
|
||||
uint64_t flags; /* [no]sync */
|
||||
uint64_t reserved2;
|
||||
};
|
||||
|
||||
int dm_tree_node_add_raid_target_with_params(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const struct dm_tree_node_raid_params *p);
|
||||
|
||||
/* Version 2 API function taking dm_tree_node_raid_params_v2 for aforementioned extensions. */
|
||||
int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const struct dm_tree_node_raid_params_v2 *p);
|
||||
|
||||
/* Cache feature_flags */
|
||||
#define DM_CACHE_FEATURE_WRITEBACK 0x00000001
|
||||
#define DM_CACHE_FEATURE_WRITETHROUGH 0x00000002
|
||||
|
||||
@@ -205,11 +205,14 @@ struct load_segment {
|
||||
struct dm_tree_node *replicator;/* Replicator-dev */
|
||||
uint64_t rdevice_index; /* Replicator-dev */
|
||||
|
||||
uint64_t rebuilds; /* raid */
|
||||
uint64_t writemostly; /* raid */
|
||||
int delta_disks; /* raid reshape number of disks */
|
||||
int data_offset; /* raid reshape data offset on disk to set */
|
||||
uint64_t rebuilds[RAID_BITMAP_SIZE]; /* raid */
|
||||
uint64_t writemostly[RAID_BITMAP_SIZE]; /* raid */
|
||||
uint32_t writebehind; /* raid */
|
||||
uint32_t max_recovery_rate; /* raid kB/sec/disk */
|
||||
uint32_t min_recovery_rate; /* raid kB/sec/disk */
|
||||
uint32_t data_copies; /* raid10 data_copies */
|
||||
|
||||
struct dm_tree_node *metadata; /* Thin_pool + Cache */
|
||||
struct dm_tree_node *pool; /* Thin_pool, Thin */
|
||||
@@ -2353,16 +2356,21 @@ static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *s
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Is parameter non-zero? */
|
||||
#define PARAM_IS_SET(p) ((p) ? 1 : 0)
|
||||
static int _2_if_value(unsigned p)
|
||||
{
|
||||
return p ? 2 : 0;
|
||||
}
|
||||
|
||||
/* Return number of bits assuming 4 * 64 bit size */
|
||||
static int _get_params_count(uint64_t bits)
|
||||
/* Return number of bits passed in @bits assuming 2 * 64 bit size */
|
||||
static int _get_params_count(uint64_t *bits)
|
||||
{
|
||||
int r = 0;
|
||||
int i = RAID_BITMAP_SIZE;
|
||||
|
||||
r += 2 * hweight32(bits & 0xFFFFFFFF);
|
||||
r += 2 * hweight32(bits >> 32);
|
||||
while (i--) {
|
||||
r += 2 * hweight32(bits[i] & 0xFFFFFFFF);
|
||||
r += 2 * hweight32(bits[i] >> 32);
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
@@ -2373,32 +2381,60 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
size_t paramsize)
|
||||
{
|
||||
uint32_t i;
|
||||
uint32_t area_count = seg->area_count / 2;
|
||||
int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
|
||||
int pos = 0;
|
||||
unsigned type = seg->type;
|
||||
unsigned type;
|
||||
|
||||
if (seg->area_count % 2)
|
||||
return 0;
|
||||
|
||||
if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
|
||||
param_count++;
|
||||
|
||||
param_count += 2 * (PARAM_IS_SET(seg->region_size) +
|
||||
PARAM_IS_SET(seg->writebehind) +
|
||||
PARAM_IS_SET(seg->min_recovery_rate) +
|
||||
PARAM_IS_SET(seg->max_recovery_rate));
|
||||
param_count += _2_if_value(seg->data_offset) +
|
||||
_2_if_value(seg->delta_disks) +
|
||||
_2_if_value(seg->region_size) +
|
||||
_2_if_value(seg->writebehind) +
|
||||
_2_if_value(seg->min_recovery_rate) +
|
||||
_2_if_value(seg->max_recovery_rate) +
|
||||
_2_if_value(seg->data_copies > 1);
|
||||
|
||||
/* rebuilds and writemostly are 64 bits */
|
||||
/* rebuilds and writemostly are BITMAP_SIZE * 64 bits */
|
||||
param_count += _get_params_count(seg->rebuilds);
|
||||
param_count += _get_params_count(seg->writemostly);
|
||||
|
||||
if ((type == SEG_RAID1) && seg->stripe_size)
|
||||
log_error("WARNING: Ignoring RAID1 stripe size");
|
||||
if ((seg->type == SEG_RAID1) && seg->stripe_size)
|
||||
log_info("WARNING: Ignoring RAID1 stripe size");
|
||||
|
||||
/* Kernel only expects "raid0", not "raid0_meta" */
|
||||
type = seg->type;
|
||||
if (type == SEG_RAID0_META)
|
||||
type = SEG_RAID0;
|
||||
#if 0
|
||||
/* Kernel only expects "raid10", not "raid10_{far,offset}" */
|
||||
else if (type == SEG_RAID10_FAR ||
|
||||
type == SEG_RAID10_OFFSET) {
|
||||
param_count += 2;
|
||||
type = SEG_RAID10_NEAR;
|
||||
}
|
||||
#endif
|
||||
|
||||
EMIT_PARAMS(pos, "%s %d %u", _dm_segtypes[type].target,
|
||||
EMIT_PARAMS(pos, "%s %d %u",
|
||||
// type == SEG_RAID10_NEAR ? "raid10" : _dm_segtypes[type].target,
|
||||
type == SEG_RAID10 ? "raid10" : _dm_segtypes[type].target,
|
||||
param_count, seg->stripe_size);
|
||||
|
||||
#if 0
|
||||
if (seg->type == SEG_RAID10_FAR)
|
||||
EMIT_PARAMS(pos, " raid10_format far");
|
||||
else if (seg->type == SEG_RAID10_OFFSET)
|
||||
EMIT_PARAMS(pos, " raid10_format offset");
|
||||
#endif
|
||||
|
||||
if (seg->data_copies > 1 && type == SEG_RAID10)
|
||||
EMIT_PARAMS(pos, " raid10_copies %u", seg->data_copies);
|
||||
|
||||
if (seg->flags & DM_NOSYNC)
|
||||
EMIT_PARAMS(pos, " nosync");
|
||||
else if (seg->flags & DM_FORCESYNC)
|
||||
@@ -2407,27 +2443,38 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
|
||||
if (seg->region_size)
|
||||
EMIT_PARAMS(pos, " region_size %u", seg->region_size);
|
||||
|
||||
for (i = 0; i < (seg->area_count / 2); i++)
|
||||
if (seg->rebuilds & (1ULL << i))
|
||||
/* If seg-data_offset == 1, kernel needs a zero offset to adjust to it */
|
||||
if (seg->data_offset)
|
||||
EMIT_PARAMS(pos, " data_offset %d", seg->data_offset == 1 ? 0 : seg->data_offset);
|
||||
|
||||
if (seg->delta_disks)
|
||||
EMIT_PARAMS(pos, " delta_disks %d", seg->delta_disks);
|
||||
|
||||
for (i = 0; i < area_count; i++)
|
||||
if (seg->rebuilds[i/64] & (1ULL << (i%64)))
|
||||
EMIT_PARAMS(pos, " rebuild %u", i);
|
||||
|
||||
if (seg->min_recovery_rate)
|
||||
EMIT_PARAMS(pos, " min_recovery_rate %u",
|
||||
seg->min_recovery_rate);
|
||||
|
||||
if (seg->max_recovery_rate)
|
||||
EMIT_PARAMS(pos, " max_recovery_rate %u",
|
||||
seg->max_recovery_rate);
|
||||
|
||||
for (i = 0; i < (seg->area_count / 2); i++)
|
||||
if (seg->writemostly & (1ULL << i))
|
||||
for (i = 0; i < area_count; i++)
|
||||
if (seg->writemostly[i/64] & (1ULL << (i%64)))
|
||||
EMIT_PARAMS(pos, " write_mostly %u", i);
|
||||
|
||||
if (seg->writebehind)
|
||||
EMIT_PARAMS(pos, " max_write_behind %u", seg->writebehind);
|
||||
|
||||
/*
|
||||
* Has to be before "min_recovery_rate" or the kernels
|
||||
* check will fail when both set and min > previous max
|
||||
*/
|
||||
if (seg->max_recovery_rate)
|
||||
EMIT_PARAMS(pos, " max_recovery_rate %u",
|
||||
seg->max_recovery_rate);
|
||||
|
||||
if (seg->min_recovery_rate)
|
||||
EMIT_PARAMS(pos, " min_recovery_rate %u",
|
||||
seg->min_recovery_rate);
|
||||
|
||||
/* Print number of metadata/data device pairs */
|
||||
EMIT_PARAMS(pos, " %u", seg->area_count/2);
|
||||
EMIT_PARAMS(pos, " %u", area_count);
|
||||
|
||||
if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
|
||||
return_0;
|
||||
@@ -3267,8 +3314,10 @@ int dm_tree_node_add_raid_target_with_params(struct dm_tree_node *node,
|
||||
seg->region_size = p->region_size;
|
||||
seg->stripe_size = p->stripe_size;
|
||||
seg->area_count = 0;
|
||||
seg->rebuilds = p->rebuilds;
|
||||
seg->writemostly = p->writemostly;
|
||||
memset(seg->rebuilds, 0, sizeof(seg->rebuilds));
|
||||
seg->rebuilds[0] = p->rebuilds;
|
||||
memset(seg->writemostly, 0, sizeof(seg->writemostly));
|
||||
seg->writemostly[0] = p->writemostly;
|
||||
seg->writebehind = p->writebehind;
|
||||
seg->min_recovery_rate = p->min_recovery_rate;
|
||||
seg->max_recovery_rate = p->max_recovery_rate;
|
||||
@@ -3296,6 +3345,47 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
|
||||
return dm_tree_node_add_raid_target_with_params(node, size, ¶ms);
|
||||
}
|
||||
|
||||
/*
|
||||
* Version 2 of dm_tree_node_add_raid_target() allowing for:
|
||||
*
|
||||
* - maximum 253 legs in a raid set (MD kernel limitation)
|
||||
* - delta_disks for disk add/remove reshaping
|
||||
* - data_offset for out-of-place reshaping
|
||||
* - data_copies to cope witth odd numbers of raid10 disks
|
||||
*/
|
||||
int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
const struct dm_tree_node_raid_params_v2 *p)
|
||||
{
|
||||
unsigned i;
|
||||
struct load_segment *seg = NULL;
|
||||
|
||||
for (i = 0; i < DM_ARRAY_SIZE(_dm_segtypes) && !seg; ++i)
|
||||
if (!strcmp(p->raid_type, _dm_segtypes[i].target))
|
||||
if (!(seg = _add_segment(node,
|
||||
_dm_segtypes[i].type, size)))
|
||||
return_0;
|
||||
if (!seg) {
|
||||
log_error("Unsupported raid type %s.", p->raid_type);
|
||||
return 0;
|
||||
}
|
||||
|
||||
seg->region_size = p->region_size;
|
||||
seg->stripe_size = p->stripe_size;
|
||||
seg->area_count = 0;
|
||||
seg->delta_disks = p->delta_disks;
|
||||
seg->data_offset = p->data_offset;
|
||||
memcpy(seg->rebuilds, p->rebuilds, sizeof(seg->rebuilds));
|
||||
memcpy(seg->writemostly, p->writemostly, sizeof(seg->writemostly));
|
||||
seg->writebehind = p->writebehind;
|
||||
seg->data_copies = p->data_copies;
|
||||
seg->min_recovery_rate = p->min_recovery_rate;
|
||||
seg->max_recovery_rate = p->max_recovery_rate;
|
||||
seg->flags = p->flags;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int dm_tree_node_add_cache_target(struct dm_tree_node *node,
|
||||
uint64_t size,
|
||||
uint64_t feature_flags, /* DM_CACHE_FEATURE_* */
|
||||
|
||||
@@ -89,6 +89,8 @@ static unsigned _count_fields(const char *p)
|
||||
* <raid_type> <#devs> <health_str> <sync_ratio>
|
||||
* Versions 1.5.0+ (6 fields):
|
||||
* <raid_type> <#devs> <health_str> <sync_ratio> <sync_action> <mismatch_cnt>
|
||||
* Versions 1.9.0+ (7 fields):
|
||||
* <raid_type> <#devs> <health_str> <sync_ratio> <sync_action> <mismatch_cnt> <data_offset>
|
||||
*/
|
||||
int dm_get_status_raid(struct dm_pool *mem, const char *params,
|
||||
struct dm_status_raid **status)
|
||||
@@ -147,6 +149,22 @@ int dm_get_status_raid(struct dm_pool *mem, const char *params,
|
||||
if (sscanf(p, "%s %" PRIu64, s->sync_action, &s->mismatch_count) != 2)
|
||||
goto_bad;
|
||||
|
||||
if (num_fields < 7)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* All pre-1.9.0 version parameters are read. Now we check
|
||||
* for additional 1.9.0+ parameters (i.e. nr_fields at least 7).
|
||||
*
|
||||
* Note that data_offset will be 0 if the
|
||||
* kernel returns a pre-1.9.0 status.
|
||||
*/
|
||||
msg_fields = "<data_offset>";
|
||||
if (!(p = _skip_fields(params, 6))) /* skip pre-1.9.0 params */
|
||||
goto bad;
|
||||
if (sscanf(p, "%" PRIu64, &s->data_offset) != 1)
|
||||
goto bad;
|
||||
|
||||
out:
|
||||
*status = s;
|
||||
|
||||
|
||||
@@ -144,12 +144,12 @@ Makefile: Makefile.in
|
||||
|
||||
man-generator:
|
||||
$(CC) -DMAN_PAGE_GENERATOR -I$(top_builddir)/tools $(CFLAGS) $(top_srcdir)/tools/command.c -o $@
|
||||
- ./man-generator lvmconfig > test.gen
|
||||
- ./man-generator --primary lvmconfig > test.gen
|
||||
if [ ! -s test.gen ] ; then cp genfiles/*.gen $(top_builddir)/man; fi;
|
||||
|
||||
$(MAN8GEN): man-generator
|
||||
echo "Generating $@" ;
|
||||
if [ ! -e $@.gen ]; then ./man-generator $(basename $@) $(top_srcdir)/man/$@.des > $@.gen; fi
|
||||
if [ ! -e $@.gen ]; then ./man-generator --primary $(basename $@) $(top_srcdir)/man/$@.des > $@.gen; ./man-generator --secondary $(basename $@) >> $@.gen; fi
|
||||
if [ -f $(top_srcdir)/man/$@.end ]; then cat $(top_srcdir)/man/$@.end >> $@.gen; fi;
|
||||
cat $(top_srcdir)/man/see_also.end >> $@.gen
|
||||
$(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $@.gen > $@
|
||||
|
||||
@@ -27,6 +27,39 @@ A command run on a visible LV sometimes operates on a sub LV rather than
|
||||
the specified LV. In other cases, a sub LV must be specified directly on
|
||||
the command line.
|
||||
|
||||
Striped raid types are
|
||||
.B raid0/raid0_meta
|
||||
,
|
||||
.B raid5
|
||||
(an alias for raid5_ls),
|
||||
.B raid6
|
||||
(an alias for raid6_zr) and
|
||||
.B raid10
|
||||
(an alias for raid10_near).
|
||||
|
||||
As opposed to mirroring, raid5 and raid6 stripe data and calculate parity
|
||||
blocks. The parity blocks can be used for data block recovery in case devices
|
||||
fail. A maximum number of one device in a raid5 LV may fail and two in case
|
||||
of raid6. Striped raid types typically rotate the parity blocks for performance
|
||||
reasons thus avoiding contention on a single device. Layouts of raid5 rotating
|
||||
parity blocks can be one of left-asymmetric (raid5_la), left-symmetric (raid5_ls
|
||||
with alias raid5), right-asymmetric (raid5_ra), right-symmetric (raid5_rs) and raid5_n,
|
||||
which doesn't rotate parity blocks. Any \"_n\" layouts allow for conversion between
|
||||
raid levels (raid5_n -> raid6 or raid5_n -> striped/raid0/raid0_meta).
|
||||
raid6 layouts are zero-restart (raid6_zr with alias raid6), next-restart (raid6_nr),
|
||||
next-continue (raid6_nc). Additionally, special raid6 layouts for raid level conversions
|
||||
between raid5 and raid6 are raid6_ls_6, raid6_rs_6, raid6_la_6 and raid6_ra_6. Those
|
||||
correspond to their raid5 counterparts (e.g. raid5_rs can be directly converted to raid6_rs_6
|
||||
and vice-versa).
|
||||
raid10 (an alias for raid10_near) is currently limited to one data copy and even number of
|
||||
sub LVs. This is a mirror group layout thus a single sub LV may fail per mirror group
|
||||
without data loss.
|
||||
Striped raid types support converting the layout, their stripesize
|
||||
and their number of stripes.
|
||||
|
||||
The striped raid types combined with raid1 allow for conversion from linear -> striped/raid0/raid0_meta
|
||||
and vice-versa by e.g. linear <-> raid1 <-> raid5_n (then adding stripes) <-> striped/raid0/raid0_meta.
|
||||
|
||||
Sub LVs can be displayed with the command
|
||||
.B lvs -a
|
||||
|
||||
|
||||
@@ -28,9 +28,9 @@ to improve performance.
|
||||
|
||||
.SS Usage notes
|
||||
|
||||
In the usage section below, \fB--size\fP \fINumber\fP can be replaced
|
||||
in each case with \fB--extents\fP \fINumberExtents\fP. Also see both
|
||||
descriptions the options section.
|
||||
In the usage section below, \fB--size\fP \fISize\fP can be replaced
|
||||
with \fB--extents\fP \fINumber\fP. See both descriptions
|
||||
the options section.
|
||||
|
||||
In the usage section below, \fB--name\fP is omitted from the required
|
||||
options, even though it is typically used. When the name is not
|
||||
|
||||
@@ -1,5 +1,12 @@
|
||||
lvextend extends the size of an LV. This requires allocating logical
|
||||
extents from the VG's free physical extents. A copy\-on\-write snapshot LV
|
||||
can also be extended to provide more space to hold COW blocks. Use
|
||||
\fBlvconvert\fP(8) to change the number of data images in a RAID or
|
||||
extents from the VG's free physical extents. If the extension adds a new
|
||||
LV segment, the new segment will use the existing segment type of the LV.
|
||||
|
||||
Extending a copy\-on\-write snapshot LV adds space for COW blocks.
|
||||
|
||||
Use \fBlvconvert\fP(8) to change the number of data images in a RAID or
|
||||
mirrored LV.
|
||||
|
||||
In the usage section below, \fB--size\fP \fISize\fP can be replaced
|
||||
with \fB--extents\fP \fINumber\fP. See both descriptions
|
||||
the options section.
|
||||
|
||||
@@ -19,6 +19,11 @@ LVM RAID uses both Device Mapper (DM) and Multiple Device (MD) drivers
|
||||
from the Linux kernel. DM is used to create and manage visible LVM
|
||||
devices, and MD is used to place data on physical devices.
|
||||
|
||||
LVM creates hidden LVs (dm devices) layered between the visible LV and
|
||||
physical devices. LVs in that middle layers are called sub LVs.
|
||||
For LVM raid, a sub LV pair to store data and metadata (raid superblock
|
||||
and bitmap) is created per raid image/leg (see lvs command examples below).
|
||||
|
||||
.SH Create a RAID LV
|
||||
|
||||
To create a RAID LV, use lvcreate and specify an LV type.
|
||||
@@ -77,7 +82,7 @@ data that is written to one device before moving to the next.
|
||||
|
||||
Also called mirroring, raid1 uses multiple devices to duplicate LV data.
|
||||
The LV data remains available if all but one of the devices fail.
|
||||
The minimum number of devices required is 2.
|
||||
The minimum number of devices (i.e. sub LV pairs) required is 2.
|
||||
|
||||
.B lvcreate \-\-type raid1
|
||||
[\fB\-\-mirrors\fP \fINumber\fP]
|
||||
@@ -98,8 +103,8 @@ original and one mirror image.
|
||||
|
||||
\&
|
||||
|
||||
raid4 is a form of striping that uses an extra device dedicated to storing
|
||||
parity blocks. The LV data remains available if one device fails. The
|
||||
raid4 is a form of striping that uses an extra, first device dedicated to
|
||||
storing parity blocks. The LV data remains available if one device fails. The
|
||||
parity is used to recalculate data that is lost from a single device. The
|
||||
minimum number of devices required is 3.
|
||||
|
||||
@@ -131,10 +136,10 @@ stored on the same device.
|
||||
\&
|
||||
|
||||
raid5 is a form of striping that uses an extra device for storing parity
|
||||
blocks. LV data and parity blocks are stored on each device. The LV data
|
||||
remains available if one device fails. The parity is used to recalculate
|
||||
data that is lost from a single device. The minimum number of devices
|
||||
required is 3.
|
||||
blocks. LV data and parity blocks are stored on each device, typically in
|
||||
a rotating pattern for performance reasons. The LV data remains available
|
||||
if one device fails. The parity is used to recalculate data that is lost
|
||||
from a single device. The minimum number of devices required is 3.
|
||||
|
||||
.B lvcreate \-\-type raid5
|
||||
[\fB\-\-stripes\fP \fINumber\fP \fB\-\-stripesize\fP \fISize\fP]
|
||||
@@ -167,7 +172,8 @@ parity 0 with data restart.) See \fBRAID5 variants\fP below.
|
||||
\&
|
||||
|
||||
raid6 is a form of striping like raid5, but uses two extra devices for
|
||||
parity blocks. LV data and parity blocks are stored on each device. The
|
||||
parity blocks. LV data and parity blocks are stored on each device, typically
|
||||
in a rotating pattern for perfomramce reasons. The
|
||||
LV data remains available if up to two devices fail. The parity is used
|
||||
to recalculate data that is lost from one or two devices. The minimum
|
||||
number of devices required is 5.
|
||||
@@ -919,7 +925,6 @@ Convert the linear LV to raid1 with three images
|
||||
# lvconvert --type raid1 --mirrors 2 vg/my_lv
|
||||
.fi
|
||||
|
||||
.ig
|
||||
4. Converting an LV from \fBstriped\fP (with 4 stripes) to \fBraid6_nc\fP.
|
||||
|
||||
.nf
|
||||
@@ -927,9 +932,9 @@ Start with a striped LV:
|
||||
|
||||
# lvcreate --stripes 4 -L64M -n my_lv vg
|
||||
|
||||
Convert the striped LV to raid6_nc:
|
||||
Convert the striped LV to raid6_n_6:
|
||||
|
||||
# lvconvert --type raid6_nc vg/my_lv
|
||||
# lvconvert --type raid6 vg/my_lv
|
||||
|
||||
# lvs -a -o lv_name,segtype,sync_percent,data_copies
|
||||
LV Type Cpy%Sync #Cpy
|
||||
@@ -954,14 +959,12 @@ existing stripe devices. It then creates 2 additional MetaLV/DataLV pairs
|
||||
|
||||
If rotating data/parity is required, such as with raid6_nr, it must be
|
||||
done by reshaping (see below).
|
||||
..
|
||||
|
||||
|
||||
.SH RAID Reshaping
|
||||
|
||||
RAID reshaping is changing attributes of a RAID LV while keeping the same
|
||||
RAID level, i.e. changes that do not involve changing the number of
|
||||
devices. This includes changing RAID layout, stripe size, or number of
|
||||
RAID level. This includes changing RAID layout, stripe size, or number of
|
||||
stripes.
|
||||
|
||||
When changing the RAID layout or stripe size, no new SubLVs (MetaLVs or
|
||||
@@ -975,15 +978,12 @@ partially updated and corrupted. Instead, an existing stripe is quiesced,
|
||||
read, changed in layout, and the new stripe written to free space. Once
|
||||
that is done, the new stripe is unquiesced and used.)
|
||||
|
||||
(The reshaping features are planned for a future release.)
|
||||
|
||||
.ig
|
||||
.SS Examples
|
||||
|
||||
1. Converting raid6_n_6 to raid6_nr with rotating data/parity.
|
||||
|
||||
This conversion naturally follows a previous conversion from striped to
|
||||
raid6_n_6 (shown above). It completes the transition to a more
|
||||
This conversion naturally follows a previous conversion from striped/raid0
|
||||
to raid6_n_6 (shown above). It completes the transition to a more
|
||||
traditional RAID6.
|
||||
|
||||
.nf
|
||||
@@ -1029,15 +1029,13 @@ traditional RAID6.
|
||||
The DataLVs are larger (additional segment in each) which provides space
|
||||
for out-of-place reshaping. The result is:
|
||||
|
||||
FIXME: did the lv name change from my_lv to r?
|
||||
.br
|
||||
FIXME: should we change device names in the example to sda,sdb,sdc?
|
||||
.br
|
||||
FIXME: include -o devices or seg_pe_ranges above also?
|
||||
|
||||
.nf
|
||||
# lvs -a -o lv_name,segtype,seg_pe_ranges,dataoffset
|
||||
LV Type PE Ranges data
|
||||
LV Type PE Ranges Doff
|
||||
r raid6_nr r_rimage_0:0-32 \\
|
||||
r_rimage_1:0-32 \\
|
||||
r_rimage_2:0-32 \\
|
||||
@@ -1093,19 +1091,15 @@ RAID5 right asymmetric
|
||||
\[bu]
|
||||
Rotating parity 0 with data continuation
|
||||
|
||||
.ig
|
||||
raid5_n
|
||||
.br
|
||||
\[bu]
|
||||
RAID5 striping
|
||||
RAID5 parity n
|
||||
.br
|
||||
\[bu]
|
||||
Same layout as raid4 with a dedicated parity N with striped data.
|
||||
.br
|
||||
Dedicated parity device n used for striped/raid0 conversions
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
..
|
||||
Used for RAID Takeover
|
||||
|
||||
.SH RAID6 Variants
|
||||
|
||||
@@ -1144,7 +1138,24 @@ RAID6 N continue
|
||||
\[bu]
|
||||
Rotating parity N with data continuation
|
||||
|
||||
.ig
|
||||
raid6_n_6
|
||||
.br
|
||||
\[bu]
|
||||
RAID6 last parity devices
|
||||
.br
|
||||
\[bu]
|
||||
Dedicated last parity devices used for striped/raid0 conversions
|
||||
\[bu]
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_{ls,rs,la,ra}_6
|
||||
.br
|
||||
\[bu]
|
||||
RAID6 last parity device
|
||||
.br
|
||||
\[bu]
|
||||
Dedicated last parity device used for conversions from/to raid5_{ls,rs,la,ra}
|
||||
|
||||
raid6_n_6
|
||||
.br
|
||||
\[bu]
|
||||
@@ -1154,8 +1165,7 @@ RAID6 N continue
|
||||
Fixed P-Syndrome N-1 and Q-Syndrome N with striped data
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_ls_6
|
||||
.br
|
||||
@@ -1166,8 +1176,7 @@ RAID6 N continue
|
||||
Same as raid5_ls for N-1 disks with fixed Q-Syndrome N
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_la_6
|
||||
.br
|
||||
@@ -1178,8 +1187,7 @@ RAID6 N continue
|
||||
Same as raid5_la for N-1 disks with fixed Q-Syndrome N
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_rs_6
|
||||
.br
|
||||
@@ -1190,8 +1198,7 @@ RAID6 N continue
|
||||
Same as raid5_rs for N-1 disks with fixed Q-Syndrome N
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
Used for RAID Takeover
|
||||
|
||||
raid6_ra_6
|
||||
.br
|
||||
@@ -1202,9 +1209,7 @@ RAID6 N continue
|
||||
Same as raid5_ra for N-1 disks with fixed Q-Syndrome N
|
||||
.br
|
||||
\[bu]
|
||||
Used for
|
||||
.B RAID Takeover
|
||||
..
|
||||
Used for RAID Takeover
|
||||
|
||||
|
||||
.ig
|
||||
|
||||
@@ -12,3 +12,8 @@ system.
|
||||
Sizes will be rounded if necessary. For example, the LV size must be an
|
||||
exact number of extents, and the size of a striped segment must be a
|
||||
multiple of the number of stripes.
|
||||
|
||||
In the usage section below, \fB--size\fP \fISize\fP can be replaced
|
||||
with \fB--extents\fP \fINumber\fP. See both descriptions
|
||||
in the options section.
|
||||
|
||||
|
||||
@@ -1,2 +1,7 @@
|
||||
lvresize resizes an LV in the same way as lvextend and lvreduce. See
|
||||
\fBlvextend\fP(8) and \fBlvreduce\fP(8) for more information.
|
||||
|
||||
In the usage section below, \fB--size\fP \fISize\fP can be replaced
|
||||
with \fB--extents\fP \fINumber\fP. See both descriptions
|
||||
in the options section.
|
||||
|
||||
|
||||
@@ -56,6 +56,7 @@ Inconsistencies are detected by initiating a "check" on a RAID logical volume.
|
||||
(The scrubbing operations, "check" and "repair", can be performed on a RAID
|
||||
logical volume via the 'lvchange' command.) (w)ritemostly signifies the
|
||||
devices in a RAID 1 logical volume that have been marked write-mostly.
|
||||
(R)emove after reshape signifies freed striped raid images to be removed.
|
||||
.IP
|
||||
Related to Thin pool Logical Volumes: (F)ailed, out of (D)ata space,
|
||||
(M)etadata read only.
|
||||
|
||||
@@ -198,6 +198,9 @@ class TestDbusService(unittest.TestCase):
|
||||
self.objs[MANAGER_INT][0].Manager.PvCreate(
|
||||
dbus.String(device), dbus.Int32(g_tmo), EOD)
|
||||
)
|
||||
|
||||
self._validate_lookup(device, pv_path)
|
||||
|
||||
self.assertTrue(pv_path is not None and len(pv_path) > 0)
|
||||
return pv_path
|
||||
|
||||
@@ -229,6 +232,7 @@ class TestDbusService(unittest.TestCase):
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
self.assertTrue(vg_path is not None and len(vg_path) > 0)
|
||||
return ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
@@ -263,6 +267,9 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def _create_raid5_thin_pool(self, vg=None):
|
||||
|
||||
meta_name = "meta_r5"
|
||||
data_name = "data_r5"
|
||||
|
||||
if not vg:
|
||||
pv_paths = []
|
||||
for pp in self.objs[PV_INT]:
|
||||
@@ -272,7 +279,7 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
lv_meta_path = self.handle_return(
|
||||
vg.LvCreateRaid(
|
||||
dbus.String("meta_r5"),
|
||||
dbus.String(meta_name),
|
||||
dbus.String("raid5"),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.UInt32(0),
|
||||
@@ -280,10 +287,11 @@ class TestDbusService(unittest.TestCase):
|
||||
dbus.Int32(g_tmo),
|
||||
EOD)
|
||||
)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, meta_name), lv_meta_path)
|
||||
|
||||
lv_data_path = self.handle_return(
|
||||
vg.LvCreateRaid(
|
||||
dbus.String("data_r5"),
|
||||
dbus.String(data_name),
|
||||
dbus.String("raid5"),
|
||||
dbus.UInt64(mib(16)),
|
||||
dbus.UInt32(0),
|
||||
@@ -292,6 +300,8 @@ class TestDbusService(unittest.TestCase):
|
||||
EOD)
|
||||
)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, data_name), lv_data_path)
|
||||
|
||||
thin_pool_path = self.handle_return(
|
||||
vg.CreateThinPool(
|
||||
dbus.ObjectPath(lv_meta_path),
|
||||
@@ -339,7 +349,13 @@ class TestDbusService(unittest.TestCase):
|
||||
self.assertTrue(cached_thin_pool_object.ThinPool.MetaDataLv != '/')
|
||||
|
||||
def _lookup(self, lvm_id):
|
||||
return self.objs[MANAGER_INT][0].Manager.LookUpByLvmId(lvm_id)
|
||||
return self.objs[MANAGER_INT][0].\
|
||||
Manager.LookUpByLvmId(dbus.String(lvm_id))
|
||||
|
||||
def _validate_lookup(self, lvm_name, object_path):
|
||||
t = self._lookup(lvm_name)
|
||||
self.assertTrue(
|
||||
object_path == t, "%s != %s for %s" % (object_path, t, lvm_name))
|
||||
|
||||
def test_lookup_by_lvm_id(self):
|
||||
# For the moment lets just lookup what we know about which is PVs
|
||||
@@ -392,10 +408,8 @@ class TestDbusService(unittest.TestCase):
|
||||
def test_vg_rename(self):
|
||||
vg = self._vg_create().Vg
|
||||
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
|
||||
# Do a vg lookup
|
||||
path = mgr.LookUpByLvmId(dbus.String(vg.Name))
|
||||
path = self._lookup(vg.Name)
|
||||
|
||||
vg_name_start = vg.Name
|
||||
|
||||
@@ -406,7 +420,7 @@ class TestDbusService(unittest.TestCase):
|
||||
for i in range(0, 5):
|
||||
lv_t = self._create_lv(size=mib(4), vg=vg)
|
||||
full_name = "%s/%s" % (vg_name_start, lv_t.LvCommon.Name)
|
||||
lv_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lv_path = self._lookup(full_name)
|
||||
self.assertTrue(lv_path == lv_t.object_path)
|
||||
|
||||
new_name = 'renamed_' + vg.Name
|
||||
@@ -417,7 +431,7 @@ class TestDbusService(unittest.TestCase):
|
||||
self._check_consistency()
|
||||
|
||||
# Do a vg lookup
|
||||
path = mgr.LookUpByLvmId(dbus.String(new_name))
|
||||
path = self._lookup(new_name)
|
||||
self.assertTrue(path != '/', "%s" % (path))
|
||||
self.assertTrue(prev_path == path, "%s != %s" % (prev_path, path))
|
||||
|
||||
@@ -435,14 +449,12 @@ class TestDbusService(unittest.TestCase):
|
||||
lv_proxy.Vg == vg.object_path, "%s != %s" %
|
||||
(lv_proxy.Vg, vg.object_path))
|
||||
full_name = "%s/%s" % (new_name, lv_proxy.Name)
|
||||
lv_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lv_path = self._lookup(full_name)
|
||||
self.assertTrue(
|
||||
lv_path == lv_proxy.object_path, "%s != %s" %
|
||||
(lv_path, lv_proxy.object_path))
|
||||
|
||||
def _verify_hidden_lookups(self, lv_common_object, vgname):
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
|
||||
hidden_lv_paths = lv_common_object.HiddenLvs
|
||||
|
||||
for h in hidden_lv_paths:
|
||||
@@ -454,7 +466,7 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
full_name = "%s/%s" % (vgname, h_lv.Name)
|
||||
# print("Hidden check %s" % (full_name))
|
||||
lookup_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lookup_path = self._lookup(full_name)
|
||||
self.assertTrue(lookup_path != '/')
|
||||
self.assertTrue(lookup_path == h_lv.object_path)
|
||||
|
||||
@@ -462,7 +474,7 @@ class TestDbusService(unittest.TestCase):
|
||||
full_name = "%s/%s" % (vgname, h_lv.Name[1:-1])
|
||||
# print("Hidden check %s" % (full_name))
|
||||
|
||||
lookup_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lookup_path = self._lookup(full_name)
|
||||
self.assertTrue(lookup_path != '/')
|
||||
self.assertTrue(lookup_path == h_lv.object_path)
|
||||
|
||||
@@ -471,7 +483,6 @@ class TestDbusService(unittest.TestCase):
|
||||
(vg, thin_pool) = self._create_raid5_thin_pool()
|
||||
|
||||
vg_name_start = vg.Name
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
|
||||
# noinspection PyTypeChecker
|
||||
self._verify_hidden_lookups(thin_pool.LvCommon, vg_name_start)
|
||||
@@ -486,11 +497,14 @@ class TestDbusService(unittest.TestCase):
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
|
||||
self._validate_lookup(
|
||||
"%s/%s" % (vg_name_start, lv_name), thin_lv_path)
|
||||
|
||||
self.assertTrue(thin_lv_path != '/')
|
||||
|
||||
full_name = "%s/%s" % (vg_name_start, lv_name)
|
||||
|
||||
lookup_lv_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lookup_lv_path = self._lookup(full_name)
|
||||
self.assertTrue(
|
||||
thin_lv_path == lookup_lv_path,
|
||||
"%s != %s" % (thin_lv_path, lookup_lv_path))
|
||||
@@ -518,7 +532,7 @@ class TestDbusService(unittest.TestCase):
|
||||
(lv_proxy.Vg, vg.object_path))
|
||||
full_name = "%s/%s" % (new_name, lv_proxy.Name)
|
||||
# print('Full Name %s' % (full_name))
|
||||
lv_path = mgr.LookUpByLvmId(dbus.String(full_name))
|
||||
lv_path = self._lookup(full_name)
|
||||
self.assertTrue(
|
||||
lv_path == lv_proxy.object_path, "%s != %s" %
|
||||
(lv_path, lv_proxy.object_path))
|
||||
@@ -543,75 +557,88 @@ class TestDbusService(unittest.TestCase):
|
||||
return lv
|
||||
|
||||
def test_lv_create(self):
|
||||
lv_name = lv_n()
|
||||
vg = self._vg_create().Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreate,
|
||||
(dbus.String(lv_n()), dbus.UInt64(mib(4)),
|
||||
(dbus.String(lv_name), dbus.UInt64(mib(4)),
|
||||
dbus.Array([], signature='(ott)'), dbus.Int32(g_tmo),
|
||||
EOD), vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_create_job(self):
|
||||
|
||||
lv_name = lv_n()
|
||||
vg = self._vg_create().Vg
|
||||
(object_path, job_path) = vg.LvCreate(
|
||||
dbus.String(lv_n()), dbus.UInt64(mib(4)),
|
||||
dbus.String(lv_name), dbus.UInt64(mib(4)),
|
||||
dbus.Array([], signature='(ott)'), dbus.Int32(0),
|
||||
EOD)
|
||||
|
||||
self.assertTrue(object_path == '/')
|
||||
self.assertTrue(job_path != '/')
|
||||
object_path = self._wait_for_job(job_path)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), object_path)
|
||||
self.assertTrue(object_path != '/')
|
||||
|
||||
def test_lv_create_linear(self):
|
||||
|
||||
lv_name = lv_n()
|
||||
vg = self._vg_create().Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateLinear,
|
||||
(dbus.String(lv_n()), dbus.UInt64(mib(4)), dbus.Boolean(False),
|
||||
(dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo), EOD),
|
||||
vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_create_striped(self):
|
||||
lv_name = lv_n()
|
||||
pv_paths = []
|
||||
for pp in self.objs[PV_INT]:
|
||||
pv_paths.append(pp.object_path)
|
||||
|
||||
vg = self._vg_create(pv_paths).Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateStriped,
|
||||
(dbus.String(lv_n()), dbus.UInt64(mib(4)),
|
||||
(dbus.String(lv_name), dbus.UInt64(mib(4)),
|
||||
dbus.UInt32(2), dbus.UInt32(8), dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo), EOD),
|
||||
vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_create_mirror(self):
|
||||
lv_name = lv_n()
|
||||
pv_paths = []
|
||||
for pp in self.objs[PV_INT]:
|
||||
pv_paths.append(pp.object_path)
|
||||
|
||||
vg = self._vg_create(pv_paths).Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateMirror,
|
||||
(dbus.String(lv_n()), dbus.UInt64(mib(4)), dbus.UInt32(2),
|
||||
(dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.UInt32(2),
|
||||
dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_create_raid(self):
|
||||
lv_name = lv_n()
|
||||
pv_paths = []
|
||||
for pp in self.objs[PV_INT]:
|
||||
pv_paths.append(pp.object_path)
|
||||
|
||||
vg = self._vg_create(pv_paths).Vg
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateRaid,
|
||||
(dbus.String(lv_n()), dbus.String('raid5'), dbus.UInt64(mib(16)),
|
||||
(dbus.String(lv_name), dbus.String('raid5'), dbus.UInt64(mib(16)),
|
||||
dbus.UInt32(2), dbus.UInt32(8), dbus.Int32(g_tmo),
|
||||
EOD),
|
||||
vg,
|
||||
LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def _create_lv(self, thinpool=False, size=None, vg=None):
|
||||
|
||||
lv_name = lv_n()
|
||||
interfaces = list(LV_BASE_INT)
|
||||
|
||||
if thinpool:
|
||||
@@ -627,12 +654,15 @@ class TestDbusService(unittest.TestCase):
|
||||
if size is None:
|
||||
size = mib(4)
|
||||
|
||||
return self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateLinear,
|
||||
(dbus.String(lv_n()), dbus.UInt64(size),
|
||||
(dbus.String(lv_name), dbus.UInt64(size),
|
||||
dbus.Boolean(thinpool), dbus.Int32(g_tmo), EOD),
|
||||
vg, interfaces)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
return lv
|
||||
|
||||
def test_lv_create_rounding(self):
|
||||
self._create_lv(size=(mib(2) + 13))
|
||||
|
||||
@@ -643,7 +673,7 @@ class TestDbusService(unittest.TestCase):
|
||||
# Rename a regular LV
|
||||
lv = self._create_lv()
|
||||
|
||||
path = self.objs[MANAGER_INT][0].Manager.LookUpByLvmId(lv.LvCommon.Name)
|
||||
path = self._lookup(lv.LvCommon.Name)
|
||||
prev_path = path
|
||||
|
||||
new_name = 'renamed_' + lv.LvCommon.Name
|
||||
@@ -651,8 +681,7 @@ class TestDbusService(unittest.TestCase):
|
||||
self.handle_return(lv.Lv.Rename(dbus.String(new_name),
|
||||
dbus.Int32(g_tmo), EOD))
|
||||
|
||||
path = self.objs[MANAGER_INT][0].Manager.LookUpByLvmId(
|
||||
dbus.String(new_name))
|
||||
path = self._lookup(new_name)
|
||||
|
||||
self._check_consistency()
|
||||
self.assertTrue(prev_path == path, "%s != %s" % (prev_path, path))
|
||||
@@ -677,26 +706,32 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
# This returns a LV with the LV interface, need to get a proxy for
|
||||
# thinpool interface too
|
||||
tp = self._create_lv(True)
|
||||
vg = self._vg_create().Vg
|
||||
tp = self._create_lv(thinpool=True, vg=vg)
|
||||
|
||||
lv_name = lv_n('_thin_lv')
|
||||
|
||||
thin_path = self.handle_return(
|
||||
tp.ThinPool.LvCreate(
|
||||
dbus.String(lv_n('_thin_lv')),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(8)),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD)
|
||||
)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), thin_path)
|
||||
|
||||
lv = ClientProxy(self.bus, thin_path,
|
||||
interfaces=(LV_COMMON_INT, LV_INT))
|
||||
|
||||
re_named = 'rename_test' + lv.LvCommon.Name
|
||||
rc = self.handle_return(
|
||||
lv.Lv.Rename(
|
||||
dbus.String('rename_test' + lv.LvCommon.Name),
|
||||
dbus.String(re_named),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD)
|
||||
)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, re_named), thin_path)
|
||||
self.assertTrue(rc == '/')
|
||||
self._check_consistency()
|
||||
|
||||
@@ -748,18 +783,18 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def test_lv_create_pv_specific(self):
|
||||
vg = self._vg_create().Vg
|
||||
|
||||
lv_name = lv_n()
|
||||
pv = vg.Pvs
|
||||
|
||||
pvp = ClientProxy(self.bus, pv[0], interfaces=(PV_INT,))
|
||||
|
||||
self._test_lv_create(
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreate, (
|
||||
dbus.String(lv_n()),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.Array([[pvp.object_path, 0, (pvp.Pv.PeCount - 1)]],
|
||||
signature='(ott)'),
|
||||
dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT)
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
def test_lv_resize(self):
|
||||
|
||||
@@ -930,7 +965,8 @@ class TestDbusService(unittest.TestCase):
|
||||
self.assertTrue(vg_path == '/')
|
||||
self.assertTrue(vg_job and len(vg_job) > 0)
|
||||
|
||||
self._wait_for_job(vg_job)
|
||||
vg_path = self._wait_for_job(vg_job)
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
|
||||
def _test_expired_timer(self, num_lvs):
|
||||
rc = False
|
||||
@@ -945,17 +981,20 @@ class TestDbusService(unittest.TestCase):
|
||||
vg_proxy = self._vg_create(pv_paths)
|
||||
|
||||
for i in range(0, num_lvs):
|
||||
|
||||
lv_name = lv_n()
|
||||
vg_proxy.update()
|
||||
if vg_proxy.Vg.FreeCount > 0:
|
||||
job = self.handle_return(
|
||||
lv_path = self.handle_return(
|
||||
vg_proxy.Vg.LvCreateLinear(
|
||||
dbus.String(lv_n()),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self.assertTrue(job != '/')
|
||||
self.assertTrue(lv_path != '/')
|
||||
self._validate_lookup(
|
||||
"%s/%s" % (vg_proxy.Vg.Name, lv_name), lv_path)
|
||||
|
||||
else:
|
||||
# We ran out of space, test will probably fail
|
||||
break
|
||||
@@ -1064,15 +1103,18 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def test_lv_tags(self):
|
||||
vg = self._vg_create().Vg
|
||||
lv_name = lv_n()
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateLinear,
|
||||
(dbus.String(lv_n()),
|
||||
(dbus.String(lv_name),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD),
|
||||
vg, LV_BASE_INT)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
t = ['Testing', 'tags']
|
||||
|
||||
self.handle_return(
|
||||
@@ -1148,15 +1190,18 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def test_vg_activate_deactivate(self):
|
||||
vg = self._vg_create().Vg
|
||||
self._test_lv_create(
|
||||
lv_name = lv_n()
|
||||
lv = self._test_lv_create(
|
||||
vg.LvCreateLinear, (
|
||||
dbus.String(lv_n()),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(4)),
|
||||
dbus.Boolean(False),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD),
|
||||
vg, LV_BASE_INT)
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
|
||||
|
||||
vg.update()
|
||||
|
||||
rc = self.handle_return(
|
||||
@@ -1361,15 +1406,19 @@ class TestDbusService(unittest.TestCase):
|
||||
|
||||
def test_snapshot_merge_thin(self):
|
||||
# Create a thin LV, snapshot it and merge it
|
||||
tp = self._create_lv(True)
|
||||
vg = self._vg_create().Vg
|
||||
tp = self._create_lv(thinpool=True, vg=vg)
|
||||
lv_name = lv_n('_thin_lv')
|
||||
|
||||
thin_path = self.handle_return(
|
||||
tp.ThinPool.LvCreate(
|
||||
dbus.String(lv_n('_thin_lv')),
|
||||
dbus.String(lv_name),
|
||||
dbus.UInt64(mib(10)),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
|
||||
self._validate_lookup("%s/%s" % (vg.Name, lv_name), thin_path)
|
||||
|
||||
lv_p = ClientProxy(self.bus, thin_path,
|
||||
interfaces=(LV_INT, LV_COMMON_INT))
|
||||
|
||||
@@ -1512,12 +1561,14 @@ class TestDbusService(unittest.TestCase):
|
||||
EOD))
|
||||
|
||||
# Create a VG and try to create LVs with different bad names
|
||||
vg_name = vg_n()
|
||||
vg_path = self.handle_return(
|
||||
mgr.VgCreate(
|
||||
dbus.String(vg_n()),
|
||||
dbus.String(vg_name),
|
||||
dbus.Array(pv_paths, 'o'),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
|
||||
vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
@@ -1563,13 +1614,16 @@ class TestDbusService(unittest.TestCase):
|
||||
def test_invalid_tags(self):
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
pv_paths = [self.objs[PV_INT][0].object_path]
|
||||
vg_name = vg_n()
|
||||
|
||||
vg_path = self.handle_return(
|
||||
mgr.VgCreate(
|
||||
dbus.String(vg_n()),
|
||||
dbus.String(vg_name),
|
||||
dbus.Array(pv_paths, 'o'),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
|
||||
vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
for c in self._invalid_tag_characters():
|
||||
@@ -1591,13 +1645,15 @@ class TestDbusService(unittest.TestCase):
|
||||
def test_tag_names(self):
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
pv_paths = [self.objs[PV_INT][0].object_path]
|
||||
vg_name = vg_n()
|
||||
|
||||
vg_path = self.handle_return(
|
||||
mgr.VgCreate(
|
||||
dbus.String(vg_n()),
|
||||
dbus.String(vg_name),
|
||||
dbus.Array(pv_paths, 'o'),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
for i in range(1, 64):
|
||||
@@ -1622,13 +1678,15 @@ class TestDbusService(unittest.TestCase):
|
||||
def test_tag_regression(self):
|
||||
mgr = self.objs[MANAGER_INT][0].Manager
|
||||
pv_paths = [self.objs[PV_INT][0].object_path]
|
||||
vg_name = vg_n()
|
||||
|
||||
vg_path = self.handle_return(
|
||||
mgr.VgCreate(
|
||||
dbus.String(vg_n()),
|
||||
dbus.String(vg_name),
|
||||
dbus.Array(pv_paths, 'o'),
|
||||
dbus.Int32(g_tmo),
|
||||
EOD))
|
||||
self._validate_lookup(vg_name, vg_path)
|
||||
vg_proxy = ClientProxy(self.bus, vg_path, interfaces=(VG_INT, ))
|
||||
|
||||
tag = '--h/K.6g0A4FOEatf3+k_nI/Yp&L_u2oy-=j649x:+dUcYWPEo6.IWT0c'
|
||||
|
||||
@@ -1317,7 +1317,7 @@ udev_wait() {
|
||||
wait_for_sync() {
|
||||
local i
|
||||
for i in {1..100} ; do
|
||||
check in_sync $1 $2 && return
|
||||
check in_sync $1 $2 $3 && return
|
||||
sleep .2
|
||||
done
|
||||
|
||||
|
||||
@@ -178,7 +178,7 @@ linear() {
|
||||
$(lvl $lv -o+devices)
|
||||
}
|
||||
|
||||
# in_sync <VG> <LV>
|
||||
# in_sync <VG> <LV> <ignore 'a'>
|
||||
# Works for "mirror" and "raid*"
|
||||
in_sync() {
|
||||
local a
|
||||
@@ -187,8 +187,11 @@ in_sync() {
|
||||
local type
|
||||
local snap=""
|
||||
local lvm_name="$1/$2"
|
||||
local ignore_a="$3"
|
||||
local dm_name=$(echo $lvm_name | sed s:-:--: | sed s:/:-:)
|
||||
|
||||
[ -z "$ignore_a" ] && ignore_a=0
|
||||
|
||||
a=( $(dmsetup status $dm_name) ) || \
|
||||
die "Unable to get sync status of $1"
|
||||
|
||||
@@ -225,7 +228,7 @@ in_sync() {
|
||||
return 1
|
||||
fi
|
||||
|
||||
[[ ${a[$(($idx - 1))]} =~ a ]] && \
|
||||
[[ ${a[$(($idx - 1))]} =~ a ]] && [ $ignore_a -eq 0 ] && \
|
||||
die "$lvm_name ($type$snap) in-sync, but 'a' characters in health status"
|
||||
|
||||
echo "$lvm_name ($type$snap) is in-sync \"${a[@]}\""
|
||||
@@ -310,6 +313,12 @@ lv_field() {
|
||||
die "lv_field: lv=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\""
|
||||
}
|
||||
|
||||
lv_first_seg_field() {
|
||||
local actual=$(get lv_first_seg_field "$1" "$2" "${@:4}")
|
||||
test "$actual" = "$3" || \
|
||||
die "lv_field: lv=$1, field=\"$2\", actual=\"$actual\", expected=\"$3\""
|
||||
}
|
||||
|
||||
lvh_field() {
|
||||
local actual=$(get lvh_field "$1" "$2" "${@:4}")
|
||||
test "$actual" = "$3" || \
|
||||
|
||||
@@ -42,6 +42,11 @@ lv_field() {
|
||||
trim_ "$r"
|
||||
}
|
||||
|
||||
lv_first_seg_field() {
|
||||
local r=$(lvs --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1" | head -1)
|
||||
trim_ "$r"
|
||||
}
|
||||
|
||||
lvh_field() {
|
||||
local r=$(lvs -H --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1")
|
||||
trim_ "$r"
|
||||
|
||||
@@ -188,7 +188,7 @@ run_syncaction_check() {
|
||||
# 'lvs' should show results
|
||||
lvchange --syncaction check $vg/$lv
|
||||
aux wait_for_sync $vg $lv
|
||||
check lv_attr_bit health $vg/$lv "-"
|
||||
check lv_attr_bit health $vg/$lv "-" || check lv_attr_bit health $vg/$lv "m"
|
||||
check lv_field $vg/$lv raid_mismatch_count "0"
|
||||
}
|
||||
|
||||
|
||||
@@ -21,14 +21,14 @@ aux prepare_vg 4
|
||||
|
||||
for d in $dev1 $dev2 $dev3 $dev4
|
||||
do
|
||||
aux delay_dev $d 1
|
||||
aux delay_dev $d 1 1
|
||||
done
|
||||
|
||||
#
|
||||
# Test writemostly prohibited on resyncrhonizing raid1
|
||||
# Test writemostly prohibited on resynchronizing raid1
|
||||
#
|
||||
|
||||
# Create 4-way striped LV
|
||||
# Create 4-way raid1 LV
|
||||
lvcreate -aey --ty raid1 -m 3 -L 32M -n $lv1 $vg
|
||||
not lvchange -y --writemostly $dev1 $vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid1"
|
||||
|
||||
68
test/shell/lvconvert-raid-reshape-linear_to_striped.sh
Normal file
68
test/shell/lvconvert-raid-reshape-linear_to_striped.sh
Normal file
@@ -0,0 +1,68 @@
|
||||
#!/bin/sh
|
||||
# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
|
||||
#
|
||||
# This copyrighted material is made available to anyone wishing to use,
|
||||
# modify, copy, or redistribute it subject to the terms and conditions
|
||||
# of the GNU General Public License v.2.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
SKIP_WITH_LVMLOCKD=1
|
||||
SKIP_WITH_LVMPOLLD=1
|
||||
|
||||
. lib/inittest
|
||||
|
||||
which mkfs.ext4 || skip
|
||||
aux have_raid 1 10 2 || skip
|
||||
|
||||
aux prepare_vg 5
|
||||
|
||||
#
|
||||
# Test single step linear -> striped conversion
|
||||
#
|
||||
|
||||
# Create linear LV
|
||||
lvcreate -aey -L 16M -n $lv1 $vg
|
||||
check lv_field $vg/$lv1 segtype "linear"
|
||||
check lv_field $vg/$lv1 stripes 1
|
||||
check lv_field $vg/$lv1 data_stripes 1
|
||||
echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert linear -> raid1
|
||||
lvconvert -y -m 1 $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid1"
|
||||
check lv_field $vg/$lv1 stripes 2
|
||||
check lv_field $vg/$lv1 data_stripes 2
|
||||
check lv_field $vg/$lv1 regionsize "512.00k"
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert raid1 -> raid5_n
|
||||
lvconvert -y --ty raid5_n $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_field $vg/$lv1 stripes 2
|
||||
check lv_field $vg/$lv1 data_stripes 1
|
||||
check lv_field $vg/$lv1 stripesize "64.00k"
|
||||
check lv_field $vg/$lv1 regionsize "512.00k"
|
||||
|
||||
# Convert raid5_n adding stripes
|
||||
lvconvert -y --stripes 4 $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 4
|
||||
check lv_first_seg_field $vg/$lv1 stripes 5
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "512.00k"
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert raid5_n -> striped
|
||||
lvconvert -y --type striped $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
vgremove -ff $vg
|
||||
106
test/shell/lvconvert-raid-reshape-striped_to_linear.sh
Normal file
106
test/shell/lvconvert-raid-reshape-striped_to_linear.sh
Normal file
@@ -0,0 +1,106 @@
|
||||
#!/bin/sh
|
||||
# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
|
||||
#
|
||||
# This copyrighted material is made available to anyone wishing to use,
|
||||
# modify, copy, or redistribute it subject to the terms and conditions
|
||||
# of the GNU General Public License v.2.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA
|
||||
|
||||
SKIP_WITH_LVMLOCKD=1
|
||||
SKIP_WITH_LVMPOLLD=1
|
||||
|
||||
. lib/inittest
|
||||
|
||||
which mkfs.ext4 || skip
|
||||
aux have_raid 1 10 2 || skip
|
||||
|
||||
aux prepare_vg 5
|
||||
|
||||
#
|
||||
# Test single step linear -> striped conversion
|
||||
#
|
||||
|
||||
# Create 4-way striped LV
|
||||
lvcreate -aey -i 4 -I 32k -L 16M -n $lv1 $vg
|
||||
check lv_field $vg/$lv1 segtype "striped"
|
||||
check lv_field $vg/$lv1 data_stripes 4
|
||||
check lv_field $vg/$lv1 stripes 4
|
||||
check lv_field $vg/$lv1 stripesize "32.00k"
|
||||
check lv_field $vg/$lv1 reshape_len ""
|
||||
echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert striped -> raid5(_n)
|
||||
lvconvert -y --ty raid5 -R 128k $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_field $vg/$lv1 data_stripes 4
|
||||
check lv_field $vg/$lv1 stripes 5
|
||||
check lv_field $vg/$lv1 stripesize "32.00k"
|
||||
check lv_field $vg/$lv1 regionsize "128.00k"
|
||||
check lv_field $vg/$lv1 reshape_len ""
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Extend raid5_n LV by factor 4 to keep size once linear
|
||||
lvresize -y -L 64 $vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_field $vg/$lv1 data_stripes 4
|
||||
check lv_field $vg/$lv1 stripes 5
|
||||
check lv_field $vg/$lv1 stripesize "32.00k"
|
||||
check lv_field $vg/$lv1 regionsize "128.00k"
|
||||
check lv_field $vg/$lv1 reshape_len ""
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Convert raid5_n LV to 1 stripe (2 legs total),
|
||||
# 64k stripesize and 1024k regionsize
|
||||
# FIXME: "--type" superfluous (cli fix needed)
|
||||
lvconvert -y -f --ty raid5_n --stripes 1 -I 64k -R 1024k $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 1
|
||||
check lv_first_seg_field $vg/$lv1 stripes 5
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "1.00m"
|
||||
check lv_first_seg_field $vg/$lv1 reshape_len 10
|
||||
# for slv in {0..4}
|
||||
# do
|
||||
# check lv_first_seg_field $vg/${lv1}_rimage_${slv} reshape_len 2
|
||||
# done
|
||||
aux wait_for_sync $vg $lv1 1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
|
||||
# Remove the now freed legs
|
||||
lvconvert --stripes 1 $vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid5_n"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 1
|
||||
check lv_first_seg_field $vg/$lv1 stripes 2
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "1.00m"
|
||||
check lv_first_seg_field $vg/$lv1 reshape_len 4
|
||||
|
||||
# Convert raid5_n to raid1
|
||||
lvconvert -y --type raid1 $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid1"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 2
|
||||
check lv_first_seg_field $vg/$lv1 stripes 2
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "0"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "1.00m"
|
||||
check lv_first_seg_field $vg/$lv1 reshape_len ""
|
||||
|
||||
# Convert raid5_n -> linear
|
||||
lvconvert -y --type linear $vg/$lv1
|
||||
fsck -fn $DM_DEV_DIR/$vg/$lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "linear"
|
||||
check lv_first_seg_field $vg/$lv1 data_stripes 1
|
||||
check lv_first_seg_field $vg/$lv1 stripes 1
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "0"
|
||||
check lv_first_seg_field $vg/$lv1 regionsize "0"
|
||||
check lv_first_seg_field $vg/$lv1 reshape_len ""
|
||||
|
||||
vgremove -ff $vg
|
||||
207
test/shell/lvconvert-raid-reshape.sh
Normal file
207
test/shell/lvconvert-raid-reshape.sh
Normal file
@@ -0,0 +1,207 @@
|
||||
#!/bin/sh
|
||||
# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
|
||||
#
|
||||
# This copyrighted material is made available to anyone wishing to use,
|
||||
# modify, copy, or redistribute it subject to the terms and conditions
|
||||
# of the GNU General Public License v.2.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA
|
||||
|
||||
SKIP_WITH_LVMLOCKD=1
|
||||
SKIP_WITH_LVMPOLLD=1
|
||||
|
||||
. lib/inittest
|
||||
|
||||
which mkfs.ext4 || skip
|
||||
aux have_raid 1 10 2 || skip
|
||||
|
||||
aux prepare_pvs 65 64
|
||||
|
||||
vgcreate -s 1M $vg $(cat DEVICES)
|
||||
|
||||
function _lvcreate
|
||||
{
|
||||
local level=$1
|
||||
local req_stripes=$2
|
||||
local stripes=$3
|
||||
local size=$4
|
||||
local vg=$5
|
||||
local lv=$6
|
||||
|
||||
lvcreate -y -aey --type $level -i $req_stripes -L $size -n $lv $vg
|
||||
check lv_first_seg_field $vg/$lv segtype "$level"
|
||||
check lv_first_seg_field $vg/$lv datastripes $req_stripes
|
||||
check lv_first_seg_field $vg/$lv stripes $stripes
|
||||
mkfs.ext4 "$DM_DEV_DIR/$vg/$lv"
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv"
|
||||
}
|
||||
|
||||
function _lvconvert
|
||||
{
|
||||
local req_level=$1
|
||||
local level=$2
|
||||
local data_stripes=$3
|
||||
local stripes=$4
|
||||
local vg=$5
|
||||
local lv=$6
|
||||
local region_size=$7
|
||||
local wait_and_check=1
|
||||
local R=""
|
||||
|
||||
[ -n "$region_size" ] && R="-R $region_size"
|
||||
[ "${level:0:7}" = "striped" ] && wait_and_check=0
|
||||
[ "${level:0:5}" = "raid0" ] && wait_and_check=0
|
||||
|
||||
lvconvert -y --ty $req_level $R $vg/$lv
|
||||
[ $? -ne 0 ] && return $?
|
||||
check lv_first_seg_field $vg/$lv segtype "$level"
|
||||
check lv_first_seg_field $vg/$lv data_stripes $data_stripes
|
||||
check lv_first_seg_field $vg/$lv stripes $stripes
|
||||
[ -n "$region_size" ] && check lv_field $vg/$lv regionsize $region_size
|
||||
if [ "$wait_and_check" -eq 1 ]
|
||||
then
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv"
|
||||
aux wait_for_sync $vg $lv
|
||||
fi
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv"
|
||||
}
|
||||
|
||||
function _reshape_layout
|
||||
{
|
||||
local type=$1
|
||||
shift
|
||||
local data_stripes=$1
|
||||
shift
|
||||
local stripes=$1
|
||||
shift
|
||||
local vg=$1
|
||||
shift
|
||||
local lv=$1
|
||||
shift
|
||||
local opts="$*"
|
||||
local ignore_a_chars=0
|
||||
|
||||
[[ "$opts" =~ "--stripes" ]] && ignore_a_chars=1
|
||||
|
||||
lvconvert -vvvv -y --ty $type $opts $vg/$lv
|
||||
check lv_first_seg_field $vg/$lv segtype "$type"
|
||||
check lv_first_seg_field $vg/$lv data_stripes $data_stripes
|
||||
check lv_first_seg_field $vg/$lv stripes $stripes
|
||||
aux wait_for_sync $vg $lv $ignore_a_chars
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv"
|
||||
}
|
||||
|
||||
# Delay leg so that rebuilding status characters
|
||||
# can be read before resync finished too quick.
|
||||
# aux delay_dev "$dev1" 1
|
||||
|
||||
#
|
||||
# Start out with raid5(_ls)
|
||||
#
|
||||
|
||||
# Create 3-way striped raid5 (4 legs total)
|
||||
_lvcreate raid5_ls 3 4 16M $vg $lv1
|
||||
check lv_first_seg_field $vg/$lv1 segtype "raid5_ls"
|
||||
aux wait_for_sync $vg $lv1
|
||||
|
||||
# Reshape it to 256K stripe size
|
||||
_reshape_layout raid5_ls 3 4 $vg $lv1 --stripesize 256K
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "256.00k"
|
||||
|
||||
# Convert raid5(_n) -> striped
|
||||
not _lvconvert striped striped 3 3 $vg $lv1 512k
|
||||
_reshape_layout raid5_n 3 4 $vg $lv1
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid5_n
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1 "" 1
|
||||
|
||||
# Convert raid5_n -> raid5_ls
|
||||
_reshape_layout raid5_ls 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_ls to 5 stripes
|
||||
_reshape_layout raid5_ls 5 6 $vg $lv1 --stripes 5
|
||||
|
||||
# Convert raid5_ls back to 3 stripes
|
||||
_reshape_layout raid5_ls 3 6 $vg $lv1 --stripes 3 --force
|
||||
_reshape_layout raid5_ls 3 4 $vg $lv1 --stripes 3
|
||||
|
||||
# Convert raid5_ls to 7 stripes
|
||||
_reshape_layout raid5_ls 7 8 $vg $lv1 --stripes 7
|
||||
|
||||
# Convert raid5_ls to 9 stripes
|
||||
_reshape_layout raid5_ls 9 10 $vg $lv1 --stripes 9
|
||||
|
||||
# Convert raid5_ls to 14 stripes
|
||||
_reshape_layout raid5_ls 14 15 $vg $lv1 --stripes 14
|
||||
|
||||
# Convert raid5_ls to 63 stripes
|
||||
_reshape_layout raid5_ls 63 64 $vg $lv1 --stripes 63
|
||||
|
||||
# Convert raid5_ls back to 27 stripes
|
||||
_reshape_layout raid5_ls 27 64 $vg $lv1 --stripes 27 --force
|
||||
_reshape_layout raid5_ls 27 28 $vg $lv1 --stripes 27
|
||||
|
||||
# Convert raid5_ls back to 4 stripes
|
||||
_reshape_layout raid5_ls 4 28 $vg $lv1 --stripes 4 --force
|
||||
_reshape_layout raid5_ls 4 5 $vg $lv1 --stripes 4
|
||||
|
||||
# Convert raid5_ls back to 3 stripes
|
||||
_reshape_layout raid5_ls 3 5 $vg $lv1 --stripes 3 --force
|
||||
_reshape_layout raid5_ls 3 4 $vg $lv1 --stripes 3
|
||||
|
||||
# Convert raid5_ls -> raid5_rs
|
||||
_reshape_layout raid5_rs 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_rs -> raid5_la
|
||||
_reshape_layout raid5_la 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_la -> raid5_ra
|
||||
_reshape_layout raid5_ra 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_ra -> raid6_ra_6
|
||||
_lvconvert raid6_ra_6 raid6_ra_6 3 5 $vg $lv1 "4.00m" 1
|
||||
|
||||
# Convert raid5_la -> raid6(_zr)
|
||||
_reshape_layout raid6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6(_zr) -> raid6_nc
|
||||
_reshape_layout raid6_nc 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6(_nc) -> raid6_nr
|
||||
_reshape_layout raid6_nr 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_nr) -> raid6_rs_6
|
||||
_reshape_layout raid6_rs_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_rs_6 to 5 stripes
|
||||
_reshape_layout raid6_rs_6 5 7 $vg $lv1 --stripes 5
|
||||
|
||||
# Convert raid6_rs_6 to 4 stripes
|
||||
_reshape_layout raid6_rs_6 4 7 $vg $lv1 --stripes 4 --force
|
||||
_reshape_layout raid6_rs_6 4 6 $vg $lv1 --stripes 4
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "256.00k"
|
||||
|
||||
# Convert raid6_rs_6 to raid6_n_6
|
||||
_reshape_layout raid6_n_6 4 6 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> striped
|
||||
_lvconvert striped striped 4 4 $vg $lv1
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "256.00k"
|
||||
|
||||
# Convert striped -> raid10(_near)
|
||||
_lvconvert raid10 raid10 4 8 $vg $lv1
|
||||
|
||||
# Convert raid10 to 10 stripes and 64K stripesize
|
||||
# FIXME: change once we support odd numbers of raid10 stripes
|
||||
not _reshape_layout raid10 4 9 $vg $lv1 --stripes 9 --stripesize 64K
|
||||
_reshape_layout raid10 5 10 $vg $lv1 --stripes 10 --stripesize 64K
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
|
||||
# Convert raid6_n_6 -> striped
|
||||
_lvconvert striped striped 5 5 $vg $lv1
|
||||
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
|
||||
|
||||
vgremove -ff $vg
|
||||
@@ -33,6 +33,7 @@ function _lvcreate
|
||||
|
||||
lvcreate -y -aey --type $level -i $req_stripes -L $size -n $lv $vg
|
||||
check lv_field $vg/$lv segtype "$level"
|
||||
check lv_field $vg/$lv data_stripes $req_stripes
|
||||
check lv_field $vg/$lv stripes $stripes
|
||||
mkfs.ext4 "$DM_DEV_DIR/$vg/$lv"
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv"
|
||||
@@ -42,10 +43,11 @@ function _lvconvert
|
||||
{
|
||||
local req_level=$1
|
||||
local level=$2
|
||||
local stripes=$3
|
||||
local vg=$4
|
||||
local lv=$5
|
||||
local region_size=$6
|
||||
local data_stripes=$3
|
||||
local stripes=$4
|
||||
local vg=$5
|
||||
local lv=$6
|
||||
local region_size=$7
|
||||
local wait_and_check=1
|
||||
local R=""
|
||||
|
||||
@@ -56,6 +58,7 @@ function _lvconvert
|
||||
lvconvert -y --ty $req_level $R $vg/$lv
|
||||
[ $? -ne 0 ] && return $?
|
||||
check lv_field $vg/$lv segtype "$level"
|
||||
check lv_field $vg/$lv data_stripes $data_stripes
|
||||
check lv_field $vg/$lv stripes $stripes
|
||||
if [ "$wait_and_check" -eq 1 ]
|
||||
then
|
||||
@@ -70,19 +73,19 @@ function _invalid_raid5_conversions
|
||||
local vg=$1
|
||||
local lv=$2
|
||||
|
||||
not _lvconvert striped 4 $vg $lv1
|
||||
not _lvconvert raid0 raid0 4 $vg $lv1
|
||||
not _lvconvert raid0_meta raid0_meta 4 $vg $lv1
|
||||
not _lvconvert raid4 raid4 5 $vg $lv1
|
||||
not _lvconvert raid5_ls raid5_ls 5 $vg $lv1
|
||||
not _lvconvert raid5_rs raid5_rs 5 $vg $lv1
|
||||
not _lvconvert raid5_la raid5_la 5 $vg $lv1
|
||||
not _lvconvert raid5_ra raid5_ra 5 $vg $lv1
|
||||
not _lvconvert raid6_zr raid6_zr 6 $vg $lv1
|
||||
not _lvconvert raid6_nr raid6_nr 6 $vg $lv1
|
||||
not _lvconvert raid6_nc raid6_nc 6 $vg $lv1
|
||||
not _lvconvert raid6_n_6 raid6_n_6 6 $vg $lv1
|
||||
not _lvconvert raid6 raid6_n_6 6 $vg $lv1
|
||||
not _lvconvert striped 4 4 $vg $lv1
|
||||
not _lvconvert raid0 raid0 4 4 $vg $lv1
|
||||
not _lvconvert raid0_meta raid0_meta 4 4 $vg $lv1
|
||||
not _lvconvert raid4 raid4 4 5 $vg $lv1
|
||||
not _lvconvert raid5_ls raid5_ls 4 5 $vg $lv1
|
||||
not _lvconvert raid5_rs raid5_rs 4 5 $vg $lv1
|
||||
not _lvconvert raid5_la raid5_la 4 5 $vg $lv1
|
||||
not _lvconvert raid5_ra raid5_ra 4 5 $vg $lv1
|
||||
not _lvconvert raid6_zr raid6_zr 4 6 $vg $lv1
|
||||
not _lvconvert raid6_nr raid6_nr 4 6 $vg $lv1
|
||||
not _lvconvert raid6_nc raid6_nc 4 6 $vg $lv1
|
||||
not _lvconvert raid6_n_6 raid6_n_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6 raid6_n_6 4 6 $vg $lv1
|
||||
}
|
||||
|
||||
# Delayst leg so that rebuilding status characters
|
||||
@@ -117,8 +120,7 @@ fsck -fn "$DM_DEV_DIR/$vg/$lv1"
|
||||
lvconvert -m 4 -R 128K $vg/$lv1
|
||||
check lv_field $vg/$lv1 segtype "raid1"
|
||||
check lv_field $vg/$lv1 stripes 5
|
||||
# FIXME: once lv_raid_chanage_image_count() supports region_size changes
|
||||
not check lv_field $vg/$lv1 regionsize "128.00k"
|
||||
check lv_field $vg/$lv1 regionsize "128.00k"
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv1"
|
||||
aux wait_for_sync $vg $lv1
|
||||
fsck -fn "$DM_DEV_DIR/$vg/$lv1"
|
||||
@@ -163,110 +165,116 @@ _lvcreate raid4 3 4 8M $vg $lv1
|
||||
aux wait_for_sync $vg $lv1
|
||||
|
||||
# Convert raid4 -> striped
|
||||
not _lvconvert striped striped 3 $vg $lv1 512k
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
not _lvconvert striped striped 3 3 $vg $lv1 512k
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1 64k
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1 64k
|
||||
check lv_field $vg/$lv1 regionsize "64.00k"
|
||||
|
||||
# Convert raid4 -> raid5_n
|
||||
_lvconvert raid5 raid5_n 4 $vg $lv1 128k
|
||||
_lvconvert raid5 raid5_n 3 4 $vg $lv1 128k
|
||||
check lv_field $vg/$lv1 regionsize "128.00k"
|
||||
|
||||
# Convert raid5_n -> striped
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid5_n
|
||||
_lvconvert raid5_n raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_n -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> raid0
|
||||
_lvconvert raid0 raid0 3 $vg $lv1
|
||||
_lvconvert raid0 raid0 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0 -> raid5_n
|
||||
_lvconvert raid5_n raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_n -> raid0_meta
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid5_n
|
||||
_lvconvert raid5 raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5 raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> raid0_meta
|
||||
not _lvconvert raid0_meta raid0_meta 3 $vg $lv1 256k
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
not _lvconvert raid0_meta raid0_meta 3 3 $vg $lv1 256k
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> raid0
|
||||
_lvconvert raid0 raid0 3 $vg $lv1
|
||||
_lvconvert raid0 raid0 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0 -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> striped
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid6_n_6
|
||||
_lvconvert raid6_n_6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> striped
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid6_n_6
|
||||
_lvconvert raid6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> raid5_n
|
||||
_lvconvert raid5_n raid5_n 4 $vg $lv1
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_n -> raid6_n_6
|
||||
_lvconvert raid6_n_6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> raid4
|
||||
_lvconvert raid4 raid4 4 $vg $lv1
|
||||
_lvconvert raid4 raid4 3 4 $vg $lv1
|
||||
|
||||
# Convert raid4 -> raid6_n_6
|
||||
_lvconvert raid6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> raid0
|
||||
_lvconvert raid0 raid0 3 $vg $lv1
|
||||
_lvconvert raid0 raid0 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0 -> raid6_n_6
|
||||
_lvconvert raid6_n_6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6_n_6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> raid0_meta
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid6_n_6
|
||||
_lvconvert raid6 raid6_n_6 5 $vg $lv1
|
||||
_lvconvert raid6 raid6_n_6 3 5 $vg $lv1
|
||||
|
||||
# Convert raid6_n_6 -> striped
|
||||
not _lvconvert striped striped 3 $vg $lv1 128k
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
not _lvconvert striped striped 3 3 $vg $lv1 128k
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Convert striped -> raid10
|
||||
_lvconvert raid10 raid10 6 $vg $lv1
|
||||
_lvconvert raid10 raid10 3 6 $vg $lv1
|
||||
|
||||
# Convert raid10 -> raid0
|
||||
not _lvconvert raid0 raid0 3 $vg $lv1 64k
|
||||
_lvconvert raid0 raid0 3 $vg $lv1
|
||||
not _lvconvert raid0 raid0 3 3 $vg $lv1 64k
|
||||
_lvconvert raid0 raid0 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0 -> raid10
|
||||
_lvconvert raid10 raid10 6 $vg $lv1
|
||||
_lvconvert raid10 raid10 3 6 $vg $lv1
|
||||
|
||||
# Convert raid10 -> raid0
|
||||
_lvconvert raid0_meta raid0_meta 3 $vg $lv1
|
||||
# Convert raid10 -> raid0_meta
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid5
|
||||
_lvconvert raid5_n raid5_n 3 4 $vg $lv1
|
||||
|
||||
# Convert raid5_n -> raid0_meta
|
||||
_lvconvert raid0_meta raid0_meta 3 3 $vg $lv1
|
||||
|
||||
# Convert raid0_meta -> raid10
|
||||
_lvconvert raid10 raid10 6 $vg $lv1
|
||||
_lvconvert raid10 raid10 3 6 $vg $lv1
|
||||
|
||||
# Convert raid10 -> striped
|
||||
not _lvconvert striped striped 3 $vg $lv1 256k
|
||||
_lvconvert striped striped 3 $vg $lv1
|
||||
not _lvconvert striped striped 3 3 $vg $lv1 256k
|
||||
_lvconvert striped striped 3 3 $vg $lv1
|
||||
|
||||
# Clean up
|
||||
lvremove -y $vg
|
||||
@@ -275,51 +283,51 @@ lvremove -y $vg
|
||||
_lvcreate raid5 4 5 8M $vg $lv1
|
||||
aux wait_for_sync $vg $lv1
|
||||
_invalid_raid5_conversions $vg $lv1
|
||||
not _lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
|
||||
not _lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
|
||||
_lvconvert raid6_ls_6 raid6_ls_6 6 $vg $lv1
|
||||
_lvconvert raid5_ls raid5_ls 5 $vg $lv1
|
||||
not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
|
||||
_lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1
|
||||
_lvconvert raid5_ls raid5_ls 4 5 $vg $lv1
|
||||
lvremove -y $vg
|
||||
|
||||
_lvcreate raid5_ls 4 5 8M $vg $lv1
|
||||
aux wait_for_sync $vg $lv1
|
||||
_invalid_raid5_conversions $vg $lv1
|
||||
not _lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
|
||||
not _lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
|
||||
_lvconvert raid6_ls_6 raid6_ls_6 6 $vg $lv1
|
||||
_lvconvert raid5_ls raid5_ls 5 $vg $lv1
|
||||
not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
|
||||
_lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1
|
||||
_lvconvert raid5_ls raid5_ls 4 5 $vg $lv1
|
||||
lvremove -y $vg
|
||||
|
||||
_lvcreate raid5_rs 4 5 8M $vg $lv1
|
||||
aux wait_for_sync $vg $lv1
|
||||
_invalid_raid5_conversions $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
|
||||
not _lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
|
||||
_lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
|
||||
_lvconvert raid5_rs raid5_rs 5 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
|
||||
_lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
|
||||
_lvconvert raid5_rs raid5_rs 4 5 $vg $lv1
|
||||
lvremove -y $vg
|
||||
|
||||
_lvcreate raid5_la 4 5 8M $vg $lv1
|
||||
aux wait_for_sync $vg $lv1
|
||||
_invalid_raid5_conversions $vg $lv1
|
||||
not _lvconvert raid6_ls_6 raid6_ls_6 6 $vg $lv1
|
||||
not _lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
|
||||
_lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
|
||||
_lvconvert raid5_la raid5_la 5 $vg $lv1
|
||||
not _lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
|
||||
_lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
|
||||
_lvconvert raid5_la raid5_la 4 5 $vg $lv1
|
||||
lvremove -y $vg
|
||||
|
||||
_lvcreate raid5_ra 4 5 8M $vg $lv1
|
||||
aux wait_for_sync $vg $lv1
|
||||
_invalid_raid5_conversions $vg $lv1
|
||||
not _lvconvert raid6_ls_6 raid6_ls_6 6 $vg $lv1
|
||||
not _lvconvert raid6_rs_6 raid6_rs_6 6 $vg $lv1
|
||||
not _lvconvert raid6_la_6 raid6_la_6 6 $vg $lv1
|
||||
_lvconvert raid6_ra_6 raid6_ra_6 6 $vg $lv1
|
||||
_lvconvert raid5_ra raid5_ra 5 $vg $lv1
|
||||
not _lvconvert raid6_ls_6 raid6_ls_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_rs_6 raid6_rs_6 4 6 $vg $lv1
|
||||
not _lvconvert raid6_la_6 raid6_la_6 4 6 $vg $lv1
|
||||
_lvconvert raid6_ra_6 raid6_ra_6 4 6 $vg $lv1
|
||||
_lvconvert raid5_ra raid5_ra 4 5 $vg $lv1
|
||||
lvremove -y $vg
|
||||
|
||||
else
|
||||
|
||||
77
tools/args.h
77
tools/args.h
@@ -109,10 +109,10 @@ arg(cachemode_ARG, '\0', "cachemode", cachemode_VAL, 0, 0,
|
||||
"been stored in both the cache pool and on the origin LV.\n"
|
||||
"While writethrough may be slower for writes, it is more\n"
|
||||
"resilient if something should happen to a device associated with the\n"
|
||||
"cache pool LV. With writethrough, all reads are served\n"
|
||||
"cache pool LV. With \\fBpassthrough\\fP, all reads are served\n"
|
||||
"from the origin LV (all reads miss the cache) and all writes are\n"
|
||||
"forwarded to the origin LV; additionally, write hits cause cache\n"
|
||||
"block invalidates. See lvmcache(7) for more information.\n")
|
||||
"block invalidates. See \\fBlvmcache\\fP(7) for more information.\n")
|
||||
|
||||
arg(cachepool_ARG, '\0', "cachepool", lv_VAL, 0, 0,
|
||||
"The name of a cache pool LV.\n")
|
||||
@@ -414,8 +414,15 @@ arg(pooldatasize_ARG, '\0', "pooldatasize", sizemb_VAL, 0, 0, NULL)
|
||||
arg(poolmetadata_ARG, '\0', "poolmetadata", lv_VAL, 0, 0,
|
||||
"The name of a an LV to use for storing pool metadata.\n")
|
||||
|
||||
arg(poolmetadatasize_ARG, '\0', "poolmetadatasize", sizemb_VAL, 0, 0,
|
||||
"The size of the pool metadata LV created by the command.\n")
|
||||
arg(poolmetadatasize_ARG, '\0', "poolmetadatasize", ssizemb_VAL, 0, 0,
|
||||
"#lvcreate\n"
|
||||
"#lvconvert\n"
|
||||
"Specifies the size of the new pool metadata LV.\n"
|
||||
"#lvresize\n"
|
||||
"#lvextend\n"
|
||||
"Specifies the new size of the pool metadata LV.\n"
|
||||
"The plus prefix \\fB+\\fP can be used, in which case\n"
|
||||
"the value is added to the current size.\n")
|
||||
|
||||
arg(poolmetadataspare_ARG, '\0', "poolmetadataspare", bool_VAL, 0, 0,
|
||||
"Enable or disable the automatic creation and management of a\n"
|
||||
@@ -693,7 +700,7 @@ arg(unquoted_ARG, '\0', "unquoted", 0, 0, 0,
|
||||
"pairs are not quoted.\n")
|
||||
|
||||
arg(usepolicies_ARG, '\0', "usepolicies", 0, 0, 0,
|
||||
"Perform an operation according to the policy configured in lvm.conf.\n"
|
||||
"Perform an operation according to the policy configured in lvm.conf\n"
|
||||
"or a profile.\n")
|
||||
|
||||
arg(validate_ARG, '\0', "validate", 0, 0, 0,
|
||||
@@ -807,8 +814,8 @@ arg(activate_ARG, 'a', "activate", activation_VAL, 0, 0,
|
||||
"if the list is set but empty, no LVs match.\n"
|
||||
"Autoactivation should be used during system boot to make it possible\n"
|
||||
"to select which LVs should be automatically activated by the system.\n"
|
||||
"See lvmlockd(8) for more information about activation options for shared VGs.\n"
|
||||
"See clvmd(8) for more information about activation options for clustered VGs.\n"
|
||||
"See lvmlockd(8) for more information about activation options \\fBey\\fP and \\fBsy\\fP for shared VGs.\n"
|
||||
"See clvmd(8) for more information about activation options \\fBey\\fP, \\fBsy\\fP, \\fBly\\fP and \\fBln\\fP for clustered VGs.\n"
|
||||
"#lvcreate\n"
|
||||
"Controls the active state of the new LV.\n"
|
||||
"\\fBy\\fP makes the LV active, or available.\n"
|
||||
@@ -967,15 +974,15 @@ arg(stripes_ARG, 'i', "stripes", number_VAL, 0, 0,
|
||||
"Specifies the number of stripes in a striped LV. This is the number of\n"
|
||||
"PVs (devices) that a striped LV is spread across. Data that\n"
|
||||
"appears sequential in the LV is spread across multiple devices in units of\n"
|
||||
"the stripe size (see --stripesize). This does not apply to\n"
|
||||
"existing allocated space, only newly allocated space can be striped.\n"
|
||||
"the stripe size (see --stripesize). This does not change existing\n"
|
||||
"allocated space, but only applies to space being allocated by the command.\n"
|
||||
"When creating a RAID 4/5/6 LV, this number does not include the extra\n"
|
||||
"devices that are required for parity. The largest number depends on\n"
|
||||
"the RAID type (raid0: 64, raid10: 32, raid4/5: 63, raid6: 62.)\n"
|
||||
"When unspecified, the default depends on the RAID type\n"
|
||||
"the RAID type (raid0: 64, raid10: 32, raid4/5: 63, raid6: 62), and\n"
|
||||
"when unspecified, the default depends on the RAID type\n"
|
||||
"(raid0: 2, raid10: 4, raid4/5: 3, raid6: 5.)\n"
|
||||
"When unspecified, to stripe across all PVs of the VG,\n"
|
||||
"set lvm.conf allocation/raid_stripe_all_devices=1.\n")
|
||||
"To stripe a new raid LV across all PVs by default,\n"
|
||||
"see lvm.conf allocation/raid_stripe_all_devices.\n")
|
||||
|
||||
arg(stripesize_ARG, 'I', "stripesize", sizekb_VAL, 0, 0,
|
||||
"The amount of data that is written to one device before\n"
|
||||
@@ -987,7 +994,7 @@ arg(logicalvolume_ARG, 'l', "logicalvolume", uint32_VAL, 0, 0,
|
||||
arg(maxlogicalvolumes_ARG, 'l', "maxlogicalvolumes", uint32_VAL, 0, 0,
|
||||
"Sets the maximum number of LVs allowed in a VG.\n")
|
||||
|
||||
arg(extents_ARG, 'l', "extents", numsignedper_VAL, 0, 0,
|
||||
arg(extents_ARG, 'l', "extents", extents_VAL, 0, 0,
|
||||
"#lvcreate\n"
|
||||
"Specifies the size of the new LV in logical extents.\n"
|
||||
"The --size and --extents options are alternate methods of specifying size.\n"
|
||||
@@ -1022,10 +1029,9 @@ arg(extents_ARG, 'l', "extents", numsignedper_VAL, 0, 0,
|
||||
"When expressed as a percentage, the size defines an upper limit for the\n"
|
||||
"number of logical extents in the new LV. The precise number of logical\n"
|
||||
"extents in the new LV is not determined until the command has completed.\n"
|
||||
"The plus prefix \\fB+\\fP can be used, in which case\n"
|
||||
"the value is added to the current size,\n"
|
||||
"or the minus prefix \\fB-\\fP can be used, in which case\n"
|
||||
"the value is subtracted from the current size.\n")
|
||||
"The plus \\fB+\\fP or minus \\fB-\\fP prefix can be used, in which case\n"
|
||||
"the value is not an absolute size, but is an amount added or subtracted\n"
|
||||
"relative to the current size.\n")
|
||||
|
||||
arg(list_ARG, 'l', "list", 0, 0, 0,
|
||||
"#lvmconfig\n"
|
||||
@@ -1042,18 +1048,20 @@ arg(list_ARG, 'l', "list", 0, 0, 0,
|
||||
arg(lvmpartition_ARG, 'l', "lvmpartition", 0, 0, 0,
|
||||
"Only report PVs.\n")
|
||||
|
||||
arg(size_ARG, 'L', "size", sizemb_VAL, 0, 0,
|
||||
/*
|
||||
* FIXME: for lvcreate, size only accepts absolute values, no +|-,
|
||||
* for lvresize, size can relative +|-, for lvreduce, size
|
||||
* can be relative -, and for lvextend, size can be relative +.
|
||||
* Should we define separate val enums for each of those cases,
|
||||
* and at the start of the command, change the val type for
|
||||
* size_ARG? The same for extents_ARG.
|
||||
*/
|
||||
arg(size_ARG, 'L', "size", ssizemb_VAL, 0, 0,
|
||||
"#lvcreate\n"
|
||||
"Specifies the size of the new LV.\n"
|
||||
"The --size and --extents options are alternate methods of specifying size.\n"
|
||||
"The total number of physical extents used will be\n"
|
||||
"greater when redundant data is needed for RAID levels.\n"
|
||||
"A suffix can be chosen from: \\fBbBsSkKmMgGtTpPeE\\fP.\n"
|
||||
"All units are base two values, regardless of letter capitalization:\n"
|
||||
"b|B is bytes, s|S is sectors of 512 bytes,\n"
|
||||
"k|K is kilobytes, m|M is megabytes,\n"
|
||||
"g|G is gigabytes, t|T is terabytes,\n"
|
||||
"p|P is petabytes, e|E is exabytes.\n"
|
||||
"#lvreduce\n"
|
||||
"#lvextend\n"
|
||||
"#lvresize\n"
|
||||
@@ -1061,12 +1069,6 @@ arg(size_ARG, 'L', "size", sizemb_VAL, 0, 0,
|
||||
"The --size and --extents options are alternate methods of specifying size.\n"
|
||||
"The total number of physical extents used will be\n"
|
||||
"greater when redundant data is needed for RAID levels.\n"
|
||||
"A suffix can be chosen from: \\fBbBsSkKmMgGtTpPeE\\fP.\n"
|
||||
"All units are base two values, regardless of letter capitalization:\n"
|
||||
"b|B is bytes, s|S is sectors of 512 bytes,\n"
|
||||
"k|K is kilobytes, m|M is megabytes,\n"
|
||||
"g|G is gigabytes, t|T is terabytes,\n"
|
||||
"p|P is petabytes, e|E is exabytes.\n"
|
||||
"The plus prefix \\fB+\\fP can be used, in which case\n"
|
||||
"the value is added to the current size,\n"
|
||||
"or the minus prefix \\fB-\\fP can be used, in which case\n"
|
||||
@@ -1104,7 +1106,7 @@ arg(maps_ARG, 'm', "maps", 0, 0, 0,
|
||||
|
||||
/* FIXME: should the unused mirrors option be removed from lvextend? */
|
||||
|
||||
arg(mirrors_ARG, 'm', "mirrors", numsigned_VAL, 0, 0,
|
||||
arg(mirrors_ARG, 'm', "mirrors", snumber_VAL, 0, 0,
|
||||
"#lvcreate\n"
|
||||
"Specifies the number of mirror images in addition to the original LV\n"
|
||||
"image, e.g. --mirrors 1 means there are two images of the data, the\n"
|
||||
@@ -1230,7 +1232,9 @@ arg(resizefs_ARG, 'r', "resizefs", 0, 0, 0,
|
||||
arg(reset_ARG, 'R', "reset", 0, 0, 0, NULL)
|
||||
|
||||
arg(regionsize_ARG, 'R', "regionsize", regionsize_VAL, 0, 0,
|
||||
"Size of each raid or mirror synchronization region.\n")
|
||||
"Size of each raid or mirror synchronization region.\n"
|
||||
"lvm.conf activation/raid_region_size can be used to\n"
|
||||
"configure a default.\n")
|
||||
|
||||
arg(physicalextentsize_ARG, 's', "physicalextentsize", sizemb_VAL, 0, 0,
|
||||
"#vgcreate\n"
|
||||
@@ -1295,9 +1299,10 @@ arg(stdin_ARG, 's', "stdin", 0, 0, 0, NULL)
|
||||
|
||||
arg(select_ARG, 'S', "select", string_VAL, ARG_GROUPABLE, 0,
|
||||
"Select objects for processing and reporting based on specified criteria.\n"
|
||||
"The criteria syntax is described in lvmreport(7) under Selection.\n"
|
||||
"For reporting commands, display rows that match the criteria.\n"
|
||||
"All rows can be displayed with an additional \"selected\" field (-o selected)\n"
|
||||
"The criteria syntax is described by \\fB--select help\\fP and \\fBlvmreport\\fP(7).\n"
|
||||
"For reporting commands, one row is displayed for each object matching the criteria.\n"
|
||||
"See \\fB--options help\\fP for selectable object fields.\n"
|
||||
"Rows can be displayed with an additional \"selected\" field (-o selected)\n"
|
||||
"showing 1 if the row matches the selection and 0 otherwise.\n"
|
||||
"For non-reporting commands which process LVM entities, the selection is\n"
|
||||
"used to choose items to process.\n")
|
||||
|
||||
@@ -307,9 +307,12 @@ RULE: all not LV_thinpool LV_cachepool
|
||||
OO_LVCONVERT_RAID: --mirrors SNumber, --stripes_long Number,
|
||||
--stripesize SizeKB, --regionsize RegionSize, --interval Number
|
||||
|
||||
OO_LVCONVERT_POOL: --poolmetadata LV, --poolmetadatasize SizeMB,
|
||||
OO_LVCONVERT_POOL: --poolmetadata LV, --poolmetadatasize SSizeMB,
|
||||
--poolmetadataspare Bool, --readahead Readahead, --chunksize SizeKB
|
||||
|
||||
OO_LVCONVERT_CACHE: --cachemode CacheMode, --cachepolicy String,
|
||||
--cachesettings String, --zero Bool
|
||||
|
||||
OO_LVCONVERT: --alloc Alloc, --background, --force, --noudevsync
|
||||
|
||||
---
|
||||
@@ -335,14 +338,19 @@ lvconvert --type mirror LV
|
||||
OO: OO_LVCONVERT_RAID, OO_LVCONVERT, --mirrorlog MirrorLog
|
||||
OP: PV ...
|
||||
ID: lvconvert_raid_types
|
||||
DESC: Convert LV to type mirror (also see type raid1).
|
||||
DESC: Convert LV to type mirror (also see type raid1),
|
||||
DESC: (also see lvconvert --mirrors).
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# When LV is already raid, this changes the raid layout
|
||||
# (changing layout of raid0 and raid1 not allowed.)
|
||||
|
||||
lvconvert --type raid LV
|
||||
OO: OO_LVCONVERT_RAID, OO_LVCONVERT
|
||||
OP: PV ...
|
||||
ID: lvconvert_raid_types
|
||||
DESC: Convert LV to raid.
|
||||
DESC: Convert LV to raid or change raid layout.
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
|
||||
lvconvert --mirrors SNumber LV
|
||||
@@ -352,12 +360,28 @@ ID: lvconvert_raid_types
|
||||
DESC: Convert LV to raid1 or mirror, or change number of mirror images.
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
|
||||
lvconvert --stripes_long Number LV_raid
|
||||
OO: OO_LVCONVERT, --interval Number, --regionsize RegionSize, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
ID: lvconvert_raid_types
|
||||
DESC: Convert raid LV to change number of stripe images.
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
RULE: all not LV_raid0 LV_raid1
|
||||
|
||||
lvconvert --stripesize SizeKB LV_raid
|
||||
OO: OO_LVCONVERT, --interval Number, --regionsize RegionSize
|
||||
ID: lvconvert_raid_types
|
||||
DESC: Convert raid LV to change the stripe size.
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
RULE: all not LV_raid0 LV_raid1
|
||||
|
||||
lvconvert --regionsize RegionSize LV_raid
|
||||
OO: OO_LVCONVERT
|
||||
ID: lvconvert_change_region_size
|
||||
DESC: Change the region size of an LV.
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
RULE: all not LV_raid0
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -390,6 +414,7 @@ OP: PV ...
|
||||
ID: lvconvert_change_mirrorlog
|
||||
DESC: Change the type of mirror log used by a mirror LV.
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -407,8 +432,8 @@ RULE: all not lv_is_locked
|
||||
lvconvert --thin --thinpool LV LV_linear_striped_raid_cache
|
||||
OO: --type thin, --originname LV_new, --zero Bool, OO_LVCONVERT_POOL, OO_LVCONVERT
|
||||
ID: lvconvert_to_thin_with_external
|
||||
DESC: Convert LV to a thin LV, using the original LV as an external origin.
|
||||
DESC: (variant, infers --type thin).
|
||||
DESC: Convert LV to a thin LV, using the original LV as an external origin
|
||||
DESC: (infers --type thin).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
RULE: all and lv_is_visible
|
||||
RULE: all not lv_is_locked
|
||||
@@ -416,20 +441,18 @@ RULE: all not lv_is_locked
|
||||
---
|
||||
|
||||
lvconvert --type cache --cachepool LV LV_linear_striped_raid_thinpool
|
||||
OO: --cache, --cachemode CacheMode, --cachepolicy String,
|
||||
--cachesettings String, --zero Bool, OO_LVCONVERT_POOL, OO_LVCONVERT
|
||||
OO: --cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
|
||||
ID: lvconvert_to_cache_vol
|
||||
DESC: Convert LV to type cache.
|
||||
RULE: all and lv_is_visible
|
||||
|
||||
# alternate form of lvconvert --type cache
|
||||
lvconvert --cache --cachepool LV LV_linear_striped_raid_thinpool
|
||||
OO: --type cache, --cachemode CacheMode, --cachepolicy String,
|
||||
--cachesettings String, --zero Bool, OO_LVCONVERT_POOL, OO_LVCONVERT
|
||||
OO: --type cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
|
||||
ID: lvconvert_to_cache_vol
|
||||
DESC: Convert LV to type cache (variant, infers --type cache).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
DESC: Convert LV to type cache (infers --type cache).
|
||||
RULE: all and lv_is_visible
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -476,8 +499,7 @@ FLAGS: PREVIOUS_SYNTAX
|
||||
---
|
||||
|
||||
lvconvert --type cache-pool LV_linear_striped_raid
|
||||
OO: OO_LVCONVERT_POOL, OO_LVCONVERT,
|
||||
--cachemode CacheMode, --cachepolicy String, --cachesettings String
|
||||
OO: OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
|
||||
OP: PV ...
|
||||
ID: lvconvert_to_cachepool
|
||||
DESC: Convert LV to type cache-pool.
|
||||
@@ -505,8 +527,7 @@ DESC: Convert LV to type cache-pool.
|
||||
# of creating a pool or swapping metadata should be used.
|
||||
|
||||
lvconvert --cachepool LV_linear_striped_raid_cachepool
|
||||
OO: --type cache-pool, OO_LVCONVERT_POOL, OO_LVCONVERT,
|
||||
--cachemode CacheMode, --cachepolicy String, --cachesettings String
|
||||
OO: --type cache-pool, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
|
||||
OP: PV ...
|
||||
ID: lvconvert_to_cachepool_or_swap_metadata
|
||||
DESC: Convert LV to type cache-pool (variant, use --type cache-pool).
|
||||
@@ -526,6 +547,7 @@ lvconvert --uncache LV_cache_thinpool
|
||||
OO: OO_LVCONVERT
|
||||
ID: lvconvert_split_and_remove_cachepool
|
||||
DESC: Separate and delete the cache pool from a cache LV.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -533,6 +555,7 @@ lvconvert --swapmetadata --poolmetadata LV LV_thinpool_cachepool
|
||||
OO: --chunksize SizeKB, OO_LVCONVERT
|
||||
ID: lvconvert_swap_pool_metadata
|
||||
DESC: Swap metadata LV in a thin pool or cache pool (for repair only).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -580,6 +603,7 @@ OO: OO_LVCONVERT
|
||||
ID: lvconvert_split_cow_snapshot
|
||||
DESC: Separate a COW snapshot from its origin LV.
|
||||
RULE: all not lv_is_locked lv_is_pvmove lv_is_origin lv_is_external_origin lv_is_merging_cow
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -597,9 +621,9 @@ OO: --snapshot, --chunksize SizeKB, --zero Bool, OO_LVCONVERT
|
||||
ID: lvconvert_combine_split_snapshot
|
||||
DESC: Combine a former COW snapshot (second arg) with a former
|
||||
DESC: origin LV (first arg) to reverse a splitsnapshot command.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
RULE: all and lv_is_visible
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
lvconvert --snapshot LV LV_linear
|
||||
OO: --type snapshot, --chunksize SizeKB, --zero Bool, OO_LVCONVERT
|
||||
@@ -608,6 +632,7 @@ DESC: Combine a former COW snapshot (second arg) with a former
|
||||
DESC: origin LV (first arg) to reverse a splitsnapshot command.
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
RULE: all and lv_is_visible
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -640,7 +665,7 @@ lvconvert --replace PV LV_raid
|
||||
OO: OO_LVCONVERT
|
||||
OP: PV ...
|
||||
ID: lvconvert_replace_pv
|
||||
DESC: Replace specific PV(s) in a raid* LV with another PV.
|
||||
DESC: Replace specific PV(s) in a raid LV with another PV.
|
||||
RULE: all not lv_is_locked lv_is_pvmove
|
||||
|
||||
---
|
||||
@@ -648,7 +673,7 @@ RULE: all not lv_is_locked lv_is_pvmove
|
||||
# This command just (re)starts the polling process on the LV
|
||||
# to continue a previous conversion.
|
||||
|
||||
lvconvert --startpoll LV_mirror
|
||||
lvconvert --startpoll LV_mirror_raid
|
||||
OO: OO_LVCONVERT
|
||||
ID: lvconvert_start_poll
|
||||
DESC: Poll LV to continue conversion.
|
||||
@@ -656,10 +681,10 @@ RULE: all and lv_is_converting
|
||||
|
||||
# alternate form of lvconvert --startpoll, this is only kept
|
||||
# for compat since this was how it used to be done.
|
||||
lvconvert LV_mirror
|
||||
lvconvert LV_mirror_raid
|
||||
OO: OO_LVCONVERT
|
||||
ID: lvconvert_start_poll
|
||||
DESC: Poll LV to continue conversion.
|
||||
DESC: Poll LV to continue conversion (also see --startpoll).
|
||||
RULE: all and lv_is_converting
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
@@ -674,9 +699,10 @@ OO_LVCREATE: --addtag Tag, --alloc Alloc, --autobackup Bool, --activate Active,
|
||||
--reportformat ReportFmt, --setactivationskip Bool, --wipesignatures Bool,
|
||||
--zero Bool
|
||||
|
||||
OO_LVCREATE_CACHE: --cachemode CacheMode, --cachepolicy String, --cachesettings String
|
||||
OO_LVCREATE_CACHE: --cachemode CacheMode, --cachepolicy String, --cachesettings String,
|
||||
--chunksize SizeKB
|
||||
|
||||
OO_LVCREATE_POOL: --poolmetadatasize SizeMB, --poolmetadataspare Bool, --chunksize SizeKB
|
||||
OO_LVCREATE_POOL: --poolmetadatasize SSizeMB, --poolmetadataspare Bool, --chunksize SizeKB
|
||||
|
||||
OO_LVCREATE_THIN: --discards Discards, --errorwhenfull Bool
|
||||
|
||||
@@ -685,7 +711,7 @@ OO_LVCREATE_RAID: --mirrors SNumber, --stripes Number, --stripesize SizeKB,
|
||||
|
||||
---
|
||||
|
||||
lvcreate --type error --size SizeMB VG
|
||||
lvcreate --type error --size SSizeMB VG
|
||||
OO: OO_LVCREATE
|
||||
ID: lvcreate_error_vol
|
||||
DESC: Create an LV that returns errors when used.
|
||||
@@ -693,7 +719,7 @@ FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
lvcreate --type zero --size SizeMB VG
|
||||
lvcreate --type zero --size SSizeMB VG
|
||||
OO: OO_LVCREATE
|
||||
ID: lvcreate_zero_vol
|
||||
DESC: Create an LV that returns zeros when read.
|
||||
@@ -701,7 +727,7 @@ FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
lvcreate --type linear --size SizeMB VG
|
||||
lvcreate --type linear --size SSizeMB VG
|
||||
OO: OO_LVCREATE
|
||||
OP: PV ...
|
||||
IO: --mirrors 0, --stripes 1
|
||||
@@ -709,27 +735,23 @@ ID: lvcreate_linear
|
||||
DESC: Create a linear LV.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# This is the one place we mention the optional --name
|
||||
# because it's the most common case and may be confusing
|
||||
# to people to not see the name parameter.
|
||||
|
||||
lvcreate --size SizeMB VG
|
||||
lvcreate --size SSizeMB VG
|
||||
OO: --type linear, OO_LVCREATE
|
||||
OP: PV ...
|
||||
IO: --mirrors 0, --stripes 1
|
||||
ID: lvcreate_linear
|
||||
DESC: Create a linear LV (default --type linear).
|
||||
DESC: When --name is omitted, the name is generated.
|
||||
DESC: Create a linear LV.
|
||||
|
||||
---
|
||||
|
||||
lvcreate --type striped --size SizeMB VG
|
||||
lvcreate --type striped --size SSizeMB VG
|
||||
OO: --stripes Number, --stripesize SizeKB, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_striped
|
||||
DESC: Create a striped LV.
|
||||
DESC: Create a striped LV (also see lvcreate --stripes).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
lvcreate --stripes Number --size SizeMB VG
|
||||
lvcreate --stripes Number --size SSizeMB VG
|
||||
OO: --type striped, --stripesize SizeKB, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_striped
|
||||
@@ -737,72 +759,73 @@ DESC: Create a striped LV (infers --type striped).
|
||||
|
||||
---
|
||||
|
||||
lvcreate --type mirror --size SizeMB VG
|
||||
lvcreate --type mirror --size SSizeMB VG
|
||||
OO: --mirrors SNumber, --mirrorlog MirrorLog, --regionsize RegionSize, --stripes Number, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_mirror
|
||||
DESC: Create a mirror LV (also see --type raid1).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# alternate form of lvcreate --type raid1|mirror
|
||||
lvcreate --mirrors SNumber --size SizeMB VG
|
||||
lvcreate --mirrors SNumber --size SSizeMB VG
|
||||
OO: --type raid1, --type mirror, --mirrorlog MirrorLog, --stripes Number, OO_LVCREATE_RAID, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_mirror_or_raid1
|
||||
DESC: Create a raid1 or mirror LV (variant, infers --type raid1|mirror).
|
||||
DESC: Create a raid1 or mirror LV (infers --type raid1|mirror).
|
||||
|
||||
---
|
||||
|
||||
lvcreate --type raid --size SizeMB VG
|
||||
lvcreate --type raid --size SSizeMB VG
|
||||
OO: OO_LVCREATE_RAID, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_raid_any
|
||||
DESC: Create a raid LV (a specific raid level must be used, e.g. raid1.)
|
||||
DESC: Create a raid LV (a specific raid level must be used, e.g. raid1).
|
||||
|
||||
---
|
||||
|
||||
# FIXME: the LV created by these commands actually has type linear or striped,
|
||||
# The LV created by these commands actually has type linear or striped,
|
||||
# not snapshot as specified by the command. If LVs never have type
|
||||
# snapshot, perhaps "snapshot" should not be considered an LV type, but
|
||||
# another new LV property?
|
||||
#
|
||||
# This is the one case where the --type variant is the unpreferred,
|
||||
# secondary syntax, because the LV type is not actually "snapshot".
|
||||
|
||||
# alternate form of lvcreate --snapshot
|
||||
lvcreate --type snapshot --size SizeMB LV
|
||||
lvcreate --type snapshot --size SSizeMB LV
|
||||
OO: --snapshot, --stripes Number, --stripesize SizeKB,
|
||||
--chunksize SizeKB, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_cow_snapshot
|
||||
DESC: Create a COW snapshot LV from an origin LV.
|
||||
DESC: Create a COW snapshot LV of an origin LV
|
||||
DESC: (also see --snapshot).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
lvcreate --snapshot --size SizeMB LV
|
||||
lvcreate --snapshot --size SSizeMB LV
|
||||
OO: --type snapshot, --stripes Number, --stripesize SizeKB,
|
||||
--chunksize SizeKB, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_cow_snapshot
|
||||
DESC: Create a COW snapshot LV from an origin LV.
|
||||
DESC: Create a COW snapshot LV of an origin LV.
|
||||
|
||||
---
|
||||
|
||||
# alternate form of lvcreate --snapshot
|
||||
lvcreate --type snapshot --size SizeMB --virtualsize SizeMB VG
|
||||
lvcreate --type snapshot --size SSizeMB --virtualsize SizeMB VG
|
||||
OO: --snapshot, --chunksize SizeKB, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_cow_snapshot_with_virtual_origin
|
||||
DESC: Create a sparse COW snapshot LV of a virtual origin LV
|
||||
DESC: (also see --snapshot).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
lvcreate --snapshot --size SSizeMB --virtualsize SizeMB VG
|
||||
OO: --type snapshot, --chunksize SizeKB, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_cow_snapshot_with_virtual_origin
|
||||
DESC: Create a sparse COW snapshot LV of a virtual origin LV.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
lvcreate --snapshot --size SizeMB --virtualsize SizeMB VG
|
||||
OO: --type snapshot, --chunksize SizeKB, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_cow_snapshot_with_virtual_origin
|
||||
DESC: Create a sparse COW snapshot LV of a virtual origin LV.
|
||||
|
||||
---
|
||||
|
||||
lvcreate --type thin-pool --size SizeMB VG
|
||||
lvcreate --type thin-pool --size SSizeMB VG
|
||||
OO: --thinpool LV_new, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -811,24 +834,24 @@ ID: lvcreate_thinpool
|
||||
DESC: Create a thin pool.
|
||||
|
||||
# alternate form of lvcreate --type thin-pool
|
||||
lvcreate --thin --size SizeMB VG
|
||||
lvcreate --thin --size SSizeMB VG
|
||||
OO: --type thin-pool, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
IO: --mirrors 0
|
||||
ID: lvcreate_thinpool
|
||||
DESC: Create a thin pool (variant, infers --type thin-pool).
|
||||
DESC: Create a thin pool (infers --type thin-pool).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# alternate form of lvcreate --type thin-pool
|
||||
lvcreate --size SizeMB --thinpool LV_new VG
|
||||
lvcreate --size SSizeMB --thinpool LV_new VG
|
||||
OO: --thin, --type thin-pool, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
IO: --mirrors 0
|
||||
ID: lvcreate_thinpool
|
||||
DESC: Create a thin pool named by the --thinpool arg
|
||||
DESC: (variant, infers --type thin-pool).
|
||||
DESC: (infers --type thin-pool).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
@@ -838,14 +861,14 @@ FLAGS: SECONDARY_SYNTAX
|
||||
# still needs to be listed as an optional addition to
|
||||
# --type cache-pool.
|
||||
|
||||
lvcreate --type cache-pool --size SizeMB VG
|
||||
lvcreate --type cache-pool --size SSizeMB VG
|
||||
OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_cachepool
|
||||
DESC: Create a cache pool.
|
||||
|
||||
# alternate form of lvcreate --type cache-pool
|
||||
lvcreate --type cache-pool --size SizeMB --cachepool LV_new VG
|
||||
lvcreate --type cache-pool --size SSizeMB --cachepool LV_new VG
|
||||
OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE
|
||||
OP: PV ...
|
||||
ID: lvcreate_cachepool
|
||||
@@ -860,6 +883,7 @@ OO: --thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE
|
||||
IO: --mirrors 0
|
||||
ID: lvcreate_thin_vol
|
||||
DESC: Create a thin LV in a thin pool.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# alternate form of lvcreate --type thin
|
||||
lvcreate --type thin --virtualsize SizeMB LV_thinpool
|
||||
@@ -878,8 +902,7 @@ lvcreate --virtualsize SizeMB --thinpool LV_thinpool VG
|
||||
OO: --type thin, --thin, OO_LVCREATE_THIN, OO_LVCREATE
|
||||
IO: --mirrors 0
|
||||
ID: lvcreate_thin_vol
|
||||
DESC: Create a thin LV in a thin pool (variant, infers --type thin).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
DESC: Create a thin LV in a thin pool (infers --type thin).
|
||||
|
||||
# alternate form of lvcreate --type thin
|
||||
lvcreate --virtualsize SizeMB LV_thinpool
|
||||
@@ -898,6 +921,7 @@ OO: --thin, OO_LVCREATE_THIN, OO_LVCREATE
|
||||
IO: --mirrors 0
|
||||
ID: lvcreate_thin_snapshot
|
||||
DESC: Create a thin LV that is a snapshot of an existing thin LV.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# alternate form of lvcreate --type thin
|
||||
lvcreate --thin LV_thin
|
||||
@@ -929,6 +953,7 @@ IO: --mirrors 0
|
||||
ID: lvcreate_thin_snapshot_of_external
|
||||
DESC: Create a thin LV that is a snapshot of an external origin LV
|
||||
DESC: (infers --type thin).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -948,7 +973,7 @@ DESC: (infers --type thin).
|
||||
# definition. Note that when LV_new is used in arg pos 1,
|
||||
# it needs to include a VG name, i.e. VG/LV_new
|
||||
|
||||
lvcreate --type thin --virtualsize SizeMB --size SizeMB --thinpool LV_new
|
||||
lvcreate --type thin --virtualsize SizeMB --size SSizeMB --thinpool LV_new
|
||||
OO: --thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -958,7 +983,7 @@ DESC: Create a thin LV, first creating a thin pool for it,
|
||||
DESC: where the new thin pool is named by the --thinpool arg.
|
||||
|
||||
# alternate form of lvcreate --type thin
|
||||
lvcreate --thin --virtualsize SizeMB --size SizeMB --thinpool LV_new
|
||||
lvcreate --thin --virtualsize SizeMB --size SSizeMB --thinpool LV_new
|
||||
OO: --type thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -970,7 +995,7 @@ DESC: (variant, infers --type thin).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# alternate form of lvcreate --type thin
|
||||
lvcreate --type thin --virtualsize SizeMB --size SizeMB LV_new|VG
|
||||
lvcreate --type thin --virtualsize SizeMB --size SSizeMB LV_new|VG
|
||||
OO: --thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -983,7 +1008,7 @@ DESC: arg is a VG name.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# alternate form of lvcreate --type thin
|
||||
lvcreate --thin --virtualsize SizeMB --size SizeMB LV_new|VG
|
||||
lvcreate --thin --virtualsize SizeMB --size SSizeMB LV_new|VG
|
||||
OO: --type thin, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -997,7 +1022,7 @@ FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
lvcreate --size SizeMB --virtualsize SizeMB VG
|
||||
lvcreate --size SSizeMB --virtualsize SizeMB VG
|
||||
OO: --type thin, --type snapshot, --thin, --snapshot, OO_LVCREATE_POOL, OO_LVCREATE_THIN, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -1017,7 +1042,7 @@ FLAGS: SECONDARY_SYNTAX
|
||||
# but here it applies to creating the new origin that
|
||||
# is used to create the cache LV
|
||||
|
||||
lvcreate --type cache --size SizeMB --cachepool LV_cachepool VG
|
||||
lvcreate --type cache --size SSizeMB --cachepool LV_cachepool VG
|
||||
OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -1027,7 +1052,7 @@ DESC: then combining it with the existing cache pool named
|
||||
DESC: by the --cachepool arg.
|
||||
|
||||
# alternate form of lvcreate --type cache
|
||||
lvcreate --size SizeMB --cachepool LV_cachepool VG
|
||||
lvcreate --size SSizeMB --cachepool LV_cachepool VG
|
||||
OO: --type cache, --cache, OO_LVCREATE_CACHE, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -1038,7 +1063,7 @@ DESC: by the --cachepool arg (variant, infers --type cache).
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# alternate form of lvcreate --type cache
|
||||
lvcreate --type cache --size SizeMB LV_cachepool
|
||||
lvcreate --type cache --size SSizeMB LV_cachepool
|
||||
OO: --cache, OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -1057,7 +1082,7 @@ FLAGS: SECONDARY_SYNTAX
|
||||
# an already complicated command above.
|
||||
#
|
||||
# # alternate form for lvcreate_cache_vol_with_new_origin
|
||||
# lvcreate --cache --size SizeMB LV_cachepool
|
||||
# lvcreate --cache --size SSizeMB LV_cachepool
|
||||
# OO: --type cache, --cache, OO_LVCREATE_CACHE, OO_LVCREATE, --stripes Number, --stripesize SizeKB
|
||||
# OP: PV ...
|
||||
# ID: lvcreate_cache_vol_with_new_origin
|
||||
@@ -1069,7 +1094,7 @@ FLAGS: SECONDARY_SYNTAX
|
||||
# 2. If LV is not a cachepool, then it's a disguised lvconvert.
|
||||
#
|
||||
# # FIXME: this should be done by lvconvert, and this command removed
|
||||
# lvcreate --type cache --size SizeMB LV
|
||||
# lvcreate --type cache --size SSizeMB LV
|
||||
# OO: OO_LVCREATE_POOL, OO_LVCREATE_CACHE, OO_LVCREATE
|
||||
# OP: PV ...
|
||||
# ID: lvcreate_convert_to_cache_vol_with_cachepool
|
||||
@@ -1086,7 +1111,7 @@ FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# def1: alternate form of lvcreate --type cache, or
|
||||
# def2: it should be done by lvconvert.
|
||||
lvcreate --cache --size SizeMB LV
|
||||
lvcreate --cache --size SSizeMB LV
|
||||
OO: OO_LVCREATE_CACHE, OO_LVCREATE_POOL, OO_LVCREATE,
|
||||
--stripes Number, --stripesize SizeKB
|
||||
OP: PV ...
|
||||
@@ -1120,10 +1145,10 @@ ID: lvdisplay_general
|
||||
|
||||
# --extents is not specified; it's an automatic alternative for --size
|
||||
|
||||
lvextend --size SizeMB LV
|
||||
lvextend --size SSizeMB LV
|
||||
OO: --alloc Alloc, --autobackup Bool, --force, --mirrors SNumber,
|
||||
--nofsck, --nosync, --noudevsync, --reportformat ReportFmt, --resizefs,
|
||||
--stripes Number, --stripesize SizeKB, --poolmetadatasize SizeMB,
|
||||
--stripes Number, --stripesize SizeKB, --poolmetadatasize SSizeMB,
|
||||
--type SegType
|
||||
OP: PV ...
|
||||
ID: lvextend_by_size
|
||||
@@ -1136,9 +1161,8 @@ OO: --alloc Alloc, --autobackup Bool, --force, --mirrors SNumber,
|
||||
--type SegType
|
||||
ID: lvextend_by_pv
|
||||
DESC: Extend an LV by specified PV extents.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
lvextend --poolmetadatasize SizeMB LV_thinpool
|
||||
lvextend --poolmetadatasize SSizeMB LV_thinpool
|
||||
OO: --alloc Alloc, --autobackup Bool, --force, --mirrors SNumber,
|
||||
--nofsck, --nosync, --noudevsync,
|
||||
--reportformat ReportFmt, --stripes Number, --stripesize SizeKB,
|
||||
@@ -1165,7 +1189,7 @@ ID: lvmconfig_general
|
||||
|
||||
---
|
||||
|
||||
lvreduce --size SizeMB LV
|
||||
lvreduce --size SSizeMB LV
|
||||
OO: --autobackup Bool, --force, --nofsck, --noudevsync,
|
||||
--reportformat ReportFmt, --resizefs
|
||||
ID: lvreduce_general
|
||||
@@ -1193,10 +1217,10 @@ ID: lvrename_lv_lv
|
||||
# value can be checked to match the existing type; using it doesn't
|
||||
# currently enable any different behavior.
|
||||
|
||||
lvresize --size SizeMB LV
|
||||
lvresize --size SSizeMB LV
|
||||
OO: --alloc Alloc, --autobackup Bool, --force,
|
||||
--nofsck, --nosync, --noudevsync, --reportformat ReportFmt, --resizefs,
|
||||
--stripes Number, --stripesize SizeKB, --poolmetadatasize SizeMB,
|
||||
--stripes Number, --stripesize SizeKB, --poolmetadatasize SSizeMB,
|
||||
--type SegType
|
||||
OP: PV ...
|
||||
ID: lvresize_by_size
|
||||
@@ -1209,9 +1233,8 @@ OO: --alloc Alloc, --autobackup Bool, --force,
|
||||
--type SegType
|
||||
ID: lvresize_by_pv
|
||||
DESC: Resize an LV by specified PV extents.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
lvresize --poolmetadatasize SizeMB LV_thinpool
|
||||
lvresize --poolmetadatasize SSizeMB LV_thinpool
|
||||
OO: --alloc Alloc, --autobackup Bool, --force,
|
||||
--nofsck, --nosync, --noudevsync,
|
||||
--reportformat ReportFmt, --stripes Number, --stripesize SizeKB,
|
||||
@@ -1487,7 +1510,6 @@ vgexport --all
|
||||
OO: OO_VGEXPORT
|
||||
ID: vgexport_all
|
||||
DESC: Export all VGs.
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
---
|
||||
|
||||
@@ -1611,14 +1633,12 @@ config
|
||||
OO: OO_CONFIG
|
||||
OP: String ...
|
||||
ID: lvmconfig_general
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# use lvmconfig
|
||||
dumpconfig
|
||||
OO: OO_CONFIG
|
||||
OP: String ...
|
||||
ID: lvmconfig_general
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
devtypes
|
||||
OO: --aligned, --binary, --nameprefixes, --noheadings,
|
||||
@@ -1652,7 +1672,6 @@ ID: version_general
|
||||
# deprecated
|
||||
pvdata
|
||||
ID: pvdata_general
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
segtypes
|
||||
ID: segtypes_general
|
||||
@@ -1666,22 +1685,18 @@ ID: tags_general
|
||||
# deprecated
|
||||
lvmchange
|
||||
ID: lvmchange_general
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# deprecated
|
||||
lvmdiskscan
|
||||
OO: --lvmpartition, --readonly
|
||||
ID: lvmdiskscan_general
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# deprecated
|
||||
lvmsadc
|
||||
ID: lvmsadc_general
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
# deprecated
|
||||
lvmsar
|
||||
OO: --full, --stdin
|
||||
ID: lvmsar_general
|
||||
FLAGS: SECONDARY_SYNTAX
|
||||
|
||||
|
||||
756
tools/command.c
756
tools/command.c
File diff suppressed because it is too large
Load Diff
@@ -213,9 +213,11 @@ struct command {
|
||||
|
||||
int define_commands(char *run_name);
|
||||
int command_id_to_enum(const char *str);
|
||||
void print_usage(struct command *cmd, int longhelp);
|
||||
void print_usage(struct command *cmd, int longhelp, int desc_first);
|
||||
void print_usage_common_cmd(struct command_name *cname, struct command *cmd);
|
||||
void print_usage_common_lvm(struct command_name *cname, struct command *cmd);
|
||||
void print_usage_notes(struct command_name *cname, struct command *cmd);
|
||||
void factor_common_options(void);
|
||||
int command_has_alternate_extents(const char *name);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -1228,6 +1228,9 @@ static int _lvconvert_mirrors(struct cmd_context *cmd,
|
||||
static int _is_valid_raid_conversion(const struct segment_type *from_segtype,
|
||||
const struct segment_type *to_segtype)
|
||||
{
|
||||
if (!from_segtype)
|
||||
return 1;
|
||||
|
||||
if (from_segtype == to_segtype)
|
||||
return 1;
|
||||
|
||||
@@ -1356,7 +1359,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
|
||||
DEFAULT_RAID1_MAX_IMAGES, lp->segtype->name, display_lvname(lv));
|
||||
return 0;
|
||||
}
|
||||
if (!lv_raid_change_image_count(lv, image_count, lp->pvh))
|
||||
if (!lv_raid_change_image_count(lv, image_count, lp->region_size, lp->pvh))
|
||||
return_0;
|
||||
|
||||
log_print_unless_silent("Logical volume %s successfully converted.",
|
||||
@@ -1365,10 +1368,13 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
|
||||
return 1;
|
||||
}
|
||||
goto try_new_takeover_or_reshape;
|
||||
} else if (!*lp->type_str || seg->segtype == lp->segtype) {
|
||||
}
|
||||
#if 0
|
||||
} else if ((!*lp->type_str || seg->segtype == lp->segtype) && !lp->stripe_size_supplied) {
|
||||
log_error("Conversion operation not yet supported.");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
if ((seg_is_linear(seg) || seg_is_striped(seg) || seg_is_mirrored(seg) || lv_is_raid(lv)) &&
|
||||
(lp->type_str && lp->type_str[0])) {
|
||||
@@ -1390,10 +1396,16 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* FIXME This needs changing globally. */
|
||||
if (!arg_is_set(cmd, stripes_long_ARG))
|
||||
lp->stripes = 0;
|
||||
if (!arg_is_set(cmd, type_ARG))
|
||||
lp->segtype = NULL;
|
||||
if (!arg_is_set(cmd, regionsize_ARG))
|
||||
lp->region_size = 0;
|
||||
|
||||
if (!lv_raid_convert(lv, lp->segtype, lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,
|
||||
if (!lv_raid_convert(lv, lp->segtype,
|
||||
lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,
|
||||
lp->region_size, lp->pvh))
|
||||
return_0;
|
||||
|
||||
@@ -1410,12 +1422,16 @@ try_new_takeover_or_reshape:
|
||||
/* FIXME This needs changing globally. */
|
||||
if (!arg_is_set(cmd, stripes_long_ARG))
|
||||
lp->stripes = 0;
|
||||
if (!arg_is_set(cmd, type_ARG))
|
||||
lp->segtype = NULL;
|
||||
|
||||
/* Only let raid4 through for now. */
|
||||
if (lp->type_str && lp->type_str[0] && lp->segtype != seg->segtype &&
|
||||
((seg_is_raid4(seg) && seg_is_striped(lp) && lp->stripes > 1) ||
|
||||
(seg_is_striped(seg) && seg->area_count > 1 && seg_is_raid4(lp)))) {
|
||||
if (!lv_raid_convert(lv, lp->segtype, lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,
|
||||
if (!lp->segtype ||
|
||||
(lp->type_str && lp->type_str[0] && lp->segtype != seg->segtype &&
|
||||
((seg_is_raid4(seg) && seg_is_striped(lp) && lp->stripes > 1) ||
|
||||
(seg_is_striped(seg) && seg->area_count > 1 && seg_is_raid4(lp))))) {
|
||||
if (!lv_raid_convert(lv, lp->segtype,
|
||||
lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,
|
||||
lp->region_size, lp->pvh))
|
||||
return_0;
|
||||
|
||||
@@ -1700,6 +1716,8 @@ static int _lvconvert_raid_types(struct cmd_context *cmd, struct logical_volume
|
||||
/* FIXME This is incomplete */
|
||||
if (_mirror_or_raid_type_requested(cmd, lp->type_str) || _raid0_type_requested(lp->type_str) ||
|
||||
_striped_type_requested(lp->type_str) || lp->mirrorlog || lp->corelog) {
|
||||
if (!arg_is_set(cmd, type_ARG))
|
||||
lp->segtype = first_seg(lv)->segtype;
|
||||
/* FIXME Handle +/- adjustments too? */
|
||||
if (!get_stripe_params(cmd, lp->segtype, &lp->stripes, &lp->stripe_size, &lp->stripes_supplied, &lp->stripe_size_supplied))
|
||||
goto_out;
|
||||
@@ -2990,9 +3008,9 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
|
||||
}
|
||||
|
||||
/* Allocate a new pool segment */
|
||||
if (!(seg = alloc_lv_segment(pool_segtype, pool_lv, 0, data_lv->le_count,
|
||||
if (!(seg = alloc_lv_segment(pool_segtype, pool_lv, 0, data_lv->le_count, 0,
|
||||
pool_lv->status, 0, NULL, 1,
|
||||
data_lv->le_count, 0, 0, 0, NULL)))
|
||||
data_lv->le_count, 0, 0, 0, 0, NULL)))
|
||||
return_0;
|
||||
|
||||
/* Add the new segment to the layer LV */
|
||||
@@ -3650,8 +3668,9 @@ static int _lvconvert_combine_split_snapshot_single(struct cmd_context *cmd,
|
||||
int lvconvert_combine_split_snapshot_cmd(struct cmd_context *cmd, int argc, char **argv)
|
||||
{
|
||||
const char *vgname = NULL;
|
||||
const char *lvname1;
|
||||
const char *lvname2;
|
||||
const char *lvname1_orig;
|
||||
const char *lvname2_orig;
|
||||
const char *lvname1_split;
|
||||
char *vglv;
|
||||
int vglv_sz;
|
||||
|
||||
@@ -3669,20 +3688,25 @@ int lvconvert_combine_split_snapshot_cmd(struct cmd_context *cmd, int argc, char
|
||||
* This is the only instance in all commands.
|
||||
*/
|
||||
|
||||
lvname1 = cmd->position_argv[0];
|
||||
lvname2 = cmd->position_argv[1];
|
||||
lvname1_orig = cmd->position_argv[0];
|
||||
lvname2_orig = cmd->position_argv[1];
|
||||
|
||||
if (strstr("/", lvname1) && !strstr("/", lvname2) && !getenv("LVM_VG_NAME")) {
|
||||
if (!validate_lvname_param(cmd, &vgname, &lvname1))
|
||||
if (strchr(lvname1_orig, '/') && !strchr(lvname2_orig, '/') && !getenv("LVM_VG_NAME")) {
|
||||
if (!(lvname1_split = dm_pool_strdup(cmd->mem, lvname1_orig)))
|
||||
return_ECMD_FAILED;
|
||||
|
||||
vglv_sz = strlen(vgname) + strlen(lvname2) + 2;
|
||||
if (!validate_lvname_param(cmd, &vgname, &lvname1_split))
|
||||
return_ECMD_FAILED;
|
||||
|
||||
vglv_sz = strlen(vgname) + strlen(lvname2_orig) + 2;
|
||||
if (!(vglv = dm_pool_alloc(cmd->mem, vglv_sz)) ||
|
||||
dm_snprintf(vglv, vglv_sz, "%s/%s", vgname, lvname2) < 0) {
|
||||
dm_snprintf(vglv, vglv_sz, "%s/%s", vgname, lvname2_orig) < 0) {
|
||||
log_error("vg/lv string alloc failed.");
|
||||
return_ECMD_FAILED;
|
||||
}
|
||||
|
||||
/* vglv is now vgname/lvname2 and replaces lvname2_orig */
|
||||
|
||||
cmd->position_argv[1] = vglv;
|
||||
}
|
||||
|
||||
|
||||
@@ -629,19 +629,41 @@ static int _size_arg(struct cmd_context *cmd __attribute__((unused)),
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* negative not accepted */
|
||||
int size_kb_arg(struct cmd_context *cmd, struct arg_values *av)
|
||||
{
|
||||
if (!_size_arg(cmd, av, 2, 0))
|
||||
return 0;
|
||||
|
||||
if (av->sign == SIGN_MINUS) {
|
||||
log_error("Size may not be negative.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int ssize_kb_arg(struct cmd_context *cmd, struct arg_values *av)
|
||||
{
|
||||
return _size_arg(cmd, av, 2, 0);
|
||||
}
|
||||
|
||||
int size_mb_arg(struct cmd_context *cmd, struct arg_values *av)
|
||||
{
|
||||
return _size_arg(cmd, av, 2048, 0);
|
||||
if (!_size_arg(cmd, av, 2048, 0))
|
||||
return 0;
|
||||
|
||||
if (av->sign == SIGN_MINUS) {
|
||||
log_error("Size may not be negative.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int size_mb_arg_with_percent(struct cmd_context *cmd, struct arg_values *av)
|
||||
int ssize_mb_arg(struct cmd_context *cmd, struct arg_values *av)
|
||||
{
|
||||
return _size_arg(cmd, av, 2048, 1);
|
||||
return _size_arg(cmd, av, 2048, 0);
|
||||
}
|
||||
|
||||
int int_arg(struct cmd_context *cmd __attribute__((unused)), struct arg_values *av)
|
||||
@@ -672,8 +694,8 @@ int int_arg_with_sign(struct cmd_context *cmd __attribute__((unused)), struct ar
|
||||
return 1;
|
||||
}
|
||||
|
||||
int int_arg_with_sign_and_percent(struct cmd_context *cmd __attribute__((unused)),
|
||||
struct arg_values *av)
|
||||
int extents_arg(struct cmd_context *cmd __attribute__((unused)),
|
||||
struct arg_values *av)
|
||||
{
|
||||
char *ptr;
|
||||
|
||||
@@ -1253,13 +1275,9 @@ static int _command_required_opt_matches(struct cmd_context *cmd, int ci, int ro
|
||||
* For some commands, --size and --extents are interchangable,
|
||||
* but command[] definitions use only --size.
|
||||
*/
|
||||
if ((opt_enum == size_ARG) && arg_is_set(cmd, extents_ARG)) {
|
||||
if (!strcmp(commands[ci].name, "lvcreate") ||
|
||||
!strcmp(commands[ci].name, "lvresize") ||
|
||||
!strcmp(commands[ci].name, "lvextend") ||
|
||||
!strcmp(commands[ci].name, "lvreduce"))
|
||||
goto check_val;
|
||||
}
|
||||
if ((opt_enum == size_ARG) && arg_is_set(cmd, extents_ARG) &&
|
||||
command_has_alternate_extents(commands[ci].name))
|
||||
goto check_val;
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -1560,11 +1578,10 @@ static struct command *_find_command(struct cmd_context *cmd, const char *path,
|
||||
|
||||
if (!best_required) {
|
||||
/* cmd did not have all the required opt/pos args of any command */
|
||||
log_error("Failed to find a matching command definition.");
|
||||
log_error("Run '%s --help' for more information.", name);
|
||||
log_error("Incorrect syntax. Run '%s --help' for more information.", name);
|
||||
if (close_ro) {
|
||||
log_warn("Closest command usage is:");
|
||||
print_usage(&_cmdline.commands[close_i], 0);
|
||||
log_warn("Nearest similar command has syntax:");
|
||||
print_usage(&_cmdline.commands[close_i], 0, 0);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@@ -1681,6 +1698,7 @@ static int _usage(const char *name, int longhelp)
|
||||
{
|
||||
struct command_name *cname = find_command_name(name);
|
||||
struct command *cmd;
|
||||
int show_full = longhelp;
|
||||
int i;
|
||||
|
||||
if (!cname) {
|
||||
@@ -1699,7 +1717,7 @@ static int _usage(const char *name, int longhelp)
|
||||
/* Reduce the default output when there are several variants. */
|
||||
|
||||
if (cname->variants < 3)
|
||||
longhelp = 1;
|
||||
show_full = 1;
|
||||
|
||||
for (i = 0; i < COMMAND_COUNT; i++) {
|
||||
if (strcmp(_cmdline.commands[i].name, name))
|
||||
@@ -1708,19 +1726,23 @@ static int _usage(const char *name, int longhelp)
|
||||
if (_cmdline.commands[i].cmd_flags & CMD_FLAG_PREVIOUS_SYNTAX)
|
||||
continue;
|
||||
|
||||
if ((_cmdline.commands[i].cmd_flags & CMD_FLAG_SECONDARY_SYNTAX) && !longhelp)
|
||||
if ((_cmdline.commands[i].cmd_flags & CMD_FLAG_SECONDARY_SYNTAX) && !show_full)
|
||||
continue;
|
||||
|
||||
print_usage(&_cmdline.commands[i], longhelp);
|
||||
print_usage(&_cmdline.commands[i], show_full, 1);
|
||||
cmd = &_cmdline.commands[i];
|
||||
}
|
||||
|
||||
/* Common options are printed once for all variants of a command name. */
|
||||
if (longhelp) {
|
||||
if (show_full) {
|
||||
print_usage_common_cmd(cname, cmd);
|
||||
print_usage_common_lvm(cname, cmd);
|
||||
} else
|
||||
log_print("Use --longhelp to show all options.");
|
||||
}
|
||||
|
||||
if (longhelp)
|
||||
print_usage_notes(cname, cmd);
|
||||
else
|
||||
log_print("Use --longhelp to show all options and advanced commands.");
|
||||
|
||||
return 1;
|
||||
}
|
||||
@@ -1732,7 +1754,7 @@ static void _usage_all(void)
|
||||
for (i = 0; i < MAX_COMMAND_NAMES; i++) {
|
||||
if (!command_names[i].name)
|
||||
break;
|
||||
_usage(command_names[i].name, 0);
|
||||
_usage(command_names[i].name, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2019,7 +2041,12 @@ int version(struct cmd_context *cmd __attribute__((unused)),
|
||||
return ECMD_PROCESSED;
|
||||
}
|
||||
|
||||
static void _get_output_settings(struct cmd_context *cmd)
|
||||
static void _reset_current_settings_to_default(struct cmd_context *cmd)
|
||||
{
|
||||
cmd->current_settings = cmd->default_settings;
|
||||
}
|
||||
|
||||
static void _get_current_output_settings_from_args(struct cmd_context *cmd)
|
||||
{
|
||||
if (arg_is_set(cmd, debug_ARG))
|
||||
cmd->current_settings.debug = _LOG_FATAL + (arg_count(cmd, debug_ARG) - 1);
|
||||
@@ -2034,7 +2061,7 @@ static void _get_output_settings(struct cmd_context *cmd)
|
||||
}
|
||||
}
|
||||
|
||||
static void _apply_output_settings(struct cmd_context *cmd)
|
||||
static void _apply_current_output_settings(struct cmd_context *cmd)
|
||||
{
|
||||
init_debug(cmd->current_settings.debug);
|
||||
init_debug_classes_logged(cmd->default_settings.debug_classes);
|
||||
@@ -2042,10 +2069,12 @@ static void _apply_output_settings(struct cmd_context *cmd)
|
||||
init_silent(cmd->current_settings.silent);
|
||||
}
|
||||
|
||||
static int _get_settings(struct cmd_context *cmd)
|
||||
static int _get_current_settings(struct cmd_context *cmd)
|
||||
{
|
||||
const char *activation_mode;
|
||||
|
||||
_get_current_output_settings_from_args(cmd);
|
||||
|
||||
if (arg_is_set(cmd, test_ARG))
|
||||
cmd->current_settings.test = arg_is_set(cmd, test_ARG);
|
||||
|
||||
@@ -2218,8 +2247,10 @@ int help(struct cmd_context *cmd __attribute__((unused)), int argc, char **argv)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void _apply_settings(struct cmd_context *cmd)
|
||||
static void _apply_current_settings(struct cmd_context *cmd)
|
||||
{
|
||||
_apply_current_output_settings(cmd);
|
||||
|
||||
init_test(cmd->current_settings.test);
|
||||
init_full_scan_done(0);
|
||||
init_mirror_in_sync(0);
|
||||
@@ -2543,13 +2574,12 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
|
||||
}
|
||||
|
||||
/*
|
||||
* log_debug() can be enabled now that we know the settings
|
||||
* from the command. Previous calls to log_debug() will
|
||||
* do nothing.
|
||||
* Now we have the command line args, set up any known output logging
|
||||
* options immediately.
|
||||
*/
|
||||
cmd->current_settings = cmd->default_settings;
|
||||
_get_output_settings(cmd);
|
||||
_apply_output_settings(cmd);
|
||||
_reset_current_settings_to_default(cmd);
|
||||
_get_current_output_settings_from_args(cmd);
|
||||
_apply_current_output_settings(cmd);
|
||||
|
||||
log_debug("Parsing: %s", cmd->cmd_line);
|
||||
|
||||
@@ -2610,9 +2640,17 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
|
||||
if (arg_is_set(cmd, readonly_ARG))
|
||||
cmd->metadata_read_only = 1;
|
||||
|
||||
if ((ret = _get_settings(cmd)))
|
||||
/*
|
||||
* Now that all configs, profiles and command lines args are available,
|
||||
* freshly calculate and apply all settings. Specific command line
|
||||
* options take precedence over config files (which include --config as
|
||||
* that is treated like a config file).
|
||||
*/
|
||||
_reset_current_settings_to_default(cmd);
|
||||
if ((ret = _get_current_settings(cmd)))
|
||||
goto_out;
|
||||
_apply_settings(cmd);
|
||||
_apply_current_settings(cmd);
|
||||
|
||||
if (cmd->degraded_activation)
|
||||
log_debug("DEGRADED MODE. Incomplete RAID LVs will be processed.");
|
||||
|
||||
@@ -2763,8 +2801,13 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
|
||||
|
||||
log_debug("Completed: %s", cmd->cmd_line);
|
||||
|
||||
cmd->current_settings = cmd->default_settings;
|
||||
_apply_settings(cmd);
|
||||
/*
|
||||
* Reset all settings back to the persistent defaults that
|
||||
* ignore everything supplied on the command line of the
|
||||
* completed command.
|
||||
*/
|
||||
_reset_current_settings_to_default(cmd);
|
||||
_apply_current_settings(cmd);
|
||||
|
||||
/*
|
||||
* free off any memory the command used.
|
||||
|
||||
139
tools/reporter.c
139
tools/reporter.c
@@ -17,6 +17,8 @@
|
||||
|
||||
#include "report.h"
|
||||
|
||||
#include <sys/vfs.h>
|
||||
|
||||
typedef enum {
|
||||
REPORT_IDX_NULL = -1,
|
||||
REPORT_IDX_SINGLE,
|
||||
@@ -79,7 +81,7 @@ static int _vgs_single(struct cmd_context *cmd __attribute__((unused)),
|
||||
struct selection_handle *sh = handle->selection_handle;
|
||||
|
||||
if (!report_object(sh ? : handle->custom_handle, sh != NULL,
|
||||
vg, NULL, NULL, NULL, NULL, NULL, NULL))
|
||||
vg, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
|
||||
return_ECMD_FAILED;
|
||||
|
||||
check_current_backup(vg);
|
||||
@@ -177,7 +179,7 @@ static int _do_lvs_with_info_and_status_single(struct cmd_context *cmd,
|
||||
}
|
||||
|
||||
if (!report_object(sh ? : handle->custom_handle, sh != NULL,
|
||||
lv->vg, lv, NULL, NULL, NULL, &status, NULL))
|
||||
lv->vg, lv, NULL, NULL, NULL, &status, NULL, NULL, NULL))
|
||||
goto out;
|
||||
|
||||
r = ECMD_PROCESSED;
|
||||
@@ -239,7 +241,7 @@ static int _do_segs_with_info_and_status_single(struct cmd_context *cmd,
|
||||
}
|
||||
|
||||
if (!report_object(sh ? : handle->custom_handle, sh != NULL,
|
||||
seg->lv->vg, seg->lv, NULL, seg, NULL, &status, NULL))
|
||||
seg->lv->vg, seg->lv, NULL, seg, NULL, &status, NULL, NULL, NULL))
|
||||
goto_out;
|
||||
|
||||
r = ECMD_PROCESSED;
|
||||
@@ -367,7 +369,7 @@ static int _do_pvsegs_sub_single(struct cmd_context *cmd,
|
||||
if (!report_object(sh ? : handle->custom_handle, sh != NULL,
|
||||
vg, seg ? seg->lv : &_free_logical_volume,
|
||||
pvseg->pv, seg ? : &_free_lv_segment, pvseg,
|
||||
&status, pv_label(pvseg->pv))) {
|
||||
&status, pv_label(pvseg->pv), NULL, NULL)) {
|
||||
ret = ECMD_FAILED;
|
||||
goto_out;
|
||||
}
|
||||
@@ -443,17 +445,101 @@ static int _pvsegs_with_lv_info_and_status_single(struct cmd_context *cmd,
|
||||
return process_each_segment_in_pv(cmd, vg, pv, handle, _pvsegs_with_lv_info_and_status_sub_single);
|
||||
}
|
||||
|
||||
struct mountinfo_s { // FIXME
|
||||
unsigned maj; //FIXME
|
||||
unsigned min; //FIXME
|
||||
const char *mountpoint;
|
||||
};
|
||||
|
||||
static int _get_mountpoint(char *buffer, unsigned major, unsigned minor,
|
||||
char *target, void *cb_data)
|
||||
{
|
||||
struct mountinfo_s *data = cb_data;
|
||||
|
||||
if ((major == data->maj) && (minor == data->min))
|
||||
data->mountpoint = dm_strdup(target); // FIXME error/pool
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _populate_mount_info(struct physical_volume *pv, struct lvm_mountinfo *mountinfo)
|
||||
{
|
||||
struct mountinfo_s data = {
|
||||
.maj = MAJOR(pv->dev->dev),
|
||||
.min = MINOR(pv->dev->dev),
|
||||
};
|
||||
|
||||
if (!dm_mountinfo_read(_get_mountpoint, &data))
|
||||
return 0;
|
||||
|
||||
if (data.mountpoint)
|
||||
mountinfo->mountpoint = data.mountpoint;
|
||||
else
|
||||
mountinfo->mountpoint = "";
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _populate_fs_info(const char *mountpoint, struct lvm_fsinfo *fsinfo)
|
||||
{
|
||||
struct statfs buf;
|
||||
|
||||
if (statfs(mountpoint, &buf)) {
|
||||
log_sys_error("statfs", mountpoint);
|
||||
return 0;
|
||||
}
|
||||
|
||||
fsinfo->fs_size = (buf.f_blocks * buf.f_bsize) >> SECTOR_SHIFT;
|
||||
fsinfo->fs_free = (buf.f_bfree * buf.f_bsize) >> SECTOR_SHIFT;
|
||||
fsinfo->fs_avail = (buf.f_bavail * buf.f_bsize) >> SECTOR_SHIFT;
|
||||
fsinfo->fs_used = ((buf.f_blocks - buf.f_bfree) * buf.f_bsize) >> SECTOR_SHIFT;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _do_pvs_with_mount_and_fs_info_single(struct cmd_context *cmd, struct volume_group *vg,
|
||||
struct physical_volume *pv,
|
||||
struct processing_handle *handle,
|
||||
int do_mount_info, int do_fs_info)
|
||||
{
|
||||
struct selection_handle *sh = handle->selection_handle;
|
||||
struct lvm_mountinfo mountinfo;
|
||||
struct lvm_fsinfo fsinfo;
|
||||
|
||||
if (do_mount_info)
|
||||
if (!_populate_mount_info(pv, &mountinfo))
|
||||
return_0;
|
||||
|
||||
if (do_fs_info && *mountinfo.mountpoint)
|
||||
if (!_populate_fs_info(mountinfo.mountpoint, &fsinfo))
|
||||
return_0;
|
||||
|
||||
if (!report_object(sh ? : handle->custom_handle, sh != NULL,
|
||||
vg, NULL, pv, NULL, NULL, NULL, NULL, do_mount_info ? &mountinfo : NULL, do_fs_info && *mountinfo.mountpoint ? &fsinfo : NULL))
|
||||
return_ECMD_FAILED;
|
||||
|
||||
return ECMD_PROCESSED;
|
||||
}
|
||||
|
||||
static int _pvs_single(struct cmd_context *cmd, struct volume_group *vg,
|
||||
struct physical_volume *pv,
|
||||
struct processing_handle *handle)
|
||||
{
|
||||
struct selection_handle *sh = handle->selection_handle;
|
||||
return _do_pvs_with_mount_and_fs_info_single(cmd, vg, pv, handle, 0, 0);
|
||||
}
|
||||
|
||||
if (!report_object(sh ? : handle->custom_handle, sh != NULL,
|
||||
vg, NULL, pv, NULL, NULL, NULL, NULL))
|
||||
return_ECMD_FAILED;
|
||||
static int _pvs_with_mount_info_single(struct cmd_context *cmd, struct volume_group *vg,
|
||||
struct physical_volume *pv,
|
||||
struct processing_handle *handle)
|
||||
{
|
||||
return _do_pvs_with_mount_and_fs_info_single(cmd, vg, pv, handle, 1, 0);
|
||||
}
|
||||
|
||||
return ECMD_PROCESSED;
|
||||
static int _pvs_with_fs_info_single(struct cmd_context *cmd, struct volume_group *vg,
|
||||
struct physical_volume *pv,
|
||||
struct processing_handle *handle)
|
||||
{
|
||||
return _do_pvs_with_mount_and_fs_info_single(cmd, vg, pv, handle, 1, 1);
|
||||
}
|
||||
|
||||
static int _label_single(struct cmd_context *cmd, struct label *label,
|
||||
@@ -462,7 +548,7 @@ static int _label_single(struct cmd_context *cmd, struct label *label,
|
||||
struct selection_handle *sh = handle->selection_handle;
|
||||
|
||||
if (!report_object(sh ? : handle->custom_handle, sh != NULL,
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, label))
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, label, NULL, NULL))
|
||||
return_ECMD_FAILED;
|
||||
|
||||
return ECMD_PROCESSED;
|
||||
@@ -487,6 +573,8 @@ static int _get_final_report_type(struct report_args *args,
|
||||
report_type_t report_type,
|
||||
int *lv_info_needed,
|
||||
int *lv_segment_status_needed,
|
||||
int *mountinfo_needed,
|
||||
int *fsinfo_needed,
|
||||
report_type_t *final_report_type)
|
||||
{
|
||||
/* Do we need to acquire LV device info in addition? */
|
||||
@@ -498,8 +586,16 @@ static int _get_final_report_type(struct report_args *args,
|
||||
/* Ensure options selected are compatible */
|
||||
if (report_type & SEGS)
|
||||
report_type |= LVS;
|
||||
|
||||
if (report_type & PVSEGS)
|
||||
report_type |= PVS;
|
||||
|
||||
if (report_type & FSINFO)
|
||||
report_type |= MOUNTINFO;
|
||||
|
||||
if (report_type & MOUNTINFO)
|
||||
report_type |= PVS; // FIXME Temporarily drive fs and mount from pvs
|
||||
|
||||
if ((report_type & (LVS | LVSINFO | LVSSTATUS | LVSINFOSTATUS)) &&
|
||||
(report_type & (PVS | LABEL)) && !(single_args->args_are_pvs || (args->full_report_vg && single_args->report_type == PVSEGS))) {
|
||||
log_error("Can't report LV and PV fields at the same time in %sreport type \"%s\"%s%s.",
|
||||
@@ -509,6 +605,12 @@ static int _get_final_report_type(struct report_args *args,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Do we need to acquire mount point information? */
|
||||
*mountinfo_needed = (report_type & MOUNTINFO) ? 1 : 0;
|
||||
|
||||
/* Do we need to acquire mounted filesystem information? */
|
||||
*fsinfo_needed = (report_type & FSINFO) ? 1 : 0;
|
||||
|
||||
/* Change report type if fields specified makes this necessary */
|
||||
if (report_type & FULL)
|
||||
report_type = FULL;
|
||||
@@ -603,7 +705,7 @@ static int _report_all_in_lv(struct cmd_context *cmd, struct processing_handle *
|
||||
|
||||
static int _report_all_in_pv(struct cmd_context *cmd, struct processing_handle *handle,
|
||||
struct physical_volume *pv, report_type_t type,
|
||||
int do_lv_info, int do_lv_seg_status)
|
||||
int do_lv_info, int do_lv_seg_status, int do_mount_info, int do_fs_info)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
@@ -635,7 +737,7 @@ int report_for_selection(struct cmd_context *cmd,
|
||||
struct selection_handle *sh = parent_handle->selection_handle;
|
||||
struct report_args args = {0};
|
||||
struct single_report_args *single_args = &args.single_args[REPORT_IDX_SINGLE];
|
||||
int do_lv_info, do_lv_seg_status;
|
||||
int do_lv_info, do_lv_seg_status, do_mount_info, do_fs_info;
|
||||
struct processing_handle *handle;
|
||||
int r = 0;
|
||||
|
||||
@@ -645,6 +747,7 @@ int report_for_selection(struct cmd_context *cmd,
|
||||
if (!_get_final_report_type(&args, single_args,
|
||||
single_args->report_type,
|
||||
&do_lv_info, &do_lv_seg_status,
|
||||
&do_mount_info, &do_fs_info,
|
||||
&sh->report_type))
|
||||
return_0;
|
||||
|
||||
@@ -688,7 +791,7 @@ int report_for_selection(struct cmd_context *cmd,
|
||||
r = _report_all_in_vg(cmd, handle, vg, sh->report_type, do_lv_info, do_lv_seg_status);
|
||||
break;
|
||||
case PVS:
|
||||
r = _report_all_in_pv(cmd, handle, pv, sh->report_type, do_lv_info, do_lv_seg_status);
|
||||
r = _report_all_in_pv(cmd, handle, pv, sh->report_type, do_lv_info, do_lv_seg_status, do_mount_info, do_fs_info);
|
||||
break;
|
||||
default:
|
||||
log_error(INTERNAL_ERROR "report_for_selection: incorrect report type");
|
||||
@@ -1079,6 +1182,7 @@ static int _do_report(struct cmd_context *cmd, struct processing_handle *handle,
|
||||
int lock_global = 0;
|
||||
int lv_info_needed;
|
||||
int lv_segment_status_needed;
|
||||
int do_mount_info, do_fs_info;
|
||||
int report_in_group = 0;
|
||||
int r = ECMD_FAILED;
|
||||
|
||||
@@ -1091,7 +1195,9 @@ static int _do_report(struct cmd_context *cmd, struct processing_handle *handle,
|
||||
handle->custom_handle = report_handle;
|
||||
|
||||
if (!_get_final_report_type(args, single_args, report_type, &lv_info_needed,
|
||||
&lv_segment_status_needed, &report_type))
|
||||
&lv_segment_status_needed,
|
||||
&do_mount_info, &do_fs_info,
|
||||
&report_type))
|
||||
goto_out;
|
||||
|
||||
if (!(args->log_only && (single_args->report_type != CMDLOG))) {
|
||||
@@ -1151,7 +1257,10 @@ static int _do_report(struct cmd_context *cmd, struct processing_handle *handle,
|
||||
if (single_args->args_are_pvs)
|
||||
r = process_each_pv(cmd, args->argc, args->argv, NULL,
|
||||
arg_is_set(cmd, all_ARG), 0,
|
||||
handle, &_pvs_single);
|
||||
handle,
|
||||
do_fs_info ? &_pvs_with_fs_info_single :
|
||||
do_mount_info ? &_pvs_with_mount_info_single :
|
||||
&_pvs_single);
|
||||
else
|
||||
r = process_each_vg(cmd, args->argc, args->argv, NULL, NULL,
|
||||
0, 0, handle, &_pvs_in_vg);
|
||||
|
||||
@@ -183,12 +183,13 @@ int cachemode_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int discards_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int mirrorlog_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int size_kb_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int ssize_kb_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int size_mb_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int size_mb_arg_with_percent(struct cmd_context *cmd, struct arg_values *av);
|
||||
int ssize_mb_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int int_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int uint32_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int int_arg_with_sign(struct cmd_context *cmd, struct arg_values *av);
|
||||
int int_arg_with_sign_and_percent(struct cmd_context *cmd, struct arg_values *av);
|
||||
int extents_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int major_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int minor_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
int string_arg(struct cmd_context *cmd, struct arg_values *av);
|
||||
|
||||
32
tools/vals.h
32
tools/vals.h
@@ -79,14 +79,14 @@
|
||||
* options included in the usage text below that should
|
||||
* be removed? Should "lvm1" be removed?
|
||||
*
|
||||
* For Number args that take optional units, a full usage
|
||||
* could be "Number[bBsSkKmMgGtTpPeE]" (with implied |),
|
||||
* but repeating this full specification produces cluttered
|
||||
* output, and doesn't indicate which unit is the default.
|
||||
* "Number[Units]" would be cleaner, as would a subset of
|
||||
* common units, e.g. "Number[kmg...]", but neither helps
|
||||
* with default. "Number[k|Unit]" and "Number[m|Unit]" show
|
||||
* the default, and "Unit" indicates that other units
|
||||
* Size is a Number that takes an optional unit.
|
||||
* A full usage could be "Size[b|B|s|S|k|K|m|M|g|G|t|T|p|P|e|E]"
|
||||
* but repeating this full specification produces long and
|
||||
* cluttered output, and doesn't indicate which unit is the default.
|
||||
* "Size[Units]" would be cleaner, as would a subset of
|
||||
* common units, e.g. "Size[kmg...]", but neither helps
|
||||
* with default. "Size[k|UNIT]" and "Size[m|UNIT]" show
|
||||
* the default, and "UNIT" indicates that other units
|
||||
* are possible without listing them all. This also
|
||||
* suggests using the preferred lower case letters, because
|
||||
* --size and other option args treat upper/lower letters
|
||||
@@ -112,21 +112,23 @@ val(tag_VAL, tag_arg, "Tag", NULL)
|
||||
val(select_VAL, NULL, "Select", NULL) /* used only for command defs */
|
||||
val(activationmode_VAL, string_arg, "ActivationMode", "partial|degraded|complete")
|
||||
val(activation_VAL, activation_arg, "Active", "y|n|ay")
|
||||
val(cachemode_VAL, cachemode_arg, "CacheMode", "writethrough|writeback")
|
||||
val(cachemode_VAL, cachemode_arg, "CacheMode", "writethrough|writeback|passthrough")
|
||||
val(discards_VAL, discards_arg, "Discards", "passdown|nopassdown|ignore")
|
||||
val(mirrorlog_VAL, mirrorlog_arg, "MirrorLog", "core|disk")
|
||||
val(sizekb_VAL, size_kb_arg, "SizeKB", "Number[k|Unit]")
|
||||
val(sizemb_VAL, size_mb_arg, "SizeMB", "Number[m|Unit]")
|
||||
val(regionsize_VAL, regionsize_arg, "RegionSize", "Number[m|Unit]")
|
||||
val(numsigned_VAL, int_arg_with_sign, "SNumber", "[+|-]Number")
|
||||
val(numsignedper_VAL, int_arg_with_sign_and_percent, "SNumberP", "[+|-]Number[%VG|%PVS|%FREE]")
|
||||
val(sizekb_VAL, size_kb_arg, "SizeKB", "Size[k|UNIT]")
|
||||
val(sizemb_VAL, size_mb_arg, "SizeMB", "Size[m|UNIT]")
|
||||
val(ssizekb_VAL, ssize_kb_arg, "SSizeKB", "[+|-]Size[k|UNIT]")
|
||||
val(ssizemb_VAL, ssize_mb_arg, "SSizeMB", "[+|-]Size[m|UNIT]")
|
||||
val(regionsize_VAL, regionsize_arg, "RegionSize", "Size[m|UNIT]")
|
||||
val(snumber_VAL, int_arg_with_sign, "SNumber", "[+|-]Number")
|
||||
val(extents_VAL, extents_arg, "Extents", "[+|-]Number[PERCENT]")
|
||||
val(permission_VAL, permission_arg, "Permission", "rw|r")
|
||||
val(metadatatype_VAL, metadatatype_arg, "MetadataType", "lvm2|lvm1")
|
||||
val(units_VAL, string_arg, "Units", "r|R|h|H|b|B|s|S|k|K|m|M|g|G|t|T|p|P|e|E")
|
||||
val(segtype_VAL, segtype_arg, "SegType", "linear|striped|snapshot|mirror|raid|thin|cache|thin-pool|cache-pool")
|
||||
val(alloc_VAL, alloc_arg, "Alloc", "contiguous|cling|cling_by_tags|normal|anywhere|inherit")
|
||||
val(locktype_VAL, locktype_arg, "LockType", "sanlock|dlm|none")
|
||||
val(readahead_VAL, readahead_arg, "Readahead", "auto|none|NumberSectors")
|
||||
val(readahead_VAL, readahead_arg, "Readahead", "auto|none|Number")
|
||||
val(vgmetadatacopies_VAL, vgmetadatacopies_arg, "MetadataCopiesVG", "all|unmanaged|Number")
|
||||
val(pvmetadatacopies_VAL, pvmetadatacopies_arg, "MetadataCopiesPV", "0|1|2")
|
||||
val(metadatacopies_VAL, metadatacopies_arg, "unused", "unused")
|
||||
|
||||
Reference in New Issue
Block a user