1
0
mirror of git://sourceware.org/git/lvm2.git synced 2025-11-29 00:23:49 +03:00

Compare commits

..

1 Commits

Author SHA1 Message Date
Heinz Mauelshagen
53b62d950a signal: workaround sigint handler flaw 2017-04-07 17:07:45 +02:00
12 changed files with 253 additions and 410 deletions

View File

@@ -182,7 +182,7 @@ int cluster_send(struct clog_request *rq)
}
/*
* Once the request heads for the cluster, the luid loses
* Once the request heads for the cluster, the luid looses
* all its meaning.
*/
rq->u_rq.luid = 0;

View File

@@ -1891,7 +1891,7 @@ static int _raid_reshape_remove_images(struct logical_volume *lv,
log_print_unless_silent("If that leaves the logical volume larger than %llu extents due to stripe rounding,",
(unsigned long long) extend_le_count);
log_print_unless_silent("you may want to grow the content afterwards (filesystem etc.)");
log_warn("WARNING: to remove freed stripes after the conversion has finished, you have to run \"lvconvert --stripes %u %s\"",
log_warn("WARNING: too remove freed stripes after the conversion has finished, you have to run \"lvconvert --stripes %u %s\"",
new_stripes, display_lvname(lv));
if (!force) {
@@ -1976,7 +1976,6 @@ static int _raid_reshape_remove_images(struct logical_volume *lv,
* Reshape: keep images in RAID @lv but change stripe size or data copies
*
*/
static const char *_get_segtype_alias(const struct segment_type *segtype);
static int _raid_reshape_keep_images(struct logical_volume *lv,
const struct segment_type *new_segtype,
int yes, int force, int *force_repair,
@@ -1996,6 +1995,8 @@ static int _raid_reshape_keep_images(struct logical_volume *lv,
return 0;
}
seg->stripe_size = new_stripe_size;
/*
* Reshape layout alogorithm or chunksize:
*
@@ -2009,22 +2010,8 @@ static int _raid_reshape_keep_images(struct logical_volume *lv,
* The dm-raid target is able to use the space whereever it
* is found by appropriately selecting forward or backward reshape.
*/
if (seg->segtype != new_segtype) {
const char *alias = _get_segtype_alias(seg->segtype);
if (!strcmp(alias, new_segtype->name))
alloc_reshape_space = 0;
}
if (seg->stripe_size != new_stripe_size)
alloc_reshape_space = 1;
seg->stripe_size = new_stripe_size;
if (seg->area_count == 2)
alloc_reshape_space = 0;
if (alloc_reshape_space &&
if (seg->area_count != 2 &&
alloc_reshape_space &&
!_lv_alloc_reshape_space(lv, where, NULL, allocate_pvs))
return 0;
@@ -2389,7 +2376,7 @@ static int _reshape_requested(const struct logical_volume *lv, const struct segm
if (seg_is_any_raid10(seg) && seg->area_count > 2 &&
stripes && stripes < seg->area_count - seg->segtype->parity_devs) {
log_error("Can't remove stripes from raid10");
return 2;
goto err;
}
if (data_copies != seg->data_copies) {
@@ -2400,20 +2387,22 @@ static int _reshape_requested(const struct logical_volume *lv, const struct segm
/* Change layout (e.g. raid5_ls -> raid5_ra) keeping # of stripes */
if (seg->segtype != segtype) {
if (stripes && stripes != _data_rimages_count(seg, seg->area_count))
return 2;
goto err;
return 1;
}
if (stripes && stripes == _data_rimages_count(seg, seg->area_count) &&
stripe_size == seg->stripe_size &&
region_size == seg->region_size) {
stripe_size == seg->stripe_size) {
log_error("LV %s already has %u stripes.",
display_lvname(lv), stripes);
return 2;
}
return (stripes || stripe_size) ? 1 : 0;
err:
return 2;
}
/*
@@ -2932,7 +2921,7 @@ static int _raid_remove_images(struct logical_volume *lv, int yes,
/* Convert to linear? */
if (new_count == 1) {
if (!yes && yes_no_prompt("Are you sure you want to convert %s LV %s to type %s losing all resilience? [y/n]: ",
if (!yes && yes_no_prompt("Are you sure you want to convert %s LV %s to type %s loosing all resilience? [y/n]: ",
lvseg_name(first_seg(lv)), display_lvname(lv), SEG_TYPE_NAME_LINEAR) == 'n') {
log_error("Logical volume %s NOT converted to \"%s\".",
display_lvname(lv), SEG_TYPE_NAME_LINEAR);
@@ -3072,9 +3061,9 @@ int lv_raid_split(struct logical_volume *lv, int yes, const char *split_name,
return 0;
}
/* Split on a 2-legged raid1 LV causes losing all resilience */
/* Split on a 2-legged raid1 LV causes loosing all resilience */
if (new_count == 1 &&
!yes && yes_no_prompt("Are you sure you want to split %s LV %s losing all resilience? [y/n]: ",
!yes && yes_no_prompt("Are you sure you want to split %s LV %s loosing all resilience? [y/n]: ",
lvseg_name(first_seg(lv)), display_lvname(lv)) == 'n') {
log_error("Logical volume %s NOT split.", display_lvname(lv));
return 0;
@@ -3214,9 +3203,9 @@ int lv_raid_split_and_track(struct logical_volume *lv,
return 0;
}
/* Split and track changes on a 2-legged raid1 LV causes losing resilience for newly written data. */
/* Split and track changes on a 2-legged raid1 LV causes loosing resilience for newly written data. */
if (seg->area_count == 2 &&
!yes && yes_no_prompt("Are you sure you want to split and track %s LV %s losing resilience for any newly written data? [y/n]: ",
!yes && yes_no_prompt("Are you sure you want to split and track %s LV %s loosing resilience for any newly written data? [y/n]: ",
lvseg_name(seg), display_lvname(lv)) == 'n') {
log_error("Logical volume %s NOT split.", display_lvname(lv));
return 0;
@@ -4422,12 +4411,6 @@ static const char *_get_segtype_alias(const struct segment_type *segtype)
if (!strcmp(segtype->name, SEG_TYPE_NAME_RAID6_ZR))
return SEG_TYPE_NAME_RAID6;
if (!strcmp(segtype->name, SEG_TYPE_NAME_RAID10))
return SEG_TYPE_NAME_RAID10_NEAR;
if (!strcmp(segtype->name, SEG_TYPE_NAME_RAID10_NEAR))
return SEG_TYPE_NAME_RAID10;
return "";
}
@@ -5023,7 +5006,6 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
{
uint32_t extents_copied, region_size, seg_len, stripe_size;
struct lv_segment *seg = first_seg(lv);
const struct segment_type *initial_segtype = seg->segtype;
struct dm_list removal_lvs;
dm_list_init(&removal_lvs);
@@ -5101,7 +5083,7 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
/* Add metadata LVs */
if (seg_is_raid0(seg)) {
log_debug_metadata("Adding metadata LVs to %s.", display_lvname(lv));
if (!_raid0_add_or_remove_metadata_lvs(lv, 0 /* update_and_reload */, allocate_pvs, NULL))
if (!_raid0_add_or_remove_metadata_lvs(lv, 1 /* update_and_reload */, allocate_pvs, NULL))
return 0;
/* raid0_meta -> raid4 needs clearing of MetaLVs in order to avoid raid disk role change issues in the kernel */
}
@@ -5129,28 +5111,8 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
log_debug_metadata("Adding %" PRIu32 " component LV pair(s) to %s.",
new_image_count - lv_raid_image_count(lv),
display_lvname(lv));
if (!_lv_raid_change_image_count(lv, 1, new_image_count, allocate_pvs, NULL, 0, 1)) {
/*
* Rollback to initial type raid0/striped after failure to upconvert
* to raid4/5/6/10 elminating any newly allocated metadata devices
* (raid4/5 -> raid6 doesn't need any explicit changes after
* the allocation of the additional sub LV pair failed)
*
* - initial type is raid0 -> just remove remove metadata devices
*
* - initial type is striped -> convert back to it (removes metadata devices)
*/
if (segtype_is_raid0(initial_segtype) &&
!_raid0_add_or_remove_metadata_lvs(lv, 0, NULL, &removal_lvs))
return_0;
if (segtype_is_striped_target(initial_segtype) &&
!_convert_raid0_to_striped(lv, 0, &removal_lvs))
return_0;
if (!_eliminate_extracted_lvs(lv->vg, &removal_lvs)) /* Updates vg */
return_0;
if (!_lv_raid_change_image_count(lv, 1, new_image_count, allocate_pvs, NULL, 0, 1))
return 0;
}
seg = first_seg(lv);
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -89,7 +89,6 @@ struct dev_manager;
#define SEG_TYPE_NAME_RAID0_META "raid0_meta"
#define SEG_TYPE_NAME_RAID1 "raid1"
#define SEG_TYPE_NAME_RAID10 "raid10"
#define SEG_TYPE_NAME_RAID10_NEAR "raid10_near"
#define SEG_TYPE_NAME_RAID4 "raid4"
#define SEG_TYPE_NAME_RAID5 "raid5"
#define SEG_TYPE_NAME_RAID5_N "raid5_n"
@@ -138,12 +137,11 @@ struct dev_manager;
#define segtype_is_raid6_rs_6(segtype) ((segtype)->flags & SEG_RAID6_RS_6 ? 1 : 0)
#define segtype_is_raid6_la_6(segtype) ((segtype)->flags & SEG_RAID6_LA_6 ? 1 : 0)
#define segtype_is_raid6_ra_6(segtype) ((segtype)->flags & SEG_RAID6_RA_6 ? 1 : 0)
#define segtype_is_any_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
#define segtype_is_raid10(segtype) ((segtype)->flags & SEG_RAID10 ? 1 : 0)
#define segtype_is_raid10_near(segtype) ((segtype)->flags & SEG_RAID10_NEAR ? 1 : 0)
/* FIXME: once raid10_{far,offset} supported */
#define segtype_is_raid10_far(segtype) 0 /* FIXME ((segtype)->flags & SEG_RAID10_FAR ? 1 : 0 */
#define segtype_is_raid10_near(segtype) segtype_is_raid10(segtype)
/* FIXME: once raid10_offset supported */
#define segtype_is_raid10_offset(segtype) 0 /* FIXME ((segtype)->flags & SEG_RAID10_OFFSET ? 1 : 0 */
#define segtype_is_any_raid10(segtype) (segtype_is_raid10(segtype) || segtype_is_raid10_near(segtype) || segtype_is_raid10_far(segtype) || segtype_is_raid10_offset(segtype))
#define segtype_is_raid_with_meta(segtype) (segtype_is_raid(segtype) && !segtype_is_raid0(segtype))
#define segtype_is_striped_raid(segtype) (segtype_is_raid(segtype) && !segtype_is_raid1(segtype))
#define segtype_is_reshapable_raid(segtype) ((segtype_is_striped_raid(segtype) && !segtype_is_any_raid0(segtype)) || segtype_is_raid10_near(segtype) || segtype_is_raid10_offset(segtype))

View File

@@ -34,7 +34,26 @@ static void _catch_sigint(int unused __attribute__((unused)))
_sigint_caught = 1;
}
static void _register_sigint_caught(struct sigaction *old_handler)
{
struct sigaction handler;
/* Grab old sigaction for SIGINT: shall not fail. */
if (sigaction(SIGINT, NULL, &handler))
log_sys_debug("sigaction", "SIGINT");
handler.sa_flags &= ~SA_RESTART; /* Clear restart flag */
handler.sa_handler = _catch_sigint;
/* Override the signal handler: shall not fail. */
if (sigaction(SIGINT, &handler, old_handler))
log_sys_debug("sigaction", "SIGINT");
}
int sigint_caught(void) {
printf("%s[%u] CALLED\n", __func__, __LINE__);
_register_sigint_caught(NULL);
if (_sigint_caught)
log_error("Interrupted...");
@@ -58,7 +77,6 @@ void sigint_clear(void)
void sigint_allow(void)
{
struct sigaction handler;
sigset_t sigs;
if (memlock_count_daemon())
@@ -67,25 +85,16 @@ void sigint_allow(void)
* Do not overwrite the backed-up handler data -
* just increase nesting count.
*/
if (++_handler_installed > MAX_SIGINTS)
if (++_handler_installed >= MAX_SIGINTS)
return;
/* Grab old sigaction for SIGINT: shall not fail. */
if (sigaction(SIGINT, NULL, &handler))
log_sys_debug("sigaction", "SIGINT");
handler.sa_flags &= ~SA_RESTART; /* Clear restart flag */
handler.sa_handler = _catch_sigint;
/* Override the signal handler: shall not fail. */
if (sigaction(SIGINT, &handler, &_oldhandler[_handler_installed - 1]))
log_sys_debug("sigaction", "SIGINT");
_register_sigint_caught(&_oldhandler[_handler_installed - 1]);
/* Unmask SIGINT. Remember to mask it again on restore. */
if (sigprocmask(0, NULL, &sigs))
log_sys_debug("sigprocmask", "");
if ((_oldmasked[_handler_installed - 1] = sigismember(&sigs, SIGINT))) {
if ((_oldmasked[_handler_installed] = sigismember(&sigs, SIGINT))) {
sigdelset(&sigs, SIGINT);
if (sigprocmask(SIG_SETMASK, &sigs, NULL))
log_sys_debug("sigprocmask", "SIG_SETMASK");

View File

@@ -599,7 +599,6 @@ static const struct raid_type {
{ SEG_TYPE_NAME_RAID0_META, 0, SEG_RAID0_META | SEG_AREAS_STRIPED },
{ SEG_TYPE_NAME_RAID1, 0, SEG_RAID1 | SEG_AREAS_MIRRORED },
{ SEG_TYPE_NAME_RAID10, 0, SEG_RAID10 | SEG_AREAS_MIRRORED },
{ SEG_TYPE_NAME_RAID10_NEAR,0, SEG_RAID10_NEAR | SEG_AREAS_MIRRORED },
{ SEG_TYPE_NAME_RAID4, 1, SEG_RAID4 },
{ SEG_TYPE_NAME_RAID5, 1, SEG_RAID5 },
{ SEG_TYPE_NAME_RAID5_N, 1, SEG_RAID5_N },

View File

@@ -108,7 +108,6 @@ static const struct {
*/
{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
{ SEG_RAID10, "raid10_near"}, /* same as "raid10" */
};
/* Some segment types have a list of areas of other devices attached */

View File

@@ -7,17 +7,6 @@ To display the current LV type, run the command:
.B lvs -o name,segtype
.I LV
In some cases, an LV is a single device mapper (dm) layer above physical
devices. In other cases, hidden LVs (dm devices) are layered between the
visible LV and physical devices. LVs in the middle layers are called sub LVs.
A command run on a visible LV sometimes operates on a sub LV rather than
the specified LV. In other cases, a sub LV must be specified directly on
the command line.
Sub LVs can be displayed with the command:
.B lvs -a
The
.B linear
type is equivalent to the
@@ -31,41 +20,45 @@ type is deprecated and the
.B raid1
type should be used. They are both implementations of mirroring.
In some cases, an LV is a single device mapper (dm) layer above physical
devices. In other cases, hidden LVs (dm devices) are layered between the
visible LV and physical devices. LVs in the middle layers are called sub LVs.
A command run on a visible LV sometimes operates on a sub LV rather than
the specified LV. In other cases, a sub LV must be specified directly on
the command line.
Striped raid types are
\fBraid0/raid0_meta\fP,
\fBraid5\fP (an alias for raid5_ls),
\fBraid6\fP (an alias for raid6_zr) and
\fBraid10\fP (an alias for raid10_near).
.B raid0/raid0_meta
,
.B raid5
(an alias for raid5_ls),
.B raid6
(an alias for raid6_zr) and
.B raid10
(an alias for raid10_near).
As opposed to mirroring, raid5 and raid6 stripe data and calculate parity
blocks. The parity blocks can be used for data block recovery in case
devices fail. A maximum number of one device in a raid5 LV may fail, and
two in case of raid6. Striped raid types typically rotate the parity and
data blocks for performance reasons, thus avoiding contention on a single
device. Specific arrangements of parity and data blocks (layouts) can be
used to optimize I/O performance, or to convert between raid levels. See
\fBlvmraid\fP(7) for more information.
blocks. The parity blocks can be used for data block recovery in case devices
fail. A maximum number of one device in a raid5 LV may fail and two in case
of raid6. Striped raid types typically rotate the parity blocks for performance
reasons thus avoiding contention on a single device. Layouts of raid5 rotating
parity blocks can be one of left-asymmetric (raid5_la), left-symmetric (raid5_ls
with alias raid5), right-asymmetric (raid5_ra), right-symmetric (raid5_rs) and raid5_n,
which doesn't rotate parity blocks. Any \"_n\" layouts allow for conversion between
raid levels (raid5_n -> raid6 or raid5_n -> striped/raid0/raid0_meta).
raid6 layouts are zero-restart (raid6_zr with alias raid6), next-restart (raid6_nr),
next-continue (raid6_nc). Additionally, special raid6 layouts for raid level conversions
between raid5 and raid6 are raid6_ls_6, raid6_rs_6, raid6_la_6 and raid6_ra_6. Those
correspond to their raid5 counterparts (e.g. raid5_rs can be directly converted to raid6_rs_6
and vice-versa).
raid10 (an alias for raid10_near) is currently limited to one data copy and even number of
sub LVs. This is a mirror group layout thus a single sub LV may fail per mirror group
without data loss.
Striped raid types support converting the layout, their stripesize
and their number of stripes.
Layouts of raid5 rotating parity blocks can be: left-asymmetric
(raid5_la), left-symmetric (raid5_ls with alias raid5), right-asymmetric
(raid5_ra), right-symmetric (raid5_rs) and raid5_n, which doesn't rotate
parity blocks. Layouts of raid6 are: zero-restart (raid6_zr with alias
raid6), next-restart (raid6_nr), and next-continue (raid6_nc).
The striped raid types combined with raid1 allow for conversion from linear -> striped/raid0/raid0_meta
and vice-versa by e.g. linear <-> raid1 <-> raid5_n (then adding stripes) <-> striped/raid0/raid0_meta.
Layouts including _n allow for conversion between raid levels (raid5_n to
raid6 or raid5_n to striped/raid0/raid0_meta). Additionally, special raid6
layouts for raid level conversions between raid5 and raid6 are:
raid6_ls_6, raid6_rs_6, raid6_la_6 and raid6_ra_6. Those correspond to
their raid5 counterparts (e.g. raid5_rs can be directly converted to
raid6_rs_6 and vice-versa).
raid10 (an alias for raid10_near) is currently limited to one data copy
and even number of sub LVs. This is a mirror group layout, thus a single
sub LV may fail per mirror group without data loss.
Striped raid types support converting the layout, their stripesize and
their number of stripes.
The striped raid types combined with raid1 allow for conversion from
linear -> striped/raid0/raid0_meta and vice-versa by e.g. linear <-> raid1
<-> raid5_n (then adding stripes) <-> striped/raid0/raid0_meta.
Sub LVs can be displayed with the command
.B lvs -a

View File

@@ -1,8 +1,8 @@
lvcreate creates a new LV in a VG. For standard LVs, this requires
allocating logical extents from the VG's free physical extents. If there
is not enough free space, the VG can be extended with other PVs
(\fBvgextend\fP(8)), or existing LVs can be reduced or removed
(\fBlvremove\fP(8), \fBlvreduce\fP(8).)
is not enough free space, then the VG can be extended (see
\fBvgextend\fP(8)) with other PVs, or existing LVs can be reduced or
removed (see \fBlvremove\fP, \fBlvreduce\fP.)
To control which PVs a new LV will use, specify one or more PVs as
position args at the end of the command line. lvcreate will allocate
@@ -19,7 +19,7 @@ unique PVs be available in the VG for allocation.
Thin pools (for thin provisioning) and cache pools (for caching) are
represented by special LVs with types thin-pool and cache-pool (see
\fBlvmthin\fP(7) and \fBlvmcache\fP(7)). The pool LVs are not usable as
standard block devices, but the LV names act as references to the pools.
standard block devices, but the LV names act references to the pools.
Thin LVs are thinly provisioned from a thin pool, and are created with a
virtual size rather than a physical size. A cache LV is the combination of
@@ -27,9 +27,10 @@ a standard LV with a cache pool, used to cache active portions of the LV
to improve performance.
.SS Usage notes
In the usage section below, \fB--size\fP \fISize\fP can be replaced
with \fB--extents\fP \fINumber\fP. See descriptions in the options section.
with \fB--extents\fP \fINumber\fP. See both descriptions
the options section.
In the usage section below, \fB--name\fP is omitted from the required
options, even though it is typically used. When the name is not
options, even though it is typically used. When the name is not
specified, a new LV name is generated with the "lvol" prefix and a unique
numeric suffix.
numeric suffix. Also see the description in the options section.

View File

@@ -45,7 +45,7 @@ The basic RAID levels that can be used are:
To display the LV type of an existing LV, run:
.B lvs -o name,segtype
\fILV\fP
\fIVG\fP/\fILV\fP
(The LV type is also referred to as "segment type" or "segtype".)
@@ -306,7 +306,7 @@ The command to scrub a RAID LV can operate in two different modes:
.B lvchange --syncaction
.BR check | repair
.I LV
.IR VG / LV
.HP
.B check
@@ -325,20 +325,20 @@ the RAID LV. To control the I/O rate used for scrubbing, use:
.HP
.B --maxrecoveryrate
\fISize\fP[k|UNIT]
.BR \fIRate [ b | B | s | S | k | K | m | M | g | G ]
.br
Sets the maximum recovery rate for a RAID LV. \fISize\fP is specified as
Sets the maximum recovery rate for a RAID LV. \fIRate\fP is specified as
an amount per second for each device in the array. If no suffix is given,
then KiB/sec/device is used. Setting the recovery rate to \fB0\fP
then KiB/sec/device is assumed. Setting the recovery rate to \fB0\fP
means it will be unbounded.
.HP
.BR --minrecoveryrate
\fISize\fP[k|UNIT]
.BR \fIRate [ b | B | s | S | k | K | m | M | g | G ]
.br
Sets the minimum recovery rate for a RAID LV. \fISize\fP is specified as
Sets the minimum recovery rate for a RAID LV. \fIRate\fP is specified as
an amount per second for each device in the array. If no suffix is given,
then KiB/sec/device is used. Setting the recovery rate to \fB0\fP
then KiB/sec/device is assumed. Setting the recovery rate to \fB0\fP
means it will be unbounded.
.P
@@ -357,9 +357,9 @@ Also, if mismatches were found, the lvs attr field will display the letter
"m" (mismatch) in the 9th position, e.g.
.nf
# lvs -o name,vgname,segtype,attr vg/lv
LV VG Type Attr
lv vg raid1 Rwi-a-r-m-
# lvs -o name,vgname,segtype,attr vg/lvol0
LV VG Type Attr
lvol0 vg raid1 Rwi-a-r-m-
.fi
@@ -427,9 +427,9 @@ Each rimage SubLV holds a portion of LV data. No parity is used.
No RAID metadata is used.
.nf
# lvcreate --type raid0 --stripes 2 --name lvr0 ...
lvcreate --type raid0 --stripes 2 --name lvr0 ...
# lvs -a -o name,segtype,devices
lvs -a -o name,segtype,devices
lvr0 raid0 lvr0_rimage_0(0),lvr0_rimage_1(0)
[lvr0_rimage_0] linear /dev/sda(...)
[lvr0_rimage_1] linear /dev/sdb(...)
@@ -441,9 +441,9 @@ Each rimage SubLV holds a complete copy of LV data. No parity is used.
Each rmeta SubLV holds RAID metadata.
.nf
# lvcreate --type raid1 --mirrors 1 --name lvr1 ...
lvcreate --type raid1 --mirrors 1 --name lvr1 ...
# lvs -a -o name,segtype,devices
lvs -a -o name,segtype,devices
lvr1 raid1 lvr1_rimage_0(0),lvr1_rimage_1(0)
[lvr1_rimage_0] linear /dev/sda(...)
[lvr1_rimage_1] linear /dev/sdb(...)
@@ -457,9 +457,9 @@ At least three rimage SubLVs each hold a portion of LV data and one rimage SubLV
holds parity. Each rmeta SubLV holds RAID metadata.
.nf
# lvcreate --type raid4 --stripes 2 --name lvr4 ...
lvcreate --type raid4 --stripes 2 --name lvr4 ...
# lvs -a -o name,segtype,devices
lvs -a -o name,segtype,devices
lvr4 raid4 lvr4_rimage_0(0),\\
lvr4_rimage_1(0),\\
lvr4_rimage_2(0)
@@ -478,9 +478,9 @@ At least three rimage SubLVs each typcially hold a portion of LV data and parity
Each rmeta SubLV holds RAID metadata.
.nf
# lvcreate --type raid5 --stripes 2 --name lvr5 ...
lvcreate --type raid5 --stripes 2 --name lvr5 ...
# lvs -a -o name,segtype,devices
lvs -a -o name,segtype,devices
lvr5 raid5 lvr5_rimage_0(0),\\
lvr5_rimage_1(0),\\
lvr5_rimage_2(0)
@@ -499,9 +499,9 @@ At least five rimage SubLVs each typically hold a portion of LV data and parity.
Each rmeta SubLV holds RAID metadata.
.nf
# lvcreate --type raid6 --stripes 3 --name lvr6
lvcreate --type raid6 --stripes 3 --name lvr6
# lvs -a -o name,segtype,devices
lvs -a -o name,segtype,devices
lvr6 raid6 lvr6_rimage_0(0),\\
lvr6_rimage_1(0),\\
lvr6_rimage_2(0),\\
@@ -520,7 +520,6 @@ Each rmeta SubLV holds RAID metadata.
[lvr6_rmeta_3] linear /dev/sdd(...)
[lvr6_rmeta_4] linear /dev/sde(...)
[lvr6_rmeta_5] linear /dev/sdf(...)
.fi
.B raid10
.br
@@ -528,9 +527,9 @@ At least four rimage SubLVs each hold a portion of LV data. No parity is used.
Each rmeta SubLV holds RAID metadata.
.nf
# lvcreate --type raid10 --stripes 2 --mirrors 1 --name lvr10
lvcreate --type raid10 --stripes 2 --mirrors 1 --name lvr10
# lvs -a -o name,segtype,devices
lvs -a -o name,segtype,devices
lvr10 raid10 lvr10_rimage_0(0),\\
lvr10_rimage_1(0),\\
lvr10_rimage_2(0),\\
@@ -616,8 +615,8 @@ A RAID LV that is missing devices may be activated or not, depending on
the "activation mode" used in lvchange:
.B lvchange -ay --activationmode
.BR complete | degraded | partial
.I LV
.RB { complete | degraded | partial }
.IR VG / LV
.B complete
.br
@@ -655,12 +654,12 @@ repeated to replace multiple PVs. Replacement devices can be optionally
listed with either option.
.B lvconvert --repair
.I LV
.IR VG / LV
[\fINewPVs\fP]
.B lvconvert --replace
\fIOldPV\fP
.I LV
.IR VG / LV
[\fINewPV\fP]
.B lvconvert
@@ -669,7 +668,7 @@ listed with either option.
.B --replace
\fIOldPV2\fP
...
.I LV
.IR VG / LV
[\fINewPVs\fP]
New devices require synchronization with existing devices, see
@@ -685,18 +684,18 @@ in the RAID LV operating in degraded mode until it is reactivated. Use
the lvchange command to refresh an LV:
.B lvchange --refresh
.I LV
.IR VG / LV
.nf
# lvs -o name,vgname,segtype,attr,size vg
LV VG Type Attr LSize
lv vg raid1 Rwi-a-r-r- 100.00g
LV VG Type Attr LSize
raid1 vg raid1 Rwi-a-r-r- 100.00g
# lvchange --refresh vg/lv
# lvchange --refresh vg/raid1
# lvs -o name,vgname,segtype,attr,size vg
LV VG Type Attr LSize
lv vg raid1 Rwi-a-r--- 100.00g
LV VG Type Attr LSize
raid1 vg raid1 Rwi-a-r--- 100.00g
.fi
.SS Automatic repair
@@ -727,7 +726,7 @@ synchronization is started.
The specific command run by dmeventd to warn or repair is:
.br
.B lvconvert --repair --use-policies
.I LV
.IR VG / LV
.SS Corrupted Data
@@ -742,9 +741,8 @@ This should be rare, and can be detected (see \fBScrubbing\fP).
If specific PVs in a RAID LV are known to have corrupt data, the data on
those PVs can be reconstructed with:
.B lvchange --rebuild
.I PV
.I LV
.B lvchange --rebuild PV
.IR VG / LV
The rebuild option can be repeated with different PVs to replace the data
on multiple PVs.
@@ -790,8 +788,8 @@ while all devices are still written to.
.B lvchange
.BR -- [ raid ] writemostly
\fIPV\fP[\fB:y\fP|\fBn\fP|\fBt\fP]
.I LV
.BR \fIPhysicalVolume [ : { y | n | t }]
.IR VG / LV
The specified device will be marked as "write mostly", which means that
reading from this device will be avoided, and other devices will be
@@ -817,8 +815,8 @@ will not complete until writes to all the mirror images are complete.
.B lvchange
.BR -- [ raid ] writebehind
.I Number
.I LV
.IR IOCount
.IR VG / LV
To report the current write behind setting, run:
@@ -837,7 +835,7 @@ using lvconvert and specifying the new RAID level as the LV type:
.B lvconvert --type
.I RaidLevel
.I LV
\fIVG\fP/\fILV\fP
[\fIPVs\fP]
The most common and recommended RAID takeover conversions are:
@@ -907,8 +905,6 @@ between raid4 and raid5.
between raid4/raid5 and raid6.
.IP \(bu 3
between striped/raid0 and raid10.
.IP \(bu 3
between striped and raid4.
.SS Examples
@@ -939,6 +935,8 @@ between striped and raid4.
[lv_mimage_0] linear 100.00g
[lv_mimage_1] linear 100.00g
[lv_mlog] linear 3.00m
.IP \(bu 3
between striped and raid4.
.SS Examples
@@ -1125,35 +1123,31 @@ For performance reasons the raid6_nr RaidLV can be restriped.
Convert it from 3-way striped to 5-way-striped.
.nf
# lvconvert --stripes 5 vg/lv
# lvconvert --stripes 5 -y vg/lv
Using default stripesize 64.00 KiB.
WARNING: Adding stripes to active logical volume vg/lv will \\
grow it from 99 to 165 extents!
Run "lvresize -l99 vg/lv" to shrink it or use the additional \\
capacity.
WARNING: Adding stripes to active logical volume vg/lv will grow it from 99 to 165 extents!
Run "lvresize -l99 vg/lv" to shrink it or use the additional capacity.
Logical volume vg/lv successfully converted.
# lvs vg/lv
LV VG Attr LSize Cpy%Sync
lv vg rwi-a-r-s- 652.00m 52.94
# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root fedora -wi-ao---- 15.00g
swap fedora -wi-ao---- 3.99g
lv vg rwi-a-r-s- 652.00m 52.94
# lvs -a -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
LV Attr Type PE Ranges DOff
lv rwi-a-r--- raid6_nr lv_rimage_0:0-33 \\
lv_rimage_1:0-33 \\
lv_rimage_2:0-33 ... \\
lv_rimage_5:0-33 \\
lv_rimage_6:0-33 0
[lv_rimage_0] iwi-aor--- linear /dev/sda:0-32 0
LV Attr Type PE Ranges DOff
lv rwi-a-r--- raid6_nr lv_rimage_0:0-33 lv_rimage_1:0-33 lv_rimage_2:0-33 ... lv_rimage_5:0-33 lv_rimage_6:0-33 0
[lv_rimage_0] iwi-aor--- linear /dev/sda:0-32 0
[lv_rimage_0] iwi-aor--- linear /dev/sda:34-34
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:0-32 0
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:0-32 0
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:34-34
[lv_rimage_2] iwi-aor--- linear /dev/sdab:0-32 0
[lv_rimage_2] iwi-aor--- linear /dev/sdab:0-32 0
[lv_rimage_2] iwi-aor--- linear /dev/sdab:34-34
[lv_rimage_3] iwi-aor--- linear /dev/sdac:1-34 0
[lv_rimage_4] iwi-aor--- linear /dev/sdad:1-34 0
[lv_rimage_5] iwi-aor--- linear /dev/sdae:1-34 0
[lv_rimage_6] iwi-aor--- linear /dev/sdaf:1-34 0
[lv_rimage_3] iwi-aor--- linear /dev/sdac:1-34 0
[lv_rimage_4] iwi-aor--- linear /dev/sdad:1-34 0
[lv_rimage_5] iwi-aor--- linear /dev/sdae:1-34 0
[lv_rimage_6] iwi-aor--- linear /dev/sdaf:1-34 0
[lv_rmeta_0] ewi-aor--- linear /dev/sda:33-33
[lv_rmeta_1] ewi-aor--- linear /dev/sdaa:33-33
[lv_rmeta_2] ewi-aor--- linear /dev/sdab:33-33
@@ -1169,35 +1163,28 @@ The force option needs to be used, because removing stripes
(i.e. image SubLVs) from a RaidLV will shrink its size.
.nf
# lvconvert --stripes 4 vg/lv
# lvconvert --stripes 4 --force -y vg/lv
Using default stripesize 64.00 KiB.
WARNING: Removing stripes from active logical volume vg/lv will \\
shrink it from 660.00 MiB to 528.00 MiB!
WARNING: Removing stripes from active logical volume vg/lv will shrink it from 660.00 MiB to 528.00 MiB!
THIS MAY DESTROY (PARTS OF) YOUR DATA!
If that leaves the logical volume larger than 206 extents due \\
to stripe rounding,
If that leaves the logical volume larger than 206 extents due to stripe rounding,
you may want to grow the content afterwards (filesystem etc.)
WARNING: to remove freed stripes after the conversion has finished,\\
you have to run "lvconvert --stripes 4 vg/lv"
WARNING: too remove freed stripes after the conversion has finished, you have to run "lvconvert --stripes 4 vg/lv"
Logical volume vg/lv successfully converted.
# lvs -a -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
LV Attr Type PE Ranges DOff
lv rwi-a-r-s- raid6_nr lv_rimage_0:0-33 \\
lv_rimage_1:0-33 \\
lv_rimage_2:0-33 ... \\
lv_rimage_5:0-33 \\
lv_rimage_6:0-33 0
[lv_rimage_0] Iwi-aor--- linear /dev/sda:0-32 0
LV Attr Type PE Ranges DOff
lv rwi-a-r-s- raid6_nr lv_rimage_0:0-33 lv_rimage_1:0-33 lv_rimage_2:0-33 ... lv_rimage_5:0-33 lv_rimage_6:0-33 0
[lv_rimage_0] Iwi-aor--- linear /dev/sda:0-32 0
[lv_rimage_0] Iwi-aor--- linear /dev/sda:34-34
[lv_rimage_1] Iwi-aor--- linear /dev/sdaa:0-32 0
[lv_rimage_1] Iwi-aor--- linear /dev/sdaa:0-32 0
[lv_rimage_1] Iwi-aor--- linear /dev/sdaa:34-34
[lv_rimage_2] Iwi-aor--- linear /dev/sdab:0-32 0
[lv_rimage_2] Iwi-aor--- linear /dev/sdab:0-32 0
[lv_rimage_2] Iwi-aor--- linear /dev/sdab:34-34
[lv_rimage_3] Iwi-aor--- linear /dev/sdac:1-34 0
[lv_rimage_4] Iwi-aor--- linear /dev/sdad:1-34 0
[lv_rimage_5] Iwi-aor--- linear /dev/sdae:1-34 0
[lv_rimage_6] Iwi-aor-R- linear /dev/sdaf:1-34 0
[lv_rimage_3] Iwi-aor--- linear /dev/sdac:1-34 0
[lv_rimage_4] Iwi-aor--- linear /dev/sdad:1-34 0
[lv_rimage_5] Iwi-aor--- linear /dev/sdae:1-34 0
[lv_rimage_6] Iwi-aor-R- linear /dev/sdaf:1-34 0
[lv_rmeta_0] ewi-aor--- linear /dev/sda:33-33
[lv_rmeta_1] ewi-aor--- linear /dev/sdaa:33-33
[lv_rmeta_2] ewi-aor--- linear /dev/sdab:33-33
@@ -1212,48 +1199,37 @@ The 'R' in the same column of the attribute field shows the freed image Sub LVs
.nf
# lvs -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
LV Attr Type PE Ranges DOff
lv rwi-a-r-R- raid6_nr lv_rimage_0:0-33 \\
lv_rimage_1:0-33 \\
lv_rimage_2:0-33 ... \\
lv_rimage_5:0-33 \\
lv_rimage_6:0-33 8192
LV Attr Type PE Ranges DOff
lv rwi-a-r-R- raid6_nr lv_rimage_0:0-33 lv_rimage_1:0-33 lv_rimage_2:0-33 ... lv_rimage_5:0-33 lv_rimage_6:0-33 8192
.fi
Now that the reshape is finished the 'R' atribute on the RaidLV shows images can be removed.
.nf
# lvs -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
LV Attr Type PE Ranges DOff
lv rwi-a-r-R- raid6_nr lv_rimage_0:0-33 \\
lv_rimage_1:0-33 \\
lv_rimage_2:0-33 ... \\
lv_rimage_5:0-33 \\
lv_rimage_6:0-33 8192
LV Attr Type PE Ranges DOff
lv rwi-a-r-R- raid6_nr lv_rimage_0:0-33 lv_rimage_1:0-33 lv_rimage_2:0-33 ... lv_rimage_5:0-33 lv_rimage_6:0-33 8192
.fi
This is achieved by repeating the command ("lvconvert --stripes 4 vg/lv" would be sufficient).
.nf
# lvconvert --stripes 4 vg/lv
# lvconvert --stripes 4 --force -y vg/lv
Using default stripesize 64.00 KiB.
Logical volume vg/lv successfully converted.
# lvs -a -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
LV Attr Type PE Ranges DOff
lv rwi-a-r--- raid6_nr lv_rimage_0:0-33 \\
lv_rimage_1:0-33 \\
lv_rimage_2:0-33 ... \\
lv_rimage_5:0-33 8192
[lv_rimage_0] iwi-aor--- linear /dev/sda:0-32 8192
LV Attr Type PE Ranges DOff
lv rwi-a-r--- raid6_nr lv_rimage_0:0-33 lv_rimage_1:0-33 lv_rimage_2:0-33 ... lv_rimage_5:0-33 8192
[lv_rimage_0] iwi-aor--- linear /dev/sda:0-32 8192
[lv_rimage_0] iwi-aor--- linear /dev/sda:34-34
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:0-32 8192
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:0-32 8192
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:34-34
[lv_rimage_2] iwi-aor--- linear /dev/sdab:0-32 8192
[lv_rimage_2] iwi-aor--- linear /dev/sdab:0-32 8192
[lv_rimage_2] iwi-aor--- linear /dev/sdab:34-34
[lv_rimage_3] iwi-aor--- linear /dev/sdac:1-34 8192
[lv_rimage_4] iwi-aor--- linear /dev/sdad:1-34 8192
[lv_rimage_5] iwi-aor--- linear /dev/sdae:1-34 8192
[lv_rimage_3] iwi-aor--- linear /dev/sdac:1-34 8192
[lv_rimage_4] iwi-aor--- linear /dev/sdad:1-34 8192
[lv_rimage_5] iwi-aor--- linear /dev/sdae:1-34 8192
[lv_rmeta_0] ewi-aor--- linear /dev/sda:33-33
[lv_rmeta_1] ewi-aor--- linear /dev/sdaa:33-33
[lv_rmeta_2] ewi-aor--- linear /dev/sdab:33-33
@@ -1324,72 +1300,54 @@ In case the RaidLV should be converted to striped:
raid6_ra_6
raid6_rs_6
raid6_n_6
.fi
A direct conversion isn't possible; thus the command informed about the possible ones.
raid6_n_6 is suitable to convert to striped so convert to it first (this is a reshape
changing the raid6 layout from raid6_nr to raid6_n_6).
.nf
# lvconvert --type raid6_n_6
Using default stripesize 64.00 KiB.
Converting raid6_nr LV vg/lv to raid6_n_6.
Are you sure you want to convert raid6_nr LV vg/lv? [y/n]: y
Logical volume vg/lv successfully converted.
.fi
Wait for the reshape to finish.
.nf
# lvconvert --type striped vg/lv
# lvconvert -y --type striped vg/lv
Logical volume vg/lv successfully converted.
# lvs -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
LV Attr Type PE Ranges DOff
lv -wi-a----- striped /dev/sda:2-32 \\
/dev/sdaa:2-32 \\
/dev/sdab:2-32 \\
/dev/sdac:3-33
lv -wi-a----- striped /dev/sda:34-35 \\
/dev/sdaa:34-35 \\
/dev/sdab:34-35 \\
/dev/sdac:34-35
[root@vm46 ~]# lvs -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
LV Attr Type PE Ranges DOff
lv -wi-a----- striped /dev/sda:2-32 /dev/sdaa:2-32 /dev/sdab:2-32 /dev/sdac:3-33
lv -wi-a----- striped /dev/sda:34-35 /dev/sdaa:34-35 /dev/sdab:34-35 /dev/sdac:34-35
.fi
From striped we can convert to raid10
.nf
# lvconvert --type raid10 vg/lv
# lvconvert -y --type raid10 vg/lv
Using default stripesize 64.00 KiB.
Logical volume vg/lv successfully converted.
# lvs -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
LV Attr Type PE Ranges DOff
lv rwi-a-r--- raid10 lv_rimage_0:0-32 \\
lv_rimage_4:0-32 \\
lv_rimage_1:0-32 ... \\
lv_rimage_3:0-32 \\
lv_rimage_7:0-32 0
LV Attr Type PE Ranges DOff
lv rwi-a-r--- raid10 lv_rimage_0:0-32 lv_rimage_4:0-32 lv_rimage_1:0-32 ... lv_rimage_3:0-32 lv_rimage_7:0-32 0
# lvs -a -o lv_name,attr,segtype,seg_pe_ranges,dataoffset vg
WARNING: Cannot find matching striped segment for vg/lv_rimage_3.
LV Attr Type PE Ranges DOff
lv rwi-a-r--- raid10 lv_rimage_0:0-32 \\
lv_rimage_4:0-32 \\
lv_rimage_1:0-32 ... \\
lv_rimage_3:0-32 \\
lv_rimage_7:0-32 0
[lv_rimage_0] iwi-aor--- linear /dev/sda:2-32 0
LV Attr Type PE Ranges DOff
lv rwi-a-r--- raid10 lv_rimage_0:0-32 lv_rimage_4:0-32 lv_rimage_1:0-32 ... lv_rimage_3:0-32 lv_rimage_7:0-32 0
[lv_rimage_0] iwi-aor--- linear /dev/sda:2-32 0
[lv_rimage_0] iwi-aor--- linear /dev/sda:34-35
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:2-32 0
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:2-32 0
[lv_rimage_1] iwi-aor--- linear /dev/sdaa:34-35
[lv_rimage_2] iwi-aor--- linear /dev/sdab:2-32 0
[lv_rimage_2] iwi-aor--- linear /dev/sdab:2-32 0
[lv_rimage_2] iwi-aor--- linear /dev/sdab:34-35
[lv_rimage_3] iwi-XXr--- linear /dev/sdac:3-35 0
[lv_rimage_4] iwi-aor--- linear /dev/sdad:1-33 0
[lv_rimage_5] iwi-aor--- linear /dev/sdae:1-33 0
[lv_rimage_6] iwi-aor--- linear /dev/sdaf:1-33 0
[lv_rimage_7] iwi-aor--- linear /dev/sdag:1-33 0
[lv_rimage_3] iwi-XXr--- linear /dev/sdac:3-35 0
[lv_rimage_4] iwi-aor--- linear /dev/sdad:1-33 0
[lv_rimage_5] iwi-aor--- linear /dev/sdae:1-33 0
[lv_rimage_6] iwi-aor--- linear /dev/sdaf:1-33 0
[lv_rimage_7] iwi-aor--- linear /dev/sdag:1-33 0
[lv_rmeta_0] ewi-aor--- linear /dev/sda:0-0
[lv_rmeta_1] ewi-aor--- linear /dev/sdaa:0-0
[lv_rmeta_2] ewi-aor--- linear /dev/sdab:0-0
@@ -1410,27 +1368,24 @@ by restripe (4 steps).
We start with the linear LV.
.nf
# lvs -a -o name,size,segtype,syncpercent,datastripes,\\
stripesize,reshapelenle,devices vg
LV LSize Type Cpy%Sync #DStr Stripe RSize Devices
lv 128.00m linear 1 0 /dev/sda(0)
# lvs -aoname,attr,size,segtype,syncpercent,datastripes,stripesize,reshapelenle,devices vg
LV Attr LSize Type Cpy%Sync #DStr Stripe RSize Devices
lv -wi-a----- 128.00m linear 1 0 /dev/sda(0)
.fi
Then convert it to a 2-way raid1.
.nf
# lvconvert --mirrors 1 vg/lv
# lvconvert -m1 vg/lv
Logical volume vg/lv successfully converted.
# lvs -a -o name,size,segtype,datastripes,\\
stripesize,reshapelenle,devices vg
LV LSize Type #DStr Stripe RSize Devices
lv 128.00m raid1 2 0 lv_rimage_0(0),\\
lv_rimage_1(0)
[lv_rimage_0] 128.00m linear 1 0 /dev/sda(0)
[lv_rimage_1] 128.00m linear 1 0 /dev/sdhx(1)
[lv_rmeta_0] 4.00m linear 1 0 /dev/sda(32)
[lv_rmeta_1] 4.00m linear 1 0 /dev/sdhx(0)
# lvs -aoname,attr,size,segtype,syncpercent,datastripes,stripesize,reshapelenle,devices vg
LV Attr LSize Type Cpy%Sync #DStr Stripe RSize Devices
lv rwi-a-r--- 128.00m raid1 100.00 2 0 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] iwi-aor--- 128.00m linear 1 0 /dev/sda(0)
[lv_rimage_1] iwi-aor--- 128.00m linear 1 0 /dev/sdhx(1)
[lv_rmeta_0] ewi-aor--- 4.00m linear 1 0 /dev/sda(32)
[lv_rmeta_1] ewi-aor--- 4.00m linear 1 0 /dev/sdhx(0)
.fi
Once the raid1 LV is fully synchronized we convert it to raid5_n (only 2-way raid1
@@ -1439,19 +1394,17 @@ SubLVs at the end and can be converted to striped directly without any additiona
conversion.
.nf
# lvconvert --type raid5_n vg/lv
# lvconvert -y --ty raid5_n vg/lv
Using default stripesize 64.00 KiB.
Logical volume vg/lv successfully converted.
# lvs -a -o name,size,segtype,syncpercent,datastripes,\\
stripesize,reshapelenle,devices vg
LV LSize Type #DStr Stripe RSize Devices
lv 128.00m raid5_n 1 64.00k 0 lv_rimage_0(0),\\
lv_rimage_1(0)
[lv_rimage_0] 128.00m linear 1 0 0 /dev/sda(0)
[lv_rimage_1] 128.00m linear 1 0 0 /dev/sdhx(1)
[lv_rmeta_0] 4.00m linear 1 0 /dev/sda(32)
[lv_rmeta_1] 4.00m linear 1 0 /dev/sdhx(0)
# lvs -aoname,attr,size,segtype,syncpercent,datastripes,stripesize,reshapelenle,devices vg
LV Attr LSize Type Cpy%Sync #DStr Stripe RSize Devices
lv rwi-a-r--- 128.00m raid5_n 100.00 1 64.00k 0 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] iwi-aor--- 128.00m linear 1 0 0 /dev/sda(0)
[lv_rimage_1] iwi-aor--- 128.00m linear 1 0 0 /dev/sdhx(1)
[lv_rmeta_0] ewi-aor--- 4.00m linear 1 0 /dev/sda(32)
[lv_rmeta_1] ewi-aor--- 4.00m linear 1 0 /dev/sdhx(0)
.fi
Now we'll change the number of data stripes from 1 to 5 and request 128K stripe size
@@ -1460,61 +1413,45 @@ to the one given). That additional space can be used by e.g. growing any contain
or the LV can be reduced in size after the reshaping conversion has finished.
.nf
# lvconvert --stripesize 128k --stripes 5 vg/lv
# lvconvert --yes --stripesize 128k --stripes 5 vg/lv
Converting stripesize 64.00 KiB of raid5_n LV vg/lv to 128.00 KiB.
WARNING: Adding stripes to active logical volume vg/lv will grow \\
it from 32 to 160 extents!
WARNING: Adding stripes to active logical volume vg/lv will grow it from 32 to 160 extents!
Run "lvresize -l32 vg/lv" to shrink it or use the additional capacity.
Logical volume vg/lv successfully converted.
# lvs -a -o name,size,segtype,datastripes,\\
stripesize,reshapelenle,devices
LV LSize Type #DStr Stripe RSize Devices
lv 640.00m raid5_n 5 128.00k 6 lv_rimage_0(0),\\
lv_rimage_1(0),\\
lv_rimage_2(0),\\
lv_rimage_3(0),\\
lv_rimage_4(0),\\
lv_rimage_5(0)
[lv_rimage_0] 132.00m linear 1 0 1 /dev/sda(33)
[lv_rimage_0] 132.00m linear 1 0 /dev/sda(0)
[lv_rimage_1] 132.00m linear 1 0 1 /dev/sdhx(33)
[lv_rimage_1] 132.00m linear 1 0 /dev/sdhx(1)
[lv_rimage_2] 132.00m linear 1 0 1 /dev/sdhw(33)
[lv_rimage_2] 132.00m linear 1 0 /dev/sdhw(1)
[lv_rimage_3] 132.00m linear 1 0 1 /dev/sdhv(33)
[lv_rimage_3] 132.00m linear 1 0 /dev/sdhv(1)
[lv_rimage_4] 132.00m linear 1 0 1 /dev/sdhu(33)
[lv_rimage_4] 132.00m linear 1 0 /dev/sdhu(1)
[lv_rimage_5] 132.00m linear 1 0 1 /dev/sdht(33)
[lv_rimage_5] 132.00m linear 1 0 /dev/sdht(1)
[lv_rmeta_0] 4.00m linear 1 0 /dev/sda(32)
[lv_rmeta_1] 4.00m linear 1 0 /dev/sdhx(0)
[lv_rmeta_2] 4.00m linear 1 0 /dev/sdhw(0)
[lv_rmeta_3] 4.00m linear 1 0 /dev/sdhv(0)
[lv_rmeta_4] 4.00m linear 1 0 /dev/sdhu(0)
[lv_rmeta_5] 4.00m linear 1 0 /dev/sdht(0)
# lvs -aoname,attr,size,segtype,syncpercent,datastripes,stripesize,reshapelenle,devices vg
LV Attr LSize Type Cpy%Sync #DStr Stripe RSize Devices
lv rwi-a-r--- 640.00m raid5_n 100.00 5 128.00k 6 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0),lv_rimage_3(0),lv_rimage_4(0),lv_rimage_5(0)
[lv_rimage_0] iwi-aor--- 132.00m linear 1 0 1 /dev/sda(33)
[lv_rimage_0] iwi-aor--- 132.00m linear 1 0 /dev/sda(0)
[lv_rimage_1] iwi-aor--- 132.00m linear 1 0 1 /dev/sdhx(33)
[lv_rimage_1] iwi-aor--- 132.00m linear 1 0 /dev/sdhx(1)
[lv_rimage_2] iwi-aor--- 132.00m linear 1 0 1 /dev/sdhw(33)
[lv_rimage_2] iwi-aor--- 132.00m linear 1 0 /dev/sdhw(1)
[lv_rimage_3] iwi-aor--- 132.00m linear 1 0 1 /dev/sdhv(33)
[lv_rimage_3] iwi-aor--- 132.00m linear 1 0 /dev/sdhv(1)
[lv_rimage_4] iwi-aor--- 132.00m linear 1 0 1 /dev/sdhu(33)
[lv_rimage_4] iwi-aor--- 132.00m linear 1 0 /dev/sdhu(1)
[lv_rimage_5] iwi-aor--- 132.00m linear 1 0 1 /dev/sdht(33)
[lv_rimage_5] iwi-aor--- 132.00m linear 1 0 /dev/sdht(1)
[lv_rmeta_0] ewi-aor--- 4.00m linear 1 0 /dev/sda(32)
[lv_rmeta_1] ewi-aor--- 4.00m linear 1 0 /dev/sdhx(0)
[lv_rmeta_2] ewi-aor--- 4.00m linear 1 0 /dev/sdhw(0)
[lv_rmeta_3] ewi-aor--- 4.00m linear 1 0 /dev/sdhv(0)
[lv_rmeta_4] ewi-aor--- 4.00m linear 1 0 /dev/sdhu(0)
[lv_rmeta_5] ewi-aor--- 4.00m linear 1 0 /dev/sdht(0)
.fi
Once the conversion has finished we can convert to striped.
.nf
# lvconvert --type striped vg/lv
[root@vm46 ~]# lvconvert -y --ty striped vg/lv
Logical volume vg/lv successfully converted.
# lvs -a -o name,size,segtype,datastripes,\\
stripesize,reshapelenle,devices vg
LV LSize Type #DStr Stripe RSize Devices
lv 640.00m striped 5 128.00k /dev/sda(33),\\
/dev/sdhx(33),\\
/dev/sdhw(33),\\
/dev/sdhv(33),\\
/dev/sdhu(33)
lv 640.00m striped 5 128.00k /dev/sda(0),\\
/dev/sdhx(1),\\
/dev/sdhw(1),\\
/dev/sdhv(1),\\
/dev/sdhu(1)
[root@vm46 ~]# lvs -aoname,attr,size,segtype,syncpercent,datastripes,stripesize,reshapelenle,devices vg|sed 's/ *$//'
LV Attr LSize Type Cpy%Sync #DStr Stripe RSize Devices
lv -wi-a----- 640.00m striped 5 128.00k /dev/sda(33),/dev/sdhx(33),/dev/sdhw(33),/dev/sdhv(33),/dev/sdhu(33)
lv -wi-a----- 640.00m striped 5 128.00k /dev/sda(0),/dev/sdhx(1),/dev/sdhw(1),/dev/sdhv(1),/dev/sdhu(1)
.fi
Reversing these steps will convert a given striped LV to linear.
@@ -1571,7 +1508,6 @@ RAID5 parity n
.br
\[bu]
Dedicated parity device n used for striped/raid0 conversions
.br
\[bu]
Used for RAID Takeover
@@ -1620,7 +1556,6 @@ RAID6 last parity devices
\[bu]
Fixed dedicated last devices (P-Syndrome N-1 and Q-Syndrome N)
with striped data used for striped/raid0 conversions
.br
\[bu]
Used for RAID Takeover
@@ -1704,7 +1639,7 @@ The command to start duplication is:
[\fB--stripes\fP \fINumber\fP \fB--stripesize\fP \fISize\fP]
.RS
.B --duplicate
.I LV
.IR VG / LV
[\fIPVs\fP]
.RE
@@ -1754,7 +1689,7 @@ the new devices, specify the name of SubLV 0 (suffix _dup_0):
.B lvconvert --unduplicate
.BI --name
.IB LV _dup_0
.I LV
.IR VG / LV
To make the RAID LV use the data copy on the new devices, and drop the old
devices, specify the name of SubLV 1 (suffix _dup_1):
@@ -1762,7 +1697,7 @@ devices, specify the name of SubLV 1 (suffix _dup_1):
.B lvconvert --unduplicate
.BI --name
.IB LV _dup_1
.I LV
.IR VG / LV
FIXME: To make the LV use the data on the original devices, but keep the
data copy as a new LV, ...

View File

@@ -27,7 +27,7 @@ function _test_regionsize
local vg=$4
local lv=$5
lvconvert --type $type --yes -R $regionsize $vg/$lv
lvconvert --yes -R $regionsize $vg/$lv
[ $? -ne 0 ] && return 1
check lv_field $vg/$lv regionsize "$regionsize_str"
fsck -fn "$DM_DEV_DIR/$vg/$lv"

View File

@@ -2658,15 +2658,10 @@ static int _lvconvert_swap_pool_metadata(struct cmd_context *cmd,
* Create a new pool LV, using the lv arg as the data sub LV.
* The metadata sub LV is either a new LV created here, or an
* existing LV specified by --poolmetadata.
*
* process_single_lv is the LV currently being processed by
* process_each_lv(). It will sometimes be the same as the
* lv arg, and sometimes not.
*/
static int _lvconvert_to_pool(struct cmd_context *cmd,
struct logical_volume *lv,
struct logical_volume *process_single_lv,
int to_thinpool,
int to_cachepool,
struct dm_list *use_pvh)
@@ -2755,11 +2750,6 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
*/
if ((pool_metadata_name = arg_str_value(cmd, poolmetadata_ARG, NULL))) {
if (!validate_lvname_param(cmd, &vg->name, &pool_metadata_name)) {
log_error("Metadata LV %s not found.", pool_metadata_name);
return 0;
}
if (!(metadata_lv = find_lv(vg, pool_metadata_name))) {
log_error("Unknown pool metadata LV %s.", pool_metadata_name);
return 0;
@@ -2778,12 +2768,6 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
return 0;
}
if (metadata_lv == process_single_lv) {
log_error("Use a different LV for pool metadata %s.",
display_lvname(metadata_lv));
return 0;
}
if (!lv_is_visible(metadata_lv)) {
log_error("Can't convert internal LV %s.",
display_lvname(metadata_lv));
@@ -3776,7 +3760,7 @@ static int _lvconvert_to_pool_single(struct cmd_context *cmd,
} else
use_pvh = &lv->vg->pvs;
if (!_lvconvert_to_pool(cmd, lv, lv, to_thinpool, to_cachepool, use_pvh))
if (!_lvconvert_to_pool(cmd, lv, to_thinpool, to_cachepool, use_pvh))
return_ECMD_FAILED;
return ECMD_PROCESSED;
@@ -3826,13 +3810,7 @@ static int _lvconvert_to_cache_vol_single(struct cmd_context *cmd,
goto out;
}
if (cachepool_lv == lv) {
log_error("Use a different LV for cache pool LV and cache LV %s.",
display_lvname(cachepool_lv));
goto out;
}
if (!_lvconvert_to_pool(cmd, cachepool_lv, lv, 0, 1, &vg->pvs)) {
if (!_lvconvert_to_pool(cmd, cachepool_lv, 0, 1, &vg->pvs)) {
log_error("LV %s could not be converted to a cache pool.",
display_lvname(cachepool_lv));
goto out;
@@ -3930,13 +3908,7 @@ static int _lvconvert_to_thin_with_external_single(struct cmd_context *cmd,
goto out;
}
if (thinpool_lv == lv) {
log_error("Use a different LV for thin pool LV and thin LV %s.",
display_lvname(thinpool_lv));
goto out;
}
if (!_lvconvert_to_pool(cmd, thinpool_lv, lv, 1, 0, &vg->pvs)) {
if (!_lvconvert_to_pool(cmd, thinpool_lv, 1, 0, &vg->pvs)) {
log_error("LV %s could not be converted to a thin pool.",
display_lvname(thinpool_lv));
goto out;
@@ -4089,7 +4061,7 @@ static int _lvconvert_to_pool_or_swap_metadata_single(struct cmd_context *cmd,
return _lvconvert_swap_pool_metadata_single(cmd, lv, handle);
}
if (!_lvconvert_to_pool(cmd, lv, lv, to_thinpool, to_cachepool, use_pvh))
if (!_lvconvert_to_pool(cmd, lv, to_thinpool, to_cachepool, use_pvh))
return_ECMD_FAILED;
return ECMD_PROCESSED;

View File

@@ -1338,25 +1338,6 @@ static int _opt_synonym_is_set(struct cmd_context *cmd, int opt_std)
return opt_syn && arg_is_set(cmd, opt_syn);
}
static int _command_optional_opt_matches(struct cmd_context *cmd, int ci, int oo)
{
int opt_enum = commands[ci].optional_opt_args[oo].opt;
if (val_bit_is_set(commands[ci].optional_opt_args[oo].def.val_bits, conststr_VAL)) {
if (!strcmp(commands[ci].optional_opt_args[oo].def.str, arg_str_value(cmd, opt_enum, "")))
return 1;
return 0;
}
if (val_bit_is_set(commands[ci].optional_opt_args[oo].def.val_bits, constnum_VAL)) {
if (commands[ci].optional_opt_args[oo].def.num == arg_int_value(cmd, opt_enum, 0))
return 1;
return 0;
}
return 1;
}
static int _command_ignore_opt_matches(struct cmd_context *cmd, int ci, int io)
{
int opt_enum = commands[ci].ignore_opt_args[io].opt;
@@ -1644,8 +1625,7 @@ static struct command *_find_command(struct cmd_context *cmd, const char *path,
continue;
for (j = 0; j < commands[i].oo_count; j++) {
if ((commands[i].optional_opt_args[j].opt == opt_enum) &&
_command_optional_opt_matches(cmd, i, j)) {
if (commands[i].optional_opt_args[j].opt == opt_enum) {
accepted = 1;
break;
}
@@ -1715,13 +1695,8 @@ static struct command *_find_command(struct cmd_context *cmd, const char *path,
if (best_unused_count) {
for (i = 0; i < best_unused_count; i++) {
const char *opt_val = NULL;
opt_enum = best_unused_options[i];
opt_val = arg_value(cmd, opt_enum);
log_error("Invalid option for command: %s%s%s.",
arg_long_option_name(opt_enum),
opt_val ? " " : "", opt_val ?: "");
log_error("Invalid option for command: %s.",
arg_long_option_name(best_unused_options[i]));
}
return NULL;
}