Mirror of git://sourceware.org/git/lvm2.git (synced 2026-01-08 16:32:48 +03:00)

Compare commits: 23 commits, dev-dct-wr ... v2_03_11
Commit SHA1s in this comparison:
3e8bd8d1bd
e9503f257a
b84a9927b7
23ef677762
5dbe2fdd9d
9fe7aba251
57594fe673
47608ff49b
7691213a91
9b3458d5a9
a2affffed5
205fb35b50
10a095a58b
b68141a49d
9c0253d930
aba9652e58
1cc75317f9
5fef89361d
2317ba3934
d7058cfa98
8801a86a3e
6316959438
125da10d47
@@ -1 +1 @@
1.02.175-git (2020-08-09)
1.02.175 (2021-01-08)
@@ -1,5 +1,7 @@
Version 2.03.11 -
==================================
Version 2.03.11 - 08th January 2021
===================================
Fix pvck handling MDA at offset different from 4096.
Partial or degraded activation of writecache is not allowed.
Enhance error handling for fsadm and hanled correct fsck result.
Dmeventd lvm plugin ignores higher reserved_stack lvm.conf values.
Support using BLKZEROOUT for clearing devices.
@@ -20,6 +22,9 @@ Version 2.03.11 -
Enhance --use-policy percentage rounding.
Configure --with-vdo and --with-writecache as internal segments.
Improving VDO man page examples.
Allow pvmove of writecache origin.
Report integrity fields.
Integrity volumes defaults to journal mode.
Switch code base to use flexible array syntax.
Fix 64bit math when calculation cachevol size.
Preserve uint32_t for seqno handling.
@@ -1,5 +1,5 @@
Version 1.02.175 -
===================================
Version 1.02.175 - 08th January 2021
====================================

Version 1.02.173 - 09th August 2020
===================================

aclocal.m4 (vendored), 10 changed lines
@@ -496,12 +496,14 @@ AC_DEFUN([AM_PATH_PYTHON],
m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])])
else

dnl Query Python for its version number. Getting [:3] seems to be
dnl the best way to do this; it's what "site.py" does in the standard
dnl library.
dnl Query Python for its version number. Although site.py simply uses
dnl sys.version[:3], printing that failed with Python 3.10, since the
dnl trailing zero was eliminated. So now we output just the major
dnl and minor version numbers, as numbers. Apparently the tertiary
dnl version is not of interest.

AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version],
[am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`])
[am_cv_python_version=`$PYTHON -c "import sys; print('%u.%u' % sys.version_info[[:2]])"`])
AC_SUBST([PYTHON_VERSION], [$am_cv_python_version])

dnl Use the values of $prefix and $exec_prefix for the corresponding
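The practical difference between the two probes can be seen by running them by hand (a minimal illustration; the interpreter path is just an example):

    python3.10 -c "import sys; sys.stdout.write(sys.version[:3])"      # prints 3.1
    python3.10 -c "import sys; print('%u.%u' % sys.version_info[:2])"  # prints 3.10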
@@ -937,8 +937,7 @@ global {
# a volume group's metadata, instead of always granting the read-only
# requests immediately, delay them to allow the read-write requests to
# be serviced. Without this setting, write access may be stalled by a
# high volume of read-only requests. This option only affects
# locking_type 1 viz. local file-based locking.
# high volume of read-only requests. This option only affects file locks.
prioritise_write_locks = 1

# Configuration option global/library_dir.

configure (vendored), 2 changed lines
@@ -11962,7 +11962,7 @@ $as_echo_n "checking for $am_display_PYTHON version... " >&6; }
if ${am_cv_python_version+:} false; then :
$as_echo_n "(cached) " >&6
else
am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[:3])"`
am_cv_python_version=`$PYTHON -c "import sys; print('%u.%u' % sys.version_info[:2])"`
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_version" >&5
$as_echo "$am_cv_python_version" >&6; }
@@ -2270,21 +2270,31 @@ static int _pool_callback(struct dm_tree_node *node,
const struct pool_cb_data *data = cb_data;
const struct logical_volume *pool_lv = data->pool_lv;
const struct logical_volume *mlv = first_seg(pool_lv)->metadata_lv;
struct cmd_context *cmd = pool_lv->vg->cmd;
long buf[64 / sizeof(long)]; /* buffer for short disk header (64B) */
int args = 0;
char *mpath;
const char *argv[19] = { /* Max supported 15 args */
find_config_tree_str_allow_empty(pool_lv->vg->cmd, data->exec, NULL)
find_config_tree_str_allow_empty(cmd, data->exec, NULL)
};

if (!*argv[0]) /* *_check tool is unconfigured/disabled with "" setting */
return 1;

if (!(mpath = lv_dmpath_dup(data->dm->mem, mlv))) {
log_error("Failed to build device path for checking pool metadata %s.",
display_lvname(mlv));
return 0;
if (lv_is_cache_vol(pool_lv)) {
if (!(mpath = lv_dmpath_suffix_dup(data->dm->mem, pool_lv, "-cmeta"))) {
log_error("Failed to build device path for checking cachevol metadata %s.",
display_lvname(pool_lv));
return 0;
}
} else {
if (!(mpath = lv_dmpath_dup(data->dm->mem, mlv))) {
log_error("Failed to build device path for checking pool metadata %s.",
display_lvname(mlv));
return 0;
}
}
log_debug("Running check command on %s", mpath);

if (data->skip_zero) {
if ((fd = open(mpath, O_RDONLY)) < 0) {
@@ -2312,7 +2322,7 @@ static int _pool_callback(struct dm_tree_node *node,
}
}

if (!(cn = find_config_tree_array(mlv->vg->cmd, data->opts, NULL))) {
if (!(cn = find_config_tree_array(cmd, data->opts, NULL))) {
log_error(INTERNAL_ERROR "Unable to find configuration for pool check options.");
return 0;
}
@@ -2334,7 +2344,7 @@ static int _pool_callback(struct dm_tree_node *node,

argv[++args] = mpath;

if (!(ret = exec_cmd(pool_lv->vg->cmd, (const char * const *)argv,
if (!(ret = exec_cmd(cmd, (const char * const *)argv,
&status, 0))) {
if (status == ENOENT) {
log_warn("WARNING: Check is skipped, please install recommended missing binary %s!",
@@ -2343,7 +2353,7 @@ static int _pool_callback(struct dm_tree_node *node,
}

if ((data->version.maj || data->version.min || data->version.patch) &&
!_check_tool_version(pool_lv->vg->cmd, argv[0],
!_check_tool_version(cmd, argv[0],
data->version.maj, data->version.min, data->version.patch)) {
log_warn("WARNING: Check is skipped, please upgrade installed version of %s!",
argv[0]);
@@ -2387,10 +2397,6 @@ static int _pool_register_callback(struct dev_manager *dm,
return 1;
#endif

/* Skip for single-device cache pool */
if (lv_is_cache(lv) && lv_is_cache_vol(first_seg(lv)->pool_lv))
return 1;

if (!(data = dm_pool_zalloc(dm->mem, sizeof(*data)))) {
log_error("Failed to allocated path for callback.");
return 0;
@@ -3483,6 +3489,12 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
!_pool_register_callback(dm, dnode, lv))
return_0;

if (lv_is_cache(lv) && lv_is_cache_vol(first_seg(lv)->pool_lv) &&
/* Register callback only for layer activation or non-layered cache LV */
(layer || !lv_layer(lv)) &&
!_pool_register_callback(dm, dnode, lv))
return_0;

/*
* Update tables for ANY PVMOVE holders for active LV where the name starts with 'pvmove',
* but it's not anymore PVMOVE LV and also it's not a PVMOVE _mimage LV.
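In effect, for a cache LV whose cache sits on a cachevol rather than a cache pool, the callback above now points the configured *_check tool at the hidden "-cmeta" device-mapper node of the cachevol. A rough illustration of the resulting invocation (the device name follows the _cvol naming used by the tests later in this comparison; the exact tool and options come from lvm.conf and are only an example):

    cache_check -q /dev/mapper/vg-lv1_cache_cvol-cmeta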
@@ -982,8 +982,7 @@ cfg(global_prioritise_write_locks_CFG, "prioritise_write_locks", global_CFG_SECT
"a volume group's metadata, instead of always granting the read-only\n"
"requests immediately, delay them to allow the read-write requests to\n"
"be serviced. Without this setting, write access may be stalled by a\n"
"high volume of read-only requests. This option only affects\n"
"locking_type 1 viz. local file-based locking.\n")
"high volume of read-only requests. This option only affects file locks.\n")

cfg(global_library_dir_CFG, "library_dir", global_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(1, 0, 0), NULL, 0, NULL,
"Search this directory first for shared libraries.\n")
@@ -895,12 +895,52 @@ int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_s
return 0;
}

int lv_raid_integrity_total_mismatches(struct cmd_context *cmd,
const struct logical_volume *lv,
uint64_t *mismatches)
{
struct logical_volume *lv_image;
struct lv_segment *seg, *seg_image;
uint32_t s;
uint64_t mismatches_image;
uint64_t total = 0;
int errors = 0;

if (!lv_is_raid(lv))
return 0;

seg = first_seg(lv);

for (s = 0; s < seg->area_count; s++) {
lv_image = seg_lv(seg, s);
seg_image = first_seg(lv_image);

if (!seg_is_integrity(seg_image))
continue;

mismatches_image = 0;

if (!lv_integrity_mismatches(cmd, lv_image, &mismatches_image))
errors++;

total += mismatches_image;
}
*mismatches = total;

if (errors)
return 0;
return 1;
}

int lv_integrity_mismatches(struct cmd_context *cmd,
const struct logical_volume *lv,
uint64_t *mismatches)
{
struct lv_with_info_and_seg_status status;

if (lv_is_raid(lv) && lv_raid_has_integrity((struct logical_volume *)lv))
return lv_raid_integrity_total_mismatches(cmd, lv, mismatches);

if (!lv_is_integrity(lv))
return_0;
@@ -1034,6 +1034,37 @@ char *lv_dmpath_dup(struct dm_pool *mem, const struct logical_volume *lv)
return repstr;
}

/* maybe factor a common function with lv_dmpath_dup */
char *lv_dmpath_suffix_dup(struct dm_pool *mem, const struct logical_volume *lv,
const char *suffix)
{
char *name;
char *repstr;
size_t len;

if (!*lv->vg->name)
return dm_pool_strdup(mem, "");

if (!(name = dm_build_dm_name(mem, lv->vg->name, lv->name, NULL))) {
log_error("dm_build_dm_name failed");
return NULL;
}

len = strlen(dm_dir()) + strlen(name) + strlen(suffix) + 2;

if (!(repstr = dm_pool_zalloc(mem, len))) {
log_error("dm_pool_alloc failed");
return NULL;
}

if (dm_snprintf(repstr, len, "%s/%s%s", dm_dir(), name, suffix) < 0) {
log_error("lv_dmpath snprintf failed");
return NULL;
}

return repstr;
}

char *lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return id_format_and_copy(mem ? mem : lv->vg->vgmem, &lv->lvid.id[1]);
@@ -194,6 +194,9 @@ char *lv_lock_args_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lvseg_kernel_discards_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_with_info_and_seg_status *lvdm);
char *lv_time_dup(struct dm_pool *mem, const struct logical_volume *lv, int iso_mode);

char *lv_dmpath_suffix_dup(struct dm_pool *mem, const struct logical_volume *lv,
const char *suffix);

typedef enum {
PERCENT_GET_DATA = 0,
PERCENT_GET_METADATA,
@@ -1433,5 +1433,6 @@ int lv_extend_integrity_in_raid(struct logical_volume *lv, struct dm_list *pvh);
int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_settings **isettings);
int integrity_mode_set(const char *mode, struct integrity_settings *settings);
int lv_integrity_mismatches(struct cmd_context *cmd, const struct logical_volume *lv, uint64_t *mismatches);
int lv_raid_integrity_total_mismatches(struct cmd_context *cmd, const struct logical_volume *lv, uint64_t *mismatches);

#endif
@@ -1916,6 +1916,10 @@ static int _lv_each_dependency(struct logical_volume *lv,
return_0;
if (lvseg->metadata_lv && !fn(lvseg->metadata_lv, data))
return_0;
if (lvseg->writecache && !fn(lvseg->writecache, data))
return_0;
if (lvseg->integrity_meta_dev && !fn(lvseg->integrity_meta_dev, data))
return_0;
for (s = 0; s < lvseg->area_count; ++s) {
if (seg_type(lvseg, s) == AREA_LV && !fn(seg_lv(lvseg,s), data))
return_0;
@@ -3338,6 +3338,10 @@ static int _integritymismatches_disp(struct dm_report *rh __attribute__((unused)
if (lv_is_integrity(lv) && lv_integrity_mismatches(lv->vg->cmd, lv, &mismatches))
return dm_report_field_uint64(rh, field, &mismatches);

if (lv_is_raid(lv) && lv_raid_has_integrity(lv) &&
lv_raid_integrity_total_mismatches(lv->vg->cmd, lv, &mismatches))
return dm_report_field_uint64(rh, field, &mismatches);

return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
}
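With both reporting branches in place, the mismatch counter can be read either for a whole raid LV or for its individual images, for example (volume names are placeholders; the same field is exercised by the tests further below):

    lvs -o+integritymismatches vg/lv1
    lvs -o integritymismatches vg/lv1_rimage_0 vg/lv1_rimage_1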
@@ -255,7 +255,7 @@ static int _target_present(struct cmd_context *cmd,
return 0;
}

if (min >= 2) {
if (min >= 3) {
_writecache_cleaner_supported = 1;
_writecache_max_age_supported = 1;
}
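Whether the running dm-writecache target is new enough for these features can be checked from the target version reported by dmsetup (a sketch; per the change above, cleaner and max_age need target minor version 3, i.e. v1.3 or later):

    dmsetup targets | grep writecache    # second column shows the target version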
@@ -303,6 +303,16 @@ afterwards. Some underlying devices perform better with fua, some with
nofua. Testing is necessary to determine which.
Applicable only to persistent memory.

.IP \[bu] 2
cleaner = 0|1

Setting cleaner=1 enables the writecache cleaner mode in which data is
gradually flushed from the cache. If this is done prior to detaching the
writecache, then the splitcache command will have little or no flushing to
perform. If not done beforehand, the splitcache command enables the
cleaner mode and waits for flushing to complete before detaching the
writecache. Adding cleaner=0 to the splitcache command will skip the
cleaner mode, and any required flushing is performed in device suspend.
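For example, the cleaner could be started ahead of time and then skipped at detach (a sketch only; vg/main is a placeholder LV, and it assumes the setting is passed with --cachesettings like the other writecache settings in this section):

    lvchange --cachesettings cleaner=1 vg/main
    lvconvert --splitcache --cachesettings cleaner=0 vg/main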
.SS dm-cache with separate data and metadata LVs
@@ -1,32 +1,29 @@
.TH "LVMVDO" "7" "LVM TOOLS #VERSION#" "Red Hat, Inc" "\""

.SH NAME
lvmvdo \(em LVM Virtual Data Optimizer support

lvmvdo \(em Support for Virtual Data Optimizer in LVM
.SH DESCRIPTION

VDO (which includes kvdo and vdo) is software that provides inline
VDO is software that provides inline
block-level deduplication, compression, and thin provisioning capabilities
for primary storage.

Deduplication is a technique for reducing the consumption of storage
resources by eliminating multiple copies of duplicate blocks. Compression
takes the individual unique blocks and shrinks them with coding
algorithms; these reduced blocks are then efficiently packed together into
physical blocks. Thin provisioning manages the mapping from LBAs presented
by VDO to where the data has actually been stored, and also eliminates any
blocks of all zeroes.
takes the individual unique blocks and shrinks them. These reduced blocks are then efficiently packed together into
physical blocks. Thin provisioning manages the mapping from logical blocks
presented by VDO to where the data has actually been physically stored,
and also eliminates any blocks of all zeroes.

With deduplication, instead of writing the same data more than once each
duplicate block is detected and recorded as a reference to the original
block. VDO maintains a mapping from logical block addresses (used by the
With deduplication, instead of writing the same data more than once, VDO detects and records each
duplicate block as a reference to the original
block. VDO maintains a mapping from Logical Block Addresses (LBA) (used by the
storage layer above VDO) to physical block addresses (used by the storage
layer under VDO). After deduplication, multiple logical block addresses
may be mapped to the same physical block address; these are called shared
blocks and are reference-counted by the software.

With VDO's compression, multiple blocks (or shared blocks) are compressed
with the fast LZ4 algorithm, and binned together where possible so that
With compression, VDO compresses multiple blocks (or shared blocks)
with the fast LZ4 algorithm, and bins them together where possible so that
multiple compressed blocks fit within a 4 KB block on the underlying
storage. Mapping from LBA is to a physical block address and index within
it for the desired compressed data. All compressed blocks are individually
@@ -39,65 +36,55 @@ allocated for storing the new block data to ensure that other logical
block addresses that are mapped to the shared physical block are not
modified.

For usage of VDO with \fBlvm\fP(8) standard VDO userspace tools
\fBvdoformat\fP(8) and currently non-standard kernel VDO module
"\fIkvdo\fP" needs to be installed on the system.
To use VDO with \fBlvm\fP(8), you must install the standard VDO user-space tools
\fBvdoformat\fP(8) and the currently non-standard kernel VDO module
"\fIkvdo\fP".

The "\fIkvdo\fP" module implements fine-grained storage virtualization,
thin provisioning, block sharing, and compression;
the "\fIuds\fP" module provides memory-efficient duplicate
identification. The userspace tools include \fBvdostats\fP(8)
for extracting statistics from those volumes.

.SH VDO Terms

thin provisioning, block sharing, and compression.
The "\fIuds\fP" module provides memory-efficient duplicate
identification. The user-space tools include \fBvdostats\fP(8)
for extracting statistics from VDO volumes.
.SH VDO TERMS
.TP
VDODataLV
.br
VDO data LV
.br
large hidden LV with suffix _vdata created in a VG.
A large hidden LV with the _vdata suffix. It is created in a VG
.br
used by VDO target to store all data and metadata blocks.

used by the VDO kernel target to store all data and metadata blocks.
.TP
VDOPoolLV
.br
VDO pool LV
.br
maintains virtual for LV(s) stored in attached VDO data LV
and it has same size.
A pool for virtual VDOLV(s), which are the size of used VDODataLV.
.br
contains VDOLV(s) (currently supports only a single VDOLV).

Only a single VDOLV is currently supported.
.TP
VDOLV
.br
VDO LV
.br
created from VDOPoolLV
Created from VDOPoolLV.
.br
appears blank after creation

.SH VDO Usage

Appears blank after creation.
.SH VDO USAGE
The primary methods for using VDO with lvm2:

.SS 1. Create VDOPoolLV with VDOLV

Create a VDOPoolLV that will hold VDO data together with
virtual size VDOLV, that user can use. When the virtual size
is not specified, then such LV is created with maximum size that
always fits into data volume even if there cannot happen any
deduplication and compression
(i.e. it can hold uncompressible content of /dev/urandom).
When the name of VDOPoolLV is not specified, it is taken from
.SS 1. Create a VDOPoolLV and a VDOLV
Create a VDOPoolLV that will hold VDO data, and a
virtual size VDOLV that the user can use. If you do not specify the virtual size,
then the VDOLV is created with the maximum size that
always fits into data volume even if no
deduplication or compression can happen
(i.e. it can hold the incompressible content of /dev/urandom).
If you do not specify the name of VDOPoolLV, it is taken from
the sequence of vpool0, vpool1 ...

Note: As the performance of TRIM/Discard operation is slow for large
volumes of VDO type, please try to avoid sending discard requests unless
necessary as it may take considerable amount of time to finish discard
Note: The performance of TRIM/Discard operations is slow for large
volumes of VDO type. Please try to avoid sending discard requests unless
necessary because it might take considerable amount of time to finish the discard
operation.

.nf
@@ -106,22 +93,19 @@ operation.
.fi

.I Example
.br
.nf
# lvcreate --type vdo -n vdo0 -L 10G -V 100G vg/vdopool0
# mkfs.ext4 -E nodiscard /dev/vg/vdo0
.fi

.SS 2. Create VDOPoolLV from conversion of an existing LV into VDODataLV

Convert an already created/existing LV into a volume that can hold
VDO data and metadata (a volume reference by VDOPoolLV).
User will be prompted to confirm such conversion as it is \fBIRREVERSIBLY
DESTROYING\fP content of such volume, as it's being immediately
formatted by \fBvdoformat\fP(8) as VDO pool data volume. User can
specify virtual size of associated VDOLV with this VDOPoolLV.
When the virtual size is not specified, it will be set to the maximum size
that can keep 100% uncompressible data there.
.SS 2. Convert an existing LV into VDOPoolLV
Convert an already created or existing LV into a VDOPoolLV, which is a volume
that can hold data and metadata.
You will be prompted to confirm such conversion because it \fBIRREVERSIBLY
DESTROYS\fP the content of such volume and the volume is immediately
formatted by \fBvdoformat\fP(8) as a VDO pool data volume. You can
specify the virtual size of the VDOLV associated with this VDOPoolLV.
If you do not specify the virtual size, it will be set to the maximum size
that can keep 100% incompressible data there.

.nf
.B lvconvert --type vdo-pool -n VDOLV -V VirtualSize VG/VDOPoolLV
@@ -129,22 +113,18 @@ that can keep 100% uncompressible data there.
.fi

.I Example
.br
.nf
# lvconvert --type vdo-pool -n vdo0 -V10G vg/existinglv
# lvconvert --type vdo-pool -n vdo0 -V10G vg/ExistingLV
.fi

.SS 3. Change default settings used for creating VDOPoolLV

VDO allows to set large variety of options. Lots of these settings
can be specified by lvm.conf or profile settings. User can prepare
number of different profiles in #DEFAULT_SYS_DIR#/profile directory
and just specify profile file name.
Check output of \fBlvmconfig --type full\fP for detailed description
of all individual vdo settings.
.SS 3. Change the default settings used for creating a VDOPoolLV
VDO allows to set a large variety of options. Lots of these settings
can be specified in lvm.conf or profile settings. You can prepare
a number of different profiles in the #DEFAULT_SYS_DIR#/profile directory
and just specify the profile file name.
Check the output of \fBlvmconfig --type full\fP for a detailed description
of all individual VDO settings.

.I Example
.br
.nf
# cat <<EOF > #DEFAULT_SYS_DIR#/profile/vdo_create.profile
allocation {
@@ -173,10 +153,8 @@ EOF
# lvcreate --vdo -L10G --metadataprofile vdo_create vg/vdopool0
# lvcreate --vdo -L10G --config 'allocation/vdo_cpu_threads=4' vg/vdopool1
.fi

.SS 4. Change compression and deduplication of VDOPoolLV

Disable or enable compression and deduplication for VDO pool LV
.SS 4. Change the compression and deduplication of a VDOPoolLV
Disable or enable the compression and deduplication for VDOPoolLV
(the volume that maintains all VDO LV(s) associated with it).

.nf
@@ -184,24 +162,20 @@ Disable or enable compression and deduplication for VDO pool LV
.fi

.I Example
.br
.nf
# lvchange --compression n vg/vdpool0
# lvchange --deduplication y vg/vdpool1
# lvchange --compression n vg/vdopool0
# lvchange --deduplication y vg/vdopool1
.fi

.SS 5. Checking usage of VDOPoolLV

To quickly check how much data of VDOPoolLV are already consumed
use \fBlvs\fP(8). Field Data% will report how much data occupies
content of virtual data for VDOLV and how much space is already
consumed with all the data and metadata blocks in VDOPoolLV.
For a detailed description use \fBvdostats\fP(8) command.
.SS 5. Checking the usage of VDOPoolLV
To quickly check how much data on a VDOPoolLV is already consumed,
use \fBlvs\fP(8). The Data% field reports how much data is occupied
in the content of the virtual data for the VDOLV and how much space is already
consumed with all the data and metadata blocks in the VDOPoolLV.
For a detailed description, use the \fBvdostats\fP(8) command.

Note: \fBvdostats\fP(8) currently understands only /dev/mapper device names.

.I Example
.br
.nf
# lvcreate --type vdo -L10G -V20G -n vdo0 vg/vdopool0
# mkfs.ext4 -E nodiscard /dev/vg/vdo0
@@ -219,35 +193,36 @@ Note: \fBvdostats\fP(8) currently understands only /dev/mapper device names.
data blocks used : 79
...
.fi
.SS 6. Extending the VDOPoolLV size
You can add more space to hold VDO data and metadata by
extending the VDODataLV using the commands
\fBlvresize\fP(8) and \fBlvextend\fP(8).
The extension needs to add at least one new VDO slab. You can configure
the slab size with the \fBallocation/vdo_slab_size_mb\fP setting.

.SS 6. Extending VDOPoolLV size
You can also enable automatic size extension of a monitored VDOPoolLV
with the \fBactivation/vdo_pool_autoextend_percent\fP and
\fBactivation/vdo_pool_autoextend_threshold\fP settings.

Adding more space to hold VDO data and metadata can be made via
extension of VDODataLV with commands
\fBlvresize\fP(8), \fBlvextend\fP(8).
Note: You cannot reduce the size of a VDOPoolLV.

Note: Size of VDOPoolLV cannot be reduced.

Note: Size of cached VDOPoolLV cannot be changed.
Note: You cannot change the size of a cached VDOPoolLV.

.nf
.B lvextend -L+AddingSize VG/VDOPoolLV
.fi

.I Example
.br
.nf
# lvextend -L+50G vg/vdopool0
# lvresize -L300G vg/vdopool1
.fi
.SS 7. Extending or reducing the VDOLV size
You can extend or reduce a virtual VDO LV as a standard LV with the
\fBlvresize\fP(8), \fBlvextend\fP(8), and \fBlvreduce\fP(8) commands.

.SS 7. Extending or reducing VDOLV size

VDO LV can be extended or reduced as standard LV with commands
\fBlvresize\fP(8), \fBlvextend\fP(8), \fBlvreduce\fP(8).

Note: Reduction needs to process TRIM for reduced disk area
to unmap used data blocks from VDOPoolLV and it may take
Note: The reduction needs to process TRIM for reduced disk area
to unmap used data blocks from the VDOPoolLV, which might take
a long time.

.nf
@@ -256,96 +231,122 @@ a long time.
.fi

.I Example
.br
.nf
# lvextend -L+50G vg/vdo0
# lvreduce -L-50G vg/vdo1
# lvresize -L200G vg/vdo2
.fi

.SS 8. Component activation of VDODataLV

VDODataLV can be activated separately as component LV for examination
purposes. It activates data LV in read-only mode and cannot be modified.
If the VDODataLV is active as component, any upper LV using this volume CANNOT
be activated. User has to deactivate VDODataLV first to continue to use VDOPoolLV.
.SS 8. Component activation of a VDODataLV
You can activate a VDODataLV separately as a component LV for examination
purposes. The activation of the VDODataLV activates the data LV in read-only mode,
and the data LV cannot be modified.
If the VDODataLV is active as a component, any upper LV using this volume CANNOT
be activated. You have to deactivate the VDODataLV first to continue to use the VDOPoolLV.

.I Example
.br
.nf
# lvchange -ay vg/vpool0_vdata
# lvchange -an vg/vpool0_vdata
.fi

.SH VDO Topics

.SH VDO TOPICS
.SS 1. Stacking VDO

User can convert/stack VDO with existing volumes.

.SS 2. VDO on top of raid

Using Raid type LV for VDO Data LV.
You can convert or stack a VDOPooLV with these currently supported
volume types: linear, stripe, raid, and cache with cachepool.
.SS 2. VDOPoolLV on top of raid
Using a raid type LV for a VDODataLV.

.I Example
.br
.nf
# lvcreate --type raid1 -L 5G -n vpool vg
# lvconvert --type vdo-pool -V 10G vg/vpool
# lvcreate --type raid1 -L 5G -n vdopool vg
# lvconvert --type vdo-pool -V 10G vg/vdopool
.fi
.SS 3. Caching a VDODataLV or a VDOPoolLV
VDODataLV (accepts also VDOPoolLV) caching provides a mechanism
to accelerate reads and writes of already compressed and deduplicated
data blocks together with VDO metadata.

.SS 3. Caching VDODataLV, VDOPoolLV

VDO Pool LV (accepts also VDOPoolLV) caching provides mechanism
to accelerate read and write of already compressed and deduplicated
blocks together with vdo metadata.

Cached VDO Data LV cannot be currently resized (also automatic
resize will not work).
A cached VDO data LV cannot be currently resized. Also, the threshold
based automatic resize will not work.

.I Example
.br
.nf
# lvcreate --type vdo -L 5G -V 10G -n vdo1 vg/vpool
# lvcreate --type cache-pool -L 1G -n cpool vg
# lvconvert --cache --cachepool vg/cpool vg/vpool
# lvconvert --uncache vg/vpool
# lvcreate --type vdo -L 5G -V 10G -n vdo1 vg/vdopool
# lvcreate --type cache-pool -L 1G -n cachepool vg
# lvconvert --cache --cachepool vg/cachepool vg/vdopool
# lvconvert --uncache vg/vdopool
.fi
.SS 4. Caching VDOLV

VDO LV cache allow users to 'cache' device for better perfomance before
it hits processing of VDO Pool LV layer.
.SS 4. Caching a VDOLV
VDO LV cache allow you to 'cache' a device for better performance before
it hits the processing of the VDO Pool LV layer.

.I Example
.br
.nf
# lvcreate -L 5G -V 10G -n vdo1 vg/vpool
# lvcreate --type cache-pool -L 1G -n cpool vg
# lvconvert --cache --cachepool vg/cpool vg/vdo1
# lvcreate --type vdo -L 5G -V 10G -n vdo1 vg/vdopool
# lvcreate --type cache-pool -L 1G -n cachepool vg
# lvconvert --cache --cachepool vg/cachepool vg/vdo1
# lvconvert --uncache vg/vdo1
.fi
.SS 5. Usage of Discard/TRIM with a VDOLV
You can discard data on a VDO LV and reduce used blocks on a VDOPoolLV.
However, the current performance of discard operations is still not optimal
and takes a considerable amount of time and CPU.
Unless you really need it, you should avoid using discard.

.SS 5. Usage of Discard/TRIM with VDOLV

User can discard data in VDO LV and reduce used blocks in VDOPoolLV.
However present performance of discard operation is still not optimal
and takes considerable amount of time and CPU.
So unless it's really needed users should avoid usage of discard.

When block device is going to be rewritten,
block will be automatically reused for new data.
Discard is useful in situation, when it is known the given portion of a VDO LV
When a block device is going to be rewritten,
its blocks will be automatically reused for new data.
Discard is useful in situations when user knows that the given portion of a VDO LV
is not going to be used and the discarded space can be used for block
provisioning in other regions of VDO LV.
For the same reason, user should avoid using mkfs with discard for
freshly created VDO LV to save a lot of time this operation would
take otherwise as device after create empty.
provisioning in other regions of the VDO LV.
For the same reason, you should avoid using mkfs with discard for
a freshly created VDO LV to save a lot of time that this operation would
take otherwise as device is already expected to be empty.
.SS 6. Memory usage
The VDO target requires 370 MiB of RAM plus an additional 268 MiB
per each 1 TiB of physical storage managed by the volume.

.br
UDS requires a minimum of 250 MiB of RAM,
which is also the default amount that deduplication uses.

\&
The memory required for the UDS index is determined by the index type
and the required size of the deduplication window and
is controlled by the \fBallocation/vdo_use_sparse_index\fP setting.

With enabled UDS sparse indexing, it relies on the temporal locality of data
and attempts to retain only the most relevant index entries in memory and
can maintain a deduplication window that is ten times larger
than with dense while using the same amount of memory.

Although the sparse index provides the greatest coverage,
the dense index provides more deduplication advice.
For most workloads, given the same amount of memory,
the difference in deduplication rates between dense
and sparse indexes is negligible.

A dense index with 1 GiB of RAM maintains a 1 TiB deduplication window,
while a sparse index with 1 GiB of RAM maintains a 10 TiB deduplication window.
In general, 1 GiB is sufficient for 4 TiB of physical space with
a dense index and 40 TiB with a sparse index.
.SS 7. Storage space requirements
You can configure a VDOPoolLV to use up to 256 TiB of physical storage.
Only a certain part of the physical storage is usable to store data.
This section provides the calculations to determine the usable size
of a VDO-managed volume.

The VDO target requires storage for two types of VDO metadata and for the UDS index:
.TP
\(bu
The first type of VDO metadata uses approximately 1 MiB for each 4 GiB
of physical storage plus an additional 1 MiB per slab.
.TP
\(bu
The second type of VDO metadata consumes approximately 1.25 MiB
for each 1 GiB of logical storage, rounded up to the nearest slab.
.TP
\(bu
The amount of storage required for the UDS index depends on the type of index
and the amount of RAM allocated to the index. For each 1 GiB of RAM,
a dense UDS index uses 17 GiB of storage and a sparse UDS index will use
170 GiB of storage.
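As a rough worked example using only the figures quoted above (an illustration, not a measurement): a VDO volume managing 10 TiB of physical storage with a dense UDS index held in 1 GiB of RAM needs roughly 370 MiB + 10 x 268 MiB of RAM for the VDO target, and about 17 GiB of on-disk space for the index itself:

    # approximate VDO target RAM need, in MiB, for 10 TiB of physical storage
    echo $(( 370 + 10 * 268 ))    # -> 3050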
.SH SEE ALSO
.BR lvm (8),
@@ -1,6 +1,7 @@
vgsplit moves one or more PVs from a source VG to a destination VG. The
PVs can be specified explicitly or implicitly by naming an LV, in which
case on PVs underlying the LV are moved.
vgsplit moves one or more PVs from a source VG (the first VG arg) to a
destination VG (the second VG arg). The PV(s) to move are named after the
source and destination VGs, or an LV is named, in which case the PVs
underlying the LV are moved.

If the destination VG does not exist, a new VG is created (command options
can be used to specify properties of the new VG, also see
@@ -8,9 +8,10 @@ vgsplit - Move physical volumes into a new or existing volume group
[ \fIoption_args\fP ]
.br
.SH DESCRIPTION
vgsplit moves one or more PVs from a source VG to a destination VG. The
PVs can be specified explicitly or implicitly by naming an LV, in which
case on PVs underlying the LV are moved.
vgsplit moves one or more PVs from a source VG (the first VG arg) to a
destination VG (the second VG arg). The PV(s) to move are named after the
source and destination VGs, or an LV is named, in which case the PVs
underlying the LV are moved.

If the destination VG does not exist, a new VG is created (command options
can be used to specify properties of the new VG, also see
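For example, the two ways of selecting what to move look like this (VG, PV, and LV names are placeholders; a sketch based on the description above):

    # move one explicitly named PV from vg00 to (new or existing) vg01
    vgsplit vg00 vg01 /dev/sdb1

    # move whichever PVs hold lvol1
    vgsplit -n lvol1 vg00 vg01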
@@ -798,6 +798,7 @@ fi

CHECK=""
RESIZE=""
NEWSIZE=""

while [ "$#" -ne 0 ]
do
@@ -811,8 +812,11 @@ do
"-y"|"--yes") YES="-y" ;;
"-l"|"--lvresize") DO_LVRESIZE=1 ;;
"-c"|"--cryptresize") DO_CRYPTRESIZE=1 ;;
"check") CHECK=$2 ; shift ;;
"resize") RESIZE=$2 ; NEWSIZE=$3 ; shift 2 ;;
"check") test -z "${2-}" && error "Missing <device>. (see: $TOOL --help)"
CHECK=$2 ; shift ;;
"resize") test -z "${2-}" && error "Missing <device>. (see: $TOOL --help)"
RESIZE=$2 ; shift
if test -n "${2-}" ; then NEWSIZE="${2-}" ; shift ; fi ;;
*) error "Wrong argument \"$1\". (see: $TOOL --help)"
esac
shift
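With the stricter parsing above, a missing device argument is reported instead of being silently misparsed, and the new size for resize remains optional (illustrative invocations; see also the updated test below):

    fsadm check                      # now fails with: Missing <device>. (see: fsadm --help)
    fsadm resize /dev/vg/lvol1       # no size given: resize the fs to the device size
    fsadm resize /dev/vg/lvol1 30M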
@@ -144,6 +144,19 @@ lvconvert -y --type cache --cachedevice "$dev2" $vg/$lv1
check lv_field $vg/$lv1 segtype cache
check lv_field $vg/${lv1}_cache_cvol segtype linear -a
check lv_field $vg/${lv1}_cache_cvol lv_size "60.00m"
lvs -o chunksize $vg/$lv1 |tee out
grep 64.00k out
lvchange -ay $vg/$lv1
lvchange -an $vg/$lv1
lvremove $vg/$lv1

lvcreate -n $lv1 -l8 -an $vg "$dev1"
lvconvert -y --type cache --cachedevice "$dev2" --chunksize 128k $vg/$lv1
check lv_field $vg/$lv1 segtype cache
check lv_field $vg/${lv1}_cache_cvol segtype linear -a
check lv_field $vg/${lv1}_cache_cvol lv_size "60.00m"
lvs -o chunksize $vg/$lv1 |tee out
grep 128.00k out
lvchange -ay $vg/$lv1
lvchange -an $vg/$lv1
lvremove $vg/$lv1
@@ -96,9 +96,19 @@ lvcreate -n $lv1 -L20M $vg
lvcreate -n ${lv1}bar -L10M $vg
trap 'cleanup_mounted_and_teardown' EXIT

# prints help
fsadm

# check needs arg
not fsadm check

if check_missing ext2; then
mkfs.ext2 -b4096 -j "$dev_vg_lv"

# Check 'check' works
fsadm check $vg_lv
# Check 'resize' without size parameter works
fsadm resize $vg_lv
fsadm --lvresize resize $vg_lv 30M
# Fails - not enough space for 4M fs
not fsadm -y --lvresize resize "$dev_vg_lv" 4M

test/shell/integrity-syncaction.sh (new file, 212 lines)
@@ -0,0 +1,212 @@
#!/usr/bin/env bash

# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

SKIP_WITH_LVMPOLLD=1

. lib/inittest

aux have_integrity 1 5 0 || skip
which mkfs.xfs || skip
which xfs_growfs || skip

mnt="mnt"
mkdir -p $mnt

aux prepare_devs 3 40

# Use awk instead of anoyingly long log out from printf
#printf "%0.sA" {1..16384} >> fileA
awk 'BEGIN { while (z++ < 16384) printf "A" }' > fileA
awk 'BEGIN { while (z++ < 16384) printf "B" }' > fileB
awk 'BEGIN { while (z++ < 16384) printf "C" }' > fileC

_prepare_vg() {
# zero devs so we are sure to find the correct file data
# on the underlying devs when corrupting it
dd if=/dev/zero of="$dev1" bs=1M oflag=direct || true
dd if=/dev/zero of="$dev2" bs=1M oflag=direct || true
dd if=/dev/zero of="$dev3" bs=1M oflag=direct || true
vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3"
pvs
}

_test1() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"

mount "$DM_DEV_DIR/$vg/$lv1" $mnt

# we don't want fileA to be located too early in the fs,
# otherwise activating the LV will trigger the corruption
# to be found and corrected, leaving nothing for syncaction
# to find and correct.
dd if=/dev/urandom of=$mnt/rand16M bs=1M count=16

cp fileA $mnt
cp fileB $mnt
cp fileC $mnt

umount $mnt
lvchange -an $vg/$lv1

xxd "$dev1" > dev1.txt
# corrupt fileB
sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.txt > dev1.bad
rm -f dev1.txt
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad

lvchange -ay $vg/$lv1

lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
grep 0 mismatch

lvchange --syncaction check $vg/$lv1

_wait_recalc $vg/$lv1

lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch

mount "$DM_DEV_DIR/$vg/$lv1" $mnt
cmp -b $mnt/fileA fileA
cmp -b $mnt/fileB fileB
cmp -b $mnt/fileC fileC
umount $mnt
}
_test2() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"

mount "$DM_DEV_DIR/$vg/$lv1" $mnt

# we don't want fileA to be located too early in the fs,
# otherwise activating the LV will trigger the corruption
# to be found and corrected, leaving nothing for syncaction
# to find and correct.
dd if=/dev/urandom of=$mnt/rand16M bs=1M count=16

cp fileA $mnt
cp fileB $mnt
cp fileC $mnt

umount $mnt
lvchange -an $vg/$lv1

# corrupt fileB and fileC on dev1
xxd "$dev1" > dev1.txt
sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.txt > dev1.bad
sed -e 's/4343 4343 4343 4343 4343 4343 4343 4343/4444 4444 4444 4444 4444 4444 4444 4444/' dev1.txt > dev1.bad
rm -f dev1.txt
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad

# corrupt fileA on dev2
xxd "$dev2" > dev2.txt
sed -e 's/4141 4141 4141 4141 4141 4141 4141 4141/4141 4141 4141 4141 4141 4141 4145 4141/' dev2.txt > dev2.bad
rm -f dev2.txt
xxd -r dev2.bad > "$dev2"
rm -f dev2.bad

lvchange -ay $vg/$lv1

lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
grep 0 mismatch
lvs -o integritymismatches $vg/${lv1}_rimage_1 |tee mismatch
grep 0 mismatch

lvchange --syncaction check $vg/$lv1

_wait_recalc $vg/$lv1

lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
lvs -o integritymismatches $vg/${lv1}_rimage_1 |tee mismatch
not grep 0 mismatch

mount "$DM_DEV_DIR/$vg/$lv1" $mnt
cmp -b $mnt/fileA fileA
cmp -b $mnt/fileB fileB
cmp -b $mnt/fileC fileC
umount $mnt
}

_sync_percent() {
local checklv=$1
get lv_field "$checklv" sync_percent | cut -d. -f1
}

_wait_recalc() {
local checklv=$1

for i in $(seq 1 10) ; do
sync=$(_sync_percent "$checklv")
echo "sync_percent is $sync"

if test "$sync" = "100"; then
return
fi

sleep 1
done

# TODO: There is some strange bug, first leg of RAID with integrity
# enabled never gets in sync. I saw this in BB, but not when executing
# the commands manually
if test -z "$sync"; then
echo "TEST\ WARNING: Resync of dm-integrity device '$checklv' failed"
dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}"
exit
fi
echo "timeout waiting for recalc"
return 1
}

_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test1
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test2
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2" "$dev3"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_test1
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
@@ -46,62 +46,14 @@ _prepare_vg() {
pvs
}

_test_fs_with_error() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"

mount "$DM_DEV_DIR/$vg/$lv1" $mnt

# add original data
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt

umount $mnt
lvchange -an $vg/$lv1

# corrupt the original data on the underying dev
# flip one bit in fileB, changing a 0x42 to 0x43
# the bit is changed in the last 4096 byte block
# of the file, so when reading back the file we
# will get the first three 4096 byte blocks, for
# a total of 12288 bytes before getting an error
# on the last 4096 byte block.
xxd "$dev1" > dev1.txt
tac dev1.txt > dev1.rev
rm -f dev1.txt
sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
rm -f dev1.rev
tac dev1.rev.bad > dev1.bad
rm -f dev1.rev.bad
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad

lvchange -ay $vg/$lv1
mount "$DM_DEV_DIR/$vg/$lv1" $mnt

# read complete fileA which was not corrupted
dd if=$mnt/fileA of=tmp bs=1k
ls -l tmp
stat -c %s tmp
cmp -b fileA tmp
rm tmp

# read partial fileB which was corrupted
not dd if=$mnt/fileB of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 12288
not cmp -b fileB tmp
rm tmp

umount $mnt
}

_test_fs_with_read_repair() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"

mount "$DM_DEV_DIR/$vg/$lv1" $mnt

# add original data
cp randA $mnt
cp randB $mnt
cp randC $mnt
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt
@@ -109,87 +61,21 @@ _test_fs_with_read_repair() {
umount $mnt
lvchange -an $vg/$lv1

# FIXME: this is only finding/corrupting the bit with raid1
# other raid levels may require looking at a different dev.
# (Attempt this xxd/tac/sed/xxd on each dev in the LV?)

xxd "$dev1" > dev1.txt
tac dev1.txt > dev1.rev
rm -f dev1.txt
sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
rm -f dev1.rev
tac dev1.rev.bad > dev1.bad
rm -f dev1.rev.bad
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad

lvchange -ay $vg/$lv1
mount "$DM_DEV_DIR/$vg/$lv1" $mnt

# read complete fileA which was not corrupted
dd if=$mnt/fileA of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 16384
cmp -b fileA tmp
rm tmp

# read complete fileB, corruption is corrected by raid
dd if=$mnt/fileB of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 16384
cmp -b fileB tmp
rm tmp

umount $mnt
}

_test_fs_with_syncaction_check() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"

mount "$DM_DEV_DIR/$vg/$lv1" $mnt

# add original data
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt

umount $mnt
lvchange -an $vg/$lv1

# FIXME: this is only finding/corrupting the bit with raid1
# other raid levels may require looking at a different dev.
# (Attempt this xxd/tac/sed/xxd on each dev in the LV?)

xxd "$dev1" > dev1.txt
tac dev1.txt > dev1.rev
rm -f dev1.txt
sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
rm -f dev1.rev
tac dev1.rev.bad > dev1.bad
rm -f dev1.rev.bad
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad
for dev in "$@"; do
xxd "$dev" > dev.txt
# corrupt fileB
sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev.txt > dev.bad
rm -f dev.txt
xxd -r dev.bad > "$dev"
rm -f dev.bad
done

lvchange -ay $vg/$lv1

lvchange --syncaction check $vg/$lv1

mount "$DM_DEV_DIR/$vg/$lv1" $mnt

# read complete fileA which was not corrupted
dd if=$mnt/fileA of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 16384
cmp -b fileA tmp
rm tmp

# read complete fileB
dd if=$mnt/fileB of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 16384
cmp -b fileB tmp
rm tmp

cmp -b $mnt/fileA fileA
cmp -b $mnt/fileB fileB
cmp -b $mnt/fileC fileC
umount $mnt
}
@@ -282,170 +168,105 @@ _wait_recalc() {
|
||||
# it is detected by integrity and corrected by raid.
|
||||
|
||||
_prepare_vg
|
||||
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
|
||||
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2"
|
||||
_wait_recalc $vg/${lv1}_rimage_0
|
||||
_wait_recalc $vg/${lv1}_rimage_1
|
||||
_test_fs_with_read_repair
|
||||
lvs -o integritymismatches $vg/${lv1}_rimage_0
|
||||
lvs -o integritymismatches $vg/${lv1}_rimage_1
|
||||
_wait_recalc $vg/$lv1
|
||||
_test_fs_with_read_repair "$dev1"
|
||||
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
|
||||
not grep 0 mismatch
|
||||
lvs -o integritymismatches $vg/$lv1 |tee mismatch
|
||||
not grep 0 mismatch
|
||||
lvchange -an $vg/$lv1
|
||||
lvconvert --raidintegrity n $vg/$lv1
|
||||
lvremove $vg/$lv1
|
||||
vgremove -ff $vg
|
||||
|
||||
_prepare_vg
|
||||
lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg
|
||||
lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
|
||||
_wait_recalc $vg/${lv1}_rimage_0
|
||||
_wait_recalc $vg/${lv1}_rimage_1
|
||||
_wait_recalc $vg/${lv1}_rimage_2
|
||||
_test_fs_with_read_repair
|
||||
lvs -o integritymismatches $vg/${lv1}_rimage_0
|
||||
lvs -o integritymismatches $vg/${lv1}_rimage_1
|
||||
lvs -o integritymismatches $vg/${lv1}_rimage_2
|
||||
_wait_recalc $vg/$lv1
|
||||
_test_fs_with_read_repair "$dev1" "$dev2"
|
||||
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
|
||||
not grep 0 mismatch
|
||||
lvs -o integritymismatches $vg/$lv1 |tee mismatch
|
||||
not grep 0 mismatch
|
||||
lvchange -an $vg/$lv1
|
||||
lvconvert --raidintegrity n $vg/$lv1
|
||||
lvremove $vg/$lv1
|
||||
vgremove -ff $vg
|
||||
|
||||
_prepare_vg
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_test_fs_with_read_repair
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2" "$dev3"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_test_fs_with_read_repair
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2" "$dev3"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_test_fs_with_read_repair
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/${lv1}_rimage_4
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

# Test corrupting data on an image and verifying that
# it is detected and corrected using syncaction check
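# A minimal hand-driven sketch of the same cycle (illustrative only, not the
# _test_fs_with_syncaction_check helper; the corruption offset is an assumption):
#   dd if=/dev/urandom of="$dev1" bs=1K count=1 seek=2048 oflag=direct
#   lvchange --syncaction check $vg/$lv1             # scrub reads every block
#   lvs -o integritymismatches $vg/${lv1}_rimage_0   # nonzero once detected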

_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
check lv_field $vg/${lv1}_rimage_0 integritymismatches "1"
check lv_field $vg/${lv1}_rimage_1 integritymismatches "0"
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
check lv_field $vg/${lv1}_rimage_0 integritymismatches "2"
check lv_field $vg/${lv1}_rimage_1 integritymismatches "0"
check lv_field $vg/${lv1}_rimage_2 integritymismatches "0"
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/${lv1}_rimage_4
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_test_fs_with_syncaction_check
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev3"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -457,6 +278,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -471,6 +293,8 @@ _prepare_vg
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -485,6 +309,8 @@ _prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -499,6 +325,10 @@ _prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -513,6 +343,7 @@ _prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -527,6 +358,7 @@ vgremove -ff $vg

_prepare_vg
lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -541,6 +373,7 @@ vgremove -ff $vg

_prepare_vg
lvcreate --type raid4 -n $lv1 -l 8 $vg
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -555,6 +388,7 @@ vgremove -ff $vg

_prepare_vg
lvcreate --type raid5 -n $lv1 -l 8 $vg
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -569,6 +403,12 @@ vgremove -ff $vg

_prepare_vg
lvcreate --type raid6 -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -583,6 +423,7 @@ vgremove -ff $vg

_prepare_vg
lvcreate --type raid10 -n $lv1 -l 8 $vg
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -601,6 +442,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
umount $mnt
@@ -624,6 +466,10 @@ _prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
umount $mnt
@@ -649,6 +495,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -668,6 +515,8 @@ _prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -687,6 +536,7 @@ _prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -708,6 +558,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvconvert -y -m+1 $vg/$lv1
@@ -730,6 +581,7 @@ lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvconvert -y -m-1 $vg/$lv1
@@ -748,6 +600,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
not lvconvert -y -m-1 $vg/$lv1
@@ -768,23 +621,36 @@ vgremove -ff $vg
# Repeat many of the tests above using bitmap mode
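# (Assumed background: integrity defaults to journal mode; --raidintegritymode
# bitmap trades the journal for a bitmap of in-flight regions, so only regions
# that were dirty at an unclean shutdown need their checksums recalculated.)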

_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1"
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

_prepare_vg
lvcreate --type raid6 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
_test_fs_with_read_repair
lvcreate --type raid6 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/${lv1}_rimage_4
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -792,7 +658,7 @@ vgremove -ff $vg

# remove from active lv
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_add_new_data_to_mnt

@@ -145,7 +145,7 @@ pvcreate --pvmetadatacopies 2 --metadatasize 32M "$dev1"

vgcreate $SHARED -s 64K --metadatasize 32M $vg "$dev1" "$dev2" "$dev3" "$dev4"

for i in $(seq 1 500); do lvcreate -an -n lv$i -l1 $vg; done
for i in $(seq 1 500); do echo "lvcreate -an -n lv$i -l1 $vg"; done | lvm
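# (The piped form above is assumed to batch all 500 lvcreate calls through one
# `lvm` shell reading commands from stdin, instead of forking a separate
# process and rescanning devices for every LV.)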

pvck --dump headers "$dev1" > h1


@@ -149,5 +149,34 @@ lvchange -an $vg/$lv2
lvremove $vg/$lv1
lvremove $vg/$lv2

# Repeat similar using uncache
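# (Unlike --splitcache, which keeps the cachevol as a separate LV, --uncache
# flushes the writecache and deletes the cachevol, leaving only the origin LV.)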

lvcreate -n $lv1 -L 560M -an $vg "$dev1"
lvcreate -n $lv2 -L 500M -an $vg "$dev2"

lvchange -ay $vg/$lv1
lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1

_add_new_data_to_mnt
_add_more_data_to_mnt
dd if=/dev/zero of=$mnt/big1 bs=1M count=100 oflag=sync
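# (the large synchronous write above is assumed to leave dirty blocks in the
# writecache so the --uncache below has real data to write back)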

umount $mnt
lvchange -an $vg/$lv1

lvconvert --uncache $vg/$lv1

check lv_field $vg/$lv1 segtype linear
not lvs $vg/$lv2

lvchange -ay $vg/$lv1
mount "$DM_DEV_DIR/$vg/$lv1" $mnt

_verify_data_on_mnt
_verify_more_data_on_mnt

umount $mnt
lvchange -an $vg/$lv1

vgremove -ff $vg


@@ -99,6 +99,11 @@ lvchange -an $vg/$lv1

aux disable_dev "$dev2"

lvs -a -o+lv_health_status $vg |tee out
grep $lv1 out | grep partial
grep $lv2 out | grep partial
check lv_attr_bit health $vg/$lv1 "p"
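# ("p" is the partial health bit of lv_attr, reported while an LV is missing
# one of its underlying PVs, as after the disable_dev above)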

not lvconvert --splitcache $vg/$lv1
lvconvert --splitcache --force --yes $vg/$lv1

@@ -128,6 +133,11 @@ lvchange -an $vg/$lv1

aux disable_dev "$dev3"

lvs -a -o+lv_health_status $vg |tee out
grep $lv1 out | grep partial
grep $lv2 out | grep partial
check lv_attr_bit health $vg/$lv1 "p"

not lvconvert --splitcache $vg/$lv1
lvconvert --splitcache --force --yes $vg/$lv1

@@ -171,5 +181,51 @@ lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
fail vgsplit $vg $vg1 "$dev2"
fail vgsplit $vg $vg1 "$dev3"
lvremove $vg/$lv1
vgremove $vg

#
# uncache
#
vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4"

# while inactive

lvcreate -n $lv1 -l 16 -an $vg "$dev1" "$dev4"
lvcreate -n $lv2 -l 4 -an $vg "$dev2"

lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1

lvchange -ay $vg/$lv1
mkfs_mount_umount $lv1
lvchange -an $vg/$lv1

lvconvert --uncache $vg/$lv1
lvs -o segtype $vg/$lv1 | grep linear
not lvs $vg/$lv2

lvchange -ay $vg/$lv1
mount_umount $lv1
lvchange -an $vg/$lv1
lvremove -y $vg/$lv1

# while active

lvcreate -n $lv1 -l 16 -an $vg "$dev1" "$dev4"
lvcreate -n $lv2 -l 4 -an $vg "$dev2"

lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1

lvchange -ay $vg/$lv1
mkfs_mount_umount $lv1

lvconvert --uncache $vg/$lv1
lvs -o segtype $vg/$lv1 | grep linear
not lvs $vg/$lv2

lvchange -an $vg/$lv1
lvchange -ay $vg/$lv1
mount_umount $lv1
lvchange -an $vg/$lv1
lvremove -y $vg/$lv1

vgremove -ff $vg

@@ -1,198 +0,0 @@
#!/usr/bin/env bash

# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

SKIP_WITH_LVMPOLLD=1

. lib/inittest

mkfs_mount_unmount()
{
lvt=$1

mkfs.xfs -f "$DM_DEV_DIR/$vg/$lvt"
mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir"
cp pattern1 "$mount_dir/pattern1"
umount "$mount_dir"
}

setup_thin_lvs()
{
pool=$1

for i in $(seq 1 4); do
lvcreate --type thin -V1G -n th$i --thinpool $pool $vg
mkfs_mount_unmount th${i}
lvchange -an $vg/th${i}
done
}

diff_thin_lvs()
{
for i in $(seq 1 4); do
diff pattern1 "${mount_dir}_${i}/pattern1"
diff pattern2 "${mount_dir}_${i}/pattern2"
done
}

mount_thin_lvs()
{
for i in $(seq 1 4); do
lvchange -ay $vg/th$i
mount "$DM_DEV_DIR/$vg/th$i" "${mount_dir}_${i}"
done
}

unmount_thin_lvs()
{
for i in $(seq 1 4); do
umount "${mount_dir}_${i}"
lvchange -an $vg/th${i}
done
}

write_thin_lvs()
{
for i in $(seq 1 4); do
cp pattern2 "${mount_dir}_${i}/pattern2"
done
}

aux have_writecache 1 0 0 || skip
which mkfs.xfs || skip

mount_dir="mnt"
mount_dir_1="mnt1"
mount_dir_2="mnt2"
mount_dir_3="mnt3"
mount_dir_4="mnt4"
mkdir -p "$mount_dir"
for i in $(seq 1 4); do
mkdir -p "${mount_dir}_${i}"
done

# generate random data
dd if=/dev/urandom of=pattern1 bs=512K count=1
dd if=/dev/urandom of=pattern2 bs=512 count=15

aux prepare_devs 6 40

vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"

#
# writecache as thin pool data
# splitcache while inactive
#

# lv1 holds thin pool data and uses writecache
# lv2 holds cachevol for writecache
# lv3 holds thin pool metadata
lvcreate -n $lv1 -l 16 -an $vg "$dev1" "$dev2"
lvcreate -n $lv2 -l 2 -an $vg "$dev3"
lvcreate -n $lv3 -l 2 -an $vg "$dev4"
lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
lvconvert -y --type thin-pool --poolmetadata $lv3 --poolmetadataspare n $vg/$lv1
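# Assumed resulting stack after the two converts above (sketch):
#   thin LVs -> thin pool -> $lv1 as _tdata with writecache
#                              |- origin data on dev1/dev2
#                              '- cachevol ($lv2) on dev3
#   thin pool metadata (_tmeta) -> $lv3 on dev4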

setup_thin_lvs $lv1
mount_thin_lvs
write_thin_lvs
unmount_thin_lvs

lvchange -an $vg/$lv1
lvconvert --splitcache --cachesettings cleaner=0 $vg/${lv1}_tdata
lvs -o segtype $vg/$lv2 | grep linear

mount_thin_lvs
diff_thin_lvs
unmount_thin_lvs

lvremove -y $vg/$lv1
lvremove -y $vg/$lv2

#
# writecache as thin pool data
# splitcache while active
#

# lv1 holds thin pool data and uses writecache
# lv2 holds cachevol for writecache
# lv3 holds thin pool metadata
lvcreate -n $lv1 -l 16 -an $vg "$dev1" "$dev2"
lvcreate -n $lv2 -l 2 -an $vg "$dev3"
lvcreate -n $lv3 -l 2 -an $vg "$dev4"
lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
lvconvert -y --type thin-pool --poolmetadata $lv3 --poolmetadataspare n $vg/$lv1

setup_thin_lvs $lv1
mount_thin_lvs
write_thin_lvs

# FIXME: splitcache setting cleaner on tdata writecache doesn't work,
# bypassing that with cleaner=0 here.

lvconvert --splitcache --cachesettings cleaner=0 $vg/${lv1}_tdata
lvs -o segtype $vg/$lv2 | grep linear

diff_thin_lvs
unmount_thin_lvs

mount_thin_lvs
diff_thin_lvs
unmount_thin_lvs

lvremove -y $vg/$lv1
lvremove -y $vg/$lv2


#
# add writecache to raid, then use writecache for thin pool data
#

# lv1 holds thin pool data and uses writecache
# lv2 holds cachevol for writecache
# lv3 holds thin pool metadata
lvcreate --type raid1 -m1 -n $lv1 -l 16 -an $vg "$dev1" "$dev2" "$dev5" "$dev6"
lvcreate -n $lv2 -l 2 -an $vg "$dev3"
lvcreate -n $lv3 -l 2 -an $vg "$dev4"
lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
lvconvert -y --type thin-pool --poolmetadata $lv3 --poolmetadataspare n $vg/$lv1

setup_thin_lvs $lv1
mount_thin_lvs
write_thin_lvs

lvconvert --splitcache --cachesettings cleaner=0 $vg/${lv1}_tdata
lvs -o segtype $vg/$lv2 | grep linear

diff_thin_lvs
unmount_thin_lvs

mount_thin_lvs
diff_thin_lvs
unmount_thin_lvs

lvremove -y $vg/$lv1
lvremove -y $vg/$lv2


#
# remove writecache from thin pool data when cachevol is missing
#

#
# FIXME: add writecache to existing thin pool data
#

#
# FIXME: thin pool data cannot be extended when it uses writecache
#

vgremove -ff $vg
@@ -505,14 +505,14 @@ DESC: Add a writecache to an LV, using a specified cache device.
RULE: all and lv_is_visible

lvconvert --type cache --cachedevice PV LV_linear_striped_raid_thinpool
OO: OO_LVCONVERT, --cachesize SizeMB, --cachesettings String
OO: OO_LVCONVERT, --cachesize SizeMB, --cachesettings String, --chunksize SizeKB
ID: lvconvert_to_cache_with_device
DESC: Add a cache to an LV, using a specified cache device.
RULE: all and lv_is_visible

---

lvconvert --type thin-pool LV_linear_striped_raid_cache_writecache
lvconvert --type thin-pool LV_linear_striped_raid_cache
OO: --stripes_long Number, --stripesize SizeKB,
--discards Discards, OO_LVCONVERT_POOL, OO_LVCONVERT
OP: PV ...

@@ -202,7 +202,7 @@ static int _lvchange_activate(struct cmd_context *cmd, struct logical_volume *lv
strcmp(lv->vg->system_id, cmd->system_id) &&
is_change_activating(activate)) {
log_error("Cannot activate LVs in a foreign VG.");
return ECMD_FAILED;
return 0;
}

if (lv_activation_skip(lv, activate, arg_is_set(cmd, ignoreactivationskip_ARG)))

@@ -3662,6 +3662,7 @@ struct lvconvert_result {
unsigned need_polling:1;
unsigned wait_cleaner_writecache:1;
unsigned active_begin:1;
unsigned remove_cache:1;
struct dm_list poll_idls;
};

@@ -4966,8 +4967,18 @@ static int _lvconvert_split_cache_single(struct cmd_context *cmd,
return ECMD_FAILED;

if (cmd->command->command_enum == lvconvert_split_and_remove_cache_CMD) {
if (lvremove_single(cmd, lv_fast, NULL) != ECMD_PROCESSED)
return ECMD_FAILED;
struct lvconvert_result *lr = (struct lvconvert_result *) handle->custom_handle;
/*
* If detach is ongoing, then the remove needs to wait
* until _lvconvert_detach_writecache_when_clean(),
* after the detach has finished. When lr->remove_cache
* has been set, when_clean() knows it should remove
* lv_fast at the end.
*/
if (!lr->wait_cleaner_writecache) {
if (lvremove_single(cmd, lv_fast, NULL) != ECMD_PROCESSED)
return ECMD_FAILED;
}
}
ret = 1;
} else if (lv_is_cache(lv_main) && lv_is_cache_vol(lv_fast)) {
@@ -5637,6 +5648,10 @@ static int _lvconvert_detach_writecache(struct cmd_context *cmd,
lr->wait_cleaner_writecache = 1;
lr->active_begin = active_begin;

/* The command wants to remove the cache after detaching. */
if (cmd->command->command_enum == lvconvert_split_and_remove_cache_CMD)
lr->remove_cache = 1;

dm_list_add(&lr->poll_idls, &idl->list);
return 1;
}
@@ -5679,6 +5694,7 @@ static int _lvconvert_detach_writecache_when_clean(struct cmd_context *cmd,
struct poll_operation_id *id;
struct volume_group *vg;
struct logical_volume *lv;
struct logical_volume *lv_fast;
uint32_t lockd_state, error_flags;
uint64_t dirty;
int ret;
@@ -5759,6 +5775,8 @@ static int _lvconvert_detach_writecache_when_clean(struct cmd_context *cmd,

log_print("Detaching writecache completed cleaning.");

lv_fast = first_seg(lv)->writecache;

/*
* When the cleaner has finished, we can detach with noflush since
* the cleaner has done the flushing.
@@ -5770,6 +5788,17 @@ static int _lvconvert_detach_writecache_when_clean(struct cmd_context *cmd,
goto out_release;
}

/*
* The detach was started by an uncache command that wants to remove
* the cachevol after detaching.
*/
if (lr->remove_cache) {
if (lvremove_single(cmd, lv_fast, NULL) != ECMD_PROCESSED) {
log_error("Removing the writecache cachevol failed.");
ret = 0;
}
}

ret = 1;
backup(vg);


@@ -3064,11 +3064,11 @@ int pvck(struct cmd_context *cmd, int argc, char **argv)
return ECMD_FAILED;
}

label_scan_setup_bcache();

if (dev) {
char buf[4096];

label_scan_setup_bcache();

/*
* This buf is not used, but bcache data is used for subsequent
* reads in the filters and by _read_bytes for other disk structs.

@@ -4,14 +4,7 @@

# Udev rules for LVM.
#
# Scan all block devices having a PV label for LVM metadata.
# Store this information in LVMetaD (the LVM metadata daemon) and maintain LVM
# metadata state for improved performance by avoiding further scans while
# running subsequent LVM commands or while using lvm2app library.
# Also, notify LVMetaD about any relevant block device removal.
#
# This rule is essential for having the information in LVMetaD up-to-date.
# It also requires blkid to be called on block devices before so only devices
# This rule requires blkid to be called on block devices before so only devices
# used as LVM PVs are processed (ID_FS_TYPE="LVM2_member" or "LVM1_member").

SUBSYSTEM!="block", GOTO="lvm_end"
@@ -19,8 +12,7 @@ SUBSYSTEM!="block", GOTO="lvm_end"

ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="lvm_end"

# If the PV label got lost, inform lvmetad immediately.
# Detect the lost PV label by comparing previous ID_FS_TYPE value with current one.
# Detect removed PV label by comparing previous ID_FS_TYPE value with current one.
ENV{.ID_FS_TYPE_NEW}="$env{ID_FS_TYPE}"
IMPORT{db}="ID_FS_TYPE"
ENV{ID_FS_TYPE}=="LVM2_member|LVM1_member", ENV{.ID_FS_TYPE_NEW}!="LVM2_member|LVM1_member", ENV{LVM_PV_GONE}="1"
@@ -31,7 +23,6 @@ ENV{LVM_PV_GONE}=="1", GOTO="lvm_scan"
ENV{ID_FS_TYPE}!="LVM2_member|LVM1_member", GOTO="lvm_end"
ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="lvm_end"

# Inform lvmetad about any PV that is gone.
ACTION=="remove", GOTO="lvm_scan"

# Create /dev/disk/by-id/lvm-pv-uuid-<PV_UUID> symlink for each PV
@@ -69,7 +60,6 @@ ENV{LVM_LOOP_PV_ACTIVATED}!="1", ENV{SYSTEMD_READY}="0"
GOTO="lvm_end"
|
||||
# For "direct_pvscan" mode (see below), this means run rules only an ADD events.
|
||||
# For "systemd_background" mode, systemd takes care of this by activating
|
||||
# the lvm2-pvscan@.service only once.
|
||||
LABEL="next"
|
||||
@@ -113,6 +103,7 @@ ENV{SYSTEMD_ALIAS}="/dev/block/$major:$minor"
|
||||
ENV{SYSTEMD_WANTS}+="lvm2-pvscan@$major:$minor.service"
|
||||
GOTO="lvm_end"
|
||||
|
||||
# FIXME: this mode is not used and should be removed.
|
||||
LABEL="direct_pvscan"
|
||||
|
||||
# The table below summarises the situations in which we reach the LABEL="lvm_scan"
|
||||
|
||||