mirror of git://sourceware.org/git/lvm2.git

Compare commits


1 commit

Author SHA1 Message Date
David Teigland
b6e67d688e writecache: enable use with thin pool data
. it's of little use since the thin pool cannot be extended
  while it's using writecache.

. lvconvert --splitcache on tdata isn't working, the writecache
  table doesn't get updated with the cleaner setting.

. adding writecache to existing tdata not yet implemented
2020-10-28 13:37:44 -05:00
132 changed files with 1316 additions and 8264 deletions

View File

@@ -1 +1 @@
2.03.12(2)-git (2021-01-08)
2.03.11(2)-git (2020-08-09)

View File

@@ -1 +1 @@
1.02.177-git (2021-01-08)
1.02.175-git (2020-08-09)

View File

@@ -1,22 +1,5 @@
Version 2.03.12 -
===================================
Check if lvcreate passes read_only_volume_list with tags and skips zeroing.
Allocation prints better error when metadata cannot fit on a single PV.
Pvmove can better resolve full thin-pool tree move.
Limit pool metadata spare to 16GiB.
Improve conversion and allocation of pool metadata.
Support thin pool metadata 15.88GiB, adds 64MiB, thin_pool_crop_metadata=0.
Enhance lvdisplay to report raid available/partial.
Support online rename of VDO pools.
Improve removal of pmspare when last pool is removed.
Fix problem with wiping of converted LVs.
Fix memleak in scanning (2.03.11).
Fix corner case allocation for thin-pools.
Version 2.03.11 - 08th January 2021
===================================
Fix pvck handling MDA at offset different from 4096.
Partial or degraded activation of writecache is not allowed.
Version 2.03.11 -
==================================
Enhance error handling for fsadm and handle fsck results correctly.
Dmeventd lvm plugin ignores higher reserved_stack lvm.conf values.
Support using BLKZEROOUT for clearing devices.
@@ -37,9 +20,6 @@ Version 2.03.11 - 08th January 2021
Enhance --use-policy percentage rounding.
Configure --with-vdo and --with-writecache as internal segments.
Improving VDO man page examples.
Allow pvmove of writecache origin.
Report integrity fields.
Integrity volumes defaults to journal mode.
Switch code base to use flexible array syntax.
Fix 64bit math when calculating cachevol size.
Preserve uint32_t for seqno handling.

View File

@@ -1,9 +1,5 @@
Version 1.02.177 -
====================================
Add dm_tree_node_add_thin_pool_target_v1 with crop_metadata support.
Version 1.02.175 - 08th January 2021
====================================
Version 1.02.175 -
===================================
Version 1.02.173 - 09th August 2020
===================================

aclocal.m4 vendored
View File

@@ -496,14 +496,12 @@ AC_DEFUN([AM_PATH_PYTHON],
m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])])
else
dnl Query Python for its version number. Although site.py simply uses
dnl sys.version[:3], printing that failed with Python 3.10, since the
dnl trailing zero was eliminated. So now we output just the major
dnl and minor version numbers, as numbers. Apparently the tertiary
dnl version is not of interest.
dnl Query Python for its version number. Getting [:3] seems to be
dnl the best way to do this; it's what "site.py" does in the standard
dnl library.
AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version],
[am_cv_python_version=`$PYTHON -c "import sys; print('%u.%u' % sys.version_info[[:2]])"`])
[am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`])
AC_SUBST([PYTHON_VERSION], [$am_cv_python_version])
dnl Use the values of $prefix and $exec_prefix for the corresponding

View File

@@ -78,14 +78,14 @@ devices {
# routines to acquire this information. For example, this information
# is used to drive LVM filtering like MD component detection, multipath
# component detection, partition detection and others.
#
#
# Accepted values:
# none
# No external device information source is used.
# udev
# Reuse existing udev database records. Applicable only if LVM is
# compiled with udev support.
#
#
external_device_info_source = "none"
# Configuration option devices/hints.
@@ -94,13 +94,13 @@ devices {
# scanning, and will only scan the listed PVs. Removing the hint file
# will cause lvm to generate a new one. Disable hints if PVs will
# be copied onto devices using non-lvm commands, like dd.
#
#
# Accepted values:
# all
# Use all hints.
# none
# Use no hints.
#
#
# This configuration option has an automatic default value.
# hints = "all"
@@ -118,10 +118,10 @@ devices {
# Prefer the name with the least number of slashes.
# Prefer a name that is a symlink.
# Prefer the path with least value in lexicographical order.
#
#
# Example
# preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option devices/filter.
@@ -139,7 +139,7 @@ devices {
# then the device is accepted. Be careful mixing 'a' and 'r' patterns,
# as the combination might produce unexpected results (test changes.)
# Run vgscan after changing the filter to regenerate the cache.
#
#
# Example
# Accept every block device:
# filter = [ "a|.*|" ]
@@ -151,7 +151,7 @@ devices {
# filter = [ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
# Use anchors to be very specific:
# filter = [ "a|^/dev/hda8$|", "r|.*|" ]
#
#
# This configuration option has an automatic default value.
# filter = [ "a|.*|" ]
@@ -169,10 +169,10 @@ devices {
# List of additional acceptable block device types.
# These are of device type names from /proc/devices, followed by the
# maximum number of partitions.
#
#
# Example
# types = [ "fd", 16 ]
#
#
# This configuration option is advanced.
# This configuration option does not have a default value defined.
@@ -214,7 +214,7 @@ devices {
# Configuration option devices/md_component_checks.
# The checks LVM should use to detect MD component devices.
# MD component devices are block devices used by MD software RAID.
#
#
# Accepted values:
# auto
# LVM will skip scanning the end of devices when it has other
@@ -225,7 +225,7 @@ devices {
# full
# LVM will scan the start and end of devices for MD superblocks.
# This requires an extra read at the end of devices.
#
#
# This configuration option has an automatic default value.
# md_component_checks = "auto"
@@ -367,7 +367,7 @@ allocation {
# defined here, it will check whether any of them are attached to the
# PVs concerned and then seek to match those PV tags between existing
# extents and new extents.
#
#
# Example
# Use the special tag "@*" as a wildcard to match any PV tag:
# cling_tag_list = [ "@*" ]
@@ -375,7 +375,7 @@ allocation {
# PVs are tagged with either @site1 or @site2 to indicate where
# they are situated:
# cling_tag_list = [ "@site1", "@site2" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option allocation/maximise_cling.
@@ -434,25 +434,25 @@ allocation {
# Configuration option allocation/cache_metadata_format.
# Sets default metadata format for new cache.
#
#
# Accepted values:
# 0 Automatically detected best available format
# 1 Original format
# 2 Improved 2nd. generation format
#
#
# This configuration option has an automatic default value.
# cache_metadata_format = 0
# Configuration option allocation/cache_mode.
# The default cache mode used for new cache.
#
#
# Accepted values:
# writethrough
# Data blocks are immediately written from the cache to disk.
# writeback
# Data blocks are written from the cache back to disk after some
# delay to improve performance.
#
#
# This setting replaces allocation/cache_pool_cachemode.
# This configuration option has an automatic default value.
# cache_mode = "writethrough"
@@ -493,13 +493,6 @@ allocation {
# This configuration option has an automatic default value.
# thin_pool_metadata_require_separate_pvs = 0
# Configuration option allocation/thin_pool_crop_metadata.
# Older versions of lvm2 cropped the pool's metadata size to 15.81 GiB.
# This is slightly less than the actual maximum of 15.88 GiB.
# For compatibility with older versions, set to 1 to use the cropped size.
# This configuration option has an automatic default value.
# thin_pool_crop_metadata = 0
# Configuration option allocation/thin_pool_zero.
# Thin pool data chunks are zeroed before they are first used.
# Zeroing with a larger thin pool chunk size reduces performance.
@@ -508,18 +501,18 @@ allocation {
# Configuration option allocation/thin_pool_discards.
# The discards behaviour of thin pool volumes.
#
#
# Accepted values:
# ignore
# nopassdown
# passdown
#
#
# This configuration option has an automatic default value.
# thin_pool_discards = "passdown"
# Configuration option allocation/thin_pool_chunk_size_policy.
# The chunk size calculation policy for thin pool volumes.
#
#
# Accepted values:
# generic
# If thin_pool_chunk_size is defined, use it. Otherwise, calculate
@@ -531,7 +524,7 @@ allocation {
# the chunk size for performance based on device hints exposed in
# sysfs - the optimal_io_size. The chunk size is always at least
# 512KiB.
#
#
# This configuration option has an automatic default value.
# thin_pool_chunk_size_policy = "generic"
@@ -944,7 +937,8 @@ global {
# a volume group's metadata, instead of always granting the read-only
# requests immediately, delay them to allow the read-write requests to
# be serviced. Without this setting, write access may be stalled by a
# high volume of read-only requests. This option only affects file locks.
# high volume of read-only requests. This option only affects
# locking_type 1 viz. local file-based locking.
prioritise_write_locks = 1
# Configuration option global/library_dir.
@@ -968,7 +962,7 @@ global {
# Configuration option global/mirror_segtype_default.
# The segment type used by the short mirroring option -m.
# The --type mirror|raid1 option overrides this setting.
#
#
# Accepted values:
# mirror
# The original RAID1 implementation from LVM/DM. It is
@@ -988,16 +982,16 @@ global {
# handling a failure. This mirror implementation is not
# cluster-aware and cannot be used in a shared (active/active)
# fashion in a cluster.
#
#
mirror_segtype_default = "@DEFAULT_MIRROR_SEGTYPE@"
# Configuration option global/support_mirrored_mirror_log.
# Enable mirrored 'mirror' log type for testing.
#
#
# This type is deprecated to create or convert to but can
# be enabled to test that activation of existing mirrored
# logs and conversion to disk/core works.
#
#
# Not supported for regular operation!
# This configuration option has an automatic default value.
# support_mirrored_mirror_log = 0
@@ -1008,7 +1002,7 @@ global {
# The --stripes/-i and --mirrors/-m options can both be specified
# during the creation of a logical volume to use both striping and
# mirroring for the LV. There are two different implementations.
#
#
# Accepted values:
# raid10
# LVM uses MD's RAID10 personality through DM. This is the
@@ -1018,7 +1012,7 @@ global {
# is done by creating a mirror LV on top of striped sub-LVs,
# effectively creating a RAID 0+1 array. The layering is suboptimal
# in terms of providing redundancy and performance.
#
#
raid10_segtype_default = "@DEFAULT_RAID10_SEGTYPE@"
# Configuration option global/sparse_segtype_default.
@@ -1026,7 +1020,7 @@ global {
# The --type snapshot|thin option overrides this setting.
# The combination of -V and -L options creates a sparse LV. There are
# two different implementations.
#
#
# Accepted values:
# snapshot
# The original snapshot implementation from LVM/DM. It uses an old
@@ -1038,7 +1032,7 @@ global {
# bigger minimal chunk size (64KiB) and uses a separate volume for
# metadata. It has better performance, especially when more data
# is used. It also supports full snapshots.
#
#
sparse_segtype_default = "@DEFAULT_SPARSE_SEGTYPE@"
# Configuration option global/lvdisplay_shows_full_device_path.
@@ -1136,20 +1130,20 @@ global {
# causing problems. Features include: block_size, discards,
# discards_non_power_2, external_origin, metadata_resize,
# external_origin_extend, error_if_no_space.
#
#
# Example
# thin_disabled_features = [ "discards", "block_size" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option global/cache_disabled_features.
# Features to not use in the cache driver.
# This can be helpful for testing, or to avoid using a feature that is
# causing problems. Features include: policy_mq, policy_smq, metadata2.
#
#
# Example
# cache_disabled_features = [ "policy_smq" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option global/cache_check_executable.
@@ -1201,16 +1195,6 @@ global {
# This configuration option has an automatic default value.
# vdo_format_options = [ "" ]
# Configuration option global/vdo_disabled_features.
# Features to not use in the vdo driver.
# This can be helpful for testing, or to avoid using a feature that is
# causing problems. Features include: online_rename
#
# Example
# vdo_disabled_features = [ "online_rename" ]
#
# This configuration option does not have a default value defined.
# Configuration option global/fsadm_executable.
# The full path to the fsadm command.
# LVM uses this command to help with lvresize -r operations.
@@ -1223,7 +1207,7 @@ global {
# or vgimport.) A VG on shared storage devices is accessible only to
# the host with a matching system ID. See 'man lvmsystemid' for
# information on limitations and correct usage.
#
#
# Accepted values:
# none
# The host has no system ID.
@@ -1240,7 +1224,7 @@ global {
# file
# Use the contents of another file (system_id_file) to set the
# system ID.
#
#
system_id_source = "none"
# Configuration option global/system_id_file.
@@ -1366,7 +1350,7 @@ activation {
# If this list is defined, an LV is only activated if it matches an
# entry in this list. If this list is undefined, it imposes no limits
# on LV activation (all are allowed).
#
#
# Accepted values:
# vgname
# The VG name is matched exactly and selects all LVs in the VG.
@@ -1380,10 +1364,10 @@ activation {
# or VG. See tags/hosttags. If any host tags exist but volume_list
# is not defined, a default single-entry list containing '@*'
# is assumed.
#
#
# Example
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option activation/auto_activation_volume_list.
@@ -1403,7 +1387,7 @@ activation {
# commands run directly by a user. A user may also use the 'a' flag
# directly to perform auto-activation. Also see pvscan(8) for more
# information about auto-activation.
#
#
# Accepted values:
# vgname
# The VG name is matched exactly and selects all LVs in the VG.
@@ -1417,10 +1401,10 @@ activation {
# or VG. See tags/hosttags. If any host tags exist but volume_list
# is not defined, a default single-entry list containing '@*'
# is assumed.
#
#
# Example
# auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option activation/read_only_volume_list.
@@ -1429,7 +1413,7 @@ activation {
# against this list, and if it matches, it is activated in read-only
# mode. This overrides the permission setting stored in the metadata,
# e.g. from --permission rw.
#
#
# Accepted values:
# vgname
# The VG name is matched exactly and selects all LVs in the VG.
@@ -1443,10 +1427,10 @@ activation {
# or VG. See tags/hosttags. If any host tags exist but volume_list
# is not defined, a default single-entry list containing '@*'
# is assumed.
#
#
# Example
# read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
#
#
# This configuration option does not have a default value defined.
# Configuration option activation/raid_region_size.
@@ -1469,13 +1453,13 @@ activation {
# Configuration option activation/readahead.
# Setting to use when there is no readahead setting in metadata.
#
#
# Accepted values:
# none
# Disable readahead.
# auto
# Use default value chosen by kernel.
#
#
# This configuration option has an automatic default value.
# readahead = "auto"
@@ -1487,7 +1471,7 @@ activation {
# performed by dmeventd automatically, and the steps performed by the
# manual command lvconvert --repair --use-policies.
# Automatic handling requires dmeventd to be monitoring the LV.
#
#
# Accepted values:
# warn
# Use the system log to warn the user that a device in the RAID LV
@@ -1498,7 +1482,7 @@ activation {
# allocate
# Attempt to use any extra physical volumes in the VG as spares and
# replace faulty devices.
#
#
raid_fault_policy = "warn"
# Configuration option activation/mirror_image_fault_policy.
@@ -1510,7 +1494,7 @@ activation {
# determines the steps performed by dmeventd automatically, and the steps
# performed by the manual command lvconvert --repair --use-policies.
# Automatic handling requires dmeventd to be monitoring the LV.
#
#
# Accepted values:
# remove
# Simply remove the faulty device and run without it. If the log
@@ -1535,7 +1519,7 @@ activation {
# the redundant nature of the mirror. This policy acts like
# 'remove' if no suitable device and space can be allocated for the
# replacement.
#
#
mirror_image_fault_policy = "remove"
# Configuration option activation/mirror_log_fault_policy.
@@ -1550,26 +1534,26 @@ activation {
# The minimum value is 50 (a smaller value is treated as 50.)
# Also see snapshot_autoextend_percent.
# Automatic extension requires dmeventd to be monitoring the LV.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 1G
# snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds
# 840M, it is extended to 1.44G:
# snapshot_autoextend_threshold = 70
#
#
snapshot_autoextend_threshold = 100
# Configuration option activation/snapshot_autoextend_percent.
# Auto-extending a snapshot adds this percent extra space.
# The amount of additional space added to a snapshot is this
# percent of its current size.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 1G
# snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds
# 840M, it is extended to 1.44G:
# snapshot_autoextend_percent = 20
#
#
snapshot_autoextend_percent = 20
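As an aside, the threshold/percent arithmetic in the examples above (and in the thin pool and VDO pool variants below) reduces to two integer operations. A minimal sketch, using decimal megabytes as in the example text; this is only an illustration of the documented behaviour, not the dmeventd code:

#include <stdint.h>
#include <stdio.h>

/* Illustration of the autoextend arithmetic described above: once
 * usage crosses threshold% of the current size, the volume grows by
 * percent% of its current size. Sizes are in decimal MB, matching
 * the "1G -> 1.2G -> 1.44G" example text. Not the dmeventd code. */
static uint64_t autoextend(uint64_t size_mb, uint64_t used_mb,
                           unsigned threshold, unsigned percent)
{
        if (used_mb * 100 >= size_mb * threshold)
                return size_mb + size_mb * percent / 100;
        return size_mb; /* below threshold: no extension */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)autoextend(1000, 700, 70, 20)); /* 1200 (1.2G) */
        printf("%llu\n", (unsigned long long)autoextend(1200, 840, 70, 20)); /* 1440 (1.44G) */
        return 0;
}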
# Configuration option activation/thin_pool_autoextend_threshold.
@@ -1578,26 +1562,26 @@ activation {
# The minimum value is 50 (a smaller value is treated as 50.)
# Also see thin_pool_autoextend_percent.
# Automatic extension requires dmeventd to be monitoring the LV.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 1G
# thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds
# 840M, it is extended to 1.44G:
# thin_pool_autoextend_threshold = 70
#
#
thin_pool_autoextend_threshold = 100
# Configuration option activation/thin_pool_autoextend_percent.
# Auto-extending a thin pool adds this percent extra space.
# The amount of additional space added to a thin pool is this
# percent of its current size.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 1G
# thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds
# 840M, it is extended to 1.44G:
# thin_pool_autoextend_percent = 20
#
#
thin_pool_autoextend_percent = 20
# Configuration option activation/vdo_pool_autoextend_threshold.
@@ -1606,13 +1590,13 @@ activation {
# The minimum value is 50 (a smaller value is treated as 50.)
# Also see vdo_pool_autoextend_percent.
# Automatic extension requires dmeventd to be monitoring the LV.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 10G
# VDO pool exceeds 7G, it is extended to 12G, and when it exceeds
# 8.4G, it is extended to 14.4G:
# vdo_pool_autoextend_threshold = 70
#
#
# This configuration option has an automatic default value.
# vdo_pool_autoextend_threshold = 100
@@ -1620,7 +1604,7 @@ activation {
# Auto-extending a VDO pool adds this percent extra space.
# The amount of additional space added to a VDO pool is this
# percent of its current size.
#
#
# Example
# Using 70% autoextend threshold and 20% autoextend size, when a 10G
# VDO pool exceeds 7G, it is extended to 12G, and when it exceeds
@@ -1639,10 +1623,10 @@ activation {
# pages corresponding to lines that match are not pinned. On some
# systems, locale-archive was found to make up over 80% of the memory
# used by the process.
#
#
# Example
# mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
#
#
# This configuration option is advanced.
# This configuration option does not have a default value defined.
@@ -1683,7 +1667,7 @@ activation {
# Configuration option activation/activation_mode.
# How LVs with missing devices are activated.
# The --activationmode option overrides this setting.
#
#
# Accepted values:
# complete
# Only allow activation of an LV if all of the Physical Volumes it
@@ -1698,7 +1682,7 @@ activation {
# could cause data loss with a portion of the LV inaccessible.
# This setting should not normally be used, but may sometimes
# assist with data recovery.
#
#
activation_mode = "degraded"
# Configuration option activation/lock_start_list.
@@ -1746,7 +1730,7 @@ activation {
# Configuration option metadata/pvmetadatacopies.
# Number of copies of metadata to store on each PV.
# The --pvmetadatacopies option overrides this setting.
#
#
# Accepted values:
# 2
# Two copies of the VG metadata are stored on the PV, one at the
@@ -1756,7 +1740,7 @@ activation {
# 0
# No copies of VG metadata are stored on the PV. This may be
# useful for VGs containing large numbers of PVs.
#
#
# This configuration option is advanced.
# This configuration option has an automatic default value.
# pvmetadatacopies = 1
@@ -1906,7 +1890,7 @@ activation {
# sequences are copied verbatim. Each special character sequence is
# introduced by the '%' character and such sequence is then
# substituted with a value as described below.
#
#
# Accepted values:
# %a
# The abbreviated name of the day of the week according to the
@@ -2029,7 +2013,7 @@ activation {
# The timezone name or abbreviation.
# %%
# A literal '%' character.
#
#
# This configuration option has an automatic default value.
# time_format = "%Y-%m-%d %T %z"
@@ -2298,12 +2282,12 @@ dmeventd {
# applied to the local machine as a 'host tag'. If this subsection is
# empty (has no host_list), then the subsection name is always applied
# as a 'host tag'.
#
#
# Example
# The host tag foo is given to all hosts, and the host tag
# bar is given to the hosts named machine1 and machine2.
# tags { foo { } bar { host_list = [ "machine1", "machine2" ] } }
#
#
# This configuration section has variable name.
# This configuration section has an automatic default value.
# tag {

View File

@@ -28,13 +28,13 @@ local {
# main configuration file, e.g. lvm.conf. When used, it must be set to
# a unique value among all hosts sharing access to the storage,
# e.g. a host name.
#
#
# Example
# Set no system ID:
# system_id = ""
# Set the system_id to a specific name:
# system_id = "host1"
#
#
# This configuration option has an automatic default value.
# system_id = ""

configure vendored
View File

@@ -11962,7 +11962,7 @@ $as_echo_n "checking for $am_display_PYTHON version... " >&6; }
if ${am_cv_python_version+:} false; then :
$as_echo_n "(cached) " >&6
else
am_cv_python_version=`$PYTHON -c "import sys; print('%u.%u' % sys.version_info[:2])"`
am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[:3])"`
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_version" >&5
$as_echo "$am_cv_python_version" >&6; }

View File

@@ -896,9 +896,8 @@ static int read_adopt_file(struct list_head *vg_lockd)
goto fail;
memset(vg_uuid, 0, sizeof(vg_uuid));
memset(lm_type_str, 0, sizeof(lm_type_str));
if (sscanf(adopt_line, "VG: %63s %64s %15s %64s",
if (sscanf(adopt_line, "VG: %63s %64s %16s %64s",
vg_uuid, ls->vg_name, lm_type_str, ls->vg_args) != 4) {
goto fail;
}
@@ -917,9 +916,8 @@ static int read_adopt_file(struct list_head *vg_lockd)
r->type = LD_RT_LV;
memset(vg_uuid, 0, sizeof(vg_uuid));
memset(mode, 0, sizeof(mode));
if (sscanf(adopt_line, "LV: %64s %64s %s %7s %u",
if (sscanf(adopt_line, "LV: %64s %64s %s %8s %u",
vg_uuid, r->name, r->lv_args, mode, &r->version) != 5) {
goto fail;
}
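The one-character field-width changes above are about leaving room for the terminating NUL: a %Ns conversion stores up to N characters plus '\0', so the width must be one less than the buffer size. A minimal sketch; the 16- and 8-byte buffer sizes here are assumptions inferred from the field widths, not lvmlockd's actual definitions:

#include <stdio.h>

/* A %Ns conversion writes at most N characters plus a terminating
 * NUL, so a 16-byte buffer needs %15s to be overflow-safe, while
 * %16s could write 17 bytes. Buffer sizes are assumed for the sketch. */
int main(void)
{
        char lm_type_str[16];
        char mode[8];

        /* %15s leaves room for the NUL in a 16-byte buffer ... */
        (void) sscanf("VG: dlm", "VG: %15s", lm_type_str);
        /* ... and %7s does the same for an 8-byte buffer. */
        (void) sscanf("LV: sh", "LV: %7s", mode);
        printf("%s %s\n", lm_type_str, mode);
        return 0;
}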

View File

@@ -92,12 +92,6 @@ const char **cmdargv_ctr(const struct lvmpolld_lv *pdlv, const char *lvm_binary,
if (!add_to_cmd_arr(&cmd_argv, "-An", &i))
goto err;
if (pdlv->devicesfile) {
if (!add_to_cmd_arr(&cmd_argv, "--devicesfile", &i) ||
!add_to_cmd_arr(&cmd_argv, pdlv->devicesfile, &i))
goto err;
}
/* terminating NULL */
if (!add_to_cmd_arr(&cmd_argv, NULL, &i))
goto err;

View File

@@ -555,15 +555,14 @@ static struct lvmpolld_lv *construct_pdlv(request req, struct lvmpolld_state *ls
const char *interval, const char *id,
const char *vgname, const char *lvname,
const char *sysdir, enum poll_type type,
unsigned abort_polling, unsigned uinterval,
const char *devicesfile)
unsigned abort_polling, unsigned uinterval)
{
const char **cmdargv, **cmdenvp;
struct lvmpolld_lv *pdlv;
unsigned handle_missing_pvs = daemon_request_int(req, LVMPD_PARM_HANDLE_MISSING_PVS, 0);
pdlv = pdlv_create(ls, id, vgname, lvname, sysdir, type,
interval, uinterval, pdst, devicesfile);
interval, uinterval, pdst);
if (!pdlv) {
ERROR(ls, "%s: %s", PD_LOG_PREFIX, "failed to create internal LV data structure.");
@@ -622,7 +621,6 @@ static response poll_init(client_handle h, struct lvmpolld_state *ls, request re
const char *lvname = daemon_request_str(req, LVMPD_PARM_LVNAME, NULL);
const char *vgname = daemon_request_str(req, LVMPD_PARM_VGNAME, NULL);
const char *sysdir = daemon_request_str(req, LVMPD_PARM_SYSDIR, NULL);
const char *devicesfile = daemon_request_str(req, LVMPD_PARM_DEVICESFILE, NULL);
unsigned abort_polling = daemon_request_int(req, LVMPD_PARM_ABORT, 0);
assert(type < POLL_TYPE_MAX);
@@ -682,7 +680,7 @@ static response poll_init(client_handle h, struct lvmpolld_state *ls, request re
pdlv->init_rq_count++; /* safe. protected by store lock */
} else {
pdlv = construct_pdlv(req, ls, pdst, interval, id, vgname,
lvname, sysdir, type, abort_polling, 2 * uinterval, devicesfile);
lvname, sysdir, type, abort_polling, 2 * uinterval);
if (!pdlv) {
pdst_unlock(pdst);
free(id);

View File

@@ -93,13 +93,11 @@ struct lvmpolld_lv *pdlv_create(struct lvmpolld_state *ls, const char *id,
const char *vgname, const char *lvname,
const char *sysdir, enum poll_type type,
const char *sinterval, unsigned pdtimeout,
struct lvmpolld_store *pdst,
const char *devicesfile)
struct lvmpolld_store *pdst)
{
char *lvmpolld_id = strdup(id), /* copy */
*full_lvname = _construct_full_lvname(vgname, lvname), /* copy */
*lvm_system_dir_env = _construct_lvm_system_dir_env(sysdir); /* copy */
char *devicesfile_dup = devicesfile ? strdup(devicesfile) : NULL;
struct lvmpolld_lv tmp = {
.ls = ls,
@@ -107,7 +105,6 @@ struct lvmpolld_lv *pdlv_create(struct lvmpolld_state *ls, const char *id,
.lvmpolld_id = lvmpolld_id,
.lvid = _get_lvid(lvmpolld_id, sysdir),
.lvname = full_lvname,
.devicesfile = devicesfile_dup,
.lvm_system_dir_env = lvm_system_dir_env,
.sinterval = strdup(sinterval), /* copy */
.pdtimeout = pdtimeout < MIN_POLLING_TIMEOUT ? MIN_POLLING_TIMEOUT : pdtimeout,
@@ -127,7 +124,6 @@ struct lvmpolld_lv *pdlv_create(struct lvmpolld_state *ls, const char *id,
return pdlv;
err:
free((void *)devicesfile_dup);
free((void *)full_lvname);
free((void *)lvmpolld_id);
free((void *)lvm_system_dir_env);
@@ -140,7 +136,6 @@ err:
void pdlv_destroy(struct lvmpolld_lv *pdlv)
{
free((void *)pdlv->lvmpolld_id);
free((void *)pdlv->devicesfile);
free((void *)pdlv->lvname);
free((void *)pdlv->sinterval);
free((void *)pdlv->lvm_system_dir_env);

View File

@@ -49,7 +49,6 @@ struct lvmpolld_lv {
const enum poll_type type;
const char *const lvid;
const char *const lvmpolld_id;
const char *const devicesfile;
const char *const lvname; /* full vg/lv name */
const unsigned pdtimeout; /* in seconds */
const char *const sinterval;
@@ -102,8 +101,7 @@ struct lvmpolld_lv *pdlv_create(struct lvmpolld_state *ls, const char *id,
const char *vgname, const char *lvname,
const char *sysdir, enum poll_type type,
const char *sinterval, unsigned pdtimeout,
struct lvmpolld_store *pdst,
const char *devicesfile);
struct lvmpolld_store *pdst);
/* only call with appropriate struct lvmpolld_store lock held */
void pdlv_destroy(struct lvmpolld_lv *pdlv);

View File

@@ -35,7 +35,6 @@
#define LVMPD_PARM_SYSDIR "sysdir"
#define LVMPD_PARM_VALUE "value" /* either retcode or signal value */
#define LVMPD_PARM_VGNAME "vgname"
#define LVMPD_PARM_DEVICESFILE "devicesfile"
#define LVMPD_RESP_FAILED "failed"
#define LVMPD_RESP_FINISHED "finished"

View File

@@ -1072,10 +1072,10 @@ int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
#define DM_THIN_MIN_DATA_BLOCK_SIZE (UINT32_C(128))
#define DM_THIN_MAX_DATA_BLOCK_SIZE (UINT32_C(2097152))
/*
* Max supported size for thin pool metadata device (17045913600 bytes)
* Max supported size for thin pool metadata device (17112760320 bytes)
* Limitation is hardcoded into the kernel and bigger device size
* is not accepted.
* drivers/md/dm-thin-metadata.h THIN_METADATA_MAX_SECTORS
* But here DM_THIN_MAX_METADATA_SIZE got defined incorrectly
* Correct size is (UINT64_C(255) * ((1 << 14) - 64) * (4096 / (1 << 9)))
*/
#define DM_THIN_MAX_METADATA_SIZE (UINT64_C(255) * (1 << 14) * (4096 / (1 << 9)) - 256 * 1024)
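For reference, the two limits the changelog entries above call 15.81 GiB and 15.88 GiB can be reproduced from these formulas. A minimal check, assuming (as the surrounding code suggests) that the values are in 512-byte sectors:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Kernel limit per the comment above (THIN_METADATA_MAX_SECTORS):
         * 255 * ((1 << 14) - 64) metadata blocks of 4096 bytes, in sectors. */
        uint64_t max_sectors = UINT64_C(255) * ((1 << 14) - 64) * (4096 / (1 << 9));
        /* The cropped variant, with 256 * 1024 sectors subtracted. */
        uint64_t cropped     = UINT64_C(255) * (1 << 14) * (4096 / (1 << 9)) - 256 * 1024;

        printf("max:     %llu sectors = %llu bytes (~15.88 GiB)\n",
               (unsigned long long)max_sectors,
               (unsigned long long)(max_sectors << 9)); /* 33292800 = 17045913600 */
        printf("cropped: %llu sectors = %llu bytes (~15.81 GiB)\n",
               (unsigned long long)cropped,
               (unsigned long long)(cropped << 9));     /* 33161216 = 16978542592 */
        return 0;
}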
@@ -1088,16 +1088,6 @@ int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
uint64_t low_water_mark,
unsigned skip_block_zeroing);
int dm_tree_node_add_thin_pool_target_v1(struct dm_tree_node *node,
uint64_t size,
uint64_t transaction_id,
const char *metadata_uuid,
const char *pool_uuid,
uint32_t data_block_size,
uint64_t low_water_mark,
unsigned skip_block_zeroing,
unsigned crop_metadata);
/* Supported messages for thin provision target */
typedef enum {
DM_THIN_MESSAGE_CREATE_SNAP, /* device_id, origin_id */
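A hedged sketch of how a caller might use the _v1 variant shown in the hunk above to opt out of the historical metadata cropping. This assumes libdevmapper.h is included; the UUID strings are hypothetical placeholders, and error handling is elided:

/* Sketch only: load a thin-pool target without cropping the metadata
 * device (crop_metadata = 0), via the _v1 API declared above. Not
 * code from the tree; UUIDs are placeholders. */
static int add_uncropped_pool(struct dm_tree_node *pool_node, uint64_t size_sectors)
{
        return dm_tree_node_add_thin_pool_target_v1(pool_node, size_sectors,
                                                    0,                /* transaction_id */
                                                    "LVM-tmeta-uuid", /* metadata_uuid: placeholder */
                                                    "LVM-tdata-uuid", /* pool_uuid: placeholder */
                                                    128,              /* data_block_size: 64KiB minimum */
                                                    0,                /* low_water_mark */
                                                    1,                /* skip_block_zeroing */
                                                    0);               /* crop_metadata: keep full size */
}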

View File

@@ -3978,24 +3978,6 @@ int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
uint32_t data_block_size,
uint64_t low_water_mark,
unsigned skip_block_zeroing)
{
return dm_tree_node_add_thin_pool_target_v1(node, size, transaction_id,
metadata_uuid, pool_uuid,
data_block_size,
low_water_mark,
skip_block_zeroing,
1);
}
int dm_tree_node_add_thin_pool_target_v1(struct dm_tree_node *node,
uint64_t size,
uint64_t transaction_id,
const char *metadata_uuid,
const char *pool_uuid,
uint32_t data_block_size,
uint64_t low_water_mark,
unsigned skip_block_zeroing,
unsigned crop_metadata)
{
struct load_segment *seg, *mseg;
uint64_t devsize = 0;
@@ -4023,18 +4005,17 @@ int dm_tree_node_add_thin_pool_target_v1(struct dm_tree_node *node,
if (!_link_tree_nodes(node, seg->metadata))
return_0;
if (crop_metadata)
/* FIXME: more complex target may need more tweaks */
dm_list_iterate_items(mseg, &seg->metadata->props.segs) {
devsize += mseg->size;
if (devsize > DM_THIN_MAX_METADATA_SIZE) {
log_debug_activation("Ignoring %" PRIu64 " of device.",
devsize - DM_THIN_MAX_METADATA_SIZE);
mseg->size -= (devsize - DM_THIN_MAX_METADATA_SIZE);
devsize = DM_THIN_MAX_METADATA_SIZE;
/* FIXME: drop remaining segs */
}
/* FIXME: more complex target may need more tweaks */
dm_list_iterate_items(mseg, &seg->metadata->props.segs) {
devsize += mseg->size;
if (devsize > DM_THIN_MAX_METADATA_SIZE) {
log_debug_activation("Ignoring %" PRIu64 " of device.",
devsize - DM_THIN_MAX_METADATA_SIZE);
mseg->size -= (devsize - DM_THIN_MAX_METADATA_SIZE);
devsize = DM_THIN_MAX_METADATA_SIZE;
/* FIXME: drop remaining segs */
}
}
if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
log_error("Missing pool uuid %s.", pool_uuid);

View File

@@ -29,7 +29,6 @@ SOURCES =\
device/bcache.c \
device/bcache-utils.c \
device/dev-cache.c \
device/device_id.c \
device/dev-ext.c \
device/dev-io.c \
device/dev-md.c \
@@ -53,7 +52,6 @@ SOURCES =\
filters/filter-usable.c \
filters/filter-internal.c \
filters/filter-signature.c \
filters/filter-deviceid.c \
format_text/archive.c \
format_text/archiver.c \
format_text/export.c \

View File

@@ -466,11 +466,6 @@ static int _passes_readonly_filter(struct cmd_context *cmd,
return _lv_passes_volumes_filter(cmd, lv, cn, activation_read_only_volume_list_CFG);
}
int lv_passes_readonly_filter(const struct logical_volume *lv)
{
return _passes_readonly_filter(lv->vg->cmd, lv);
}
int library_version(char *version, size_t size)
{
if (!activation())

View File

@@ -208,8 +208,6 @@ int lvs_in_vg_opened(const struct volume_group *vg);
int lv_is_active(const struct logical_volume *lv);
int lv_passes_readonly_filter(const struct logical_volume *lv);
/* Check is any component LV is active */
const struct logical_volume *lv_component_is_active(const struct logical_volume *lv);
const struct logical_volume *lv_holder_is_active(const struct logical_volume *lv);

View File

@@ -261,7 +261,7 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
int dmtask;
int with_flush; /* TODO: arg for _info_run */
void *target = NULL;
uint64_t target_start, target_length, start, length, length_crop = 0;
uint64_t target_start, target_length, start, length;
char *target_name, *target_params;
const char *devname;
@@ -297,7 +297,7 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
/* Uses max DM_THIN_MAX_METADATA_SIZE sectors for metadata device */
if (lv_is_thin_pool_metadata(seg_status->seg->lv) &&
(length > DM_THIN_MAX_METADATA_SIZE))
length_crop = DM_THIN_MAX_METADATA_SIZE;
length = DM_THIN_MAX_METADATA_SIZE;
/* Uses virtual size with headers for VDO pool device */
if (lv_is_vdo_pool(seg_status->seg->lv))
@@ -310,9 +310,7 @@ static int _info_run(const char *dlid, struct dm_info *dminfo,
target = dm_get_next_target(dmt, target, &target_start,
&target_length, &target_name, &target_params);
if ((start == target_start) &&
((length == target_length) ||
(length_crop && (length_crop == target_length))))
if ((start == target_start) && (length == target_length))
break; /* Keep target_params when matching segment is found */
target_params = NULL; /* Marking this target_params unusable */
@@ -2272,31 +2270,21 @@ static int _pool_callback(struct dm_tree_node *node,
const struct pool_cb_data *data = cb_data;
const struct logical_volume *pool_lv = data->pool_lv;
const struct logical_volume *mlv = first_seg(pool_lv)->metadata_lv;
struct cmd_context *cmd = pool_lv->vg->cmd;
long buf[64 / sizeof(long)]; /* buffer for short disk header (64B) */
int args = 0;
char *mpath;
const char *argv[19] = { /* Max supported 15 args */
find_config_tree_str_allow_empty(cmd, data->exec, NULL)
find_config_tree_str_allow_empty(pool_lv->vg->cmd, data->exec, NULL)
};
if (!*argv[0]) /* *_check tool is unconfigured/disabled with "" setting */
return 1;
if (lv_is_cache_vol(pool_lv)) {
if (!(mpath = lv_dmpath_suffix_dup(data->dm->mem, pool_lv, "-cmeta"))) {
log_error("Failed to build device path for checking cachevol metadata %s.",
display_lvname(pool_lv));
return 0;
}
} else {
if (!(mpath = lv_dmpath_dup(data->dm->mem, mlv))) {
log_error("Failed to build device path for checking pool metadata %s.",
display_lvname(mlv));
return 0;
}
if (!(mpath = lv_dmpath_dup(data->dm->mem, mlv))) {
log_error("Failed to build device path for checking pool metadata %s.",
display_lvname(mlv));
return 0;
}
log_debug("Running check command on %s", mpath);
if (data->skip_zero) {
if ((fd = open(mpath, O_RDONLY)) < 0) {
@@ -2324,7 +2312,7 @@ static int _pool_callback(struct dm_tree_node *node,
}
}
if (!(cn = find_config_tree_array(cmd, data->opts, NULL))) {
if (!(cn = find_config_tree_array(mlv->vg->cmd, data->opts, NULL))) {
log_error(INTERNAL_ERROR "Unable to find configuration for pool check options.");
return 0;
}
@@ -2346,7 +2334,7 @@ static int _pool_callback(struct dm_tree_node *node,
argv[++args] = mpath;
if (!(ret = exec_cmd(cmd, (const char * const *)argv,
if (!(ret = exec_cmd(pool_lv->vg->cmd, (const char * const *)argv,
&status, 0))) {
if (status == ENOENT) {
log_warn("WARNING: Check is skipped, please install recommended missing binary %s!",
@@ -2355,7 +2343,7 @@ static int _pool_callback(struct dm_tree_node *node,
}
if ((data->version.maj || data->version.min || data->version.patch) &&
!_check_tool_version(cmd, argv[0],
!_check_tool_version(pool_lv->vg->cmd, argv[0],
data->version.maj, data->version.min, data->version.patch)) {
log_warn("WARNING: Check is skipped, please upgrade installed version of %s!",
argv[0]);
@@ -2399,6 +2387,10 @@ static int _pool_register_callback(struct dev_manager *dm,
return 1;
#endif
/* Skip for single-device cache pool */
if (lv_is_cache(lv) && lv_is_cache_vol(first_seg(lv)->pool_lv))
return 1;
if (!(data = dm_pool_zalloc(dm->mem, sizeof(*data)))) {
log_error("Failed to allocated path for callback.");
return 0;
@@ -3491,12 +3483,6 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
!_pool_register_callback(dm, dnode, lv))
return_0;
if (lv_is_cache(lv) && lv_is_cache_vol(first_seg(lv)->pool_lv) &&
/* Register callback only for layer activation or non-layered cache LV */
(layer || !lv_layer(lv)) &&
!_pool_register_callback(dm, dnode, lv))
return_0;
/*
* Update tables for ANY PVMOVE holders for active LV where the name starts with 'pvmove',
* but it's not anymore PVMOVE LV and also it's not a PVMOVE _mimage LV.

lib/cache/lvmcache.c vendored
View File

@@ -18,7 +18,6 @@
#include "lib/cache/lvmcache.h"
#include "lib/commands/toolcontext.h"
#include "lib/device/dev-cache.h"
#include "lib/device/device_id.h"
#include "lib/locking/locking.h"
#include "lib/metadata/metadata.h"
#include "lib/mm/memlock.h"
@@ -508,25 +507,6 @@ static const char *_get_pvsummary_device_hint(char *pvid)
return NULL;
}
static const char *_get_pvsummary_device_id(char *pvid, const char **device_id_type)
{
char pvid_s[ID_LEN + 1] __attribute__((aligned(8)));
struct lvmcache_vginfo *vginfo;
struct pv_list *pvl;
dm_list_iterate_items(vginfo, &_vginfos) {
dm_list_iterate_items(pvl, &vginfo->pvsummaries) {
(void) dm_strncpy(pvid_s, (char *) &pvl->pv->id, sizeof(pvid_s));
if (!strcmp(pvid_s, pvid)) {
*device_id_type = pvl->pv->device_id_type;
return pvl->pv->device_id;
}
}
}
return NULL;
}
/*
* Check if any PVs in vg->pvs have the same PVID as any
* entries in _unused_duplicates.
@@ -632,8 +612,6 @@ static void _choose_duplicates(struct cmd_context *cmd,
struct device_list *devl, *devl_safe, *devl_add, *devl_del;
struct lvmcache_info *info;
struct device *dev1, *dev2;
const char *device_id = NULL, *device_id_type = NULL;
const char *idname1 = NULL, *idname2 = NULL;
uint32_t dev1_major, dev1_minor, dev2_major, dev2_minor;
uint64_t dev1_size, dev2_size, pvsummary_size;
int in_subsys1, in_subsys2;
@@ -642,7 +620,6 @@ static void _choose_duplicates(struct cmd_context *cmd,
int has_lv1, has_lv2;
int same_size1, same_size2;
int same_name1 = 0, same_name2 = 0;
int same_id1 = 0, same_id2 = 0;
int prev_unchosen1, prev_unchosen2;
int change;
@@ -773,19 +750,6 @@ next:
same_name2 = !strcmp(device_hint, dev_name(dev2));
}
if ((device_id = _get_pvsummary_device_id(devl->dev->pvid, &device_id_type))) {
uint16_t idtype = idtype_from_str(device_id_type);
if (idtype) {
idname1 = device_id_system_read(cmd, dev1, idtype);
idname2 = device_id_system_read(cmd, dev2, idtype);
}
if (idname1)
same_id1 = !strcmp(idname1, device_id);
if (idname2)
same_id2 = !strcmp(idname2, device_id);
}
has_lv1 = (dev1->flags & DEV_USED_FOR_LV) ? 1 : 0;
has_lv2 = (dev2->flags & DEV_USED_FOR_LV) ? 1 : 0;
@@ -804,12 +768,6 @@ next:
dev_name(dev2), dev2_major, dev2_minor,
device_hint ?: "none");
log_debug_cache("PV %s: device_id %s. %s is %s. %s is %s.",
devl->dev->pvid,
device_id ?: ".",
dev_name(dev1), idname1 ?: ".",
dev_name(dev2), idname2 ?: ".");
log_debug_cache("PV %s: size %llu. %s is %llu. %s is %llu.",
devl->dev->pvid,
(unsigned long long)pvsummary_size,
@@ -850,13 +808,6 @@ next:
} else if (prev_unchosen2 && !prev_unchosen1) {
/* keep 1 (NB when unchosen is set we unprefer) */
reason = "of previous preference";
} else if (same_id1 && !same_id2) {
/* keep 1 */
reason = "device id";
} else if (same_id2 && !same_id1) {
/* change to 2 */
change = 1;
reason = "device id";
} else if (has_lv1 && !has_lv2) {
/* keep 1 */
reason = "device is used by LV";
@@ -1105,14 +1056,12 @@ int lvmcache_label_scan(struct cmd_context *cmd)
{
struct dm_list del_cache_devs;
struct dm_list add_cache_devs;
struct dm_list renamed_devs;
struct lvmcache_info *info;
struct lvmcache_vginfo *vginfo;
struct device_list *devl;
int vginfo_count = 0;
int r = 0;
dm_list_init(&renamed_devs);
int r = 0;
log_debug_cache("Finding VG info");
@@ -1126,24 +1075,13 @@ int lvmcache_label_scan(struct cmd_context *cmd)
* Do the actual scanning. This populates lvmcache
* with infos/vginfos based on reading headers from
* each device, and a vg summary from each mda.
*
* Note that this will *skip* scanning a device if
* an info struct already exists in lvmcache for
* the device.
*/
label_scan(cmd);
/*
* When devnames are used as device ids (which is dispreferred),
* changing/unstable devnames can lead to entries in the devices file
* not being matched to a dev even if the PV is present on the system.
* Or, a devices file entry may have been matched to the wrong device
* (with the previous name) that does not have the PVID specified in
* the entry. This function detects that problem, scans labels on all
* devs on the system to find the missing PVIDs, and corrects the
* devices file. We then need to run label scan on these correct
* devices.
*/
device_ids_find_renamed_devs(cmd, &renamed_devs, NULL, 0);
if (!dm_list_empty(&renamed_devs))
label_scan_devs(cmd, cmd->filter, &renamed_devs);
/*
* _choose_duplicates() returns:
*
@@ -2784,10 +2722,6 @@ const char *dev_filtered_reason(struct device *dev)
return "device is too small (pv_min_size)";
if (dev->filtered_flags & DEV_FILTERED_UNUSABLE)
return "device is not in a usable state";
if (dev->filtered_flags & DEV_FILTERED_DEVICES_FILE)
return "device is not in devices file";
if (dev->filtered_flags & DEV_FILTERED_DEVICES_LIST)
return "device is not in devices list";
/* flag has not been added here */
if (dev->filtered_flags)

View File

@@ -32,7 +32,6 @@
#include "lib/cache/lvmcache.h"
#include "lib/format_text/archiver.h"
#include "lib/lvmpolld/lvmpolld-client.h"
#include "lib/device/device_id.h"
#include <locale.h>
#include <sys/stat.h>
@@ -1067,7 +1066,7 @@ static int _init_dev_cache(struct cmd_context *cmd)
return 1;
}
#define MAX_FILTERS 11
#define MAX_FILTERS 10
static struct dev_filter *_init_filter_chain(struct cmd_context *cmd)
{
@@ -1086,9 +1085,6 @@ static struct dev_filter *_init_filter_chain(struct cmd_context *cmd)
* sysfs filter. Only available on 2.6 kernels. Non-critical.
* Listed first because it's very efficient at eliminating
* unavailable devices.
*
* TODO: I suspect that using the lvm_type and device_id
* filters before this one may be more efficient.
*/
if (find_config_tree_bool(cmd, devices_sysfs_scan_CFG, NULL)) {
if ((filters[nr_filt] = sysfs_filter_create()))
@@ -1127,13 +1123,6 @@ static struct dev_filter *_init_filter_chain(struct cmd_context *cmd)
}
nr_filt++;
/* filter based on the device_ids saved in the devices file */
if (!(filters[nr_filt] = deviceid_filter_create(cmd))) {
log_error("Failed to create deviceid device filter");
goto bad;
}
nr_filt++;
/* usable device filter. Required. */
if (!(filters[nr_filt] = usable_filter_create(cmd, cmd->dev_types, FILTER_MODE_NO_LVMETAD))) {
log_error("Failed to create usabled device filter");
@@ -1728,8 +1717,6 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
if (!_init_dev_cache(cmd))
goto_out;
devices_file_init(cmd);
memlock_init(cmd);
if (!_init_formats(cmd))
@@ -1855,7 +1842,6 @@ int refresh_toolcontext(struct cmd_context *cmd)
_destroy_segtypes(&cmd->segtypes);
_destroy_formats(cmd, &cmd->formats);
devices_file_exit(cmd);
if (!dev_cache_exit())
stack;
_destroy_dev_types(cmd);
@@ -1935,8 +1921,6 @@ int refresh_toolcontext(struct cmd_context *cmd)
if (!_init_dev_cache(cmd))
return_0;
devices_file_init(cmd);
if (!_init_formats(cmd))
return_0;
@@ -1986,7 +1970,6 @@ void destroy_toolcontext(struct cmd_context *cmd)
_destroy_filters(cmd);
if (cmd->mem)
dm_pool_destroy(cmd->mem);
devices_file_exit(cmd);
dev_cache_exit();
_destroy_dev_types(cmd);
_destroy_tags(cmd);

View File

@@ -182,12 +182,6 @@ struct cmd_context {
unsigned pvscan_recreate_hints:1; /* enable special case hint handling for pvscan --cache */
unsigned scan_lvs:1;
unsigned wipe_outdated_pvs:1;
unsigned enable_devices_list:1; /* command is using --devices option */
unsigned enable_devices_file:1; /* command is using devices file */
unsigned create_edit_devices_file:1; /* command expects to create and/or edit devices file */
unsigned edit_devices_file:1; /* command expects to edit devices file */
unsigned filter_deviceid_skip:1; /* don't use filter-deviceid */
unsigned filter_regex_with_devices_file:1; /* use filter-regex even when devices file is enabled */
unsigned filter_nodata_only:1; /* only use filters that do not require data from the dev */
/*
@@ -195,11 +189,7 @@ struct cmd_context {
*/
struct dev_filter *filter;
struct dm_list hints;
struct dm_list use_devices; /* struct dev_use for each entry in devices file */
const char *md_component_checks;
const char *search_for_devnames; /* config file setting */
const char *devicesfile; /* from --devicesfile option */
struct dm_list deviceslist; /* from --devices option, struct dm_str_list */
/*
* Configuration.
@@ -231,7 +221,6 @@ struct cmd_context {
char system_dir[PATH_MAX];
char dev_dir[PATH_MAX];
char proc_dir[PATH_MAX];
char devices_file_path[PATH_MAX];
/*
* Reporting.

View File

@@ -1738,7 +1738,7 @@ static int _out_prefix_fn(const struct dm_config_node *cn, const char *line, voi
continue;
commentline[0] = '\0';
}
fprintf(out->fp, "%s#%s%s\n", line, commentline[0] ? " " : "", commentline);
fprintf(out->fp, "%s# %s\n", line, commentline);
/* withsummary prints only the first comment line. */
if (!out->tree_spec->withcomments)
break;
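The two format strings in the hunk above differ only when the comment line is empty; a toy comparison of their output (brackets added to make the trailing space visible):

#include <stdio.h>

/* Toy comparison of the two format strings above. For an empty
 * commentline, the conditional variant prints "#" with no trailing
 * space, while the plain "# " format leaves a dangling blank. */
int main(void)
{
        const char *line = "", *commentline = "";

        printf("[%s#%s%s]\n", line, commentline[0] ? " " : "", commentline); /* [#]  */
        printf("[%s# %s]\n", line, commentline);                             /* [# ] */
        return 0;
}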

View File

@@ -288,28 +288,6 @@ cfg_array(devices_preferred_names_CFG, "preferred_names", devices_CFG_SECTION, C
"preferred_names = [ \"^/dev/mpath/\", \"^/dev/mapper/mpath\", \"^/dev/[hs]d\" ]\n"
"#\n")
cfg(devices_use_devicesfile_CFG, "use_devicesfile", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_USE_DEVICES_FILE, vsn(2, 3, 14), NULL, 0, NULL,
"Enable or disable the use of a devices file.\n"
"When enabled, lvm will only use devices that\n"
"are lised in the devices file.\n")
cfg(devices_devicesfile_CFG, "devicesfile", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_DEVICES_FILE, vsn(2, 3, 14), NULL, 0, NULL,
"The name of the file listing devices that LVM should use.\n")
cfg(devices_search_for_devnames_CFG, "search_for_devnames", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_SEARCH_FOR_DEVNAMES, vsn(2, 3, 14), NULL, 0, NULL,
"Look outside of the devices file for missing devname entries.\n"
"A devname entry is used for a device that does not have a stable\n"
"device id, e.g. wwid, so the unstable device name is used as\n"
"the device id. After reboot, or if the device is reattached,\n"
"the device name may change, in which case lvm will not find\n"
"the expected PV on the device listed in the devices file.\n"
"This setting controls whether lvm will search other devices,\n"
"outside the devices file, to look for the missing PV on a\n"
"renamed device. If \"none\", lvm will not look at other devices,\n"
"and the PV may appear to be missing. If \"auto\", lvm will look\n"
"at other devices, but only those that are likely to have the PV.\n"
"If \"all\", lvm will look at all devices on the system.\n")
cfg_array(devices_filter_CFG, "filter", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, "#Sa|.*|", vsn(1, 0, 0), NULL, 0, NULL,
"Limit the block devices that are used by LVM commands.\n"
"This is a list of regular expressions used to accept or reject block\n"
@@ -650,11 +628,6 @@ cfg(allocation_cache_pool_max_chunks_CFG, "cache_pool_max_chunks", allocation_CF
cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL, 0, NULL,
"Thin pool metadata and data will always use different PVs.\n")
cfg(allocation_thin_pool_crop_metadata_CFG, "thin_pool_crop_metadata", allocation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_CROP_METADATA, vsn(2, 3, 12), NULL, 0, NULL,
"Older version of lvm2 cropped pool's metadata size to 15.81 GiB.\n"
"This is slightly less then the actual maximum 15.88 GiB.\n"
"For compatibility with older version and use of cropped size set to 1.\n")
cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_ZERO, vsn(2, 2, 99), NULL, 0, NULL,
"Thin pool data chunks are zeroed before they are first used.\n"
"Zeroing with a larger thin pool chunk size reduces performance.\n")
@@ -1009,7 +982,8 @@ cfg(global_prioritise_write_locks_CFG, "prioritise_write_locks", global_CFG_SECT
"a volume group's metadata, instead of always granting the read-only\n"
"requests immediately, delay them to allow the read-write requests to\n"
"be serviced. Without this setting, write access may be stalled by a\n"
"high volume of read-only requests. This option only affects file locks.\n")
"high volume of read-only requests. This option only affects\n"
"locking_type 1 viz. local file-based locking.\n")
cfg(global_library_dir_CFG, "library_dir", global_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(1, 0, 0), NULL, 0, NULL,
"Search this directory first for shared libraries.\n")
@@ -1233,15 +1207,6 @@ cfg(global_vdo_format_executable_CFG, "vdo_format_executable", global_CFG_SECTIO
cfg_array(global_vdo_format_options_CFG, "vdo_format_options", global_CFG_SECTION, CFG_ALLOW_EMPTY | CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_VDO_FORMAT_OPTIONS_CONFIG, VDO_1ST_VSN, NULL, 0, NULL,
"List of options passed added to standard vdoformat command.\n")
cfg_array(global_vdo_disabled_features_CFG, "vdo_disabled_features", global_CFG_SECTION, CFG_ALLOW_EMPTY | CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(2, 3, 11), NULL, 0, NULL,
"Features to not use in the vdo driver.\n"
"This can be helpful for testing, or to avoid using a feature that is\n"
"causing problems. Features include: online_rename\n"
"#\n"
"Example\n"
"vdo_disabled_features = [ \"online_rename\" ]\n"
"#\n")
cfg(global_fsadm_executable_CFG, "fsadm_executable", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_FSADM_PATH, vsn(2, 2, 170), "@FSADM_PATH@", 0, NULL,
"The full path to the fsadm command.\n"
"LVM uses this command to help with lvresize -r operations.\n")

View File

@@ -118,8 +118,6 @@
#define DEFAULT_THIN_REPAIR_OPTION1 ""
#define DEFAULT_THIN_REPAIR_OPTIONS_CONFIG "#S" DEFAULT_THIN_REPAIR_OPTION1
#define DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS 0
#define DEFAULT_THIN_POOL_CROP_METADATA 0
#define DEFAULT_THIN_POOL_MAX_METADATA_SIZE_V1_KB (UINT64_C(255) * ((1 << 14) - 64) * 4) /* KB */ /* 0x3f8040 blocks */
#define DEFAULT_THIN_POOL_MAX_METADATA_SIZE (DM_THIN_MAX_METADATA_SIZE / 2) /* KB */
#define DEFAULT_THIN_POOL_MIN_METADATA_SIZE 2048 /* KB */
#define DEFAULT_THIN_POOL_OPTIMAL_METADATA_SIZE (128 * 1024) /* KB */
@@ -323,9 +321,4 @@
#define DEFAULT_MD_COMPONENT_CHECKS "auto"
#define DEFAULT_USE_DEVICES_FILE 0
#define DEFAULT_DEVICES_FILE "system.devices"
#define DEFAULT_SEARCH_FOR_DEVNAMES "auto"
#endif /* _LVM_DEFAULTS_H */

View File

@@ -16,7 +16,6 @@
#include "base/memory/zalloc.h"
#include "lib/misc/lib.h"
#include "lib/device/dev-type.h"
#include "lib/device/device_id.h"
#include "lib/datastruct/btree.h"
#include "lib/config/config.h"
#include "lib/commands/toolcontext.h"
@@ -69,13 +68,11 @@ static void _dev_init(struct device *dev)
dev->bcache_fd = -1;
dev->bcache_di = -1;
dev->read_ahead = -1;
dev->part = -1;
dev->ext.enabled = 0;
dev->ext.src = DEV_EXT_NONE;
dm_list_init(&dev->aliases);
dm_list_init(&dev->ids);
}
void dev_destroy_file(struct device *dev)
@@ -355,7 +352,7 @@ static int _add_alias(struct device *dev, const char *path)
return 1;
}
int get_sysfs_value(const char *path, char *buf, size_t buf_size, int error_if_no_value)
static int _get_sysfs_value(const char *path, char *buf, size_t buf_size, int error_if_no_value)
{
FILE *fp;
size_t len;
@@ -396,7 +393,7 @@ static int _get_dm_uuid_from_sysfs(char *buf, size_t buf_size, int major, int mi
return 0;
}
return get_sysfs_value(path, buf, buf_size, 0);
return _get_sysfs_value(path, buf, buf_size, 0);
}
static struct dm_list *_get_or_add_list_by_index_key(struct dm_hash_table *idx, const char *key)
@@ -477,7 +474,7 @@ static struct device *_get_device_for_sysfs_dev_name_using_devno(const char *dev
return NULL;
}
if (!get_sysfs_value(path, buf, sizeof(buf), 1))
if (!_get_sysfs_value(path, buf, sizeof(buf), 1))
return_NULL;
if (sscanf(buf, "%d:%d", &major, &minor) != 2) {
@@ -975,7 +972,7 @@ static int _dev_cache_iterate_sysfs_for_index(const char *path)
return r;
}
static int dev_cache_index_devs(void)
int dev_cache_index_devs(void)
{
static int sysfs_has_dev_block = -1;
char path[PATH_MAX];
@@ -1324,20 +1321,12 @@ int dev_cache_check_for_open_devices(void)
int dev_cache_exit(void)
{
struct device *dev;
struct dm_hash_node *n;
int num_open = 0;
if (_cache.names) {
if (_cache.names)
if ((num_open = _check_for_open_devices(1)) > 0)
log_error(INTERNAL_ERROR "%d device(s) were left open and have been closed.", num_open);
dm_hash_iterate(n, _cache.names) {
dev = (struct device *) dm_hash_get_data(_cache.names, n);
free_dids(&dev->ids);
}
}
if (_cache.mem)
dm_pool_destroy(_cache.mem);
@@ -1439,151 +1428,60 @@ struct device *dev_hash_get(const char *name)
return (struct device *) dm_hash_lookup(_cache.names, name);
}
static void _remove_alias(struct device *dev, const char *name)
{
struct dm_str_list *strl;
dm_list_iterate_items(strl, &dev->aliases) {
if (!strcmp(strl->str, name)) {
dm_list_del(&strl->list);
return;
}
}
}
/*
* Check that paths for this dev still refer to the same dev_t. This is known
* to drop invalid paths in the case where lvm deactivates an LV, which causes
* that LV path to go away, but that LV path is not removed from dev-cache (it
* probably should be). Later a new path to a different LV is added to
* dev-cache, where the new LV has the same major:minor as the previously
* deactivated LV. The new LV will find the existing struct dev, and that
* struct dev will have dev->aliases entries that refer to the name of the old
* deactivated LV. Those old paths are all invalid and are dropped here.
*/
static void _verify_aliases(struct device *dev, const char *newname)
{
struct dm_str_list *strl, *strl2;
struct stat st;
dm_list_iterate_items_safe(strl, strl2, &dev->aliases) {
/* newname was just stat'd and added by caller */
if (newname && !strcmp(strl->str, newname))
continue;
if (stat(strl->str, &st) || (st.st_rdev != dev->dev)) {
log_debug("Drop invalid path %s for %d:%d (new path %s).",
strl->str, (int)MAJOR(dev->dev), (int)MINOR(dev->dev), newname ?: "");
dm_hash_remove(_cache.names, strl->str);
dm_list_del(&strl->list);
}
}
}
struct device *dev_cache_get(struct cmd_context *cmd, const char *name, struct dev_filter *f)
{
struct device *dev = (struct device *) dm_hash_lookup(_cache.names, name);
struct stat st;
int ret;
struct stat buf;
struct device *d = (struct device *) dm_hash_lookup(_cache.names, name);
int info_available = 0;
int ret = 1;
/*
* DEV_REGULAR means that "dev" is actually a file, not a device.
* FIXME: I don't think dev-cache is used for files any more and this
* can be dropped?
*/
if (dev && (dev->flags & DEV_REGULAR))
return dev;
/*
* The requested path is invalid, remove any dev-cache
* info for it.
*/
if (stat(name, &st)) {
if (dev) {
log_print("Device path %s is invalid for %d:%d %s.",
name, (int)MAJOR(dev->dev), (int)MINOR(dev->dev), dev_name(dev));
if (d && (d->flags & DEV_REGULAR))
return d;
/* If the entry's wrong, remove it */
if (stat(name, &buf) < 0) {
if (d)
dm_hash_remove(_cache.names, name);
log_sys_very_verbose("stat", name);
d = NULL;
} else
info_available = 1;
_remove_alias(dev, name);
/* Remove any other names in dev->aliases that are incorrect. */
_verify_aliases(dev, NULL);
}
return NULL;
}
if (!S_ISBLK(st.st_mode)) {
log_debug("Not a block device %s.", name);
return NULL;
}
/*
* dev-cache has incorrect info for the requested path.
* Remove incorrect info and then add new dev-cache entry.
*/
if (dev && (st.st_rdev != dev->dev)) {
log_print("Device path %s does not match %d:%d %s.",
name, (int)MAJOR(dev->dev), (int)MINOR(dev->dev), dev_name(dev));
if (d && (buf.st_rdev != d->dev)) {
dm_hash_remove(_cache.names, name);
_remove_alias(dev, name);
/* Remove any other names in dev->aliases that are incorrect. */
_verify_aliases(dev, NULL);
/* Add new dev-cache entry next. */
dev = NULL;
d = NULL;
}
/*
* Either add a new struct dev for st_rdev and name,
* or add name as a new alias for an existing struct dev
* for st_rdev.
*/
if (!dev) {
_insert_dev(name, st.st_rdev);
/* Get the struct dev that was just added. */
dev = (struct device *) dm_hash_lookup(_cache.names, name);
if (!dev) {
log_error("Failed to get device %s", name);
return NULL;
if (!d) {
_insert(name, info_available ? &buf : NULL, 0, obtain_device_list_from_udev());
d = (struct device *) dm_hash_lookup(_cache.names, name);
if (!d) {
log_debug_devs("Device name not found in dev_cache repeat dev_cache_scan for %s", name);
dev_cache_scan();
d = (struct device *) dm_hash_lookup(_cache.names, name);
}
_verify_aliases(dev, name);
}
/*
* The caller passed a filter if they only want the dev if it
* passes filters.
*/
if (!f)
return dev;
ret = f->passes_filter(cmd, f, dev, NULL);
/*
* This might happen if this function is called before
* filters can do i/o. I don't think this will happen
* any longer and this EAGAIN case can be removed.
*/
if (ret == -EAGAIN) {
log_debug_devs("dev_cache_get filter deferred %s", dev_name(dev));
dev->flags |= DEV_FILTER_AFTER_SCAN;
ret = 1;
}
if (!ret) {
log_debug_devs("dev_cache_get filter excludes %s", dev_name(dev));
if (!d)
return NULL;
if (d && (d->flags & DEV_REGULAR))
return d;
if (f && !(d->flags & DEV_REGULAR)) {
ret = f->passes_filter(cmd, f, d, NULL);
if (ret == -EAGAIN) {
log_debug_devs("get device by name defer filter %s", dev_name(d));
d->flags |= DEV_FILTER_AFTER_SCAN;
ret = 1;
}
}
return dev;
if (f && !(d->flags & DEV_REGULAR) && !ret)
return NULL;
return d;
}
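
For orientation, a minimal usage sketch of the lookup above (the device path and caller are illustrative; only a device passing the supplied filter chain is returned):

	struct device *dev;

	if (!(dev = dev_cache_get(cmd, "/dev/sdb", cmd->filter)))
		log_debug("/dev/sdb is missing, filtered, or not a block device.");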
static struct device *_dev_cache_seek_devt(dev_t dev)
@@ -1751,259 +1649,3 @@ bool dev_cache_has_md_with_end_superblock(struct dev_types *dt)
return false;
}
static int _setup_devices_list(struct cmd_context *cmd)
{
struct dm_str_list *strl;
struct dev_use *du;
/*
* For each --devices arg, add a du to cmd->use_devices.
 * The du's devname is set to the devices arg value.
*/
dm_list_iterate_items(strl, &cmd->deviceslist) {
if (!(du = zalloc(sizeof(struct dev_use))))
return_0;
if (!(du->devname = strdup(strl->str)))
return_0;
dm_list_add(&cmd->use_devices, &du->list);
}
return 1;
}
int setup_devices_file(struct cmd_context *cmd)
{
char dirpath[PATH_MAX];
const char *filename = NULL;
struct stat st;
int rv;
if (cmd->devicesfile) {
/* --devicesfile <filename> or "" has been set which overrides
lvm.conf settings use_devicesfile and devicesfile. */
if (!strlen(cmd->devicesfile))
cmd->enable_devices_file = 0;
else {
cmd->enable_devices_file = 1;
filename = cmd->devicesfile;
}
} else {
if (!find_config_tree_bool(cmd, devices_use_devicesfile_CFG, NULL))
cmd->enable_devices_file = 0;
else {
cmd->enable_devices_file = 1;
filename = find_config_tree_str(cmd, devices_devicesfile_CFG, NULL);
if (!validate_name(filename)) {
log_error("Invalid devices file name from config setting \"%s\".", filename);
return 0;
}
}
}
if (!cmd->enable_devices_file)
return 1;
if (dm_snprintf(dirpath, sizeof(dirpath), "%s/devices", cmd->system_dir) < 0) {
log_error("Failed to copy devices dir path");
return 0;
}
if (stat(dirpath, &st)) {
log_debug("Creating %s.", dirpath);
dm_prepare_selinux_context(dirpath, S_IFDIR);
rv = mkdir(dirpath, 0755);
dm_prepare_selinux_context(NULL, 0);
if ((rv < 0) && stat(dirpath, &st)) {
log_error("Failed to create %s %d", dirpath, errno);
return 0;
}
}
if (dm_snprintf(cmd->devices_file_path, sizeof(cmd->devices_file_path),
"%s/devices/%s", cmd->system_dir, filename) < 0) {
log_error("Failed to copy devices file path");
return 0;
}
return 1;
}
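
The resulting precedence, sketched with hypothetical values (cmd->system_dir is typically /etc/lvm):

/*
 * --devicesfile ""            -> enable_devices_file = 0 (feature disabled)
 * --devicesfile my.devices    -> /etc/lvm/devices/my.devices
 * no option, lvm.conf devices/use_devicesfile=1 and
 *            devices/devicesfile="system.devices"
 *                             -> /etc/lvm/devices/system.devices
 */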
/*
* Add all system devices to dev-cache, and attempt to
* match all devices_file entries to dev-cache entries.
*/
int setup_devices(struct cmd_context *cmd)
{
int file_exists;
int lock_mode = 0;
if (cmd->enable_devices_list) {
if (!_setup_devices_list(cmd))
return_0;
goto scan;
}
if (!setup_devices_file(cmd))
return_0;
if (!cmd->enable_devices_file)
goto scan;
file_exists = devices_file_exists(cmd);
/*
* Removing the devices file is another way of disabling the use of
* a devices file, unless the command creates the devices file.
*/
if (!file_exists && !cmd->create_edit_devices_file) {
log_print("Devices file not found, ignoring.");
cmd->enable_devices_file = 0;
goto scan;
}
if (!file_exists) {
/* pvcreate/vgcreate/vgimportdevices/lvmdevices-add
create a new devices file here if it doesn't exist.
They have the create_edit_devices_file flag set.
First they create/lock-ex the devices file lockfile.
Other commands will not use a devices file if none exists. */
lock_mode = LOCK_EX;
if (!lock_devices_file(cmd, lock_mode)) {
log_error("Failed to lock the devices file to create.");
return 0;
}
if (!devices_file_touch(cmd)) {
log_error("Failed to create the devices file.");
return 0;
}
} else {
/* Commands that intend to edit the devices file have
edit_devices_file or create_edit_devices_file set (create if
they can also create a new devices file) and lock it ex
here prior to reading. Other commands that intend to just
read the devices file lock sh. */
lock_mode = (cmd->create_edit_devices_file || cmd->edit_devices_file) ? LOCK_EX : LOCK_SH;
if (!lock_devices_file(cmd, lock_mode)) {
log_error("Failed to lock the devices file.");
return 0;
}
}
/*
* Read the list of device ids that lvm can use.
* Adds a struct dev_id to cmd->use_devices for each one.
*/
if (!device_ids_read(cmd)) {
log_error("Failed to read the devices file.");
return 0;
}
/*
* When the command is editing the devices file, it acquires
* the ex lock above, will later call device_ids_write(), and
* then unlock the lock after writing the file.
* When the command is just reading the devices file, it's
* locked sh above just before reading the file, and unlocked
* here after reading.
*/
if (lock_mode && (lock_mode == LOCK_SH))
unlock_devices_file(cmd);
scan:
/*
* Add a 'struct device' to dev-cache for each device available on the system.
* This will not open or read any devices, but may look at sysfs properties.
 * This list of devs comes from looking at /dev entries, or from asking libudev.
* TODO: or from /proc/partitions?
*
* TODO: dev_cache_scan() optimization: start by looking only at
* devnames listed in the devices_file, and if the device_ids for
* those all match we won't need any others.
* Exceptions: the command wants a new device for pvcreate, or
* device_ids don't match the devnames.
*/
dev_cache_scan();
/*
* Match entries from cmd->use_devices with device structs in dev-cache.
*/
device_ids_match(cmd);
return 1;
}
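
Putting the pieces together, a sketch of a hypothetical command-level caller (scan_usable_devices itself is illustrative; the dev_iter helpers are the existing dev-cache iterator API):

/* Sketch: build dev-cache, match devices-file entries, then iterate
 * only the devices that pass the command's filter chain. */
static int scan_usable_devices(struct cmd_context *cmd)
{
	struct dev_iter *iter;
	struct device *dev;

	if (!setup_devices(cmd))	/* dev_cache_scan() + device_ids_match() */
		return_0;

	if (!(iter = dev_iter_create(cmd->filter, 0)))
		return_0;

	while ((dev = dev_iter_get(cmd, iter)))
		log_debug("Usable device %s.", dev_name(dev));

	dev_iter_destroy(iter);
	return 1;
}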
/*
* The alternative to setup_devices() when the command is interested
* in using only one PV.
*
* Add one system device to dev-cache, and attempt to
* match its dev-cache entry to a devices_file entry.
*/
int setup_device(struct cmd_context *cmd, const char *devname)
{
struct stat buf;
struct device *dev;
if (cmd->enable_devices_list) {
if (!_setup_devices_list(cmd))
return_0;
goto scan;
}
if (!setup_devices_file(cmd))
return_0;
if (!cmd->enable_devices_file)
goto scan;
if (!devices_file_exists(cmd)) {
log_print("Devices file not found, ignoring.");
cmd->enable_devices_file = 0;
goto scan;
}
if (!lock_devices_file(cmd, LOCK_SH)) {
log_error("Failed to lock the devices file to read.");
return 0;
}
if (!device_ids_read(cmd)) {
log_error("Failed to read the devices file.");
return 0;
}
unlock_devices_file(cmd);
scan:
if (stat(devname, &buf) < 0) {
log_error("Cannot access device %s.", devname);
return 0;
}
if (!S_ISBLK(buf.st_mode)) {
log_error("Invaild device type %s.", devname);
return 0;
}
if (!_insert_dev(devname, buf.st_rdev))
return_0;
if (!(dev = (struct device *) dm_hash_lookup(_cache.names, devname)))
return_0;
/* Match this device to an entry in devices_file so it will not
be rejected by filter-deviceid. */
if (cmd->enable_devices_file)
device_ids_match_dev(cmd, dev);
return 1;
}


@@ -34,6 +34,7 @@ struct dev_filter {
const char *name;
};
int dev_cache_index_devs(void);
struct dm_list *dev_cache_get_dev_list_for_vgid(const char *vgid);
struct dm_list *dev_cache_get_dev_list_for_lvid(const char *lvid);
@@ -74,10 +75,4 @@ void dev_cache_failed_path(struct device *dev, const char *path);
bool dev_cache_has_md_with_end_superblock(struct dev_types *dt);
int get_sysfs_value(const char *path, char *buf, size_t buf_size, int error_if_no_value);
int setup_devices_file(struct cmd_context *cmd);
int setup_devices(struct cmd_context *cmd);
int setup_device(struct cmd_context *cmd, const char *devname);
#endif


@@ -21,7 +21,6 @@
#include "lib/metadata/metadata.h"
#include "lib/device/bcache.h"
#include "lib/label/label.h"
#include "lib/commands/toolcontext.h"
#ifdef BLKID_WIPING_SUPPORT
#include <blkid.h>
@@ -68,31 +67,6 @@ int dev_is_pmem(struct device *dev)
return is_pmem ? 1 : 0;
}
/*
* An nvme device has major number 259 (BLKEXT), minor number <minor>,
* and reading /sys/dev/block/259:<minor>/device/dev shows a character
* device cmajor:cminor where cmajor matches the major number of the
* nvme character device entry in /proc/devices. Checking all of that
 * is excessive and unnecessary compared to just checking for a /dev/nvme* name.
*/
int dev_is_nvme(struct dev_types *dt, struct device *dev)
{
struct dm_str_list *strl;
if (dev->flags & DEV_IS_NVME)
return 1;
dm_list_iterate_items(strl, &dev->aliases) {
if (!strncmp(strl->str, "/dev/nvme", 9)) {
log_debug("Found nvme device %s", dev_name(dev));
dev->flags |= DEV_IS_NVME;
return 1;
}
}
return 0;
}
int dev_is_lv(struct device *dev)
{
FILE *fp;
@@ -328,9 +302,6 @@ int dev_subsystem_part_major(struct dev_types *dt, struct device *dev)
const char *dev_subsystem_name(struct dev_types *dt, struct device *dev)
{
if (dev->flags & DEV_IS_NVME)
return "NVME";
if (MAJOR(dev->dev) == dt->device_mapper_major)
return "DM";
@@ -377,6 +348,7 @@ int major_is_scsi_device(struct dev_types *dt, int major)
return (dt->dev_type_array[major].flags & PARTITION_SCSI_DEVICE) ? 1 : 0;
}
static int _loop_is_with_partscan(struct device *dev)
{
FILE *fp;
@@ -408,45 +380,6 @@ static int _loop_is_with_partscan(struct device *dev)
return partscan;
}
int dev_get_partition_number(struct device *dev, int *num)
{
char path[PATH_MAX];
char buf[8] = { 0 };
dev_t devt = dev->dev;
struct stat sb;
if (dev->part != -1) {
*num = dev->part;
return 1;
}
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d/partition",
dm_sysfs_dir(), (int)MAJOR(devt), (int)MINOR(devt)) < 0) {
log_error("Failed to create sysfs path for %s", dev_name(dev));
return 0;
}
if (stat(path, &sb)) {
dev->part = 0;
*num = 0;
return 1;
}
if (!get_sysfs_value(path, buf, sizeof(buf), 0)) {
log_error("Failed to read sysfs path for %s", dev_name(dev));
return 0;
}
if (!buf[0]) {
log_error("Failed to read sysfs partition value for %s", dev_name(dev));
return 0;
}
dev->part = atoi(buf);
*num = dev->part;
return 1;
}
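
A short usage sketch (the caller code is illustrative): *num is set to 0 when the device is a whole disk, i.e. sysfs has no .../partition file for it.

	int part = 0;

	if (dev_get_partition_number(dev, &part) && part)
		log_debug("%s is partition %d of its parent device.",
			  dev_name(dev), part);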
/* See linux/genhd.h and fs/partitions/msdos */
#define PART_MAGIC 0xAA55
#define PART_MAGIC_OFFSET UINT64_C(0x1FE)
@@ -465,28 +398,6 @@ struct partition {
uint32_t nr_sects;
} __attribute__((packed));
static int _has_sys_partition(struct device *dev)
{
char path[PATH_MAX];
struct stat info;
int major = (int) MAJOR(dev->dev);
int minor = (int) MINOR(dev->dev);
/* check if dev is a partition */
if (dm_snprintf(path, sizeof(path), "%s/dev/block/%d:%d/partition",
dm_sysfs_dir(), major, minor) < 0) {
log_error("dm_snprintf partition failed");
return 0;
}
if (stat(path, &info) == -1) {
if (errno != ENOENT)
log_sys_error("stat", path);
return 0;
}
return 1;
}
static int _is_partitionable(struct dev_types *dt, struct device *dev)
{
int parts = major_max_partitions(dt, MAJOR(dev->dev));
@@ -503,13 +414,6 @@ static int _is_partitionable(struct dev_types *dt, struct device *dev)
_loop_is_with_partscan(dev))
return 1;
if (dev_is_nvme(dt, dev)) {
/* If this dev is already a partition then it's not partitionable. */
if (_has_sys_partition(dev))
return 0;
return 1;
}
if ((parts <= 1) || (MINOR(dev->dev) % parts))
return 0;
@@ -653,17 +557,10 @@ int dev_get_primary_dev(struct dev_types *dt, struct device *dev, dev_t *result)
char path[PATH_MAX];
char temp_path[PATH_MAX];
char buffer[64];
struct stat info;
FILE *fp = NULL;
int parts, residue, size, ret = 0;
/*
* /dev/nvme devs don't use the major:minor numbering like
* block dev types that have their own major number, so
* the calculation based on minor number doesn't work.
*/
if (dev_is_nvme(dt, dev))
goto sys_partition;
/*
* Try to get the primary dev out of the
* list of known device types first.
@@ -679,14 +576,23 @@ int dev_get_primary_dev(struct dev_types *dt, struct device *dev, dev_t *result)
goto out;
}
sys_partition:
/*
* If we can't get the primary dev out of the list of known device
 * types, try to look at sysfs directly. This is a more complex
 * way and it also requires a certain sysfs layout to be present,
 * which might not be there in old kernels!
*/
if (!_has_sys_partition(dev)) {
/* check if dev is a partition */
if (dm_snprintf(path, sizeof(path), "%s/dev/block/%d:%d/partition",
sysfs_dir, major, minor) < 0) {
log_error("dm_snprintf partition failed");
goto out;
}
if (stat(path, &info) == -1) {
if (errno != ENOENT)
log_sys_error("stat", path);
*result = dev->dev;
ret = 1;
goto out; /* dev is not a partition! */


@@ -83,7 +83,6 @@ int dev_is_md_with_end_superblock(struct dev_types *dt, struct device *dev);
int major_max_partitions(struct dev_types *dt, int major);
int dev_is_partitioned(struct dev_types *dt, struct device *dev);
int dev_get_primary_dev(struct dev_types *dt, struct device *dev, dev_t *result);
int dev_get_partition_number(struct device *dev, int *num);
/* Various device properties */
unsigned long dev_alignment_offset(struct dev_types *dt, struct device *dev);
@@ -96,8 +95,6 @@ int dev_is_rotational(struct dev_types *dt, struct device *dev);
int dev_is_pmem(struct device *dev);
int dev_is_nvme(struct dev_types *dt, struct device *dev);
int dev_is_lv(struct device *dev);
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size);


@@ -38,8 +38,6 @@
#define DEV_SCAN_FOUND_LABEL 0x00010000 /* label scan read dev and found label */
#define DEV_IS_MD_COMPONENT 0x00020000 /* device is an md component */
#define DEV_UDEV_INFO_MISSING 0x00040000 /* we have no udev info for this device */
#define DEV_IS_NVME 0x00080000 /* set if dev is nvme */
#define DEV_MATCHED_USE_ID 0x00100000 /* matched an entry from cmd->use_devices */
/*
* Support for external device info.
@@ -58,44 +56,12 @@ struct dev_ext {
void *handle;
};
#define DEV_ID_TYPE_SYS_WWID 0x0001
#define DEV_ID_TYPE_SYS_SERIAL 0x0002
#define DEV_ID_TYPE_MPATH_UUID 0x0003
#define DEV_ID_TYPE_MD_UUID 0x0004
#define DEV_ID_TYPE_LOOP_FILE 0x0005
#define DEV_ID_TYPE_CRYPT_UUID 0x0006
#define DEV_ID_TYPE_LVMLV_UUID 0x0007
#define DEV_ID_TYPE_DEVNAME 0x0008
/* A device ID of a certain type for a device. */
struct dev_id {
struct dm_list list;
struct device *dev;
uint16_t idtype;
char *idname;
};
/* A device listed in devices file that lvm should use. */
struct dev_use {
struct dm_list list;
struct device *dev;
int part;
uint16_t idtype;
char *idname;
char *devname;
char *pvid;
};
/*
* All devices in LVM will be represented by one of these.
 * Pointer comparisons are valid.
*/
struct device {
struct dm_list aliases; /* struct dm_str_list */
struct dm_list ids; /* struct dev_id, different entries for different idtypes */
struct dev_id *id; /* points to the ids entry being used for this dev */
dev_t dev;
/* private */
@@ -106,7 +72,6 @@ struct device {
int read_ahead;
int bcache_fd;
int bcache_di;
int part; /* partition number */
uint32_t flags;
uint32_t filtered_flags;
unsigned size_seqno;

File diff suppressed because it is too large.


@@ -1,53 +0,0 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _LVM_DEVICE_ID_H
#define _LVM_DEVICE_ID_H
void free_du(struct dev_use *du);
void free_dus(struct dm_list *list);
void free_did(struct dev_id *did);
void free_dids(struct dm_list *list);
const char *idtype_to_str(uint16_t idtype);
uint16_t idtype_from_str(const char *str);
const char *dev_idtype(struct device *dev);
const char *dev_id(struct device *dev);
int device_ids_use_devname(struct cmd_context *cmd);
int device_ids_read(struct cmd_context *cmd);
int device_ids_write(struct cmd_context *cmd);
int device_id_add(struct cmd_context *cmd, struct device *dev, const char *pvid,
const char *idtype_arg, const char *id_arg);
void device_id_pvremove(struct cmd_context *cmd, struct device *dev);
void device_ids_match(struct cmd_context *cmd);
int device_ids_match_dev(struct cmd_context *cmd, struct device *dev);
void device_ids_validate(struct cmd_context *cmd, int *device_ids_invalid, int noupdate);
int device_ids_version_unchanged(struct cmd_context *cmd);
void device_ids_find_renamed_devs(struct cmd_context *cmd, struct dm_list *dev_list, int *search_count, int noupdate);
const char *device_id_system_read(struct cmd_context *cmd, struct device *dev, uint16_t idtype);
struct dev_use *get_du_for_dev(struct cmd_context *cmd, struct device *dev);
struct dev_use *get_du_for_pvid(struct cmd_context *cmd, const char *pvid);
char *devices_file_version(void);
int devices_file_exists(struct cmd_context *cmd);
int devices_file_touch(struct cmd_context *cmd);
int lock_devices_file(struct cmd_context *cmd, int mode);
int lock_devices_file_try(struct cmd_context *cmd, int mode, int *held);
void unlock_devices_file(struct cmd_context *cmd);
void devices_file_init(struct cmd_context *cmd);
void devices_file_exit(struct cmd_context *cmd);
#endif


@@ -399,7 +399,7 @@ int lvdisplay_full(struct cmd_context *cmd,
void *handle __attribute__((unused)))
{
struct lvinfo info;
int inkernel, snap_active = 0, partial = 0, raid_is_avail = 1;
int inkernel, snap_active = 0;
char uuid[64] __attribute__((aligned(8)));
const char *access_str;
struct lv_segment *snap_seg = NULL, *mirror_seg = NULL;
@@ -558,18 +558,11 @@ int lvdisplay_full(struct cmd_context *cmd,
log_print("LV VDO Pool name %s", seg_lv(seg, 0)->name);
}
if (lv_is_partial(lv))
partial = 1;
if (lv_is_raid(lv))
raid_is_avail = raid_is_available(lv) ? 1 : 0;
if (inkernel && info.suspended)
log_print("LV Status suspended");
else if (activation())
log_print("LV Status %savailable%s",
(inkernel && raid_is_avail) ? "" : "NOT ",
partial ? " (partial)" : "");
log_print("LV Status %savailable",
inkernel ? "" : "NOT ");
/********* FIXME lv_number
log_print("LV # %u", lv->lv_number + 1);


@@ -1,69 +0,0 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "base/memory/zalloc.h"
#include "lib/misc/lib.h"
#include "lib/filters/filter.h"
#include "lib/commands/toolcontext.h"
static int _passes_deviceid_filter(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
{
dev->filtered_flags &= ~DEV_FILTERED_DEVICES_FILE;
dev->filtered_flags &= ~DEV_FILTERED_DEVICES_LIST;
if (!cmd->enable_devices_file && !cmd->enable_devices_list)
return 1;
if (cmd->filter_deviceid_skip)
return 1;
if (dev->flags & DEV_MATCHED_USE_ID)
return 1;
if (cmd->enable_devices_file)
dev->filtered_flags |= DEV_FILTERED_DEVICES_FILE;
else if (cmd->enable_devices_list)
dev->filtered_flags |= DEV_FILTERED_DEVICES_LIST;
log_debug_devs("%s: Skipping (deviceid)", dev_name(dev));
return 0;
}
static void _destroy_deviceid_filter(struct dev_filter *f)
{
if (f->use_count)
log_error(INTERNAL_ERROR "Destroying deviceid filter while in use %u times.", f->use_count);
free(f);
}
struct dev_filter *deviceid_filter_create(struct cmd_context *cmd)
{
struct dev_filter *f;
if (!(f = zalloc(sizeof(struct dev_filter)))) {
log_error("deviceid filter allocation failed");
return NULL;
}
f->passes_filter = _passes_deviceid_filter;
f->destroy = _destroy_deviceid_filter;
f->use_count = 0;
f->name = "deviceid";
log_debug_devs("deviceid filter initialised.");
return f;
}


@@ -16,7 +16,6 @@
#include "lib/misc/lib.h"
#include "lib/filters/filter.h"
#include "lib/activate/activate.h"
#include "lib/commands/toolcontext.h"
#ifdef UDEV_SYNC_SUPPORT
#include <libudev.h>
#include "lib/device/dev-ext-udev-constants.h"
@@ -28,6 +27,7 @@
#define MPATH_PREFIX "mpath-"
struct mpath_priv {
struct dm_pool *mem;
struct dev_filter f;
@@ -35,9 +35,6 @@ struct mpath_priv {
struct dm_hash_table *hash;
};
/*
* given "/dev/foo" return "foo"
*/
static const char *_get_sysfs_name(struct device *dev)
{
const char *name;
@@ -56,11 +53,6 @@ static const char *_get_sysfs_name(struct device *dev)
return name;
}
/*
* given major:minor
* readlink translates /sys/dev/block/major:minor to /sys/.../foo
* from /sys/.../foo return "foo"
*/
static const char *_get_sysfs_name_by_devt(const char *sysfs_dir, dev_t devno,
char *buf, size_t buf_size)
{
@@ -68,7 +60,7 @@ static const char *_get_sysfs_name_by_devt(const char *sysfs_dir, dev_t devno,
char path[PATH_MAX];
int size;
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d", sysfs_dir,
if (dm_snprintf(path, sizeof(path), "%s/dev/block/%d:%d", sysfs_dir,
(int) MAJOR(devno), (int) MINOR(devno)) < 0) {
log_error("Sysfs path string is too long.");
return NULL;
@@ -110,28 +102,27 @@ static int _get_sysfs_string(const char *path, char *buffer, int max_size)
return r;
}
static int _get_sysfs_dm_mpath(struct dev_types *dt, const char *sysfs_dir, const char *holder_name)
static int _get_sysfs_get_major_minor(const char *sysfs_dir, const char *kname, int *major, int *minor)
{
char path[PATH_MAX];
char buffer[128];
char path[PATH_MAX], buffer[64];
if (dm_snprintf(path, sizeof(path), "%sblock/%s/dm/uuid", sysfs_dir, holder_name) < 0) {
if (dm_snprintf(path, sizeof(path), "%s/block/%s/dev", sysfs_dir, kname) < 0) {
log_error("Sysfs path string is too long.");
return 0;
}
buffer[0] = '\0';
if (!_get_sysfs_string(path, buffer, sizeof(buffer)))
return_0;
if (!strncmp(buffer, MPATH_PREFIX, 6))
return 1;
if (sscanf(buffer, "%d:%d", major, minor) != 2) {
log_error("Failed to parse major minor from %s", buffer);
return 0;
}
return 0;
return 1;
}
static int _get_holder_name(const char *dir, char *name, int max_size)
static int _get_parent_mpath(const char *dir, char *name, int max_size)
{
struct dirent *d;
DIR *dr;
@@ -164,7 +155,7 @@ static int _get_holder_name(const char *dir, char *name, int max_size)
}
#ifdef UDEV_SYNC_SUPPORT
static int _udev_dev_is_mpath_component(struct device *dev)
static int _udev_dev_is_mpath(struct device *dev)
{
const char *value;
struct dev_ext *ext;
@@ -183,148 +174,95 @@ static int _udev_dev_is_mpath_component(struct device *dev)
return 0;
}
#else
static int _udev_dev_is_mpath_component(struct device *dev)
static int _udev_dev_is_mpath(struct device *dev)
{
return 0;
}
#endif
static int _native_dev_is_mpath_component(struct cmd_context *cmd, struct dev_filter *f, struct device *dev)
static int _native_dev_is_mpath(struct dev_filter *f, struct device *dev)
{
struct mpath_priv *mp = (struct mpath_priv *) f->private;
struct dev_types *dt = mp->dt;
const char *part_name;
const char *name; /* e.g. "sda" for "/dev/sda" */
char link_path[PATH_MAX]; /* some obscure, unpredictable sysfs path */
char holders_path[PATH_MAX]; /* e.g. "/sys/block/sda/holders/" */
char dm_dev_path[PATH_MAX]; /* e.g. "/dev/dm-1" */
char holder_name[128] = { 0 }; /* e.g. "dm-1" */
const char *sysfs_dir = dm_sysfs_dir();
int dev_major = MAJOR(dev->dev);
int dev_minor = MINOR(dev->dev);
int dm_dev_major;
int dm_dev_minor;
const char *part_name, *name;
struct stat info;
char path[PATH_MAX], parent_name[PATH_MAX];
const char *sysfs_dir = dm_sysfs_dir();
int major = MAJOR(dev->dev);
int minor = MINOR(dev->dev);
dev_t primary_dev;
long look;
/* Limit this filter to SCSI or NVME devices */
if (!major_is_scsi_device(dt, dev_major) && !dev_is_nvme(dt, dev))
/* Limit this filter only to SCSI devices */
if (!major_is_scsi_device(dt, MAJOR(dev->dev)))
return 0;
switch (dev_get_primary_dev(dt, dev, &primary_dev)) {
case 2: /* The dev is partition. */
part_name = dev_name(dev); /* name of original dev for log_debug msg */
/* gets "foo" for "/dev/foo" where "/dev/foo" comes from major:minor */
if (!(name = _get_sysfs_name_by_devt(sysfs_dir, primary_dev, link_path, sizeof(link_path))))
if (!(name = _get_sysfs_name_by_devt(sysfs_dir, primary_dev, parent_name, sizeof(parent_name))))
return_0;
log_debug_devs("%s: Device is a partition, using primary "
"device %s for mpath component detection",
part_name, name);
break;
case 1: /* The dev is already a primary dev. Just continue with the dev. */
/* gets "foo" for "/dev/foo" */
if (!(name = _get_sysfs_name(dev)))
return_0;
break;
default: /* 0, error. */
log_warn("Failed to get primary device for %d:%d.", dev_major, dev_minor);
log_warn("Failed to get primary device for %d:%d.", major, minor);
return 0;
}
if (dm_snprintf(holders_path, sizeof(holders_path), "%sblock/%s/holders", sysfs_dir, name) < 0) {
if (dm_snprintf(path, sizeof(path), "%s/block/%s/holders", sysfs_dir, name) < 0) {
log_warn("Sysfs path to check mpath is too long.");
return 0;
}
/* also will filter out partitions */
if (stat(holders_path, &info))
if (stat(path, &info))
return 0;
if (!S_ISDIR(info.st_mode)) {
log_warn("Path %s is not a directory.", holders_path);
log_warn("Path %s is not a directory.", path);
return 0;
}
/*
* If holders dir contains an entry such as "dm-1", then this sets
* holder_name to "dm-1".
*
* If holders dir is empty, return 0 (this is generally where
* devs that are not mpath components return.)
*/
if (!_get_holder_name(holders_path, holder_name, sizeof(holder_name)))
if (!_get_parent_mpath(path, parent_name, sizeof(parent_name)))
return 0;
if (dm_snprintf(dm_dev_path, sizeof(dm_dev_path), "%s/%s", cmd->dev_dir, holder_name) < 0) {
log_warn("dm device path to check mpath is too long.");
return 0;
}
if (!_get_sysfs_get_major_minor(sysfs_dir, parent_name, &major, &minor))
return_0;
/*
* stat "/dev/dm-1" which is the holder of the dev we're checking
* dm_dev_major:dm_dev_minor come from stat("/dev/dm-1")
*/
if (stat(dm_dev_path, &info)) {
log_debug("filter-mpath %s holder %s stat result %d",
dev_name(dev), dm_dev_path, errno);
if (major != dt->device_mapper_major)
return 0;
}
dm_dev_major = (int)MAJOR(info.st_rdev);
dm_dev_minor = (int)MINOR(info.st_rdev);
if (dm_dev_major != dt->device_mapper_major) {
log_debug_devs("filter-mpath %s holder %s %d:%d does not have dm major",
dev_name(dev), dm_dev_path, dm_dev_major, dm_dev_minor);
return 0;
}
/*
* Save the result of checking that "/dev/dm-1" is an mpath device
* to avoid repeating it for each path component.
* The minor number of "/dev/dm-1" is added to the hash table with
* const value 2 meaning that dm minor 1 (for /dev/dm-1) is a multipath dev
* and const value 1 meaning that dm minor 1 is not a multipath dev.
*/
look = (long) dm_hash_lookup_binary(mp->hash, &dm_dev_minor, sizeof(dm_dev_minor));
/* Avoid repeated detection of multipath device and use first checked result */
look = (long) dm_hash_lookup_binary(mp->hash, &minor, sizeof(minor));
if (look > 0) {
log_debug_devs("filter-mpath %s holder %s %u:%u already checked as %sbeing mpath.",
dev_name(dev), holder_name, dm_dev_major, dm_dev_minor, (look > 1) ? "" : "not ");
log_debug_devs("%s(%u:%u): already checked as %sbeing mpath.",
parent_name, major, minor, (look > 1) ? "" : "not ");
return (look > 1) ? 1 : 0;
}
/*
* Returns 1 if /sys/block/<holder_name>/dm/uuid indicates that
* <holder_name> is a dm device with dm uuid prefix mpath-.
* When true, <holder_name> will be something like "dm-1".
*
* (Is a hash table worth it to avoid reading one sysfs file?)
*/
if (_get_sysfs_dm_mpath(dt, sysfs_dir, holder_name)) {
log_debug_devs("filter-mpath %s holder %s %u:%u ignore mpath component",
dev_name(dev), holder_name, dm_dev_major, dm_dev_minor);
(void) dm_hash_insert_binary(mp->hash, &dm_dev_minor, sizeof(dm_dev_minor), (void*)2);
if (lvm_dm_prefix_check(major, minor, MPATH_PREFIX)) {
(void) dm_hash_insert_binary(mp->hash, &minor, sizeof(minor), (void*)2);
return 1;
}
(void) dm_hash_insert_binary(mp->hash, &dm_dev_minor, sizeof(dm_dev_minor), (void*)1);
(void) dm_hash_insert_binary(mp->hash, &minor, sizeof(minor), (void*)1);
return 0;
}
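
Stripped of caching and logging, the native test above reduces to two sysfs reads; a sketch reusing the helpers from this file (the wrapper itself is illustrative; dt is unused by the _get_sysfs_dm_mpath body shown above, so NULL is passed):

static int _is_mpath_component_sketch(const char *name)	/* e.g. "sda" */
{
	char path[PATH_MAX];
	char holder_name[128] = { 0 };

	/* 1. Does /sys/block/<name>/holders contain a dm-N entry? */
	if (dm_snprintf(path, sizeof(path), "%sblock/%s/holders",
			dm_sysfs_dir(), name) < 0)
		return 0;

	if (!_get_holder_name(path, holder_name, sizeof(holder_name)))
		return 0;	/* no holder: not an mpath component */

	/* 2. Does /sys/block/<dm-N>/dm/uuid begin with "mpath-"? */
	return _get_sysfs_dm_mpath(NULL, dm_sysfs_dir(), holder_name);
}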
static int _dev_is_mpath_component(struct cmd_context *cmd, struct dev_filter *f, struct device *dev)
static int _dev_is_mpath(struct dev_filter *f, struct device *dev)
{
if (dev->ext.src == DEV_EXT_NONE)
return _native_dev_is_mpath_component(cmd, f, dev);
return _native_dev_is_mpath(f, dev);
if (dev->ext.src == DEV_EXT_UDEV)
return _udev_dev_is_mpath_component(dev);
return _udev_dev_is_mpath(dev);
log_error(INTERNAL_ERROR "Missing hook for mpath recognition "
"using external device info source %s", dev_ext_name(dev));
@@ -334,11 +272,11 @@ static int _dev_is_mpath_component(struct cmd_context *cmd, struct dev_filter *f
#define MSG_SKIPPING "%s: Skipping mpath component device"
static int _ignore_mpath_component(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
static int _ignore_mpath(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
{
dev->filtered_flags &= ~DEV_FILTERED_MPATH_COMPONENT;
if (_dev_is_mpath_component(cmd, f, dev) == 1) {
if (_dev_is_mpath(f, dev) == 1) {
if (dev->ext.src == DEV_EXT_NONE)
log_debug_devs(MSG_SKIPPING, dev_name(dev));
else
@@ -365,8 +303,8 @@ static void _destroy(struct dev_filter *f)
struct dev_filter *mpath_filter_create(struct dev_types *dt)
{
const char *sysfs_dir = dm_sysfs_dir();
struct mpath_priv *mp;
struct dm_pool *mem;
struct mpath_priv *mp;
struct dm_hash_table *hash;
if (!*sysfs_dir) {
@@ -390,13 +328,19 @@ struct dev_filter *mpath_filter_create(struct dev_types *dt)
goto bad;
}
mp->f.passes_filter = _ignore_mpath_component;
if (!(mp = dm_pool_zalloc(mem, sizeof(*mp)))) {
log_error("mpath filter allocation failed.");
goto bad;
}
mp->f.passes_filter = _ignore_mpath;
mp->f.destroy = _destroy;
mp->f.use_count = 0;
mp->f.private = mp;
mp->f.name = "mpath";
mp->dt = dt;
mp->mem = mem;
mp->dt = dt;
mp->hash = hash;
log_debug_devs("mpath filter initialised.");


@@ -15,7 +15,6 @@
#include "lib/misc/lib.h"
#include "lib/filters/filter.h"
#include "lib/commands/toolcontext.h"
struct rfilter {
struct dm_pool *mem;
@@ -154,14 +153,6 @@ static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct devic
dev->filtered_flags &= ~DEV_FILTERED_REGEX;
if (cmd->enable_devices_list)
return 1;
if (cmd->enable_devices_file && !cmd->filter_regex_with_devices_file) {
/* TODO: print a notice if the filter is set to something and we ignore it here. */
return 1;
}
dm_list_iterate_items(sl, &dev->aliases) {
m = dm_regex_match(rf->engine, sl->str);


@@ -30,7 +30,6 @@ struct dev_filter *partitioned_filter_create(struct dev_types *dt);
struct dev_filter *persistent_filter_create(struct dev_types *dt, struct dev_filter *f);
struct dev_filter *sysfs_filter_create(void);
struct dev_filter *signature_filter_create(struct dev_types *dt);
struct dev_filter *deviceid_filter_create(struct cmd_context *cmd);
struct dev_filter *internal_filter_create(void);
int internal_filter_allow(struct dm_pool *mem, struct device *dev);
@@ -64,7 +63,5 @@ struct dev_filter *usable_filter_create(struct cmd_context *cmd, struct dev_type
#define DEV_FILTERED_DEVTYPE 0x00000100
#define DEV_FILTERED_MINSIZE 0x00000200
#define DEV_FILTERED_UNUSABLE 0x00000400
#define DEV_FILTERED_DEVICES_FILE 0x00000800
#define DEV_FILTERED_DEVICES_LIST 0x00001000
#endif /* _LVM_FILTER_H */


@@ -23,7 +23,6 @@
#include "lib/metadata/segtype.h"
#include "lib/format_text/text_export.h"
#include "lib/commands/toolcontext.h"
#include "lib/device/device_id.h"
#include "libdaemon/client/config-util.h"
#include <stdarg.h>
@@ -556,11 +555,6 @@ static int _print_pvs(struct formatter *f, struct volume_group *vg)
dm_escape_double_quotes(buffer, pv_dev_name(pv)));
outnl(f);
if (dev_idtype(pv->dev) && dev_id(pv->dev)) {
outf(f, "device_id_type = \"%s\"", dev_idtype(pv->dev));
outf(f, "device_id = \"%s\"", dev_id(pv->dev));
}
if (!_print_flag_config(f, pv->status, PV_FLAGS))
return_0;


@@ -72,7 +72,6 @@ static const struct flag _lv_flags[] = {
{LV_ACTIVATION_SKIP, "ACTIVATION_SKIP", COMPATIBLE_FLAG},
{LV_ERROR_WHEN_FULL, "ERROR_WHEN_FULL", COMPATIBLE_FLAG},
{LV_METADATA_FORMAT, "METADATA_FORMAT", SEGTYPE_FLAG},
{LV_CROP_METADATA, "CROP_METADATA", SEGTYPE_FLAG},
{LV_CACHE_VOL, "CACHE_VOL", COMPATIBLE_FLAG},
{LV_CACHE_USES_CACHEVOL, "CACHE_USES_CACHEVOL", SEGTYPE_FLAG},
{LV_NOSCAN, NULL, 0},


@@ -188,7 +188,7 @@ static int _read_pv(struct cmd_context *cmd,
struct physical_volume *pv;
struct pv_list *pvl;
const struct dm_config_value *cv;
const char *str;
const char *device_hint;
uint64_t size, ba_start;
if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
@@ -233,21 +233,11 @@ static int _read_pv(struct cmd_context *cmd,
return 0;
}
if (dm_config_get_str(pvn, "device", &str)) {
if (!(pv->device_hint = dm_pool_strdup(mem, str)))
if (dm_config_get_str(pvn, "device", &device_hint)) {
if (!(pv->device_hint = dm_pool_strdup(mem, device_hint)))
log_error("Failed to allocate memory for device hint in read_pv.");
}
if (dm_config_get_str(pvn, "device_id", &str)) {
if (!(pv->device_id = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device_id in read_pv.");
}
if (dm_config_get_str(pvn, "device_id_type", &str)) {
if (!(pv->device_id_type = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device_id_type in read_pv.");
}
if (!_read_uint64(pvn, "pe_start", &pv->pe_start)) {
log_error("Couldn't read extent start value (pe_start) "
"for physical volume.");
@@ -316,7 +306,7 @@ static int _read_pvsummary(struct cmd_context *cmd,
{
struct physical_volume *pv;
struct pv_list *pvl;
const char *str;
const char *device_hint;
if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
!(pvl->pv = dm_pool_zalloc(mem, sizeof(*pvl->pv))))
@@ -336,19 +326,9 @@ static int _read_pvsummary(struct cmd_context *cmd,
!_read_uint64(pvn, "dev_size", &pv->size))
log_warn("Couldn't read dev size for physical volume.");
if (dm_config_get_str(pvn, "device", &str)) {
if (!(pv->device_hint = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device hint in read_pv_sum.");
}
if (dm_config_get_str(pvn, "device_id", &str)) {
if (!(pv->device_id = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device_id in read_pv_sum.");
}
if (dm_config_get_str(pvn, "device_id_type", &str)) {
if (!(pv->device_id_type = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device_id_type in read_pv_sum.");
if (dm_config_get_str(pvn, "device", &device_hint)) {
if (!(pv->device_hint = dm_pool_strdup(mem, device_hint)))
log_error("Failed to allocate memory for device hint in read_pv.");
}
dm_list_add(&vgsummary->pvsummaries, &pvl->list);


@@ -145,7 +145,6 @@
#include "lib/activate/activate.h"
#include "lib/label/hints.h"
#include "lib/device/dev-type.h"
#include "lib/device/device_id.h"
#include <sys/stat.h>
#include <fcntl.h>
@@ -167,10 +166,8 @@ static const char *_newhints_file = DEFAULT_RUN_DIR "/newhints";
* than they were built with. Increase the minor number
* when adding features that older lvm versions can just
* ignore while continuing to use the other content.
*
* MAJOR 2: add devices_file
*/
#define HINTS_VERSION_MAJOR 2
#define HINTS_VERSION_MAJOR 1
#define HINTS_VERSION_MINOR 1
#define HINT_LINE_LEN (PATH_MAX + NAME_LEN + ID_LEN + 64)
@@ -715,9 +712,8 @@ static int _read_hint_file(struct cmd_context *cmd, struct dm_list *hints, int *
break;
}
if (hv_major != HINTS_VERSION_MAJOR) {
log_debug("ignore hints with version %d.%d current %d.%d",
hv_major, hv_minor, HINTS_VERSION_MAJOR, HINTS_VERSION_MINOR);
if (hv_major > HINTS_VERSION_MAJOR) {
log_debug("ignore hints with newer major version %d.%d", hv_major, hv_minor);
*needs_refresh = 1;
break;
}
@@ -762,25 +758,6 @@ static int _read_hint_file(struct cmd_context *cmd, struct dm_list *hints, int *
continue;
}
keylen = strlen("devices_file:");
if (!strncmp(_hint_line, "devices_file:", keylen)) {
const char *df_hint = _hint_line + keylen;
const char *df_config = find_config_tree_str(cmd, devices_devicesfile_CFG, NULL);
/* when a devices file is not used, hints should have devices_file:. */
if (!cmd->enable_devices_file || !df_hint || !df_config) {
if (df_hint[0] != '.') {
log_debug("ignore hints with different devices_file: not enabled vs %s", df_hint);
*needs_refresh = 1;
break;
}
} else if (strcmp(df_hint, df_config)) {
log_debug("ignore hints with different devices_file: %s vs %s", df_hint, df_config);
*needs_refresh = 1;
break;
}
continue;
}
keylen = strlen("devs_hash:");
if (!strncmp(_hint_line, "devs_hash:", keylen)) {
if (sscanf(_hint_line + keylen, "%u %u", &read_hash, &read_count) != 2) {
@@ -850,12 +827,8 @@ static int _read_hint_file(struct cmd_context *cmd, struct dm_list *hints, int *
if (!(iter = dev_iter_create(NULL, 0)))
return 0;
while ((dev = dev_iter_get(cmd, iter))) {
if (cmd->enable_devices_file && !get_du_for_dev(cmd, dev))
continue;
if (!_dev_in_hint_hash(cmd, dev))
continue;
(void) dm_strncpy(devpath, dev_name(dev), sizeof(devpath));
calc_hash = calc_crc(calc_hash, (const uint8_t *)devpath, strlen(devpath));
calc_count++;
@@ -914,7 +887,6 @@ int write_hint_file(struct cmd_context *cmd, int newhints)
struct device *dev;
const char *vgname;
char *filter_str = NULL;
const char *config_devices_file = NULL;
uint32_t hash = INITIAL_CRC;
uint32_t count = 0;
time_t t;
@@ -975,19 +947,6 @@ int write_hint_file(struct cmd_context *cmd, int newhints)
fprintf(fp, "scan_lvs:%d\n", cmd->scan_lvs);
/*
* Only associate hints with the default/system devices file.
* If no default/system devices file is used, "." is set.
* If we are using a devices file other than the config setting
* (from --devicesfile), then we should not be using hints and
* shouldn't get here.
*/
config_devices_file = find_config_tree_str(cmd, devices_devicesfile_CFG, NULL);
if (cmd->enable_devices_file && !cmd->devicesfile && config_devices_file)
fprintf(fp, "devices_file:%s\n", config_devices_file);
else
fprintf(fp, "devices_file:.\n");
/*
* iterate through all devs and write a line for each
* dev flagged DEV_SCAN_FOUND_LABEL
@@ -1005,9 +964,6 @@ int write_hint_file(struct cmd_context *cmd, int newhints)
* 2. add PVs to the hint file
*/
while ((dev = dev_iter_get(cmd, iter))) {
if (cmd->enable_devices_file && !get_du_for_dev(cmd, dev))
continue;
if (!_dev_in_hint_hash(cmd, dev)) {
if (dev->flags & DEV_SCAN_FOUND_LABEL) {
/* should never happen */
@@ -1372,7 +1328,7 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
}
/*
* couldn't read file for some reason, not normal, just skip using hints
* couln't read file for some reason, not normal, just skip using hints
*/
if (!_read_hint_file(cmd, &hints_list, &needs_refresh)) {
log_debug("get_hints: read fail");
@@ -1397,6 +1353,7 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
/* create new hints after scan */
*newhints = NEWHINTS_REFRESH;
return 0;
}
/*


@@ -25,7 +25,6 @@
#include "lib/label/hints.h"
#include "lib/metadata/metadata.h"
#include "lib/format_text/layout.h"
#include "lib/device/device_id.h"
#include <sys/stat.h>
#include <fcntl.h>
@@ -806,6 +805,16 @@ static int _scan_list(struct cmd_context *cmd, struct dev_filter *f,
}
}
/*
* This will search the system's /dev for new path names and
* could help us reopen the device if it finds a new preferred
* path name for this dev's major:minor. It does that by
* inserting a new preferred path name on dev->aliases. open
* uses the first name from that list.
*/
log_debug_devs("Scanning refreshing device paths.");
dev_cache_scan();
/* Put devs that failed to open back on the original list to retry. */
dm_list_splice(devs, &reopen_devs);
goto scan_more;
@@ -927,12 +936,6 @@ static void _prepare_open_file_limit(struct cmd_context *cmd, unsigned int num_d
#endif
}
/*
* Currently the only caller is pvck which probably doesn't need
* deferred filters checked after the read... it wants to know if
* anything has the pvid, even a dev that might be filtered.
*/
int label_scan_for_pvid(struct cmd_context *cmd, char *pvid, struct device **dev_out)
{
char buf[LABEL_SIZE] __attribute__((aligned(8)));
@@ -945,20 +948,7 @@ int label_scan_for_pvid(struct cmd_context *cmd, char *pvid, struct device **dev
dm_list_init(&devs);
/*
* Creates a list of available devices, does not open or read any,
* and does not filter them.
*/
if (!setup_devices(cmd)) {
log_error("Failed to set up devices.");
return 0;
}
/*
* Iterating over all available devices with cmd->filter filters
* devices; those returned from dev_iter_get are the devs that
* pass filters, and are those we can use.
*/
dev_cache_scan();
if (!(iter = dev_iter_create(cmd->filter, 0))) {
log_error("Scanning failed to get devices.");
@@ -1032,7 +1022,6 @@ int label_scan(struct cmd_context *cmd)
struct device_list *devl, *devl2;
struct device *dev;
uint64_t max_metadata_size_bytes;
int device_ids_invalid = 0;
int using_hints;
int create_hints = 0; /* NEWHINTS_NONE */
@@ -1049,17 +1038,12 @@ int label_scan(struct cmd_context *cmd)
}
/*
* Creates a list of available devices, does not open or read any,
* and does not filter them. The list of all available devices
* is kept in "dev-cache", and comes from /dev entries or libudev.
* The list of devs found here needs to be filtered to get the
* list of devs we can use. The dev_iter calls using cmd->filter
* are what filters the devs.
* dev_cache_scan() creates a list of devices on the system
 * (saved in dev-cache) which we can iterate through to
* search for LVM devs. The dev cache list either comes from
* looking at dev nodes under /dev, or from udev.
*/
if (!setup_devices(cmd)) {
log_error("Failed to set up devices.");
return 0;
}
dev_cache_scan();
/*
* If we know that there will be md components with an end
@@ -1166,12 +1150,6 @@ int label_scan(struct cmd_context *cmd)
*/
_scan_list(cmd, cmd->filter, &scan_devs, 0, NULL);
/*
* Check if the devices_file content is up to date and
* if not update it.
*/
device_ids_validate(cmd, &device_ids_invalid, 0);
/*
* Metadata could be larger than total size of bcache, and bcache
* cannot currently be resized during the command. If this is the
@@ -1215,7 +1193,7 @@ int label_scan(struct cmd_context *cmd)
* rest of the devs.
*/
if (using_hints) {
if (device_ids_invalid || !validate_hints(cmd, &hints_list)) {
if (!validate_hints(cmd, &hints_list)) {
log_debug("Will scan %d remaining devices", dm_list_size(&all_devs));
_scan_list(cmd, cmd->filter, &all_devs, 0, NULL);
free_hints(&hints_list);
@@ -1265,11 +1243,6 @@ int label_scan(struct cmd_context *cmd)
free(devl);
}
dm_list_iterate_items_safe(devl, devl2, &filtered_devs) {
dm_list_del(&devl->list);
free(devl);
}
/*
* If hints were not available/usable, then we scanned all devs,
* and we now know which are PVs. Save this list of PVs we've
@@ -1277,7 +1250,7 @@ int label_scan(struct cmd_context *cmd)
* (create_hints variable has NEWHINTS_X value which indicates
* the reason for creating the new hints.)
*/
if (create_hints && !device_ids_invalid)
if (create_hints)
write_hint_file(cmd, create_hints);
return 1;


@@ -112,6 +112,7 @@ void label_scan_invalidate(struct device *dev);
void label_scan_invalidate_lv(struct cmd_context *cmd, struct logical_volume *lv);
void label_scan_drop(struct cmd_context *cmd);
void label_scan_destroy(struct cmd_context *cmd);
void label_scan_confirm(struct device *dev);
int label_scan_setup_bcache(void);
int label_scan_open(struct device *dev);
int label_scan_open_excl(struct device *dev);


@@ -247,13 +247,6 @@ static int _process_poll_init(const struct cmd_context *cmd, const char *poll_ty
goto out_req;
}
if (parms->devicesfile[0] &&
!(daemon_request_extend(req, LVMPD_PARM_DEVICESFILE " = %s",
parms->devicesfile, NULL))) {
log_error("Failed to create %s request." , poll_type);
goto out_req;
}
rep = daemon_send(_lvmpolld, req);
if (rep.error) {


@@ -60,7 +60,6 @@ struct daemon_parms {
const char *progress_title;
uint64_t lv_type;
struct poll_functions *poll_fns;
char devicesfile[128];
};
int poll_daemon(struct cmd_context *cmd, unsigned background,


@@ -204,7 +204,6 @@ int update_cache_pool_params(struct cmd_context *cmd,
unsigned attr,
uint32_t pool_data_extents,
uint32_t *pool_metadata_extents,
struct logical_volume *metadata_lv,
int *chunk_size_calc_method, uint32_t *chunk_size)
{
uint64_t min_meta_size;
@@ -253,26 +252,39 @@ int update_cache_pool_params(struct cmd_context *cmd,
if (!validate_cache_chunk_size(cmd, *chunk_size))
return_0;
min_meta_size = _cache_min_metadata_size((uint64_t) pool_data_extents * extent_size, *chunk_size);
/* Round up to extent size */
if (min_meta_size % extent_size)
min_meta_size += extent_size - min_meta_size % extent_size;
if (!pool_metadata_size)
pool_metadata_size = min_meta_size;
if (pool_metadata_size > (2 * DEFAULT_CACHE_POOL_MAX_METADATA_SIZE)) {
pool_metadata_size = 2 * DEFAULT_CACHE_POOL_MAX_METADATA_SIZE;
if (*pool_metadata_extents)
log_warn("WARNING: Maximum supported pool metadata size is %s.",
display_size(cmd, pool_metadata_size));
} else if (pool_metadata_size < min_meta_size) {
if (*pool_metadata_extents)
log_warn("WARNING: Minimum required pool metadata size is %s "
"(needs extra %s).",
display_size(cmd, min_meta_size),
display_size(cmd, min_meta_size - pool_metadata_size));
pool_metadata_size = min_meta_size;
}
if (!(*pool_metadata_extents =
extents_from_size(cmd, pool_metadata_size, extent_size)))
return_0;
if ((uint64_t) *chunk_size > (uint64_t) pool_data_extents * extent_size) {
log_error("Size of %s data volume cannot be smaller than chunk size %s.",
segtype->name, display_size(cmd, *chunk_size));
return 0;
}
min_meta_size = _cache_min_metadata_size((uint64_t) pool_data_extents * extent_size, *chunk_size);
min_meta_size = dm_round_up(min_meta_size, extent_size);
if (!pool_metadata_size)
pool_metadata_size = min_meta_size;
if (!update_pool_metadata_min_max(cmd, extent_size,
min_meta_size,
(2 * DEFAULT_CACHE_POOL_MAX_METADATA_SIZE),
&pool_metadata_size,
metadata_lv,
pool_metadata_extents))
return_0;
log_verbose("Preferred pool metadata size %s.",
display_size(cmd, (uint64_t)*pool_metadata_extents * extent_size));
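
The extent rounding above deserves a worked example: with 4 MiB extents (8192 sectors) and a computed minimum of 10000 sectors, both the open-coded form and dm_round_up() produce 16384 sectors, i.e. two whole extents. A self-contained sketch:

/* Illustrative only: round a size (in sectors) up to whole extents. */
static uint64_t _round_to_extent_sketch(uint64_t size, uint32_t extent_size)
{
	if (size % extent_size)
		size += extent_size - size % extent_size;

	return size;	/* _round_to_extent_sketch(10000, 8192) == 16384 */
}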


@@ -773,13 +773,9 @@ int lv_add_integrity_to_raid(struct logical_volume *lv, struct integrity_setting
bad:
log_error("Failed to add integrity.");
if (revert_meta_lvs) {
for (s = 0; s < DEFAULT_RAID_MAX_IMAGES; s++) {
if (!imeta_lvs[s])
continue;
if (!lv_remove(imeta_lvs[s]))
log_error("New integrity metadata LV may require manual removal.");
}
for (s = 0; s < revert_meta_lvs; s++) {
if (!lv_remove(imeta_lvs[s]))
log_error("New integrity metadata LV may require manual removal.");
}
if (!vg_write(vg) || !vg_commit(vg))
@@ -899,52 +895,12 @@ int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_s
return 0;
}
int lv_raid_integrity_total_mismatches(struct cmd_context *cmd,
const struct logical_volume *lv,
uint64_t *mismatches)
{
struct logical_volume *lv_image;
struct lv_segment *seg, *seg_image;
uint32_t s;
uint64_t mismatches_image;
uint64_t total = 0;
int errors = 0;
if (!lv_is_raid(lv))
return 0;
seg = first_seg(lv);
for (s = 0; s < seg->area_count; s++) {
lv_image = seg_lv(seg, s);
seg_image = first_seg(lv_image);
if (!seg_is_integrity(seg_image))
continue;
mismatches_image = 0;
if (!lv_integrity_mismatches(cmd, lv_image, &mismatches_image))
errors++;
total += mismatches_image;
}
*mismatches = total;
if (errors)
return 0;
return 1;
}
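
A usage sketch (the reporting lines are illustrative): note that a 0 return means some per-image counter could not be read, not that there were zero mismatches.

	uint64_t mismatches = 0;

	if (lv_raid_integrity_total_mismatches(cmd, lv, &mismatches))
		log_print("Integrity mismatches: " FMTu64 ".", mismatches);
	else
		log_error("Failed to read integrity mismatches for %s.",
			  display_lvname(lv));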
int lv_integrity_mismatches(struct cmd_context *cmd,
const struct logical_volume *lv,
uint64_t *mismatches)
{
struct lv_with_info_and_seg_status status;
if (lv_is_raid(lv) && lv_raid_has_integrity((struct logical_volume *)lv))
return lv_raid_integrity_total_mismatches(cmd, lv, mismatches);
if (!lv_is_integrity(lv))
return_0;


@@ -1034,37 +1034,6 @@ char *lv_dmpath_dup(struct dm_pool *mem, const struct logical_volume *lv)
return repstr;
}
/* maybe factor a common function with lv_dmpath_dup */
char *lv_dmpath_suffix_dup(struct dm_pool *mem, const struct logical_volume *lv,
const char *suffix)
{
char *name;
char *repstr;
size_t len;
if (!*lv->vg->name)
return dm_pool_strdup(mem, "");
if (!(name = dm_build_dm_name(mem, lv->vg->name, lv->name, NULL))) {
log_error("dm_build_dm_name failed");
return NULL;
}
len = strlen(dm_dir()) + strlen(name) + strlen(suffix) + 2;
if (!(repstr = dm_pool_zalloc(mem, len))) {
log_error("dm_pool_alloc failed");
return NULL;
}
if (dm_snprintf(repstr, len, "%s/%s%s", dm_dir(), name, suffix) < 0) {
log_error("lv_dmpath snprintf failed");
return NULL;
}
return repstr;
}
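
A brief usage sketch (the "_cmeta" suffix is only an example): for LV lv0 in VG vg0 with the default dm directory this yields "/dev/mapper/vg0-lv0_cmeta".

	char *path;

	if ((path = lv_dmpath_suffix_dup(lv->vg->vgmem, lv, "_cmeta")))
		log_debug("Suffixed dm path: %s.", path);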
char *lv_uuid_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
return id_format_and_copy(mem ? mem : lv->vg->vgmem, &lv->lvid.id[1]);


@@ -194,9 +194,6 @@ char *lv_lock_args_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lvseg_kernel_discards_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_with_info_and_seg_status *lvdm);
char *lv_time_dup(struct dm_pool *mem, const struct logical_volume *lv, int iso_mode);
char *lv_dmpath_suffix_dup(struct dm_pool *mem, const struct logical_volume *lv,
const char *suffix);
typedef enum {
PERCENT_GET_DATA = 0,
PERCENT_GET_METADATA,


@@ -1470,8 +1470,6 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
struct logical_volume *external_lv = NULL;
int is_raid10 = 0;
uint32_t data_copies = 0;
struct lv_list *lvl;
int is_last_pool = lv_is_pool(lv);
if (!dm_list_empty(&lv->segments)) {
seg = first_seg(lv);
@@ -1583,28 +1581,6 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
!lv_update_and_reload(external_lv))
return_0;
/* When removing last pool, automatically drop the spare volume */
if (is_last_pool && lv->vg->pool_metadata_spare_lv) {
/* TODO: maybe use a list of pools or a counter to avoid linear search through VG */
dm_list_iterate_items(lvl, &lv->vg->lvs)
if (lv_is_thin_type(lvl->lv) ||
lv_is_cache_type(lvl->lv)) {
is_last_pool = 0;
break;
}
if (is_last_pool) {
/* This is a purely internal LV, no question */
if (!deactivate_lv(lv->vg->cmd, lv->vg->pool_metadata_spare_lv)) {
log_error("Unable to deactivate spare logical volume %s.",
display_lvname(lv->vg->pool_metadata_spare_lv));
return 0;
}
if (!lv_remove(lv->vg->pool_metadata_spare_lv))
return_0;
}
}
return 1;
}
@@ -1874,13 +1850,11 @@ static uint32_t _mirror_log_extents(uint32_t region_size, uint32_t pe_size, uint
/* Is there enough total space or should we give up immediately? */
static int _sufficient_pes_free(struct alloc_handle *ah, struct dm_list *pvms,
uint32_t allocated, uint32_t log_still_needed,
uint32_t extents_still_needed)
uint32_t allocated, uint32_t extents_still_needed)
{
uint32_t area_extents_needed = (extents_still_needed - allocated) * ah->area_count / ah->area_multiple;
uint32_t parity_extents_needed = (extents_still_needed - allocated) * ah->parity_count / ah->area_multiple;
uint32_t metadata_extents_needed = (ah->alloc_and_split_meta ? 0 : ah->metadata_area_count * RAID_METADATA_AREA_LEN) +
(log_still_needed ? ah->log_len : 0); /* One each */
uint32_t metadata_extents_needed = ah->alloc_and_split_meta ? 0 : ah->metadata_area_count * RAID_METADATA_AREA_LEN + ah->log_len; /* One each */
uint64_t total_extents_needed = (uint64_t)area_extents_needed + parity_extents_needed + metadata_extents_needed;
uint32_t free_pes = pv_maps_size(pvms);
@@ -2109,18 +2083,15 @@ static int _alloc_parallel_area(struct alloc_handle *ah, uint32_t max_to_allocat
aa[smeta].pv = pva->map->pv;
aa[smeta].pe = pva->start;
aa[smeta].len = ah->log_len;
if (aa[smeta].len > pva->count) {
log_error("Metadata does not fit on a single PV.");
return 0;
}
log_debug_alloc("Allocating parallel metadata area %" PRIu32
" on %s start PE %" PRIu32
" length %" PRIu32 ".",
(smeta - (ah->area_count + ah->parity_count)),
pv_dev_name(aa[smeta].pv), aa[smeta].pe,
aa[smeta].len);
ah->log_len);
consume_pv_area(pva, aa[smeta].len);
consume_pv_area(pva, ah->log_len);
dm_list_add(&ah->alloced_areas[smeta], &aa[smeta].list);
}
aa[s].len = (ah->alloc_and_split_meta && !ah->split_metadata_is_allocated) ? len - ah->log_len : len;
@@ -3388,9 +3359,7 @@ static int _allocate(struct alloc_handle *ah,
old_allocated = alloc_state.allocated;
log_debug_alloc("Trying allocation using %s policy.", get_alloc_string(alloc));
if (!ah->approx_alloc && !_sufficient_pes_free(ah, pvms, alloc_state.allocated,
alloc_state.log_area_count_still_needed,
ah->new_extents))
if (!ah->approx_alloc && !_sufficient_pes_free(ah, pvms, alloc_state.allocated, ah->new_extents))
goto_out;
_init_alloc_parms(ah, &alloc_parms, alloc, prev_lvseg,
@@ -4741,8 +4710,6 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
struct lv_names lv_names = { .old = lv->name };
int old_lv_is_historical = lv_is_historical(lv);
int historical;
unsigned attrs;
const struct segment_type *segtype;
/*
* rename is not allowed on sub LVs except for pools
@@ -4768,15 +4735,9 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
}
if (lv_is_vdo_pool(lv) && lv_is_active(lv_lock_holder(lv))) {
segtype = first_seg(lv)->segtype;
if (!segtype->ops->target_present ||
!segtype->ops->target_present(lv->vg->cmd, NULL, &attrs) ||
!(attrs & VDO_FEATURE_ONLINE_RENAME)) {
log_error("Cannot rename active VDOPOOL volume %s, "
"VDO target feature support is missing.",
display_lvname(lv));
return 0;
}
log_error("Cannot rename active VDOPOOL volume %s.",
display_lvname(lv));
return 0;
}
if (update_mda && !archive(vg))
@@ -5419,8 +5380,6 @@ static int _lvresize_adjust_extents(struct logical_volume *lv,
uint32_t existing_extents;
uint32_t seg_size = 0;
uint32_t new_extents;
uint64_t max_metadata_size;
thin_crop_metadata_t crop;
int reducing = 0;
seg_last = last_seg(lv);
@@ -5581,33 +5540,6 @@ static int _lvresize_adjust_extents(struct logical_volume *lv,
return 1;
}
}
} else if (lv_is_thin_pool_metadata(lv)) {
if (!(seg = get_only_segment_using_this_lv(lv)))
return_0;
max_metadata_size = get_thin_pool_max_metadata_size(cmd, vg->profile, &crop);
if (((uint64_t)lp->extents * vg->extent_size) > max_metadata_size) {
lp->extents = (max_metadata_size + vg->extent_size - 1) / vg->extent_size;
log_print_unless_silent("Reached maximum pool metadata size %s (%" PRIu32 " extents).",
display_size(vg->cmd, max_metadata_size), lp->extents);
}
if (existing_logical_extents >= lp->extents)
lp->extents = existing_logical_extents;
crop = get_thin_pool_crop_metadata(cmd, crop, (uint64_t)lp->extents * vg->extent_size);
if (seg->crop_metadata != crop) {
seg->crop_metadata = crop;
seg->lv->status |= LV_CROP_METADATA;
/* Crop change requires reload even if there is no size change */
lp->size_changed = 1;
log_print_unless_silent("Thin pool will use metadata without cropping.");
}
if (!(seg_size = lp->extents - existing_logical_extents))
return 1; /* No change in metadata size */
}
} else { /* If reducing, find stripes, stripesize & size of last segment */
if (lp->stripes || lp->stripe_size || lp->mirrors)
@@ -6497,8 +6429,10 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
struct logical_volume *lock_lv = lv;
struct lv_segment *cache_seg = NULL;
int ask_discard;
struct lv_list *lvl;
struct seg_list *sl;
struct lv_segment *seg = first_seg(lv);
int is_last_pool = lv_is_pool(lv);
vg = lv->vg;
@@ -6620,6 +6554,9 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
*/
struct logical_volume *cachevol_lv = first_seg(lv)->pool_lv;
if (lv_is_cache_pool(cachevol_lv))
is_last_pool = 1;
if (!archive(vg))
return_0;
@@ -6726,6 +6663,25 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
if (is_last_pool && vg->pool_metadata_spare_lv) {
/* When removing the last pool, also remove the spare */
dm_list_iterate_items(lvl, &vg->lvs)
if (lv_is_pool_metadata(lvl->lv)) {
is_last_pool = 0;
break;
}
if (is_last_pool) {
/* This is purely internal LV volume, no question */
if (!deactivate_lv(cmd, vg->pool_metadata_spare_lv)) {
log_error("Unable to deactivate spare logical volume %s.",
display_lvname(vg->pool_metadata_spare_lv));
return 0;
}
if (!lv_remove(vg->pool_metadata_spare_lv))
return_0;
}
}
/* store it on disks */
if (!vg_write(vg) || !vg_commit(vg))
return_0;
@@ -7785,10 +7741,10 @@ retry_with_dev_set:
#endif
if (!dev_set_bytes(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT, wp.zero_value)) {
sigint_restore();
log_error("%s logical volume %s with value %d and size %s.",
log_error("%s %s of logical volume %s with value %d.",
sigint_caught() ? "Interrupted initialization" : "Failed to initialize",
display_lvname(lv), wp.zero_value,
display_size(lv->vg->cmd, zero_sectors));
display_size(lv->vg->cmd, zero_sectors),
display_lvname(lv), wp.zero_value);
return 0;
}
}
@@ -7987,12 +7943,6 @@ static int _should_wipe_lv(struct lvcreate_params *lp,
first_seg(first_seg(lv)->pool_lv)->zero_new_blocks))
return 0;
if (warn && (lv_passes_readonly_filter(lv))) {
log_warn("WARNING: Read-only activated logical volume %s not zeroed.",
display_lvname(lv));
return 0;
}
/* Cannot zero read-only volume */
if ((lv->status & LVM_WRITE) &&
(lp->zero || lp->wipe_signatures))
@@ -8434,8 +8384,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
first_seg(lv)->chunk_size = lp->chunk_size;
first_seg(lv)->zero_new_blocks = lp->zero_new_blocks;
first_seg(lv)->discards = lp->discards;
if ((first_seg(lv)->crop_metadata = lp->crop_metadata) == THIN_CROP_METADATA_NO)
lv->status |= LV_CROP_METADATA;
if (!recalculate_pool_chunk_size_with_dev_hints(lv, lp->thin_chunk_size_calc_policy)) {
stack;
goto revert_new_lv;

View File

@@ -495,8 +495,6 @@ static void _check_lv_segment(struct logical_volume *lv, struct lv_segment *seg,
seg_error("sets discards");
if (!dm_list_empty(&seg->thin_messages))
seg_error("sets thin_messages list");
if (seg->lv->status & LV_CROP_METADATA)
seg_error("sets CROP_METADATA flag");
}
if (seg_is_thin_volume(seg)) {

View File

@@ -26,7 +26,6 @@
#include "lib/metadata/vg.h"
#include "lib/metadata/lv.h"
#include "lib/misc/lvm-percent.h"
#include <stdbool.h>
#define MAX_STRIPES 128U
#define SECTOR_SHIFT 9L
@@ -144,7 +143,6 @@
#define LV_REMOVE_AFTER_RESHAPE UINT64_C(0x0400000000000000) /* LV needs to be removed after a shrinking reshape */
#define LV_METADATA_FORMAT UINT64_C(0x0800000000000000) /* LV has segments with metadata format */
#define LV_CROP_METADATA UINT64_C(0x0000000000000400) /* LV - also VG CLUSTERED */
#define LV_RESHAPE UINT64_C(0x1000000000000000) /* Ongoing reshape (number of stripes, stripesize or raid algorithm change):
used as SEGTYPE_FLAG to prevent activation on old runtime */
@@ -327,12 +325,6 @@ typedef enum {
THIN_DISCARDS_PASSDOWN,
} thin_discards_t;
typedef enum {
THIN_CROP_METADATA_UNSELECTED = 0, /* 'auto' selects */
THIN_CROP_METADATA_NO,
THIN_CROP_METADATA_YES,
} thin_crop_metadata_t;
typedef enum {
CACHE_MODE_UNSELECTED = 0,
CACHE_MODE_WRITETHROUGH,
@@ -510,7 +502,6 @@ struct lv_segment {
uint64_t transaction_id; /* For thin_pool, thin */
thin_zero_t zero_new_blocks; /* For thin_pool */
thin_discards_t discards; /* For thin_pool */
thin_crop_metadata_t crop_metadata; /* For thin_pool */
struct dm_list thin_messages; /* For thin_pool */
struct logical_volume *external_lv; /* For thin */
struct logical_volume *pool_lv; /* For thin, cache */
@@ -894,8 +885,6 @@ int update_thin_pool_params(struct cmd_context *cmd,
unsigned attr,
uint32_t pool_data_extents,
uint32_t *pool_metadata_extents,
struct logical_volume *metadata_lv,
unsigned *crop_metadata,
int *chunk_size_calc_method, uint32_t *chunk_size,
thin_discards_t *discards, thin_zero_t *zero_new_blocks);
@@ -1022,7 +1011,6 @@ struct lvcreate_params {
uint64_t permission; /* all */
unsigned error_when_full; /* when segment supports it */
thin_crop_metadata_t crop_metadata;
uint32_t read_ahead; /* all */
int approx_alloc; /* all */
alloc_policy_t alloc; /* all */
@@ -1320,7 +1308,6 @@ int update_cache_pool_params(struct cmd_context *cmd,
unsigned attr,
uint32_t pool_data_extents,
uint32_t *pool_metadata_extents,
struct logical_volume *metadata_lv,
int *chunk_size_calc_method, uint32_t *chunk_size);
int validate_lv_cache_chunk_size(struct logical_volume *pool_lv, uint32_t chunk_size);
int validate_lv_cache_create_pool(const struct logical_volume *pool_lv);
@@ -1446,6 +1433,5 @@ int lv_extend_integrity_in_raid(struct logical_volume *lv, struct dm_list *pvh);
int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_settings **isettings);
int integrity_mode_set(const char *mode, struct integrity_settings *settings);
int lv_integrity_mismatches(struct cmd_context *cmd, const struct logical_volume *lv, uint64_t *mismatches);
int lv_raid_integrity_total_mismatches(struct cmd_context *cmd, const struct logical_volume *lv, uint64_t *mismatches);
#endif

View File

@@ -459,33 +459,14 @@ int move_pv(struct volume_group *vg_from, struct volume_group *vg_to,
return _move_pv(vg_from, vg_to, pv_name, 1);
}
struct vg_from_to {
struct volume_group *from;
struct volume_group *to;
};
static int _move_pvs_used_by_lv_cb(struct logical_volume *lv, void *data)
{
struct vg_from_to *v = (struct vg_from_to*) data;
struct lv_segment *lvseg;
unsigned s;
dm_list_iterate_items(lvseg, &lv->segments)
for (s = 0; s < lvseg->area_count; s++)
if (seg_type(lvseg, s) == AREA_PV)
if (!_move_pv(v->from, v->to,
pv_dev_name(seg_pv(lvseg, s)), 0))
return_0;
return 1;
}
int move_pvs_used_by_lv(struct volume_group *vg_from,
struct volume_group *vg_to,
const char *lv_name)
{
struct vg_from_to data = { .from = vg_from, .to = vg_to };
struct lv_segment *lvseg;
unsigned s;
struct lv_list *lvl;
struct logical_volume *lv;
/* FIXME: handle tags */
if (!(lvl = find_lv_in_vg(vg_from, lv_name))) {
@@ -494,22 +475,28 @@ int move_pvs_used_by_lv(struct volume_group *vg_from,
return 0;
}
if (vg_bad_status_bits(vg_from, RESIZEABLE_VG)) {
log_error("Cannot move PV(s) from non resize volume group %s.", vg_from->name);
if (vg_bad_status_bits(vg_from, RESIZEABLE_VG) ||
vg_bad_status_bits(vg_to, RESIZEABLE_VG))
return 0;
dm_list_iterate_items(lvseg, &lvl->lv->segments) {
if (lvseg->log_lv)
if (!move_pvs_used_by_lv(vg_from, vg_to,
lvseg->log_lv->name))
return_0;
for (s = 0; s < lvseg->area_count; s++) {
if (seg_type(lvseg, s) == AREA_PV) {
if (!_move_pv(vg_from, vg_to,
pv_dev_name(seg_pv(lvseg, s)), 0))
return_0;
} else if (seg_type(lvseg, s) == AREA_LV) {
lv = seg_lv(lvseg, s);
if (!move_pvs_used_by_lv(vg_from, vg_to,
lv->name))
return_0;
}
}
}
if (vg_bad_status_bits(vg_to, RESIZEABLE_VG)) {
log_error("Cannot move PV(s) to non resize volume group %s.", vg_to->name);
return 0;
}
if (!for_each_sub_lv(lvl->lv, _move_pvs_used_by_lv_cb, &data))
return_0;
if (!_move_pvs_used_by_lv_cb(lvl->lv, &data))
return_0;
return 1;
}
@@ -1929,10 +1916,6 @@ static int _lv_each_dependency(struct logical_volume *lv,
return_0;
if (lvseg->metadata_lv && !fn(lvseg->metadata_lv, data))
return_0;
if (lvseg->writecache && !fn(lvseg->writecache, data))
return_0;
if (lvseg->integrity_meta_dev && !fn(lvseg->integrity_meta_dev, data))
return_0;
for (s = 0; s < lvseg->area_count; ++s) {
if (seg_type(lvseg, s) == AREA_LV && !fn(seg_lv(lvseg,s), data))
return_0;

View File

@@ -512,21 +512,8 @@ int pool_below_threshold(const struct lv_segment *pool_seg);
int pool_check_overprovisioning(const struct logical_volume *lv);
int create_pool(struct logical_volume *pool_lv, const struct segment_type *segtype,
struct alloc_handle *ah, uint32_t stripes, uint32_t stripe_size);
uint64_t get_thin_pool_max_metadata_size(struct cmd_context *cmd, struct profile *profile,
thin_crop_metadata_t *crop);
thin_crop_metadata_t get_thin_pool_crop_metadata(struct cmd_context *cmd,
thin_crop_metadata_t crop,
uint64_t metadata_size);
uint64_t estimate_thin_pool_metadata_size(uint32_t data_extents, uint32_t extent_size, uint32_t chunk_size);
int update_pool_metadata_min_max(struct cmd_context *cmd,
uint32_t extent_size,
uint64_t min_metadata_size, /* required min */
uint64_t max_metadata_size, /* writable max */
uint64_t *metadata_size, /* current calculated */
struct logical_volume *metadata_lv, /* name of converted LV or NULL */
uint32_t *metadata_extents); /* resulting extent count */
/*
* Begin skeleton for external LVM library
*/

View File

@@ -697,8 +697,6 @@ static struct logical_volume *_alloc_pool_metadata_spare(struct volume_group *vg
int handle_pool_metadata_spare(struct volume_group *vg, uint32_t extents,
struct dm_list *pvh, int poolmetadataspare)
{
/* Max usable size of any spare volume is currently 16GiB rounded up to extent size */
const uint64_t MAX_SIZE = (UINT64_C(2 * 16) * 1024 * 1024 + vg->extent_size - 1) / vg->extent_size;
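/* Illustrative arithmetic (assumed 4 MiB extents): 32 * 1024 * 1024 sectors
 * of 512 bytes = 16 GiB; with 8192-sector extents this rounds up to
 * (33554432 + 8192 - 1) / 8192 = 4096 extents. */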
struct logical_volume *lv = vg->pool_metadata_spare_lv;
uint32_t seg_mirrors;
struct lv_segment *seg;
@@ -708,11 +706,8 @@ int handle_pool_metadata_spare(struct volume_group *vg, uint32_t extents,
/* Find maximal size of metadata LV */
dm_list_iterate_items(lvl, &vg->lvs)
if (lv_is_pool_metadata(lvl->lv) &&
(lvl->lv->le_count > extents)) {
(lvl->lv->le_count > extents))
extents = lvl->lv->le_count;
if (extents >= MAX_SIZE)
break;
}
if (!poolmetadataspare) {
/* TODO: Not showing when lvm.conf would define 'n' ? */
@@ -723,9 +718,6 @@ int handle_pool_metadata_spare(struct volume_group *vg, uint32_t extents,
return 1;
}
if (extents > MAX_SIZE)
extents = MAX_SIZE;
if (!lv) {
if (!_alloc_pool_metadata_spare(vg, extents, pvh))
return_0;
@@ -750,52 +742,6 @@ int handle_pool_metadata_spare(struct volume_group *vg, uint32_t extents,
return 1;
}
int update_pool_metadata_min_max(struct cmd_context *cmd,
uint32_t extent_size,
uint64_t min_metadata_size, /* required min */
uint64_t max_metadata_size, /* writable max */
uint64_t *metadata_size, /* current calculated */
struct logical_volume *metadata_lv, /* name of converted LV or NULL */
uint32_t *metadata_extents) /* resulting extent count */
{
max_metadata_size = dm_round_up(max_metadata_size, extent_size);
min_metadata_size = dm_round_up(min_metadata_size, extent_size);
if (*metadata_size > max_metadata_size) {
if (metadata_lv) {
log_print_unless_silent("Size %s of pool metadata volume %s is bigger then maximum usable size %s.",
display_size(cmd, *metadata_size),
display_lvname(metadata_lv),
display_size(cmd, max_metadata_size));
} else {
if (*metadata_extents)
log_print_unless_silent("Reducing pool metadata size %s to maximum usable size %s.",
display_size(cmd, *metadata_size),
display_size(cmd, max_metadata_size));
*metadata_size = max_metadata_size;
}
} else if (*metadata_size < min_metadata_size) {
if (metadata_lv) {
log_error("Can't use volume %s with size %s as pool metadata. Minimal required size is %s.",
display_lvname(metadata_lv),
display_size(cmd, *metadata_size),
display_size(cmd, min_metadata_size));
return 0;
} else {
if (*metadata_extents)
log_print_unless_silent("Extending pool metadata size %s to required minimal size %s.",
display_size(cmd, *metadata_size),
display_size(cmd, min_metadata_size));
*metadata_size = min_metadata_size;
}
}
if (!(*metadata_extents = extents_from_size(cmd, *metadata_size, extent_size)))
return_0;
return 1;
}
int vg_set_pool_metadata_spare(struct logical_volume *lv)
{
char new_name[NAME_LEN];

View File

@@ -52,20 +52,6 @@ char *pv_tags_dup(const struct physical_volume *pv)
return tags_format_and_copy(pv->vg->vgmem, &pv->tags);
}
char *pv_deviceid_dup(struct dm_pool *mem, const struct physical_volume *pv)
{
if (!pv->device_id)
return NULL;
return dm_pool_strdup(mem, pv->device_id);
}
char *pv_deviceidtype_dup(struct dm_pool *mem, const struct physical_volume *pv)
{
if (!pv->device_id_type)
return NULL;
return dm_pool_strdup(mem, pv->device_id_type);
}
const struct format_type *pv_format_type(const struct physical_volume *pv)
{
return pv_field(pv, fmt);

View File

@@ -27,8 +27,6 @@ struct physical_volume {
struct id old_id; /* Set during pvchange -u. */
struct device *dev;
const char *device_hint; /* primary name last time metadata was written */
const char *device_id;
const char *device_id_type;
const struct format_type *fmt;
struct format_instance *fid;
@@ -79,8 +77,6 @@ char *pv_attr_dup(struct dm_pool *mem, const struct physical_volume *pv);
const char *pv_dev_name(const struct physical_volume *pv);
char *pv_uuid_dup(struct dm_pool *mem, const struct physical_volume *pv);
char *pv_tags_dup(const struct physical_volume *pv);
char *pv_deviceid_dup(struct dm_pool *mem, const struct physical_volume *pv);
char *pv_deviceidtype_dup(struct dm_pool *mem, const struct physical_volume *pv);
uint64_t pv_size(const struct physical_volume *pv);
uint64_t pv_size_field(const struct physical_volume *pv);
uint64_t pv_dev_size(const struct physical_volume *pv);

View File

@@ -326,7 +326,6 @@ struct segment_type *init_unknown_segtype(struct cmd_context *cmd,
#ifdef RAID_INTERNAL
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
bool raid_is_available(const struct logical_volume *lv);
#endif
#define THIN_FEATURE_DISCARDS (1U << 0)
@@ -350,8 +349,6 @@ int init_cache_segtypes(struct cmd_context *cmd, struct segtype_library *seglib)
int init_vdo_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
#endif
#define VDO_FEATURE_ONLINE_RENAME (1U << 0) /* version 6.2.3 */
int init_writecache_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
int init_integrity_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);

View File

@@ -610,9 +610,9 @@ static uint64_t _estimate_metadata_size(uint32_t data_extents, uint32_t extent_s
}
/* Estimate maximal supportable thin pool data size for given chunk_size */
static uint64_t _estimate_max_data_size(uint64_t max_metadata_size, uint32_t chunk_size)
static uint64_t _estimate_max_data_size(uint32_t chunk_size)
{
return max_metadata_size * chunk_size * SECTOR_SIZE / UINT64_C(64);
return chunk_size * (DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2) * SECTOR_SIZE / UINT64_C(64);
}
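/*
 * Rough worked example (illustrative figures, not from the source): a
 * 16 GiB metadata device holds 17179869184 / 64 = 268435456 mapping
 * entries (one ~64-byte entry per chunk); with a 64 KiB chunk
 * (128 sectors) that addresses 268435456 * 128 sectors = 16 TiB of
 * thin pool data.
 */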
/* Estimate thin pool chunk size from data and metadata size (in sector units) */
@@ -662,38 +662,6 @@ int get_default_allocation_thin_pool_chunk_size(struct cmd_context *cmd, struct
return 1;
}
/* Return max supported metadata size with selected cropping */
uint64_t get_thin_pool_max_metadata_size(struct cmd_context *cmd, struct profile *profile,
thin_crop_metadata_t *crop)
{
*crop = find_config_tree_bool(cmd, allocation_thin_pool_crop_metadata_CFG, profile) ?
THIN_CROP_METADATA_YES : THIN_CROP_METADATA_NO;
return (*crop == THIN_CROP_METADATA_NO) ?
(2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE_V1_KB) : (2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE);
}
/*
* With existing crop method, check if the metadata_size would need cropping.
* If not, set UNSELECTED, otherwise print some verbose info about selected cropping
*/
thin_crop_metadata_t get_thin_pool_crop_metadata(struct cmd_context *cmd,
thin_crop_metadata_t crop,
uint64_t metadata_size)
{
const uint64_t crop_size = (2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE);
if (metadata_size > crop_size) {
if (crop == THIN_CROP_METADATA_NO)
log_verbose("Using metadata size without cropping.");
else
log_verbose("Cropping metadata size to %s.", display_size(cmd, crop_size));
} else
crop = THIN_CROP_METADATA_UNSELECTED;
return crop;
}
int update_thin_pool_params(struct cmd_context *cmd,
struct profile *profile,
uint32_t extent_size,
@@ -701,13 +669,10 @@ int update_thin_pool_params(struct cmd_context *cmd,
unsigned attr,
uint32_t pool_data_extents,
uint32_t *pool_metadata_extents,
struct logical_volume *metadata_lv,
thin_crop_metadata_t *crop_metadata,
int *chunk_size_calc_method, uint32_t *chunk_size,
thin_discards_t *discards, thin_zero_t *zero_new_blocks)
{
uint64_t pool_metadata_size;
uint64_t max_metadata_size;
uint64_t pool_metadata_size = (uint64_t) *pool_metadata_extents * extent_size;
uint32_t estimate_chunk_size;
uint64_t max_pool_data_size;
const char *str;
@@ -737,9 +702,7 @@ int update_thin_pool_params(struct cmd_context *cmd,
*zero_new_blocks = find_config_tree_bool(cmd, allocation_thin_pool_zero_CFG, profile)
? THIN_ZERO_YES : THIN_ZERO_NO;
max_metadata_size = get_thin_pool_max_metadata_size(cmd, profile, crop_metadata);
if (!*pool_metadata_extents) {
if (!pool_metadata_size) {
if (!*chunk_size) {
if (!get_default_allocation_thin_pool_chunk_size(cmd, profile,
chunk_size,
@@ -760,20 +723,20 @@ int update_thin_pool_params(struct cmd_context *cmd,
} else {
pool_metadata_size = _estimate_metadata_size(pool_data_extents, extent_size, *chunk_size);
if (pool_metadata_size > max_metadata_size) {
if (pool_metadata_size > (DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2)) {
/* Suggest bigger chunk size */
estimate_chunk_size =
_estimate_chunk_size(pool_data_extents, extent_size,
max_metadata_size, attr);
(DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2), attr);
log_warn("WARNING: Chunk size is too small for pool, suggested minimum is %s.",
display_size(cmd, estimate_chunk_size));
}
}
/* Round up to extent size silently */
pool_metadata_size = dm_round_up(pool_metadata_size, extent_size);
if (pool_metadata_size % extent_size)
pool_metadata_size += extent_size - pool_metadata_size % extent_size;
} else {
pool_metadata_size = (uint64_t) *pool_metadata_extents * extent_size;
estimate_chunk_size = _estimate_chunk_size(pool_data_extents, extent_size,
pool_metadata_size, attr);
@@ -788,19 +751,7 @@ int update_thin_pool_params(struct cmd_context *cmd,
}
}
/* Use not rounded max for data size */
max_pool_data_size = _estimate_max_data_size(max_metadata_size, *chunk_size);
if (!update_pool_metadata_min_max(cmd, extent_size,
2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE,
max_metadata_size,
&pool_metadata_size,
metadata_lv,
pool_metadata_extents))
return_0;
*crop_metadata = get_thin_pool_crop_metadata(cmd, *crop_metadata, pool_metadata_size);
max_pool_data_size = _estimate_max_data_size(*chunk_size);
if ((max_pool_data_size / extent_size) < pool_data_extents) {
log_error("Selected chunk size %s cannot address more then %s of thin pool data space.",
display_size(cmd, *chunk_size), display_size(cmd, max_pool_data_size));
@@ -813,6 +764,22 @@ int update_thin_pool_params(struct cmd_context *cmd,
if (!validate_thin_pool_chunk_size(cmd, *chunk_size))
return_0;
if (pool_metadata_size > (2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE)) {
pool_metadata_size = 2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE;
if (*pool_metadata_extents)
log_warn("WARNING: Maximum supported pool metadata size is %s.",
display_size(cmd, pool_metadata_size));
} else if (pool_metadata_size < (2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE)) {
pool_metadata_size = 2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE;
if (*pool_metadata_extents)
log_warn("WARNING: Minimum supported pool metadata size is %s.",
display_size(cmd, pool_metadata_size));
}
if (!(*pool_metadata_extents =
extents_from_size(cmd, pool_metadata_size, extent_size)))
return_0;
if ((uint64_t) *chunk_size > (uint64_t) pool_data_extents * extent_size) {
log_error("Size of %s data volume cannot be smaller than chunk size %s.",
segtype->name, display_size(cmd, *chunk_size));
@@ -991,5 +958,12 @@ int validate_thin_pool_chunk_size(struct cmd_context *cmd, uint32_t chunk_size)
uint64_t estimate_thin_pool_metadata_size(uint32_t data_extents, uint32_t extent_size, uint32_t chunk_size)
{
return _estimate_metadata_size(data_extents, extent_size, chunk_size);
uint64_t sz = _estimate_metadata_size(data_extents, extent_size, chunk_size);
if (sz > (2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE))
sz = 2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE;
else if (sz < (2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE))
sz = 2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE;
return sz;
}

View File

@@ -75,7 +75,7 @@ static int _get_writecache_kernel_status(struct cmd_context *cmd,
return 0;
}
if (!lv_info_with_seg_status(cmd, first_seg(lv), &status, 0, 0)) {
if (!lv_info_with_seg_status(cmd, first_seg(lv), &status, 1, 1)) {
log_error("Failed to get device mapper status for %s", display_lvname(lv));
goto fail;
}
@@ -434,12 +434,8 @@ int lv_writecache_set_cleaner(struct logical_volume *lv)
seg->writecache_settings.cleaner_set = 1;
if (lv_is_active(lv)) {
if (!vg_write(lv->vg) || !vg_commit(lv->vg)) {
log_error("Failed to update VG.");
return 0;
}
if (!lv_writecache_message(lv, "cleaner")) {
log_error("Failed to set writecache cleaner for %s.", display_lvname(lv));
if (!lv_update_and_reload(lv)) {
log_error("Failed to update VG and reload LV.");
return 0;
}
} else {

View File

@@ -469,63 +469,6 @@ static int _check_feature(const struct raid_feature *feature, uint32_t maj, uint
(maj == feature->maj && min == feature->min && patchlevel >= feature->patchlevel);
}
/* Check availability of raid10 taking data copies into consideration. */
static bool _raid10_is_available(const struct logical_volume *lv)
{
uint32_t i, rebuilds_per_group = 0, s;
const uint32_t copies = 2; /* FIXME: we only support 2-way mirrors (i.e. 2 data copies) in RAID10 for now. */
struct lv_segment *seg = first_seg(lv); /* We only have one segment in RaidLVs for now. */
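/*
 * The loop below walks the legs in mirror-group pairs: with 4 legs and
 * 2 copies the groups are (0,1) and (2,3); the LV is unavailable only
 * if every leg of some group is missing or virtual, i.e. no data copy
 * of that group survives.
 */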
for (i = 0; i < seg->area_count * copies; ++i) {
s = i % seg->area_count;
if (!(i % copies))
rebuilds_per_group = 0;
if (seg_type(seg, s) == AREA_LV &&
(lv_is_partial(seg_lv(seg, s)) ||
lv_is_virtual(seg_lv(seg, s))))
rebuilds_per_group++;
if (rebuilds_per_group >= copies)
return false;
}
return true;
}
/*
* Return true in case RaidLV with specific RAID level is available.
*
* - raid0: all legs have to be live
* - raid1 : minimum of 1 leg live
* - raid4/5: maximum of 1 leg unavailable
* - raid6: maximum of 2 legs unavailable
* - raid10: minimum of 1 leg per mirror group available
*
*/
bool raid_is_available(const struct logical_volume *lv)
{
uint32_t s, missing_legs = 0;
struct lv_segment *seg = first_seg(lv); /* We only have one segment in RaidLVs for now. */
/* Be cautious about bogus calls. */
if (!seg || !seg_is_raid(seg))
return false;
if (seg_is_any_raid10(seg))
return _raid10_is_available(lv);
/* Count missing RAID legs */
for (s = 0; s < seg->area_count; ++s)
if (seg_type(seg, s) == AREA_LV &&
lv_is_partial(seg_lv(seg, s)))
missing_legs++;
/* Degradation: segtype raid1 may miss up to legs-1 devices, raid0/4/5/6 may lose at most their parity devices. */
return missing_legs <= (seg_is_raid1(seg) ? seg->area_count - 1 : seg->segtype->parity_devs);
}
static int _raid_target_present(struct cmd_context *cmd,
const struct lv_segment *seg __attribute__((unused)),
unsigned *attributes)

View File

@@ -206,8 +206,6 @@ FIELD(PVS, pv, SIZ, "BA Start", ba_start, 0, size64, pv_ba_start, "Offset to the
FIELD(PVS, pv, SIZ, "BA Size", ba_size, 0, size64, pv_ba_size, "Size of PV Bootloader Area in current units.", 0)
FIELD(PVS, pv, BIN, "PInUse", id, 0, pvinuse, pv_in_use, "Set if PV is used.", 0)
FIELD(PVS, pv, BIN, "Duplicate", id, 0, pvduplicate, pv_duplicate, "Set if PV is an unchosen duplicate.", 0)
FIELD(PVS, pv, STR, "DeviceID", id, 0, pvdeviceid, pv_device_id, "Device ID such as the WWID.", 0)
FIELD(PVS, pv, STR, "DeviceIDType", id, 0, pvdeviceidtype, pv_device_id_type, "Type of device ID such as WWID.", 0)
/*
* End of PVS type fields
*/

View File

@@ -238,10 +238,6 @@ GET_PV_NUM_PROPERTY_FN(pv_ba_start, SECTOR_SIZE * pv->ba_start)
#define _pv_ba_start_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_ba_size, SECTOR_SIZE * pv->ba_size)
#define _pv_ba_size_set prop_not_implemented_set
GET_PV_STR_PROPERTY_FN(pv_device_id, pv->device_id)
#define _pv_device_id_set prop_not_implemented_set
GET_PV_STR_PROPERTY_FN(pv_device_id_type, pv->device_id_type)
#define _pv_device_id_type_set prop_not_implemented_set
#define _pv_allocatable_set prop_not_implemented_set
#define _pv_allocatable_get prop_not_implemented_get

View File

@@ -3338,10 +3338,6 @@ static int _integritymismatches_disp(struct dm_report *rh __attribute__((unused)
if (lv_is_integrity(lv) && lv_integrity_mismatches(lv->vg->cmd, lv, &mismatches))
return dm_report_field_uint64(rh, field, &mismatches);
if (lv_is_raid(lv) && lv_raid_has_integrity(lv) &&
lv_raid_integrity_total_mismatches(lv->vg->cmd, lv, &mismatches))
return dm_report_field_uint64(rh, field, &mismatches);
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
}
@@ -3511,42 +3507,6 @@ static int _pvduplicate_disp(struct dm_report *rh, struct dm_pool *mem,
return _binary_disp(rh, mem, field, duplicate, GET_FIRST_RESERVED_NAME(pv_duplicate_y), private);
}
static int _pvdeviceid_disp(struct dm_report *rh, struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private)
{
const struct physical_volume *pv = (const struct physical_volume *) data;
char *repstr;
if (!pv->device_id)
return _field_set_value(field, "", NULL);
if (!(repstr = pv_deviceid_dup(mem, pv))) {
log_error("Failed to allocate buffer.");
return 0;
}
return _field_set_value(field, repstr, NULL);
}
static int _pvdeviceidtype_disp(struct dm_report *rh, struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private)
{
const struct physical_volume *pv = (const struct physical_volume *) data;
char *repstr;
if (!pv->device_id_type)
return _field_set_value(field, "", NULL);
if (!(repstr = pv_deviceidtype_dup(mem, pv))) {
log_error("Failed to allocate buffer.");
return 0;
}
return _field_set_value(field, repstr, NULL);
}
static int _vgpermissions_disp(struct dm_report *rh, struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private)

View File

@@ -86,7 +86,6 @@ static int _thin_pool_text_import(struct lv_segment *seg,
struct logical_volume *pool_data_lv, *pool_metadata_lv;
const char *discards_str = NULL;
uint32_t zero = 0;
uint32_t crop = 0;
if (!dm_config_get_str(sn, "metadata", &lv_name))
return SEG_LOG_ERROR("Metadata must be a string in");
@@ -132,13 +131,6 @@ static int _thin_pool_text_import(struct lv_segment *seg,
seg->zero_new_blocks = (zero) ? THIN_ZERO_YES : THIN_ZERO_NO;
if (dm_config_has_node(sn, "crop_metadata")) {
if (!dm_config_get_uint32(sn, "crop_metadata", &crop))
return SEG_LOG_ERROR("Could not read crop_metadata for");
seg->crop_metadata = (crop) ? THIN_CROP_METADATA_YES : THIN_CROP_METADATA_NO;
seg->lv->status |= LV_CROP_METADATA;
}
/* Read messages */
for (; sn; sn = sn->sib)
if (!(sn->v) && !_thin_pool_add_message(seg, sn->key, sn->child))
@@ -185,9 +177,6 @@ static int _thin_pool_text_export(const struct lv_segment *seg, struct formatter
return 0;
}
if (seg->crop_metadata != THIN_CROP_METADATA_UNSELECTED)
outf(f, "crop_metadata = %u", (seg->crop_metadata == THIN_CROP_METADATA_YES) ? 1 : 0);
dm_list_iterate_items(tmsg, &seg->thin_messages) {
/* Extra validation */
switch (tmsg->type) {
@@ -318,12 +307,11 @@ static int _thin_pool_add_target_line(struct dev_manager *dm,
else
low_water_mark = 0;
if (!dm_tree_node_add_thin_pool_target_v1(node, len,
seg->transaction_id,
metadata_dlid, pool_dlid,
seg->chunk_size, low_water_mark,
(seg->zero_new_blocks == THIN_ZERO_YES) ? 0 : 1,
(seg->crop_metadata == THIN_CROP_METADATA_YES) ? 1 : 0))
if (!dm_tree_node_add_thin_pool_target(node, len,
seg->transaction_id,
metadata_dlid, pool_dlid,
seg->chunk_size, low_water_mark,
(seg->zero_new_blocks == THIN_ZERO_YES) ? 0 : 1))
return_0;
if (attr & THIN_FEATURE_DISCARDS) {

View File

@@ -25,7 +25,6 @@
#include "lib/metadata/segtype.h"
#include "base/memory/zalloc.h"
static const char _vdo_module[] = MODULE_NAME_VDO;
static unsigned _feature_mask;
static int _bad_field(const char *field)
@@ -392,21 +391,18 @@ static int _vdo_target_present(struct cmd_context *cmd,
static const struct feature {
uint32_t maj;
uint32_t min;
uint32_t patchlevel;
unsigned vdo_feature;
const char *feature;
} _features[] = {
{ 6, 2, 3, VDO_FEATURE_ONLINE_RENAME, "online_rename" },
{ 1, 1, 0, "" },
//{ 9, 9, VDO_FEATURE_RESIZE, "resize" },
};
static const char _lvmconf[] = "global/vdo_disabled_features";
//static const char _lvmconf[] = "global/vdo_disabled_features";
static int _vdo_checked = 0;
static int _vdo_present = 0;
static unsigned _vdo_attrs = 0;
uint32_t i, maj, min, patchlevel;
const struct segment_type *segtype;
const struct dm_config_node *cn;
const struct dm_config_value *cv;
const char *str;
if (!activation())
return 0;
@@ -423,8 +419,8 @@ static int _vdo_target_present(struct cmd_context *cmd,
}
if (maj < 6 || (maj == 6 && min < 2)) {
log_warn("WARNING: Target %s version %u.%u.%u is too old.",
_vdo_module, maj, min, patchlevel);
log_warn("WARNING: VDO target version %u.%u.%u is too old.",
maj, min, patchlevel);
return 0;
}
@@ -441,41 +437,15 @@ static int _vdo_target_present(struct cmd_context *cmd,
/* Prepare for adding supported features */
for (i = 0; i < DM_ARRAY_SIZE(_features); ++i)
if ((maj > _features[i].maj) ||
((maj == _features[i].maj) && (min > _features[i].min)) ||
((maj == _features[i].maj) && (min == _features[i].min) && (patchlevel >= _features[i].patchlevel)))
(maj == _features[i].maj && min >= _features[i].min))
_vdo_attrs |= _features[i].vdo_feature;
else
log_very_verbose("Target %s does not support %s.",
_vdo_module,
TARGET_NAME_VDO,
_features[i].feature);
}
if (attributes) {
if (!_feature_mask) {
/* Support runtime lvm.conf changes, N.B. avoid 32 feature */
if ((cn = find_config_tree_array(cmd, global_vdo_disabled_features_CFG, NULL))) {
for (cv = cn->v; cv; cv = cv->next) {
if (cv->type != DM_CFG_STRING) {
log_warn("WARNING: Ignoring invalid string in config file %s.",
_lvmconf);
continue;
}
str = cv->v.str;
if (!*str)
continue;
for (i = 0; i < DM_ARRAY_SIZE(_features); ++i)
if (strcasecmp(str, _features[i].feature) == 0)
_feature_mask |= _features[i].vdo_feature;
}
}
_feature_mask = ~_feature_mask;
for (i = 0; i < DM_ARRAY_SIZE(_features); ++i)
if ((_vdo_attrs & _features[i].vdo_feature) &&
!(_feature_mask & _features[i].vdo_feature))
log_very_verbose("Target %s %s support disabled by %s.",
_vdo_module,
_features[i].feature, _lvmconf);
}
*attributes = _vdo_attrs & _feature_mask;
}
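/*
 * Example lvm.conf snippet matched by the parsing above (the feature
 * name comes from the _features table):
 *
 *     global {
 *         vdo_disabled_features = [ "online_rename" ]
 *     }
 */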
@@ -486,7 +456,7 @@ static int _vdo_modules_needed(struct dm_pool *mem,
const struct lv_segment *seg __attribute__((unused)),
struct dm_list *modules)
{
if (!str_list_add(mem, modules, _vdo_module)) {
if (!str_list_add(mem, modules, MODULE_NAME_VDO)) {
log_error("String list allocation failed for VDO module.");
return 0;
}

View File

@@ -255,7 +255,7 @@ static int _target_present(struct cmd_context *cmd,
return 0;
}
if (min >= 3) {
if (min >= 2) {
_writecache_cleaner_supported = 1;
_writecache_max_age_supported = 1;
}

View File

@@ -1 +0,0 @@
dm_tree_node_add_thin_pool_target_v1

View File

@@ -1968,10 +1968,10 @@ int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
#define DM_THIN_MIN_DATA_BLOCK_SIZE (UINT32_C(128))
#define DM_THIN_MAX_DATA_BLOCK_SIZE (UINT32_C(2097152))
/*
* Max supported size for thin pool metadata device (17045913600 bytes)
* Max supported size for thin pool metadata device (17112760320 bytes)
* Limitation is hardcoded into the kernel and bigger device size
* is not accepted.
* drivers/md/dm-thin-metadata.h THIN_METADATA_MAX_SECTORS
* But DM_THIN_MAX_METADATA_SIZE here was defined incorrectly;
* Correct size is (UINT64_C(255) * ((1 << 14) - 64) * (4096 / (1 << 9)))
*/
#define DM_THIN_MAX_METADATA_SIZE (UINT64_C(255) * (1 << 14) * (4096 / (1 << 9)) - 256 * 1024)
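/*
 * Worked check of the byte counts quoted above (512-byte sectors):
 *   kernel limit:  255 * ((1 << 14) - 64) * 8 = 33292800 sectors = 17045913600 bytes
 *   without "-64": 255 * (1 << 14) * 8        = 33423360 sectors = 17112760320 bytes
 *   this define:   33423360 - 256 * 1024      = 33161216 sectors = 16978542592 bytes
 */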
@@ -1984,16 +1984,6 @@ int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
uint64_t low_water_mark,
unsigned skip_block_zeroing);
int dm_tree_node_add_thin_pool_target_v1(struct dm_tree_node *node,
uint64_t size,
uint64_t transaction_id,
const char *metadata_uuid,
const char *pool_uuid,
uint32_t data_block_size,
uint64_t low_water_mark,
unsigned skip_block_zeroing,
unsigned crop_metadata);
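/*
 * Minimal call sketch (identifiers are illustrative only); data_block_size
 * of 128 sectors = 64 KiB chunks, low_water_mark 0, block zeroing skipped,
 * and crop_metadata 0 keeps the full metadata device size:
 *
 *   if (!dm_tree_node_add_thin_pool_target_v1(node, size, 1,
 *                                             metadata_uuid, pool_uuid,
 *                                             128, 0, 1, 0))
 *       return_0;
 */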
/* Supported messages for thin provision target */
typedef enum {
DM_THIN_MESSAGE_CREATE_SNAP, /* device_id, origin_id */

View File

@@ -3502,24 +3502,6 @@ int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
uint32_t data_block_size,
uint64_t low_water_mark,
unsigned skip_block_zeroing)
{
return dm_tree_node_add_thin_pool_target_v1(node, size, transaction_id,
metadata_uuid, pool_uuid,
data_block_size,
low_water_mark,
skip_block_zeroing,
1);
}
int dm_tree_node_add_thin_pool_target_v1(struct dm_tree_node *node,
uint64_t size,
uint64_t transaction_id,
const char *metadata_uuid,
const char *pool_uuid,
uint32_t data_block_size,
uint64_t low_water_mark,
unsigned skip_block_zeroing,
unsigned crop_metadata)
{
struct load_segment *seg, *mseg;
uint64_t devsize = 0;
@@ -3547,18 +3529,17 @@ int dm_tree_node_add_thin_pool_target_v1(struct dm_tree_node *node,
if (!_link_tree_nodes(node, seg->metadata))
return_0;
if (crop_metadata)
/* FIXME: more complex target may need more tweaks */
dm_list_iterate_items(mseg, &seg->metadata->props.segs) {
devsize += mseg->size;
if (devsize > DM_THIN_MAX_METADATA_SIZE) {
log_debug_activation("Ignoring %" PRIu64 " of device.",
devsize - DM_THIN_MAX_METADATA_SIZE);
mseg->size -= (devsize - DM_THIN_MAX_METADATA_SIZE);
devsize = DM_THIN_MAX_METADATA_SIZE;
/* FIXME: drop remaining segs */
}
/* FIXME: more complex target may need more tweaks */
dm_list_iterate_items(mseg, &seg->metadata->props.segs) {
devsize += mseg->size;
if (devsize > DM_THIN_MAX_METADATA_SIZE) {
log_debug_activation("Ignoring %" PRIu64 " of device.",
devsize - DM_THIN_MAX_METADATA_SIZE);
mseg->size -= (devsize - DM_THIN_MAX_METADATA_SIZE);
devsize = DM_THIN_MAX_METADATA_SIZE;
/* FIXME: drop remaining segs */
}
}
if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
log_error("Missing pool uuid %s.", pool_uuid);

View File

@@ -45,7 +45,7 @@ MAN8=lvm.8 lvmdump.8 lvm-fullreport.8 lvm-lvpoll.8 \
vgck.8 vgcreate.8 vgconvert.8 vgdisplay.8 vgexport.8 vgextend.8 \
vgimport.8 vgimportclone.8 vgmerge.8 vgmknodes.8 vgreduce.8 vgremove.8 \
vgrename.8 vgs.8 vgscan.8 vgsplit.8 \
lvmsar.8 lvmsadc.8 lvmdiskscan.8 lvmdevices.8 vgimportdevices.8
lvmsar.8 lvmsadc.8 lvmdiskscan.8
MAN8SO=lvm-config.8 lvm-dumpconfig.8
MAN8DM=dmsetup.8 dmstats.8
MAN8CLUSTER=

View File

@@ -772,10 +772,6 @@ Add a cache to an LV, using a specified cache device.
.br
.RS 4
.ad l
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.ad b
.br
.ad l
[ \fB--cachesize\fP \fISize\fP[m|UNIT] ]
.ad b
.br

View File

@@ -303,16 +303,6 @@ afterwards. Some underlying devices perform better with fua, some with
nofua. Testing is necessary to determine which.
Applicable only to persistent memory.
.IP \[bu] 2
cleaner = 0|1
Setting cleaner=1 enables the writecache cleaner mode in which data is
gradually flushed from the cache. If this is done prior to detaching the
writecache, then the splitcache command will have little or no flushing to
perform. If not done beforehand, the splitcache command enables the
cleaner mode and waits for flushing to complete before detaching the
writecache. Adding cleaner=0 to the splitcache command will skip the
cleaner mode, and any required flushing is performed in device suspend.
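For example, a sketch of flushing ahead of a split (assuming
\fBlvchange --cachesettings\fP accepts the cleaner setting as shown):
.nf
# lvchange --cachesettings cleaner=1 vg/main
# lvconvert --splitcache vg/main
.fi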
.SS dm-cache with separate data and metadata LVs

View File

@@ -1,33 +0,0 @@
The LVM devices file lists devices that lvm can use. The default file is
/etc/lvm/devices/system.devices, and the lvmdevices(8) command is used to
add or remove device entries. If the file does not exist, or if lvm.conf
includes use_devicesfile=0, then lvm will not use a devices file.
When not using a devices file, lvm defaults to using all devices on the
system, and allows the lvm.conf filter to limit the full set of system
devices. When using a devices file, lvm does not use the filter setting.
To use a device with lvm, that device should be added to the devices file
with the command lvmdevices --adddev. To prevent lvm from seeing or using
a device, remove it from the devices file with lvmdevices --deldev. (To
help the transition to the devices file, the pvcreate command will also
add new PVs to the file.)
LVM records entries in the devices file using hardware-specific IDs, such
as the WWID or serial number. This avoids problems with unstable device
names, and allows lvm to find its devices without scanning headers from
other devices on the system. Virtual device types (e.g. multipath, crypt,
md, loop) are identified with subsystem-specific ids. When a device has
no hardware or subystem ID, lvm falls back to recording it based on the
device name. This results in less optimal behavior when the device name
changes -- lvm will scan devices outside the devices file to locate a
"missing" PV on a new device name (this only applies to devices using the
name as an ID.)
It is recommended to use lvm commands to make changes to the file to
ensure correct and consistent information.
Multiple devices files may be created, each containing different sets of
devices. The --devicesfile <name> command line option is used to specify
which devices file the command should use.
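For example (the device name is illustrative):

  # lvmdevices --adddev /dev/sdb
  # lvmdevices --deldev /dev/sdb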

View File


@@ -394,7 +394,7 @@ the pmspare LV.
\&
If thin pool metadata is damaged, it may be repairable.
Checking and repairing thin pool metadata is analogous to
running fsck/repair on a file system.
When a thin pool LV is activated, lvm runs the thin_check command
@@ -437,24 +437,14 @@ copy to the VG's pmspare LV.
If step 1 is successful, the thin pool metadata LV is replaced
with the pmspare LV containing the corrected metadata.
The previous thin pool metadata LV, containing the damaged metadata,
becomes visible with the new name ThinPoolLV_metaN (where N is 0,1,...).
becomes visible with the new name ThinPoolLV_tmetaN (where N is 0,1,...).
If the repair works, the thin pool LV and its thin LVs can be activated.
The user should manually check whether the repaired thin pool kernel metadata
covers all LVs known to lvm2 by individually activating
every thin LV. When all of them work, the user should continue with fsck of
all filesystems present on these volumes.
Once the thin pool is considered fully functional, the user may remove ThinPoolLV_metaN
(the LV containing the damaged thin pool metadata) so its
space can be reused.
For better performance it may be useful to pvmove the newly repaired metadata LV
(written to the previous pmspare volume) to a faster PV (e.g. SSD).
If the repair works, the thin pool LV and its thin LVs can be activated,
and the LV containing the damaged thin pool metadata can be removed.
It may be useful to move the new metadata LV (previously pmspare) to a
better PV.
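A typical repair sequence might look like this (a sketch; the metadata
LV name follows the ThinPoolLV_metaN convention described above):
.nf
# lvconvert --repair vg/pool0
# lvchange -ay vg/pool0
# lvremove vg/pool0_meta0
.fi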
If the repair operation fails, the thin pool LV and its thin LVs
are not accessible and it may be necessary to restore their content
from a backup. In such a case the content of the unmodified, original damaged
ThinPoolLV_metaN volume can be used by support engineers for more
advanced recovery methods.
If the repair does not work, the thin pool LV and its thin LVs are lost.
If metadata is manually restored with thin_repair directly,
the pool metadata LV can be manually swapped with another LV
@@ -462,9 +452,6 @@ containing new metadata:
.B lvconvert --thinpool VG/ThinPoolLV --poolmetadata VG/NewThinMetaLV
Note: Thin pool metadata is compact, so even small corruptions
in it may result in a significant portion of the mappings being lost.
It is recommended to use fast, resilient storage for it.
.SS Activation of thin snapshots
@@ -562,7 +549,7 @@ Command to extend thin pool data space:
.fi
Other methods of increasing free data space in a thin pool LV
include removing a thin LV and its related snapshots, or running
fstrim on the file system using a thin LV.
@@ -702,7 +689,7 @@ with two configuration settings:
.B thin_pool_autoextend_threshold
.br
is a percentage full value that defines when the thin pool LV should be
extended. Setting this to 100 disables automatic extension. The minimum
value is 50.
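A minimal sketch of the two autoextend settings in lvm.conf (the values
are illustrative, not defaults):
.nf
activation {
    thin_pool_autoextend_threshold = 70
    thin_pool_autoextend_percent = 20
}
.fi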
.BR lvm.conf (5)
@@ -729,7 +716,7 @@ the --ignoremonitoring option can be used. With this option, the command
will not ask dmeventd to monitor the thin pool LV.
.IP \[bu]
Setting thin_pool_autoextend_threshold to 100 disables automatic
extension of thin pool LVs, even if they are being monitored by dmeventd.
.P
@@ -1104,7 +1091,7 @@ The default value is shown by:
The amount of thin metadata depends on how many blocks are shared between
thin LVs (i.e. through snapshots). A thin pool with many snapshots may
need a larger metadata LV. Thin pool metadata LV sizes can be from 2MiB
to approximately 16GiB.
to 16GiB.
When using lvcreate to create what will become a thin metadata LV, the
size is specified with the -L|--size option.
@@ -1119,14 +1106,6 @@ needed, so it is recommended to start with a size of 1GiB which should be
enough for all practical purposes. A thin pool metadata LV can later be
manually or automatically extended if needed.
Configurable setting
.BR lvm.conf (5)
.BR allocation / thin_pool_crop_metadata
gives control over cropping to 15.81GiB to stay backward compatible with older
versions of lvm2. With cropping enabled, some problems can be observed when
using volumes above this size with thin tools (e.g. thin_repair).
Cropping should be enabled only when compatibility is required.
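For example, cropping can be re-enabled for compatibility with a profile
or lvm.conf snippet such as:
.nf
allocation {
    thin_pool_crop_metadata = 1
}
.fi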
.SS Create a thin snapshot of an external, read only LV

View File

@@ -1,29 +1,32 @@
.TH "LVMVDO" "7" "LVM TOOLS #VERSION#" "Red Hat, Inc" "\""
.SH NAME
lvmvdo \(em Support for Virtual Data Optimizer in LVM
lvmvdo \(em LVM Virtual Data Optimizer support
.SH DESCRIPTION
VDO is software that provides inline
VDO (which includes kvdo and vdo) is software that provides inline
block-level deduplication, compression, and thin provisioning capabilities
for primary storage.
Deduplication is a technique for reducing the consumption of storage
resources by eliminating multiple copies of duplicate blocks. Compression
takes the individual unique blocks and shrinks them. These reduced blocks are then efficiently packed together into
physical blocks. Thin provisioning manages the mapping from logical blocks
presented by VDO to where the data has actually been physically stored,
and also eliminates any blocks of all zeroes.
takes the individual unique blocks and shrinks them with coding
algorithms; these reduced blocks are then efficiently packed together into
physical blocks. Thin provisioning manages the mapping from LBAs presented
by VDO to where the data has actually been stored, and also eliminates any
blocks of all zeroes.
With deduplication, instead of writing the same data more than once, VDO detects and records each
duplicate block as a reference to the original
block. VDO maintains a mapping from Logical Block Addresses (LBA) (used by the
With deduplication, instead of writing the same data more than once each
duplicate block is detected and recorded as a reference to the original
block. VDO maintains a mapping from logical block addresses (used by the
storage layer above VDO) to physical block addresses (used by the storage
layer under VDO). After deduplication, multiple logical block addresses
may be mapped to the same physical block address; these are called shared
blocks and are reference-counted by the software.
With compression, VDO compresses multiple blocks (or shared blocks)
with the fast LZ4 algorithm, and bins them together where possible so that
With VDO's compression, multiple blocks (or shared blocks) are compressed
with the fast LZ4 algorithm, and binned together where possible so that
multiple compressed blocks fit within a 4 KB block on the underlying
storage. Mapping from LBA is to a physical block address and index within
it for the desired compressed data. All compressed blocks are individually
@@ -36,55 +39,65 @@ allocated for storing the new block data to ensure that other logical
block addresses that are mapped to the shared physical block are not
modified.
To use VDO with \fBlvm\fP(8), you must install the standard VDO user-space tools
\fBvdoformat\fP(8) and the currently non-standard kernel VDO module
"\fIkvdo\fP".
For usage of VDO with \fBlvm\fP(8), the standard VDO userspace tools
\fBvdoformat\fP(8) and the currently non-standard kernel VDO module
"\fIkvdo\fP" need to be installed on the system.
The "\fIkvdo\fP" module implements fine-grained storage virtualization,
thin provisioning, block sharing, and compression.
The "\fIuds\fP" module provides memory-efficient duplicate
identification. The user-space tools include \fBvdostats\fP(8)
for extracting statistics from VDO volumes.
.SH VDO TERMS
thin provisioning, block sharing, and compression;
the "\fIuds\fP" module provides memory-efficient duplicate
identification. The userspace tools include \fBvdostats\fP(8)
for extracting statistics from those volumes.
.SH VDO Terms
.TP
VDODataLV
.br
VDO data LV
.br
A large hidden LV with the _vdata suffix. It is created in a VG
large hidden LV with suffix _vdata created in a VG.
.br
used by the VDO kernel target to store all data and metadata blocks.
used by VDO target to store all data and metadata blocks.
.TP
VDOPoolLV
.br
VDO pool LV
.br
A pool for virtual VDOLV(s), which are the size of used VDODataLV.
maintains a virtual volume for LV(s) stored in the attached VDO data LV,
and it has the same size.
.br
Only a single VDOLV is currently supported.
contains VDOLV(s) (currently supports only a single VDOLV).
.TP
VDOLV
.br
VDO LV
.br
Created from VDOPoolLV.
created from VDOPoolLV
.br
Appears blank after creation.
.SH VDO USAGE
appears blank after creation
.SH VDO Usage
The primary methods for using VDO with lvm2:
.SS 1. Create a VDOPoolLV and a VDOLV
Create a VDOPoolLV that will hold VDO data, and a
virtual size VDOLV that the user can use. If you do not specify the virtual size,
then the VDOLV is created with the maximum size that
always fits into data volume even if no
deduplication or compression can happen
(i.e. it can hold the incompressible content of /dev/urandom).
If you do not specify the name of VDOPoolLV, it is taken from
.SS 1. Create VDOPoolLV with VDOLV
Create a VDOPoolLV that will hold VDO data together with
virtual size VDOLV, that user can use. When the virtual size
is not specified, then such LV is created with maximum size that
always fits into the data volume even if no
deduplication or compression can happen
(i.e. it can hold the incompressible content of /dev/urandom).
When the name of VDOPoolLV is not specified, it is taken from
the sequence of vpool0, vpool1 ...
Note: The performance of TRIM/Discard operations is slow for large
volumes of VDO type. Please try to avoid sending discard requests unless
necessary because it might take considerable amount of time to finish the discard
Note: As the performance of TRIM/Discard operation is slow for large
volumes of VDO type, please try to avoid sending discard requests unless
necessary as it may take considerable amount of time to finish discard
operation.
.nf
@@ -93,19 +106,22 @@ operation.
.fi
.I Example
.br
.nf
# lvcreate --type vdo -n vdo0 -L 10G -V 100G vg/vdopool0
# mkfs.ext4 -E nodiscard /dev/vg/vdo0
.fi
.SS 2. Convert an existing LV into VDOPoolLV
Convert an already created or existing LV into a VDOPoolLV, which is a volume
that can hold data and metadata.
You will be prompted to confirm such conversion because it \fBIRREVERSIBLY
DESTROYS\fP the content of such volume and the volume is immediately
formatted by \fBvdoformat\fP(8) as a VDO pool data volume. You can
specify the virtual size of the VDOLV associated with this VDOPoolLV.
If you do not specify the virtual size, it will be set to the maximum size
that can keep 100% incompressible data there.
.SS 2. Create VDOPoolLV from conversion of an existing LV into VDODataLV
Convert an already created/existing LV into a volume that can hold
VDO data and metadata (a volume referenced by a VDOPoolLV).
User will be prompted to confirm such conversion as it is \fBIRREVERSIBLY
DESTROYING\fP content of such volume, as it's being immediately
formatted by \fBvdoformat\fP(8) as VDO pool data volume. User can
specify virtual size of associated VDOLV with this VDOPoolLV.
When the virtual size is not specified, it will be set to the maximum size
that can keep 100% incompressible data there.
.nf
.B lvconvert --type vdo-pool -n VDOLV -V VirtualSize VG/VDOPoolLV
@@ -113,18 +129,22 @@ that can keep 100% incompressible data there.
.fi
.I Example
.br
.nf
# lvconvert --type vdo-pool -n vdo0 -V10G vg/ExistingLV
# lvconvert --type vdo-pool -n vdo0 -V10G vg/existinglv
.fi
.SS 3. Change the default settings used for creating a VDOPoolLV
VDO allows setting a large variety of options. Lots of these settings
can be specified in lvm.conf or profile settings. You can prepare
a number of different profiles in the #DEFAULT_SYS_DIR#/profile directory
and just specify the profile file name.
Check the output of \fBlvmconfig --type full\fP for a detailed description
of all individual VDO settings.
.SS 3. Change default settings used for creating VDOPoolLV
VDO allows setting a large variety of options. Lots of these settings
can be specified in lvm.conf or profile settings. Users can prepare a
number of different profiles in the #DEFAULT_SYS_DIR#/profile directory
and just specify the profile file name.
Check output of \fBlvmconfig --type full\fP for detailed description
of all individual vdo settings.
.I Example
.br
.nf
# cat <<EOF > #DEFAULT_SYS_DIR#/profile/vdo_create.profile
allocation {
@@ -153,8 +173,10 @@ EOF
# lvcreate --vdo -L10G --metadataprofile vdo_create vg/vdopool0
# lvcreate --vdo -L10G --config 'allocation/vdo_cpu_threads=4' vg/vdopool1
.fi
.SS 4. Change the compression and deduplication of a VDOPoolLV
Disable or enable the compression and deduplication for VDOPoolLV
.SS 4. Change compression and deduplication of VDOPoolLV
Disable or enable compression and deduplication for VDO pool LV
(the volume that maintains all VDO LV(s) associated with it).
.nf
@@ -162,20 +184,24 @@ Disable or enable the compression and deduplication for VDOPoolLV
.fi
.I Example
.br
.nf
# lvchange --compression n vg/vdopool0
# lvchange --deduplication y vg/vdopool1
.fi
.SS 5. Checking the usage of VDOPoolLV
To quickly check how much data on a VDOPoolLV is already consumed,
use \fBlvs\fP(8). The Data% field reports how much data is occupied
in the content of the virtual data for the VDOLV and how much space is already
consumed with all the data and metadata blocks in the VDOPoolLV.
For a detailed description, use the \fBvdostats\fP(8) command.
.SS 5. Checking usage of VDOPoolLV
To quickly check how much data of a VDOPoolLV is already consumed,
use \fBlvs\fP(8). The Data% field will report how much data occupies
the content of virtual data for the VDOLV and how much space is already
consumed with all the data and metadata blocks in the VDOPoolLV.
For a detailed description use the \fBvdostats\fP(8) command.
Note: \fBvdostats\fP(8) currently understands only /dev/mapper device names.
.I Example
.br
.nf
# lvcreate --type vdo -L10G -V20G -n vdo0 vg/vdopool0
# mkfs.ext4 -E nodiscard /dev/vg/vdo0
@@ -193,36 +219,35 @@ Note: \fBvdostats\fP(8) currently understands only /dev/mapper device names.
data blocks used : 79
...
.fi
.SS 6. Extending the VDOPoolLV size
You can add more space to hold VDO data and metadata by
extending the VDODataLV using the commands
\fBlvresize\fP(8) and \fBlvextend\fP(8).
The extension needs to add at least one new VDO slab. You can configure
the slab size with the \fBallocation/vdo_slab_size_mb\fP setting.
You can also enable automatic size extension of a monitored VDOPoolLV
with the \fBactivation/vdo_pool_autoextend_percent\fP and
\fBactivation/vdo_pool_autoextend_threshold\fP settings.
.SS 6. Extending VDOPoolLV size
Note: You cannot reduce the size of a VDOPoolLV.
Adding more space to hold VDO data and metadata can be done by
extending the VDODataLV with the commands
\fBlvresize\fP(8) and \fBlvextend\fP(8).
Note: You cannot change the size of a cached VDOPoolLV.
Note: Size of VDOPoolLV cannot be reduced.
Note: Size of cached VDOPoolLV cannot be changed.
.nf
.B lvextend -L+AddingSize VG/VDOPoolLV
.fi
.I Example
.br
.nf
# lvextend -L+50G vg/vdopool0
# lvresize -L300G vg/vdopool1
.fi
.SS 7. Extending or reducing the VDOLV size
You can extend or reduce a virtual VDO LV as a standard LV with the
\fBlvresize\fP(8), \fBlvextend\fP(8), and \fBlvreduce\fP(8) commands.
Note: The reduction needs to process TRIM for reduced disk area
to unmap used data blocks from the VDOPoolLV, which might take
.SS 7. Extending or reducing VDOLV size
A VDO LV can be extended or reduced as a standard LV with the commands
\fBlvresize\fP(8), \fBlvextend\fP(8) and \fBlvreduce\fP(8).
Note: Reduction needs to process TRIM for the reduced disk area
to unmap used data blocks from the VDOPoolLV, and it may take
a long time.
.nf
@@ -231,122 +256,96 @@ a long time.
.fi
.I Example
.br
.nf
# lvextend -L+50G vg/vdo0
# lvreduce -L-50G vg/vdo1
# lvresize -L200G vg/vdo2
.fi
.SS 8. Component activation of a VDODataLV
You can activate a VDODataLV separately as a component LV for examination
purposes. The activation of the VDODataLV activates the data LV in read-only mode,
and the data LV cannot be modified.
If the VDODataLV is active as a component, any upper LV using this volume CANNOT
be activated. You have to deactivate the VDODataLV first to continue to use the VDOPoolLV.
.SS 8. Component activation of VDODataLV
A VDODataLV can be activated separately as a component LV for examination
purposes. This activates the data LV in read-only mode, and it cannot be modified.
If the VDODataLV is active as a component, any upper LV using this volume CANNOT
be activated. The user has to deactivate the VDODataLV first to continue using the VDOPoolLV.
.I Example
.br
.nf
# lvchange -ay vg/vpool0_vdata
# lvchange -an vg/vpool0_vdata
.fi
.SH VDO TOPICS
.SH VDO Topics
.SS 1. Stacking VDO
You can convert or stack a VDOPooLV with these currently supported
volume types: linear, stripe, raid, and cache with cachepool.
.SS 2. VDOPoolLV on top of raid
Using a raid type LV for a VDODataLV.
Users can convert or stack VDO with existing volumes.
.SS 2. VDO on top of raid
Using a raid type LV for the VDO data LV.
.I Example
.br
.nf
# lvcreate --type raid1 -L 5G -n vdopool vg
# lvconvert --type vdo-pool -V 10G vg/vdopool
# lvcreate --type raid1 -L 5G -n vpool vg
# lvconvert --type vdo-pool -V 10G vg/vpool
.fi
.SS 3. Caching a VDODataLV or a VDOPoolLV
VDODataLV (accepts also VDOPoolLV) caching provides a mechanism
to accelerate reads and writes of already compressed and deduplicated
data blocks together with VDO metadata.
A cached VDO data LV cannot be currently resized. Also, the threshold
based automatic resize will not work.
.SS 3. Caching VDODataLV, VDOPoolLV
VDO data LV (accepts also VDOPoolLV) caching provides a mechanism
to accelerate reads and writes of already compressed and deduplicated
blocks together with the VDO metadata.
A cached VDO data LV cannot currently be resized (also automatic
resize will not work).
.I Example
.br
.nf
# lvcreate --type vdo -L 5G -V 10G -n vdo1 vg/vdopool
# lvcreate --type cache-pool -L 1G -n cachepool vg
# lvconvert --cache --cachepool vg/cachepool vg/vdopool
# lvconvert --uncache vg/vdopool
# lvcreate --type vdo -L 5G -V 10G -n vdo1 vg/vpool
# lvcreate --type cache-pool -L 1G -n cpool vg
# lvconvert --cache --cachepool vg/cpool vg/vpool
# lvconvert --uncache vg/vpool
.fi
.SS 4. Caching a VDOLV
VDO LV cache allows you to 'cache' a device for better performance before
it hits the processing of the VDO Pool LV layer.
.SS 4. Caching VDOLV
VDO LV cache allows users to 'cache' a device for better performance before
it hits the processing of the VDO Pool LV layer.
.I Example
.br
.nf
# lvcreate --type vdo -L 5G -V 10G -n vdo1 vg/vdopool
# lvcreate --type cache-pool -L 1G -n cachepool vg
# lvconvert --cache --cachepool vg/cachepool vg/vdo1
# lvcreate -L 5G -V 10G -n vdo1 vg/vpool
# lvcreate --type cache-pool -L 1G -n cpool vg
# lvconvert --cache --cachepool vg/cpool vg/vdo1
# lvconvert --uncache vg/vdo1
.fi
.SS 5. Usage of Discard/TRIM with a VDOLV
You can discard data on a VDO LV and reduce used blocks on a VDOPoolLV.
However, the current performance of discard operations is still not optimal
and takes a considerable amount of time and CPU.
Unless you really need it, you should avoid using discard.
When a block device is going to be rewritten,
its blocks will be automatically reused for new data.
Discard is useful in situations when the user knows that the given portion of a VDO LV
.SS 5. Usage of Discard/TRIM with VDOLV
Users can discard data in a VDO LV and reduce used blocks in the VDOPoolLV.
However, the present performance of the discard operation is still not optimal
and it takes a considerable amount of time and CPU.
So unless it is really needed, users should avoid using discard.
When a block device is going to be rewritten,
its blocks will be automatically reused for new data.
Discard is useful in situations when it is known that the given portion of a VDO LV
is not going to be used and the discarded space can be used for block
provisioning in other regions of the VDO LV.
For the same reason, you should avoid using mkfs with discard for
a freshly created VDO LV to save the considerable time this operation
would otherwise take, as the device is already expected to be empty.
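For illustration, a filesystem mounted on a VDO LV can return its unused
blocks to the VDOPoolLV with fstrim (a sketch; the VDO LV vg/vdo1 and the
mount point /mnt/vdo1 are assumed names, not fixed conventions).
.I Example
.br
.nf
# mount /dev/vg/vdo1 /mnt/vdo1
# fstrim /mnt/vdo1
# umount /mnt/vdo1
.fi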
.SS 6. Memory usage
The VDO target requires 370 MiB of RAM plus an additional 268 MiB
per each 1 TiB of physical storage managed by the volume.
UDS requires a minimum of 250 MiB of RAM,
which is also the default amount that deduplication uses.
.br
The memory required for the UDS index is determined by the index type
and the required size of the deduplication window and
is controlled by the \fBallocation/vdo_use_sparse_index\fP setting.
With UDS sparse indexing enabled, the index relies on the temporal locality of data
and attempts to retain only the most relevant index entries in memory;
it can maintain a deduplication window that is ten times larger
than the dense index while using the same amount of memory.
Although the sparse index provides the greatest coverage,
the dense index provides more deduplication advice.
For most workloads, given the same amount of memory,
the difference in deduplication rates between dense
and sparse indexes is negligible.
A dense index with 1 GiB of RAM maintains a 1 TiB deduplication window,
while a sparse index with 1 GiB of RAM maintains a 10 TiB deduplication window.
In general, 1 GiB is sufficient for 4 TiB of physical space with
a dense index and 40 TiB with a sparse index.
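For example, a VDOPoolLV managing 4 TiB of physical storage needs roughly
370 MiB + 4 * 268 MiB, about 1.4 GiB of RAM, for the VDO target itself,
plus the UDS index memory described above.
Sparse indexing can be requested for a new VDOPoolLV through this setting;
the sketch below uses an inline \fB--config\fP override instead of editing
lvm.conf, and the names and sizes are only examples.
.I Example
.br
.nf
# lvcreate --type vdo -L 10T -V 40T -n vdo1 --config 'allocation { vdo_use_sparse_index = 1 }' vg/vdopool
.fi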
.SS 7. Storage space requirements
You can configure a VDOPoolLV to use up to 256 TiB of physical storage.
Only a certain part of the physical storage is usable to store data.
This section provides the calculations to determine the usable size
of a VDO-managed volume.
The VDO target requires storage for two types of VDO metadata and for the UDS index:
.TP
\(bu
The first type of VDO metadata uses approximately 1 MiB for each 4 GiB
of physical storage plus an additional 1 MiB per slab.
.TP
\(bu
The second type of VDO metadata consumes approximately 1.25 MiB
for each 1 GiB of logical storage, rounded up to the nearest slab.
.TP
\(bu
The amount of storage required for the UDS index depends on the type of index
and the amount of RAM allocated to the index. For each 1 GiB of RAM,
a dense UDS index uses 17 GiB of storage, and a sparse UDS index uses
170 GiB of storage.
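As a rough worked example (an illustration only, assuming the default 2 GiB
slab size): a VDOPoolLV with 4 TiB of physical storage, 10 TiB of logical
size, and a dense UDS index given 0.25 GiB of RAM would consume approximately
.br
.nf
first type of metadata:  4096 GiB / 4 = 1 GiB, plus 2048 slabs * 1 MiB = 2 GiB
second type of metadata: 10240 GiB * 1.25 MiB = 12.5 GiB
dense UDS index:         0.25 * 17 GiB = 4.25 GiB
.fi
leaving roughly 19.75 GiB of the physical storage unavailable for user data.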
\&
.SH SEE ALSO
.BR lvm (8),

View File

@@ -1,5 +0,0 @@
Add all PVs from a VG to the devices file. This is the equivalent of
using lvmdevices --adddev to add each PV to the devices file individually.
vgimportdevices will also add the device IDs to the VG metadata (which the
lvmdevices command does not do.)
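A minimal usage sketch (the VG name vg1 is hypothetical):

  # vgimportdevices vg1
  # lvmdevices

The second command lists the devices file entries that were just added.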

View File

@@ -1,7 +1,6 @@
vgsplit moves one or more PVs from a source VG (the first VG arg) to a
destination VG (the second VG arg). The PV(s) to move are named after the
source and destination VGs, or an LV is named, in which case the PVs
underlying the LV are moved.
If the destination VG does not exist, a new VG is created (command options
can be used to specify properties of the new VG, also see

View File

@@ -8,10 +8,9 @@ vgsplit - Move physical volumes into a new or existing volume group
[ \fIoption_args\fP ]
.br
.SH DESCRIPTION
vgsplit moves one or more PVs from a source VG (the first VG arg) to a
destination VG (the second VG arg). The PV(s) to move are named after the
source and destination VGs, or an LV is named, in which case the PVs
underlying the LV are moved.
If the destination VG does not exist, a new VG is created (command options
can be used to specify properties of the new VG, also see
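.P
A brief usage sketch (device and VG names are hypothetical): move one PV
explicitly, or move the PVs underlying an LV with \fB-n\fP.
.nf
# vgsplit vg00 vgnew /dev/sda7
# vgsplit -n lv1 vg00 vgnew
.fi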

View File

@@ -798,7 +798,6 @@ fi
CHECK=""
RESIZE=""
NEWSIZE=""
while [ "$#" -ne 0 ]
do
@@ -812,11 +811,8 @@ do
"-y"|"--yes") YES="-y" ;;
"-l"|"--lvresize") DO_LVRESIZE=1 ;;
"-c"|"--cryptresize") DO_CRYPTRESIZE=1 ;;
"check") test -z "${2-}" && error "Missing <device>. (see: $TOOL --help)"
CHECK=$2 ; shift ;;
"resize") test -z "${2-}" && error "Missing <device>. (see: $TOOL --help)"
RESIZE=$2 ; shift
if test -n "${2-}" ; then NEWSIZE="${2-}" ; shift ; fi ;;
"check") CHECK=$2 ; shift ;;
"resize") RESIZE=$2 ; NEWSIZE=$3 ; shift 2 ;;
*) error "Wrong argument \"$1\". (see: $TOOL --help)"
esac
shift
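A usage sketch of the argument forms this parser accepts (the device path is
hypothetical; the size argument to "resize" is optional):

  # fsadm check /dev/vg/lv1
  # fsadm resize /dev/vg/lv1 30M
  # fsadm resize /dev/vg/lv1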

View File

@@ -109,7 +109,6 @@ help:
@echo " LVM_TEST_CACHE_REPAIR_CMD Command for cache_repair [$(LVM_TEST_CACHE_REPAIR_CMD)]."
@echo " LVM_TEST_CACHE_RESTORE_CMD Command for cache_restore [$(LVM_TEST_CACHE_RESTORE_CMD)]."
@echo " LVM_TEST_UNLIMITED Set to get unlimited test log (>32MB)"
@echo " LVM_TEST_DEVICE_LIST File path listing real devs that tests can use."
@echo " LVM_VALGRIND Enable valgrind testing, execs $$"VALGRIND.
@echo " LVM_VALGRIND_DMEVENTD Enable valgrind testing of dmeventd (1)."
@echo " LVM_VALGRIND_LVMPOLLD Enable valgrind testing of lvmpolld (1)."

View File

@@ -657,24 +657,6 @@ prepare_ramdisk() {
touch RAMDISK
}
prepare_real_devs() {
aux lvmconf 'devices/scan = "/dev"'
touch REAL_DEVICES
if test -n "$LVM_TEST_DEVICE_LIST"; then
local count=0
while read path; do
REAL_DEVICES[$count]=$path
count=$(( count + 1 ))
aux extend_filter "a|$path|"
dd if=/dev/zero of="$path" bs=32k count=1
wipefs -a "$path" 2>/dev/null || true
done < $LVM_TEST_DEVICE_LIST
fi
printf "%s\\n" "${REAL_DEVICES[@]}" > REAL_DEVICES
}
# A drop-in replacement for prepare_loop() that uses scsi_debug to create
# a ramdisk-based SCSI device upon which all LVM devices will be created
# - scripts must take care not to use a DEV_SIZE that will induce the OOM-killer
@@ -927,6 +909,7 @@ prepare_devs() {
echo "ok"
}
common_dev_() {
local tgtype=$1
local dev=$2
@@ -1488,7 +1471,6 @@ thin_restore_needs_more_volumes() {
case $("$LVM_TEST_THIN_RESTORE_CMD" -V) in
# With older versions of the thin tools we got slightly more compact metadata
0.[0-6]*|0.7.0*) return 0 ;;
0.8.5-2.el7) return 0 ;;
esac
return 1
}

View File

@@ -252,11 +252,6 @@ skip() {
exit 200
}
get_real_devs() {
REAL_DEVICES=( $(<REAL_DEVICES) )
export REAL_DEVICES
}
get_devs() {
local IFS=$IFS_NL
DEVICES=( $(<DEVICES) )
@@ -270,21 +265,10 @@ prepare_test_vars() {
lv=LV
for i in {1..16}; do
eval "dev$i=\"$DM_DEV_DIR/mapper/${PREFIX}pv$i\""
eval "lv$i=\"LV$i\""
eval "vg$i=\"${PREFIX}vg$i\""
done
if test -n "$LVM_TEST_DEVICE_LIST"; then
local count=0
while read path; do
count=$(( count + 1 ))
eval "dev$count=\"$path\""
done < $LVM_TEST_DEVICE_LIST
else
for i in {1..16}; do
eval "dev$i=\"$DM_DEV_DIR/mapper/${PREFIX}pv$i\""
done
fi
}
if test -z "${abs_top_builddir+varset}" && test -z "${installed_testsuite+varset}"; then

View File

@@ -144,19 +144,6 @@ lvconvert -y --type cache --cachedevice "$dev2" $vg/$lv1
check lv_field $vg/$lv1 segtype cache
check lv_field $vg/${lv1}_cache_cvol segtype linear -a
check lv_field $vg/${lv1}_cache_cvol lv_size "60.00m"
lvs -o chunksize $vg/$lv1 |tee out
grep 64.00k out
lvchange -ay $vg/$lv1
lvchange -an $vg/$lv1
lvremove $vg/$lv1
lvcreate -n $lv1 -l8 -an $vg "$dev1"
lvconvert -y --type cache --cachedevice "$dev2" --chunksize 128k $vg/$lv1
check lv_field $vg/$lv1 segtype cache
check lv_field $vg/${lv1}_cache_cvol segtype linear -a
check lv_field $vg/${lv1}_cache_cvol lv_size "60.00m"
lvs -o chunksize $vg/$lv1 |tee out
grep 128.00k out
lvchange -ay $vg/$lv1
lvchange -an $vg/$lv1
lvremove $vg/$lv1

View File

@@ -1,53 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux have_cache 1 10 0 || skip
aux prepare_vg 3
#
# This lvconvert command will deactivate LV1, then internally create a new
# lv, lvol0, as a poolmetadataspare, then activate lvol0 to zero it.
# lvol0 will get the same major:minor that LV1 had. When the code gets
# the struct dev for lvol0, the new path to lvol0 is added to the
# dev-cache with it's major:minor. That major:minor already exists in
# dev-cache and has the stale LV1 as an alias. So the path to lvol0 is
# added as an alias to the existing struct dev (with the correct
# major:minor), but that struct dev has the stale LV1 path on its aliases
# list. The code will now validate all the aliases before returning the
# dev for lvol0, and will find that the LV1 path is stale and remove it
# from the aliases. That will prevent the stale path from being used for
# the dev in place of the new path.
#
# The preferred_name is set to /dev/mapper so that if the stale path still
# exists, that stale path would be used as the name for the dev, and the
# wiping code would fail to open that stale name.
#
lvcreate -n $lv1 -L32M $vg "$dev1"
lvcreate -n $lv2 -L16M $vg "$dev2"
lvconvert -y --type cache-pool --poolmetadata $lv2 --cachemode writeback $vg/$lv1 --config='devices { preferred_names=["/dev/mapper/"] }'
lvremove -y $vg/$lv1
lvcreate -n $lv1 -L32M $vg "$dev1"
lvcreate -n $lv2 -L16M $vg "$dev2"
lvconvert -y --type cache-pool --poolmetadata $lv2 $vg/$lv1
lvremove -y $vg/$lv1
# TODO: add more validation of dev aliases being specified as command
# args in combination with various preferred_names settings.
vgremove -ff $vg

View File

@@ -1,522 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
test_description='devices file'
. lib/inittest
aux prepare_devs 7
RUNDIR="/run"
test -d "$RUNDIR" || RUNDIR="/var/run"
PVS_ONLINE_DIR="$RUNDIR/lvm/pvs_online"
VGS_ONLINE_DIR="$RUNDIR/lvm/vgs_online"
PVS_LOOKUP_DIR="$RUNDIR/lvm/pvs_lookup"
_clear_online_files() {
# wait till udev is finished
aux udev_wait
rm -f "$PVS_ONLINE_DIR"/*
rm -f "$VGS_ONLINE_DIR"/*
rm -f "$PVS_LOOKUP_DIR"/*
}
DFDIR="$LVM_SYSTEM_DIR/devices"
mkdir $DFDIR
#
# Test with use_devicesfile=0 (no devices file is being applied by default)
#
aux lvmconf 'devices/use_devicesfile = 0'
# create one VG in a non-system devices file
vgcreate --devicesfile test.devices $vg1 $dev1
vgextend --devicesfile test.devices $vg1 $dev2
# create two VGs outside the special devices file
vgcreate $vg2 $dev3 $dev4
vgcreate $vg3 $dev5 $dev6
PVID1=`pvs $dev1 --noheading -o uuid | tr -d - | awk '{print $1}'`
PVID2=`pvs $dev2 --noheading -o uuid | tr -d - | awk '{print $1}'`
PVID3=`pvs $dev3 --noheading -o uuid | tr -d - | awk '{print $1}'`
PVID4=`pvs $dev4 --noheading -o uuid | tr -d - | awk '{print $1}'`
PVID5=`pvs $dev5 --noheading -o uuid | tr -d - | awk '{print $1}'`
PVID6=`pvs $dev6 --noheading -o uuid | tr -d - | awk '{print $1}'`
lvcreate -l4 -an -i2 -n $lv1 $vg1
lvcreate -l4 -an -i2 -n $lv2 $vg2
lvcreate -l4 -an -i2 -n $lv3 $vg3
# verify devices file is working
vgs --devicesfile test.devices $vg1
not vgs --devicesfile test.devices $vg2
not vgs --devicesfile test.devices $vg3
# devicesfile and devices cannot be used together
not vgs --devicesfile test.devices --devices $dev1,$dev1 $vg1
# verify correct vgs are seen / not seen when devices are specified
vgs --devices $dev1,$dev2 $vg1
vgs --devices $dev3,$dev4 $vg2
vgs --devices $dev5,$dev6 $vg3
not vgs --devices $dev1,$dev2 $vg2
not vgs --devices $dev1,$dev2 $vg3
not vgs --devices $dev3,$dev4 $vg1
not vgs --devices $dev5,$dev6 $vg2
not vgs --devices $dev5,$dev6 $vg1
not vgs --devices $dev3,$dev4 $vg3
vgs --devices $dev1,$dev2 |tee out
grep $vg1 out
not grep $vg2 out
not grep $vg3 out
vgs --devices $dev3,$dev4 |tee out
not grep $vg1 out
grep $vg2 out
not grep $vg3 out
# verify correct pvs are seen / not seen when devices are specified
pvs --devices $dev1,$dev2 $dev1 $dev2
pvs --devices $dev3,$dev4 $dev3 $dev4
pvs --devices $dev5,$dev6 $dev5 $dev6
not pvs --devices $dev1,$dev2 $dev3 $dev4
not pvs --devices $dev1,$dev2 $dev5 $dev6
not pvs --devices $dev3,$dev4 $dev1 $dev2 $dev5 $dev6
pvs --devices $dev1,$dev2 |tee out
grep $dev1 out
grep $dev2 out
not grep $dev3 out
not grep $dev4 out
not grep $dev5 out
not grep $dev6 out
pvs --devices $dev3,$dev4 |tee out
not grep $dev1 out
not grep $dev2 out
grep $dev3 out
grep $dev4 out
not grep $dev5 out
not grep $dev6 out
# verify correct lvs are activated / not activated when devices are specified
vgchange --devices $dev1,$dev2 -ay
check lv_field $vg1/$lv1 lv_active "active"
check lv_field $vg2/$lv2 lv_active ""
check lv_field $vg3/$lv3 lv_active ""
vgchange --devices $dev1,$dev2 -an
check lv_field $vg1/$lv1 lv_active ""
vgchange --devices $dev3,$dev4 -ay
check lv_field $vg1/$lv1 lv_active ""
check lv_field $vg2/$lv2 lv_active "active"
check lv_field $vg3/$lv3 lv_active ""
vgchange --devices $dev3,$dev4 -an
check lv_field $vg2/$lv2 lv_active ""
# verify devices covering multiple vgs
vgs --devices $dev1,$dev2,$dev3,$dev4 $vg1 $vg2 |tee out
grep $vg1 out
grep $vg2 out
not grep $vg3 out
vgs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 $vg1 $vg2 $vg3 |tee out
grep $vg1 out
grep $vg2 out
grep $vg3 out
# verify vgs seen when incomplete devices are specified
vgs --devices $dev1 $vg1
vgs --devices $dev3 $vg2
vgs --devices $dev5 $vg3
# incomplete vg because of --devices is the same as vg incomplete because
# of missing device
not lvcreate --devices $dev1 -l1 $vg1
not lvchange --devices $dev1 -ay $vg1/$lv1
not lvextend --devices $dev1 -l+1 $vg1/$lv1
not vgremove --devices $dev1 $vg1
not lvcreate --devices $dev3 -l1 $vg2
not lvchange --devices $dev3 -ay $vg2/$lv2
not lvextend --devices $dev3 -l+1 $vg2/$lv2
not vgremove --devices $dev3 $vg2
# verify various commands with --devices for vg in a devicesfile
not lvcreate --devices $dev1,$dev2 -l1 -n $lv2 -an $vg1 $dev7
lvcreate --devices $dev1,$dev2 -l1 -n $lv2 -an $vg1
lvs --devices $dev1,$dev2 $vg1/$lv2
lvextend --devices $dev1,$dev2 -l2 $vg1/$lv2
lvchange --devices $dev1,$dev2 -ay $vg1/$lv2
lvchange --devices $dev1,$dev2 -an $vg1/$lv2
lvremove --devices $dev1,$dev2 $vg1/$lv2
vgchange --devices $dev1,$dev2 -ay $vg1
vgchange --devices $dev1,$dev2 -an $vg1
not vgextend --devices $dev1,$dev2 $vg1 $dev7
vgextend --devices $dev1,$dev2,$dev7 $vg1 $dev7
vgreduce --devices $dev1,$dev2,$dev7 $vg1 $dev7
vgexport --devices $dev1,$dev2 $vg1
vgimport --devices $dev1,$dev2 $vg1
not pvremove --devices $dev1,$dev2 $dev7
not pvcreate --devices $dev1,$dev2 $dev7
not vgcreate --devices $dev1,$dev2 $vg7 $dev7
pvremove --devices $dev7 $dev7
pvcreate --devices $dev7 $dev7
vgcreate --devices $dev7 $vg7 $dev7
vgremove --devices $dev7 $vg7
pvremove --devices $dev7 $dev7
# verify various commands with --devices for vg not in a devicesfile
not lvcreate --devices $dev3,$dev4 -l1 -n $lv4 -an $vg2 $dev7
lvcreate --devices $dev3,$dev4 -l1 -n $lv4 -an $vg2
lvs --devices $dev3,$dev4 $vg2/$lv4
lvextend --devices $dev3,$dev4 -l2 $vg2/$lv4
lvchange --devices $dev3,$dev4 -ay $vg2/$lv4
lvchange --devices $dev3,$dev4 -an $vg2/$lv4
lvremove --devices $dev3,$dev4 $vg2/$lv4
vgchange --devices $dev3,$dev4 -ay $vg2
vgchange --devices $dev3,$dev4 -an $vg2
not vgextend --devices $dev3,$dev4 $vg2 $dev7
vgextend --devices $dev3,$dev4,$dev7 $vg2 $dev7
vgreduce --devices $dev3,$dev4,$dev7 $vg2 $dev7
vgexport --devices $dev3,$dev4 $vg2
vgimport --devices $dev3,$dev4 $vg2
not pvremove --devices $dev3,$dev4 $dev7
not pvcreate --devices $dev3,$dev4 $dev7
not vgcreate --devices $dev3,$dev4 $vg7 $dev7
pvremove --devices $dev7 $dev7
pvcreate --devices $dev7 $dev7
vgcreate --devices $dev7 $vg7 $dev7
vgremove --devices $dev7 $vg7
pvremove --devices $dev7 $dev7
# verify pvscan with devices file and devices list
# arg not in devices file
_clear_online_files
pvscan --devicesfile test.devices --cache -aay $dev3
not ls "$RUNDIR/lvm/pvs_online/$PVID3"
pvscan --devicesfile test.devices --cache -aay $dev4
not ls "$RUNDIR/lvm/pvs_online/$PVID4"
check lv_field $vg1/$lv1 lv_active ""
check lv_field $vg2/$lv2 lv_active ""
# arg in devices file
_clear_online_files
pvscan --devicesfile test.devices --cache $dev1
pvscan --devicesfile test.devices --cache $dev2
ls "$RUNDIR/lvm/pvs_online/$PVID1"
ls "$RUNDIR/lvm/pvs_online/$PVID2"
# autoactivate with devices file
_clear_online_files
pvscan --devicesfile test.devices --cache -aay $dev1
pvscan --devicesfile test.devices --cache -aay $dev2
check lv_field $vg1/$lv1 lv_active "active"
vgchange -an $vg1
# autoactivate with no devices file
_clear_online_files
pvscan --cache -aay $dev3
pvscan --cache -aay $dev4
check lv_field $vg2/$lv2 lv_active "active"
vgchange -an $vg2
# arg not in devices list
_clear_online_files
pvscan --devices $dev1,$dev2 --cache $dev3
not ls "$RUNDIR/lvm/pvs_online/$PVID3"
pvscan --devices $dev4 --cache $dev3
not ls "$RUNDIR/lvm/pvs_online/$PVID3"
pvscan --devices $dev5 --cache $dev3
not ls "$RUNDIR/lvm/pvs_online/$PVID3"
# arg in devices list
_clear_online_files
pvscan --devices $dev3 --cache -aay $dev3
pvscan --devices $dev4 --cache -aay $dev4
check lv_field $vg2/$lv2 lv_active "active"
vgchange -an $vg2
# verify --devicesfile and --devices are not affected by a filter
# hide_dev excludes using existing filter
aux hide_dev $dev2
aux hide_dev $dev4
pvs --devicesfile test.devices $dev1
pvs --devicesfile test.devices $dev2
not pvs --devicesfile test.devices $dev3
not pvs --devicesfile test.devices $dev4
pvs --devices $dev1 $dev1
pvs --devices $dev2 $dev2
pvs --devices $dev3 $dev3
pvs --devices $dev4 $dev4
pvs --devices $dev5 $dev5
pvs --devices $dev1,$dev2,$dev3,$dev4,$dev5 $dev1 $dev2 $dev3 $dev4 $dev5 | tee out
grep $dev1 out
grep $dev2 out
grep $dev3 out
grep $dev4 out
grep $dev5 out
vgchange --devices $dev1,$dev2 -ay $vg1
check lv_field $vg1/$lv1 lv_active "active"
lvchange --devices $dev1,$dev2 -an $vg1/$lv1
vgchange --devices $dev3,$dev4 -ay $vg2
check lv_field $vg2/$lv2 lv_active "active"
lvchange --devices $dev3,$dev4 -an $vg2/$lv2
aux unhide_dev $dev2
aux unhide_dev $dev4
vgchange --devicesfile "" -an
vgremove --devicesfile "" -y $vg1
vgremove --devicesfile "" -y $vg2
vgremove --devicesfile "" -y $vg3
#
# Test with use_devicesfile=1 (system devices file is in use by default)
#
aux lvmconf 'devices/use_devicesfile = 1'
DF="$DFDIR/system.devices"
touch $DF
# create one VG in a non-system devices file
vgcreate --devicesfile test.devices $vg1 $dev1 $dev2
# create one VG in the default system devices file
vgcreate $vg2 $dev3 $dev4
# create one VG in neither devices file
vgcreate --devicesfile "" $vg3 $dev5 $dev6
lvcreate --devicesfile test.devices -l4 -an -i2 -n $lv1 $vg1
lvcreate -l4 -an -i2 -n $lv2 $vg2
lvcreate --devicesfile "" -l4 -an -i2 -n $lv3 $vg3
# system.devices only sees vg2
vgs |tee out
not grep $vg1 out
grep $vg2 out
not grep $vg3 out
not vgs $vg1
vgs $vg2
not vgs $vg3
pvs |tee out
not grep $dev1 out
not grep $dev2 out
grep $dev3 out
grep $dev4 out
not grep $dev5 out
not grep $dev6 out
# test.devices only sees vg1
vgs --devicesfile test.devices |tee out
grep $vg1 out
not grep $vg2 out
not grep $vg3 out
pvs --devicesfile test.devices |tee out
grep $dev1 out
grep $dev2 out
not grep $dev3 out
not grep $dev4 out
not grep $dev5 out
not grep $dev6 out
# no devices file sees all
vgs --devicesfile "" |tee out
grep $vg1 out
grep $vg2 out
grep $vg3 out
vgs --devicesfile "" $vg1
vgs --devicesfile "" $vg2
vgs --devicesfile "" $vg3
pvs --devicesfile "" |tee out
grep $dev1 out
grep $dev2 out
grep $dev3 out
grep $dev4 out
grep $dev5 out
grep $dev6 out
vgchange -ay
lvs --devicesfile test.devices -o active $vg1/$lv1 |tee out
not grep active out
lvs -o active $vg2/$lv2 |tee out
grep active out
lvs --devicesfile "" -o active $vg3/$lv3 |tee out
not grep active out
vgchange -an
lvs -o active $vg2/$lv2 |tee out
not grep active out
vgchange --devicesfile test.devices -ay
lvs --devicesfile test.devices -o active $vg1/$lv1 |tee out
grep active out
lvs -o active $vg2/$lv2 |tee out
not grep active out
lvs --devicesfile "" -o active $vg3/$lv3 |tee out
not grep active out
vgchange --devicesfile test.devices -an
lvs --devicesfile test.devices -o active $vg1/$lv1 |tee out
not grep active out
# --devices overrides all three cases:
# always gives access to the specified devices
# always denies access to unspecified devices
vgs --devices $dev1,$dev2 $vg1
vgs --devices $dev3,$dev4 $vg2
vgs --devices $dev5,$dev6 $vg3
pvs --devices $dev1 $dev1
pvs --devices $dev3 $dev3
pvs --devices $dev5 $dev5
not pvs --devices $dev1 $dev1 $dev2 |tee out
grep $dev1 out
not grep $dev2 out
not pvs --devices $dev3 $dev3 $dev4 |tee out
grep $dev3 out
not grep $dev4 out
not pvs --devices $dev5 $dev1 $dev2 $dev3 $dev4 $dev5 |tee out
grep $dev5 out
not grep $dev1 out
not grep $dev2 out
not grep $dev3 out
not grep $dev4 out
not grep $dev6 out
pvs --devices $dev1,$dev2,$dev3,$dev4,$dev5 $dev5 |tee out
grep $dev5 out
not grep $dev1 out
not grep $dev2 out
not grep $dev3 out
not grep $dev4 out
not grep $dev6 out
pvs --devices $dev1,$dev2,$dev3,$dev4,$dev5 $dev1 $dev2 $dev3 $dev4 $dev5 |tee out
grep $dev1 out
grep $dev2 out
grep $dev3 out
grep $dev4 out
grep $dev5 out
vgchange --devices $dev1,$dev2 -ay
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | grep active |tee out
grep $lv1 out
not grep $lv2 out
not grep $lv3 out
vgchange --devices $dev1,$dev2 -an
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | tee out
not grep active out
vgchange --devices $dev3,$dev4 -ay
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | grep active |tee out
not grep $lv1 out
grep $lv2 out
not grep $lv3 out
vgchange --devices $dev3,$dev4 -an
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active |tee out
not grep active out
vgchange --devices $dev5,$dev6 -ay
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | grep active |tee out
not grep $lv1 out
not grep $lv2 out
grep $lv3 out
vgchange --devices $dev5,$dev6 -an
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active |tee out
not grep active out
lvcreate --devices $dev1,$dev2 -l1 -an -n $lv4 $vg1
lvremove --devices $dev1,$dev2 $vg1/$lv4
lvcreate --devices $dev3,$dev4 -l1 -an -n $lv4 $vg2
lvremove --devices $dev3,$dev4 $vg2/$lv4
lvcreate --devices $dev5,$dev6 -l1 -an -n $lv4 $vg3
lvremove --devices $dev5,$dev6 $vg3/$lv4
not vgchange --devices $dev1,$dev2 -ay $vg2
not vgchange --devices $dev1,$dev2 -ay $vg3
not vgchange --devices $dev3,$dev4 -ay $vg1
not vgchange --devices $dev3,$dev4 -ay $vg3
not vgchange --devices $dev5,$dev6 -ay $vg1
not vgchange --devices $dev5,$dev6 -ay $vg2
not lvcreate --devices $dev1,$dev2 -an -l1 $vg2
not lvcreate --devices $dev1,$dev2 -an -l1 $vg3
not lvcreate --devices $dev3,$dev4 -an -l1 $vg1
not lvcreate --devices $dev3,$dev4 -an -l1 $vg3
not lvcreate --devices $dev5,$dev6 -an -l1 $vg1
not lvcreate --devices $dev5,$dev6 -an -l1 $vg2
# autoactivate devs in default devices file
_clear_online_files
pvscan --cache -aay $dev3
pvscan --cache -aay $dev4
check lv_field $vg2/$lv2 lv_active "active"
vgchange -an $vg2
pvscan --cache -aay $dev1
not ls "$RUNDIR/lvm/pvs_online/$PVID1"
pvscan --cache -aay $dev2
not ls "$RUNDIR/lvm/pvs_online/$PVID2"
pvscan --cache -aay $dev5
not ls "$RUNDIR/lvm/pvs_online/$PVID5"
_clear_online_files
pvscan --devices $dev3 --cache -aay $dev3
pvscan --devices $dev3,$dev4 --cache -aay $dev4
lvs --devices $dev3,$dev4 -o active $vg2/$lv2 | grep active
vgchange --devices $dev3,$dev4 -an $vg2
not vgchange -ay $vg1
vgchange --devicesfile test.devices -ay $vg1
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | grep active |tee out
grep $lv1 out
not grep $lv2 out
not grep $lv3 out
vgchange -ay $vg2
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | grep active |tee out
grep $lv1 out
grep $lv2 out
not grep $lv3 out
not vgchange -ay $vg3
vgchange --devicesfile "" -ay $vg3
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | grep active |tee out
grep $lv1 out
grep $lv2 out
grep $lv3 out
vgchange -an
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | grep active |tee out
grep $lv1 out
not grep $lv2 out
grep $lv3 out
vgchange -ay
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active | grep active |tee out
grep $lv1 out
grep $lv2 out
grep $lv3 out
vgchange --devicesfile "" -an
lvs --devices $dev1,$dev2,$dev3,$dev4,$dev5,$dev6 -o name,active |tee out
not grep active out
not vgremove $vg1
not vgremove $vg3
vgremove -y $vg2
vgremove --devicesfile test.devices -y $vg1
vgremove --devicesfile "" -y $vg3

View File

@@ -1,476 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
test_description='devices file with devnames'
. lib/inittest
aux lvmconf 'devices/hints = "none"'
aux prepare_devs 7
RUNDIR="/run"
test -d "$RUNDIR" || RUNDIR="/var/run"
PVS_ONLINE_DIR="$RUNDIR/lvm/pvs_online"
VGS_ONLINE_DIR="$RUNDIR/lvm/vgs_online"
PVS_LOOKUP_DIR="$RUNDIR/lvm/pvs_lookup"
_clear_online_files() {
# wait till udev is finished
aux udev_wait
rm -f "$PVS_ONLINE_DIR"/*
rm -f "$VGS_ONLINE_DIR"/*
rm -f "$PVS_LOOKUP_DIR"/*
}
DFDIR="$LVM_SYSTEM_DIR/devices"
mkdir $DFDIR
DF="$DFDIR/system.devices"
ORIG="$DFDIR/orig.devices"
aux lvmconf 'devices/use_devicesfile = 1'
not ls $DF
pvcreate $dev1
ls $DF
grep $dev1 $DF
grep IDTYPE=devname $DF
pvcreate $dev2
grep $dev2 $DF
pvcreate $dev3
grep $dev3 $DF
vgcreate $vg1 $dev1 $dev2
# PVID with dashes for matching pvs -o+uuid output
OPVID1=`pvs $dev1 --noheading -o uuid | awk '{print $1}'`
OPVID2=`pvs $dev2 --noheading -o uuid | awk '{print $1}'`
OPVID3=`pvs $dev3 --noheading -o uuid | awk '{print $1}'`
# PVID without dashes for matching devices file fields
PVID1=`pvs $dev1 --noheading -o uuid | tr -d - | awk '{print $1}'`
PVID2=`pvs $dev2 --noheading -o uuid | tr -d - | awk '{print $1}'`
PVID3=`pvs $dev3 --noheading -o uuid | tr -d - | awk '{print $1}'`
lvmdevices --deldev $dev3
not grep $dev3 $DF
not grep $PVID3 $DF
not pvs $dev3
cp $DF $ORIG
lvcreate -l4 -an -i2 -n $lv1 $vg1
#
# when wrong idname devname is outside DF it's corrected if search_for=1
# by a general cmd, or by lvmdevices --addpvid
#
# when wrong idname devname is outside DF it's not found or corrected if
# search_for=0 by a general cmd, but will be by lvmdevices --addpvid
#
# when wrong idname devname is inside DF it's corrected if search_for=0|1
# by a general cmd, or by lvmdevices --addpvid
#
# pvscan --cache -aay does not update DF when devname= is wrong
#
# pvscan --cache -aay when idname devname is wrong:
# every dev is read and then skipped if pvid is not in DF
#
# commands still work with incorrect devname=
# . and they automatically correct the devname=
#
#
# idname changes to become incorrect, devname remains unchanged and correct
# . change idname to something outside DF
# . change idname to match another DF entry
# . swap idname of two DF entries
#
# edit DF idname, s/dev1/dev3/, where new dev is not in DF
sed -e "s|IDNAME=$dev1|IDNAME=$dev3|" $ORIG > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
not grep $OPVID3 out
not grep $dev3 out
grep $OPVID1 out |tee out2
grep $dev1 out2
# pvs fixed the DF
not grep $PVID3 $DF
not grep $dev3 $DF
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
cat $DF
sed -e "s|IDNAME=$dev1|IDNAME=$dev3|" $ORIG > $DF
cat $DF
# lvcreate uses correct dev
lvcreate -l1 -n $lv2 -an $vg1 $dev1
# lvcreate fixed the DF
not grep $PVID3 $DF
not grep $dev3 $DF
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
# pvs reports correct dev
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
not grep $OPVID3 out
not grep $dev3 out
grep $OPVID1 out |tee out2
grep $dev1 out2
lvremove $vg1/$lv2
cat $DF
sed -e "s|IDNAME=$dev1|IDNAME=$dev3|" $ORIG > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
not grep $PVID3 $DF
not grep $dev3 $DF
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
cat $DF
# edit DF idname, s/dev1/dev2/, creating two entries with same idname
sed -e "s|IDNAME=$dev1|IDNAME=$dev2|" $ORIG > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
grep $OPVID1 out |tee out2
grep $dev1 out2
grep $OPVID2 out |tee out2
grep $dev2 out2
# pvs fixed the DF
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep IDNAME=$dev2 out
cat $DF
sed -e "s|IDNAME=$dev1|IDNAME=$dev2|" $ORIG > $DF
cat $DF
# lvcreate uses correct dev
lvcreate -l1 -n $lv2 -an $vg1 $dev1
# lvcreate fixed the DF
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep IDNAME=$dev2 out
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
grep $OPVID1 out |tee out2
grep $dev1 out2
grep $OPVID2 out |tee out2
grep $dev2 out2
lvremove $vg1/$lv2
cat $DF
sed -e "s|IDNAME=$dev1|IDNAME=$dev2|" $ORIG > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep IDNAME=$dev2 out
cat $DF
# edit DF idname, swap dev1 and dev2
sed -e "s|IDNAME=$dev1|IDNAME=tmpname|" $ORIG > tmp1.devices
sed -e "s|IDNAME=$dev2|IDNAME=$dev1|" tmp1.devices > tmp2.devices
sed -e "s|IDNAME=tmpname|IDNAME=$dev2|" tmp2.devices > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
grep $OPVID1 out |tee out2
grep $dev1 out2
grep $OPVID2 out |tee out2
grep $dev2 out2
# pvs fixed the DF
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep IDNAME=$dev2 out
cat $DF
sed -e "s|IDNAME=$dev1|IDNAME=tmpname|" $ORIG > tmp1.devices
sed -e "s|IDNAME=$dev2|IDNAME=$dev1|" tmp1.devices > tmp2.devices
sed -e "s|IDNAME=tmpname|IDNAME=$dev2|" tmp2.devices > $DF
cat $DF
# lvcreate uses correct dev
lvcreate -l1 -n $lv2 -an $vg1 $dev1
# lvcreate fixed the DF
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep IDNAME=$dev2 out
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
grep $OPVID1 out |tee out2
grep $dev1 out2
grep $OPVID2 out |tee out2
grep $dev2 out2
lvremove $vg1/$lv2
cat $DF
sed -e "s|IDNAME=$dev1|IDNAME=tmpname|" $ORIG > tmp1.devices
sed -e "s|IDNAME=$dev2|IDNAME=$dev1|" tmp1.devices > tmp2.devices
sed -e "s|IDNAME=tmpname|IDNAME=$dev2|" tmp2.devices > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep IDNAME=$dev2 out
cat $DF
#
# idname remains correct, devname changes to become incorrect
# . change devname to something outside DF
# . change devname to match another DF entry
# . swap devname of two DF entries
#
# edit DF devname, s/dev1/dev3/, where new dev is not in DF
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev3|" $ORIG > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
not grep $OPVID3 out
not grep $dev3 out
grep $OPVID1 out |tee out2
grep $dev1 out2
# pvs fixed the DF
not grep $PVID3 $DF
not grep $dev3 $DF
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
cat $DF
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev3|" $ORIG > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
not grep $PVID3 $DF
not grep $dev3 $DF
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
cat $DF
# edit DF devname, s/dev1/dev2/, creating two entries with same devname
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev2|" $ORIG > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
grep $OPVID1 out |tee out2
grep $dev1 out2
grep $OPVID2 out |tee out2
grep $dev2 out2
# pvs fixed the DF
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
grep $PVID2 $DF |tee out
grep DEVNAME=$dev2 out
cat $DF
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev2|" $ORIG > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep IDNAME=$dev2 out
cat $DF
# edit DF devname, swap dev1 and dev2
sed -e "s|DEVNAME=$dev1|DEVNAME=tmpname|" $ORIG > tmp1.devices
sed -e "s|DEVNAME=$dev2|DEVNAME=$dev1|" tmp1.devices > tmp2.devices
sed -e "s|DEVNAME=tmpname|DEVNAME=$dev2|" tmp2.devices > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
grep $OPVID1 out |tee out2
grep $dev1 out2
grep $OPVID2 out |tee out2
grep $dev2 out2
# pvs fixed the DF
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
grep $PVID2 $DF |tee out
grep DEVNAME=$dev2 out
cat $DF
sed -e "s|DEVNAME=$dev1|DEVNAME=tmpname|" $ORIG > tmp1.devices
sed -e "s|DEVNAME=$dev2|DEVNAME=$dev1|" tmp1.devices > tmp2.devices
sed -e "s|DEVNAME=tmpname|DEVNAME=$dev2|" tmp2.devices > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
grep $PVID1 $DF |tee out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep IDNAME=$dev2 out
cat $DF
#
# idname and devname change, both become incorrect
# . change idname&devname to something outside DF
# . change idname&devname to match another DF entry
# . swap idname&devname of two DF entries
#
# edit DF idname&devname, s/dev1/dev3/, where new dev is not in DF
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev3|" $ORIG > tmp1.devices
sed -e "s|IDNAME=$dev1|IDNAME=$dev3|" tmp1.devices > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
not grep $OPVID3 out
not grep $dev3 out
grep $OPVID1 out |tee out2
grep $dev1 out2
# pvs fixed the DF
not grep $PVID3 $DF
not grep $dev3 $DF
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
grep IDNAME=$dev1 out
cat $DF
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev3|" $ORIG > tmp1.devices
sed -e "s|IDNAME=$dev1|IDNAME=$dev3|" tmp1.devices > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
not grep $PVID3 $DF
not grep $dev3 $DF
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
grep IDNAME=$dev1 out
cat $DF
# edit DF idname&devname, s/dev1/dev2/, creating two entries with same devname
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev2|" tmp1.devices > $DF
sed -e "s|IDNAME=$dev1|IDNAME=$dev2|" tmp1.devices > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
grep $OPVID1 out |tee out2
grep $dev1 out2
grep $OPVID2 out |tee out2
grep $dev2 out2
# pvs fixed the DF
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep DEVNAME=$dev2 out
grep IDNAME=$dev2 out
cat $DF
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev2|" tmp1.devices > $DF
sed -e "s|IDNAME=$dev1|IDNAME=$dev2|" tmp1.devices > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep DEVNAME=$dev2 out
grep IDNAME=$dev2 out
cat $DF
# edit DF devname, swap dev1 and dev2
sed -e "s|DEVNAME=$dev1|DEVNAME=tmpname|" $ORIG > tmp1.devices
sed -e "s|DEVNAME=$dev2|DEVNAME=$dev1|" tmp1.devices > tmp2.devices
sed -e "s|DEVNAME=tmpname|DEVNAME=$dev2|" tmp2.devices > tmp3.devices
sed -e "s|IDNAME=$dev1|IDNAME=tmpname|" tmp3.devices > tmp4.devices
sed -e "s|IDNAME=$dev2|IDNAME=$dev1|" tmp4.devices > tmp5.devices
sed -e "s|IDNAME=tmpname|IDNAME=$dev2|" tmp5.devices > $DF
cat $DF
# pvs reports correct info
pvs -o+uuid | tee pvs.out
grep $vg1 pvs.out > out
grep $OPVID1 out |tee out2
grep $dev1 out2
grep $OPVID2 out |tee out2
grep $dev2 out2
# pvs fixed the DF
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep DEVNAME=$dev2 out
grep IDNAME=$dev2 out
cat $DF
sed -e "s|DEVNAME=$dev1|DEVNAME=tmpname|" $ORIG > tmp1.devices
sed -e "s|DEVNAME=$dev2|DEVNAME=$dev1|" tmp1.devices > tmp2.devices
sed -e "s|DEVNAME=tmpname|DEVNAME=$dev2|" tmp2.devices > tmp3.devices
sed -e "s|IDNAME=$dev1|IDNAME=tmpname|" tmp3.devices > tmp4.devices
sed -e "s|IDNAME=$dev2|IDNAME=$dev1|" tmp4.devices > tmp5.devices
sed -e "s|IDNAME=tmpname|IDNAME=$dev2|" tmp5.devices > $DF
cat $DF
# lvmdevices fixes the DF
lvmdevices --update
grep $PVID1 $DF |tee out
grep DEVNAME=$dev1 out
grep IDNAME=$dev1 out
grep $PVID2 $DF |tee out
grep DEVNAME=$dev2 out
grep IDNAME=$dev2 out
cat $DF
#
# other tests:
# pvscan --cache -aay when idname and/or devname are wrong
# DF entry for device that's not a PV which changes name
# check hint file is correct when devnames are changing
# test with/without hints enabled
# s/dev1/dev3/ where dev3 is outside DF and is not a PV
# find case where df is updated in both validate and find_renamed_devs
# get_hints skips hints because unmatched device ids
# validate_hints skips hints because invalid device ids
# partitions of mpath and loop
#
vgremove -ff $vg1

View File

@@ -1,547 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
test_description='devices file with real devs'
. lib/inittest
#
# To use this test, add two or more devices with real device ids,
# e.g. wwids, to a file, e.g.
# $ cat /tmp/devs
# /dev/sdb
# /dev/sdc
# /dev/sdd
#
# Specify this file as LVM_TEST_DEVICE_LIST=/tmp/devs
# when running the test.
#
# This test will wipe these devices.
#
if [ -z ${LVM_TEST_DEVICE_LIST+x} ]; then echo "LVM_TEST_DEVICE_LIST is unset" && skip; else echo "LVM_TEST_DEVICE_LIST is set to '$LVM_TEST_DEVICE_LIST'"; fi
test -e "$LVM_TEST_DEVICE_LIST" || skip
num_devs=$(cat $LVM_TEST_DEVICE_LIST | wc -l)
aux prepare_real_devs
aux lvmconf 'devices/dir = "/dev"'
aux lvmconf 'devices/use_devicesfile = 1'
DFDIR="$LVM_SYSTEM_DIR/devices"
DF="$DFDIR/system.devices"
mkdir $DFDIR
not ls $DF
get_real_devs
wipe_all() {
for dev in "${REAL_DEVICES[@]}"; do
wipefs -a $dev
done
}
wipe_all
# check each dev is added correctly to df
for dev in "${REAL_DEVICES[@]}"; do
pvcreate $dev
ls $DF
pvs -o+uuid $dev
maj=$(get pv_field "$dev" major)
min=$(get pv_field "$dev" minor)
pvid=`pvs $dev --noheading -o uuid | tr -d - | awk '{print $1}'`
sys_wwid_file="/sys/dev/block/$maj:$min/device/wwid"
sys_serial_file="/sys/dev/block/$maj:$min/device/serial"
sys_dm_uuid_file="/sys/dev/block/$maj:$min/dm/uuid"
sys_md_uuid_file="/sys/dev/block/$maj:$min/md/uuid"
sys_loop_file="/sys/dev/block/$maj:$min/loop/backing_file"
if test -e $sys_wwid_file; then
sys_file=$sys_wwid_file
idtype="sys_wwid"
elif test -e $sys_serial_file; then
sys_file=$sys_serial_file
idtype="sys_serial"
elif test -e $sys_dm_uuid_file; then
sys_file=$sys_dm_uuid_file
idtype="mpath_uuid"
elif test -e $sys_md_uuid_file; then
sys_file=$sys_md_uuid_file
idtype="md_uuid"
elif test -e $sys_loop_file; then
sys_file=$sys_loop_file
idtype="loop_file"
else
echo "no id type for device"
skip
fi
idname=$(< $sys_file)
rm -f idline
grep IDNAME=$idname $DF | tee idline
grep IDTYPE=$idtype idline
grep DEVNAME=$dev idline
grep PVID=$pvid idline
done
cp $DF df2
# vgcreate from existing pvs, already in df
vgcreate $vg ${REAL_DEVICES[@]}
vgremove $vg
rm $DF
# vgcreate from existing pvs, adding to df
vgcreate $vg ${REAL_DEVICES[@]}
grep IDNAME $DF > df.ids
grep IDNAME df2 > df2.ids
diff df.ids df2.ids
# check device id metadata fields
for dev in "${REAL_DEVICES[@]}"; do
grep $dev $DF
deviceid=`pvs $dev --noheading -o deviceid | awk '{print $1}'`
deviceidtype=`pvs $dev --noheading -o deviceidtype | awk '{print $1}'`
grep $dev $DF | grep $deviceid
grep $dev $DF | grep $deviceidtype
lvcreate -l1 $vg $dev
done
vgchange -an $vg
vgremove -y $vg
# check pvremove leaves devs in df but without pvid
for dev in "${REAL_DEVICES[@]}"; do
maj=$(get pv_field "$dev" major)
min=$(get pv_field "$dev" minor)
pvid=`pvs $dev --noheading -o uuid | tr -d - | awk '{print $1}'`
pvremove $dev
grep $dev $DF
not grep $pvid $DF
done
# Many of remaining tests require two or three devices
test $num_devs -gt 2 || skip
# check vgextend adds new dev to df, vgreduce leaves dev in df
rm $DF
vgcreate $vg $dev1
vgextend $vg $dev2
grep $dev1 $DF
grep $dev2 $DF
id1=`pvs $dev1 --noheading -o deviceid | awk '{print $1}'`
id2=`pvs $dev2 --noheading -o deviceid | awk '{print $1}'`
grep $id1 $DF
grep $id2 $DF
vgreduce $vg $dev2
grep $dev2 $DF
vgremove $vg
# check devs are not visible to lvm until added to df
rm $DF
# df needs to exist otherwise devicesfile feature turned off
touch $DF
not pvs $dev1
not pvs $dev2
pvs -a |tee all
not grep $dev1 all
not grep $dev2 all
not grep $dev1 $DF
not grep $dev2 $DF
pvcreate $dev1
pvs $dev1
not pvs $dev2
pvs -a |tee all
grep $dev1 all
not grep $dev2 all
grep $dev1 $DF
not grep $dev2 $DF
pvcreate $dev2
pvs $dev1
pvs $dev2
pvs -a |tee all
grep $dev1 all
grep $dev2 all
grep $dev1 $DF
grep $dev2 $DF
vgcreate $vg $dev1
pvs $dev1
pvs $dev2
pvs -a |tee all
grep $dev1 all
grep $dev2 all
grep $dev1 $DF
grep $dev2 $DF
vgextend $vg $dev2
pvs $dev1
pvs $dev2
pvs -a |tee all
grep $dev1 all
grep $dev2 all
grep $dev1 $DF
grep $dev2 $DF
# check vgimportdevices VG
rm $DF
wipe_all
vgcreate $vg ${REAL_DEVICES[@]}
rm $DF
touch $DF
for dev in "${REAL_DEVICES[@]}"; do
not pvs $dev
done
vgimportdevices $vg
for dev in "${REAL_DEVICES[@]}"; do
pvs $dev
done
# check vgimportdevices -a
rm $DF
wipe_all
vgcreate $vg1 $dev1
vgcreate $vg2 $dev2
rm $DF
vgimportdevices -a
vgs $vg1
vgs $vg2
pvs $dev1
pvs $dev2
# check vgimportclone --importdevices
rm $DF
wipe_all
vgcreate $vg1 $dev1
vgimportdevices $vg1
dd if=$dev1 of=$dev2 bs=1M count=1
pvs $dev1
not pvs $dev2
grep $dev1 $DF
not grep $dev2 $DF
not vgimportclone $dev2
not grep $dev2 $DF
vgimportclone --basevgname $vg2 --importdevices $dev2
pvid1=`pvs $dev1 --noheading -o uuid | tr -d - | awk '{print $1}'`
pvid2=`pvs $dev2 --noheading -o uuid | tr -d - | awk '{print $1}'`
test "$pvid1" != "$pvid2" || die "same uuid"
id1=`pvs $dev1 --noheading -o deviceid | tr -d - | awk '{print $1}'`
id2=`pvs $dev2 --noheading -o deviceid | tr -d - | awk '{print $1}'`
test "$id1" != "$id2" || die "same device id"
grep $dev1 $DF
grep $dev2 $DF
grep $pvid1 $DF
grep $pvid2 $DF
grep $id1 $DF
grep $id2 $DF
vgs $vg1
vgs $vg2
#
# check lvmdevices
#
wipe_all
rm $DF
# set up pvs and save pvids/deviceids
count=0
for dev in "${REAL_DEVICES[@]}"; do
pvcreate $dev
vgcreate ${vg}_${count} $dev
pvid=`pvs $dev --noheading -o uuid | tr -d - | awk '{print $1}'`
did=`pvs $dev --noheading -o deviceid | awk '{print $1}'`
echo dev $dev pvid $pvid did $did
PVIDS[$count]=$pvid
DEVICEIDS[$count]=$did
count=$(( count + 1 ))
done
rm $DF
not lvmdevices
touch $DF
lvmdevices
# check lvmdevices --adddev
count=0
for dev in "${REAL_DEVICES[@]}"; do
pvid=${PVIDS[$count]}
did=${DEVICEIDS[$count]}
not pvs $dev
lvmdevices --adddev $dev
lvmdevices |tee out
grep $dev out |tee idline
grep $pvid idline
grep $did idline
grep $dev $DF
pvs $dev
count=$(( count + 1 ))
done
# check lvmdevices --deldev
count=0
for dev in "${REAL_DEVICES[@]}"; do
pvid=${PVIDS[$count]}
did=${DEVICEIDS[$count]}
pvs $dev
lvmdevices --deldev $dev
lvmdevices |tee out
not grep $dev out
not grep $pvid out
not grep $did out
not grep $dev $DF
not pvs $dev
count=$(( count + 1 ))
done
# check lvmdevices --addpvid
count=0
for dev in "${REAL_DEVICES[@]}"; do
pvid=${PVIDS[$count]}
did=${DEVICEIDS[$count]}
not pvs $dev
lvmdevices --addpvid $pvid
lvmdevices |tee out
grep $dev out |tee idline
grep $pvid idline
grep $did idline
grep $dev $DF
pvs $dev
count=$(( count + 1 ))
done
# check lvmdevices --delpvid
count=0
for dev in "${REAL_DEVICES[@]}"; do
pvid=${PVIDS[$count]}
did=${DEVICEIDS[$count]}
pvs $dev
lvmdevices --delpvid $pvid
lvmdevices |tee out
not grep $dev out
not grep $pvid out
not grep $did out
not grep $dev $DF
not pvs $dev
count=$(( count + 1 ))
done
# wrong pvid in df
rm $DF
pvid1=${PVIDS[0]}
pvid2=${PVIDS[1]}
did1=${DEVICEIDS[0]}
did2=${DEVICEIDS[1]}
lvmdevices --adddev $dev1
lvmdevices --adddev $dev2
# test bad pvid
cp $DF $DF.orig
rm $DF
sed "s/$pvid1/badpvid/" "$DF.orig" |tee $DF
not grep $pvid1 $DF
grep $did1 $DF
lvmdevices --check 2>&1|tee out
grep $dev1 out
grep badpvid out
grep $pvid1 out
not grep $dev2 out
lvmdevices |tee out
grep $dev1 out |tee out1
grep badpvid out1
not grep $pvid1 out1
grep $dev2 out
lvmdevices --update
lvmdevices 2>&1|tee out
grep $dev1 out
grep $dev2 out
not grep badpvid out
grep $pvid1 out
grep $did1 out
grep $pvid1 $DF
grep $did1 $DF
# wrong deviceid in df
# the devicesfile logic and behavior is based on the idname being
# the primary identifier that we trust over everything else, i.e.
# we'll never assume that the deviceid is wrong and some other
# field is correct, and "fix" the deviceid. We always assume the
# deviceid is correct and other values are wrong (since pvid and devname
# have known, common ways of becoming wrong, but the deviceid doesn't
# really have any known way of becoming wrong apart from random
# file corruption.)
# So, if the deviceid *is* corrupted, as we do here, then standard
# commands won't correct it. We need to use delpvid/addpvid explicitly
# to say that we are targeting the given pvid.
rm $DF
sed "s/$did1/baddid/" "$DF.orig" |tee $DF
lvmdevices --check 2>&1|tee out
grep $dev1 out
grep baddid out
not grep $dev2 out
lvmdevices 2>&1|tee out
grep $pvid1 out
grep $pvid2 out
grep baddid out
grep $did2 out
grep $dev2 out
lvmdevices --delpvid $pvid1
lvmdevices --addpvid $pvid1
lvmdevices |tee out
grep $dev1 out
grep $dev2 out
not grep baddid out
grep $pvid1 out
grep $did1 out
grep $pvid1 $DF
grep $did1 $DF
# wrong devname in df, this is expected to become incorrect regularly
# given inconsistent dev names after reboot
rm $DF
d1=$(basename $dev1)
d3=$(basename $dev3)
sed "s/$d1/$d3/" "$DF.orig" |tee $DF
lvmdevices --check |tee out
grep $dev1 out
lvmdevices --update
lvmdevices |tee out
grep $dev1 out |tee out1
grep $pvid1 out1
grep $did1 out1
grep $dev2 out |tee out2
grep $pvid2 out2
grep $did2 out2
# swap devnames for two existing entries
rm $DF
d1=$(basename $dev1)
d2=$(basename $dev2)
sed "s/$d1/tmp/" "$DF.orig" |tee ${DF}_1
sed "s/$d2/$d1/" "${DF}_1" |tee ${DF}_2
sed "s/tmp/$d2/" "${DF}_2" |tee $DF
rm ${DF}_1 ${DF}_2
lvmdevices --check |tee out
grep $dev1 out
grep $dev2 out
lvmdevices --update
lvmdevices |tee out
grep $dev1 out |tee out1
grep $pvid1 out1
grep $did1 out1
grep $dev2 out |tee out2
grep $pvid2 out2
grep $did2 out2
# ordinary command is not confused by wrong devname and fixes
# the wrong devname in df
rm $DF
d1=$(basename $dev1)
d3=$(basename $dev3)
sed "s/$d1/$d3/" "$DF.orig" |tee $DF
lvmdevices --check |tee out
grep $dev1 out
pvs -o+uuid,deviceid | grep $vg |tee out
grep $dev1 out |tee out1
grep $dev2 out |tee out2
grep $did1 out1
grep $did2 out2
not grep $dev3 out
# same dev info reported after df is fixed
pvs -o+uuid,deviceid | grep $vg |tee out3
diff out out3
pvid=`pvs $dev1 --noheading -o uuid | tr -d - | awk '{print $1}'`
test "$pvid" == "$pvid1" || die "wrong uuid"
pvid=`pvs $dev2 --noheading -o uuid | tr -d - | awk '{print $1}'`
test "$pvid" == "$pvid2" || die "wrong uuid"
lvmdevices |tee out
grep $dev1 out |tee out1
grep $pvid1 out1
grep $did1 out1
grep $dev2 out |tee out2
grep $pvid2 out2
grep $did2 out2
# pvscan --cache doesn't fix wrong devname but still works correctly with
# the correct device
wipe_all
rm $DF

View File

@@ -96,19 +96,9 @@ lvcreate -n $lv1 -L20M $vg
lvcreate -n ${lv1}bar -L10M $vg
trap 'cleanup_mounted_and_teardown' EXIT
# prints help
fsadm
# check needs arg
not fsadm check
if check_missing ext2; then
mkfs.ext2 -b4096 -j "$dev_vg_lv"
# Check 'check' works
fsadm check $vg_lv
# Check 'resize' without size parameter works
fsadm resize $vg_lv
fsadm --lvresize resize $vg_lv 30M
# Fails - not enough space for 4M fs
not fsadm -y --lvresize resize "$dev_vg_lv" 4M

View File

@@ -1,212 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux have_integrity 1 5 0 || skip
which mkfs.xfs || skip
which xfs_growfs || skip
mnt="mnt"
mkdir -p $mnt
aux prepare_devs 3 40
# Use awk instead of the annoyingly long log output from printf
#printf "%0.sA" {1..16384} >> fileA
awk 'BEGIN { while (z++ < 16384) printf "A" }' > fileA
awk 'BEGIN { while (z++ < 16384) printf "B" }' > fileB
awk 'BEGIN { while (z++ < 16384) printf "C" }' > fileC
_prepare_vg() {
# zero devs so we are sure to find the correct file data
# on the underlying devs when corrupting it
dd if=/dev/zero of="$dev1" bs=1M oflag=direct || true
dd if=/dev/zero of="$dev2" bs=1M oflag=direct || true
dd if=/dev/zero of="$dev3" bs=1M oflag=direct || true
vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3"
pvs
}
_test1() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
# we don't want fileA to be located too early in the fs,
# otherwise activating the LV will trigger the corruption
# to be found and corrected, leaving nothing for syncaction
# to find and correct.
dd if=/dev/urandom of=$mnt/rand16M bs=1M count=16
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt
umount $mnt
lvchange -an $vg/$lv1
xxd "$dev1" > dev1.txt
# corrupt fileB
sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.txt > dev1.bad
rm -f dev1.txt
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad
lvchange -ay $vg/$lv1
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
grep 0 mismatch
lvchange --syncaction check $vg/$lv1
_wait_recalc $vg/$lv1
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
cmp -b $mnt/fileA fileA
cmp -b $mnt/fileB fileB
cmp -b $mnt/fileC fileC
umount $mnt
}
_test2() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
# we don't want fileA to be located too early in the fs,
# otherwise activating the LV will trigger the corruption
# to be found and corrected, leaving nothing for syncaction
# to find and correct.
dd if=/dev/urandom of=$mnt/rand16M bs=1M count=16
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt
umount $mnt
lvchange -an $vg/$lv1
# corrupt fileB and fileC on dev1
xxd "$dev1" > dev1.txt
sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' \
    -e 's/4343 4343 4343 4343 4343 4343 4343 4343/4444 4444 4444 4444 4444 4444 4444 4444/' dev1.txt > dev1.bad
rm -f dev1.txt
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad
# corrupt fileA on dev2
xxd "$dev2" > dev2.txt
sed -e 's/4141 4141 4141 4141 4141 4141 4141 4141/4141 4141 4141 4141 4141 4141 4145 4141/' dev2.txt > dev2.bad
rm -f dev2.txt
xxd -r dev2.bad > "$dev2"
rm -f dev2.bad
lvchange -ay $vg/$lv1
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
grep 0 mismatch
lvs -o integritymismatches $vg/${lv1}_rimage_1 |tee mismatch
grep 0 mismatch
lvchange --syncaction check $vg/$lv1
_wait_recalc $vg/$lv1
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
lvs -o integritymismatches $vg/${lv1}_rimage_1 |tee mismatch
not grep 0 mismatch
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
cmp -b $mnt/fileA fileA
cmp -b $mnt/fileB fileB
cmp -b $mnt/fileC fileC
umount $mnt
}
_sync_percent() {
local checklv=$1
get lv_field "$checklv" sync_percent | cut -d. -f1
}
_wait_recalc() {
local checklv=$1
for i in $(seq 1 10) ; do
sync=$(_sync_percent "$checklv")
echo "sync_percent is $sync"
if test "$sync" = "100"; then
return
fi
sleep 1
done
# TODO: There is some strange bug, first leg of RAID with integrity
# enabled never gets in sync. I saw this in BB, but not when executing
# the commands manually
if test -z "$sync"; then
echo "TEST\ WARNING: Resync of dm-integrity device '$checklv' failed"
dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}"
exit
fi
echo "timeout waiting for recalc"
return 1
}
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test1
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test2
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2" "$dev3"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_test1
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg

View File

@@ -46,14 +46,12 @@ _prepare_vg() {
pvs
}
_test_fs_with_read_repair() {
_test_fs_with_error() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
cp randA $mnt
cp randB $mnt
cp randC $mnt
# add original data
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt
@@ -61,21 +59,137 @@ _test_fs_with_read_repair() {
umount $mnt
lvchange -an $vg/$lv1
for dev in "$@"; do
xxd "$dev" > dev.txt
# corrupt fileB
sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev.txt > dev.bad
rm -f dev.txt
xxd -r dev.bad > "$dev"
rm -f dev.bad
done
# corrupt the original data on the underying dev
# flip one bit in fileB, changing a 0x42 to 0x43
# the bit is changed in the last 4096 byte block
# of the file, so when reading back the file we
# will get the first three 4096 byte blocks, for
# a total of 12288 bytes before getting an error
# on the last 4096 byte block.
xxd "$dev1" > dev1.txt
tac dev1.txt > dev1.rev
rm -f dev1.txt
sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
rm -f dev1.rev
tac dev1.rev.bad > dev1.bad
rm -f dev1.rev.bad
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad
lvchange -ay $vg/$lv1
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
# read complete fileA which was not corrupted
dd if=$mnt/fileA of=tmp bs=1k
ls -l tmp
stat -c %s tmp
cmp -b fileA tmp
rm tmp
# read partial fileB which was corrupted
not dd if=$mnt/fileB of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 12288
not cmp -b fileB tmp
rm tmp
umount $mnt
}
_test_fs_with_read_repair() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
# add original data
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt
umount $mnt
lvchange -an $vg/$lv1
# FIXME: this is only finding/corrupting the bit with raid1
# other raid levels may require looking at a different dev.
# (Attempt this xxd/tac/sed/xxd on each dev in the LV?)
xxd "$dev1" > dev1.txt
tac dev1.txt > dev1.rev
rm -f dev1.txt
sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
rm -f dev1.rev
tac dev1.rev.bad > dev1.bad
rm -f dev1.rev.bad
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad
lvchange -ay $vg/$lv1
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
# read complete fileA which was not corrupted
dd if=$mnt/fileA of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 16384
cmp -b fileA tmp
rm tmp
# read complete fileB, corruption is corrected by raid
dd if=$mnt/fileB of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 16384
cmp -b fileB tmp
rm tmp
umount $mnt
}
_test_fs_with_syncaction_check() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
# add original data
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt
umount $mnt
lvchange -an $vg/$lv1
# FIXME: this only finds/corrupts the bit with raid1;
# other raid levels may require looking at a different dev.
# (Attempt this xxd/tac/sed/xxd on each dev in the LV?)
xxd "$dev1" > dev1.txt
tac dev1.txt > dev1.rev
rm -f dev1.txt
sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
rm -f dev1.rev
tac dev1.rev.bad > dev1.bad
rm -f dev1.rev.bad
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad
lvchange -ay $vg/$lv1
lvchange --syncaction check $vg/$lv1
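# (the check scrubs the whole array: every block is read back, so
# dm-integrity flags the corrupted block and raid corrects it from
# a good image)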
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
cmp -b $mnt/fileA fileA
cmp -b $mnt/fileB fileB
cmp -b $mnt/fileC fileC
# read complete fileA which was not corrupted
dd if=$mnt/fileA of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 16384
cmp -b fileA tmp
rm tmp
# read complete fileB
dd if=$mnt/fileB of=tmp bs=1k
ls -l tmp
stat -c %s tmp | grep 16384
cmp -b fileB tmp
rm tmp
umount $mnt
}
@@ -168,105 +282,170 @@ _wait_recalc() {
# it is detected by integrity and corrected by raid.
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2"
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1"
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2"
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2" "$dev3"
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2" "$dev3"
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/${lv1}_rimage_4
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4"
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev3"
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
# Test corrupting data on an image and verifying that
# it is detected and corrected using syncaction check
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
check lv_field $vg/${lv1}_rimage_0 integritymismatches "1"
check lv_field $vg/${lv1}_rimage_1 integritymismatches "0"
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
check lv_field $vg/${lv1}_rimage_0 integritymismatches "2"
check lv_field $vg/${lv1}_rimage_1 integritymismatches "0"
check lv_field $vg/${lv1}_rimage_2 integritymismatches "0"
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/${lv1}_rimage_4
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_test_fs_with_syncaction_check
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -278,7 +457,6 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -293,8 +471,6 @@ _prepare_vg
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -309,8 +485,6 @@ _prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -325,10 +499,6 @@ _prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -343,7 +513,6 @@ _prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -358,7 +527,6 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -373,7 +541,6 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid4 -n $lv1 -l 8 $vg
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -388,7 +555,6 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid5 -n $lv1 -l 8 $vg
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -403,12 +569,6 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid6 -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -423,7 +583,6 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid10 -n $lv1 -l 8 $vg
_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -442,7 +601,6 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
umount $mnt
@@ -466,10 +624,6 @@ _prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
umount $mnt
@@ -495,7 +649,6 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -515,8 +668,6 @@ _prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -536,7 +687,6 @@ _prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -558,7 +708,6 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvconvert -y -m+1 $vg/$lv1
@@ -581,7 +730,6 @@ lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvconvert -y -m-1 $vg/$lv1
@@ -600,7 +748,6 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
not lvconvert -y -m-1 $vg/$lv1
@@ -621,36 +768,23 @@ vgremove -ff $vg
# Repeat many of the tests above using bitmap mode
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1 "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1"
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
lvcreate --type raid6 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
_test_fs_with_read_repair "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
lvcreate --type raid6 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/${lv1}_rimage_4
lvs -o integritymismatches $vg/$lv1 |tee mismatch
not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -658,7 +792,7 @@ vgremove -ff $vg
# remove from active lv
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2"
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_add_new_data_to_mnt

View File

@@ -22,7 +22,7 @@ which mkfs.ext4 || skip
aux have_raid 1 13 2 || skip
case "$(uname -r)" in
5.[891]*|3.10.0-862*) die "Cannot run this test on unfixed kernel." ;;
5.[89]*|3.10.0-862*) die "Cannot run this test on unfixed kernel." ;;
esac
mount_dir="mnt"

View File

@@ -128,7 +128,7 @@ lvcreate -L1T -n $lv1 $vg
lvcreate -L32G -n $lv2 $vg
# Warning about bigger than needed
lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 2>&1 | tee err
grep -i "maximum" err
grep "WARNING: Maximum" err
lvremove -f $vg

View File

@@ -27,6 +27,7 @@ aux prepare_vg 5 80000
aux lvmconf 'global/cache_disabled_features = [ "policy_smq" ]'
#######################
# Cache_Pool creation #
#######################
@@ -172,16 +173,17 @@ dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel
lvremove -f $vg
# Check minimum cache pool metadata size
lvcreate -l 1 --type cache-pool --poolmetadatasize 1 $vg 2>&1 | tee out
grep -i "minimal" out
# Check minimum cache pool metadata size
lvcreate -l 1 --type cache-pool --poolmetadatasize 1 $vg 2>out
grep "WARNING: Minimum" out
# FIXME: this test fails in the allocator with smaller VG sizes
lvcreate -l 1 --type cache-pool --poolmetadatasize 17G $vg 2>&1 | tee out
grep -i "maximum" out
lvcreate -l 1 --type cache-pool --poolmetadatasize 17G $vg 2>out
grep "WARNING: Maximum" out
lvremove -f $vg
########################################
# Cache conversion and r/w permissions #
########################################

View File

@@ -31,14 +31,14 @@ vgcreate $SHARED -s 64K "$vg" "${DEVICES[@]}"
# Size 0 is not valid
invalid lvcreate -L4M --chunksize 128 --poolmetadatasize 0 -T $vg/pool1 2>out
lvcreate -Zn -L4M --chunksize 128 --poolmetadatasize 16k -T $vg/pool1 2>&1 >out
grep -i "minimal" out
lvcreate -Zn -L4M --chunksize 128 --poolmetadatasize 16k -T $vg/pool1 2>out
grep "WARNING: Minimum" out
# FIXME: metadata allocation fails if the PV doesn't have at least 16GB,
# i.e. the pool metadata device cannot be multisegment
lvcreate -Zn -L4M --chunksize 64k --poolmetadatasize 17G -T $vg/pool2 2>&1 >out
grep "maximum" out
lvcreate -Zn -L4M --chunksize 64k --poolmetadatasize 17G -T $vg/pool2 2>out
grep "WARNING: Maximum" out
check lv_field $vg/pool1_tmeta size "2.00m"
check lv_field $vg/pool2_tmeta size "<15.88g"
check lv_field $vg/pool2_tmeta size "15.81g"
# Check we do report correct percent values.
lvcreate --type zero -L3G $vg -n pool3

View File

@@ -27,35 +27,13 @@ aux can_use_16T || skip
aux have_thin 1 0 0 || skip
which mkfs.ext4 || skip
# 16T device
aux prepare_pvs 2 8388608
aux prepare_pvs 1 16777216
get_devs
# gives 16777215M device
vgcreate $SHARED -s 4M "$vg" "${DEVICES[@]}"
vgcreate $SHARED -s 4K "$vg" "${DEVICES[@]}"
# For the 1st pass use only a single PV
lvcreate -l100%PV --name $lv1 $vg "$dev2"
not lvcreate -T -L15.995T --poolmetadatasize 5G $vg/pool
for i in 1 0
do
SIZE=$(get vg_field "$vg" vg_free --units m)
SIZE=${SIZE%%\.*}
# ~16T - 2 * 5G + 1M -> should not fit (see the worked example after the loop)
not lvcreate -Zn -T -L$(( SIZE - 2 * 5 * 1024 + 1 )) --poolmetadatasize 5G $vg/pool
check vg_field "$vg" lv_count "$i"
# Should fit data + metadata + pmspare
lvcreate -Zn -T -L$(( SIZE - 2 * 5 * 1024 )) --poolmetadatasize 5G $vg/pool
check vg_field "$vg" vg_free "0"
lvs -ao+seg_pe_ranges $vg
# Remove everything for the 2nd pass
lvremove -ff $vg
done
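# Worked sizing example: with SIZE = vg_free in MiB, the pool metadata
# LV and its pmspare cost 2 * 5 * 1024 = 10240 MiB, so a data size of
# (SIZE - 10240) MiB fills the VG exactly (vg_free 0) and even 1 MiB
# more cannot be allocated.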
lvs -ao+seg_pe_ranges $vg
vgremove -ff $vg

View File

@@ -32,9 +32,22 @@ export MKE2FS_CONFIG="$TESTDIR/lib/mke2fs.conf"
aux prepare_vg 1 9000
lvcreate --vdo -L4G -V2G --name $lv1 $vg/vpool
lvcreate --vdo -L4G -V2G --name $lv1 $vg/vpool1
# Test caching VDOPoolLV
lvcreate -H -L10 $vg/vpool
lvcreate -H -L10 $vg/vpool1
# The current VDO target driver cannot handle online rename;
# once it is supported, update this test
not lvrename $vg/vpool1 $vg/vpool 2>&1 | tee out
grep "Cannot rename" out
lvchange -an $vg
# Offline rename should work
lvrename $vg/vpool1 $vg/vpool
lvchange -ay $vg
mkfs.ext4 -E nodiscard "$DM_DEV_DIR/$vg/$lv1"

Some files were not shown because too many files have changed in this diff