1
0
mirror of git://sourceware.org/git/lvm2.git synced 2026-01-28 12:32:45 +03:00

Compare commits

..

1 Commits

Author SHA1 Message Date
David Teigland
580039b686 cache: repair and writeback for cachevol
For a dm-cache LV with an attached cachevol using writeback,
'lvconvert --repair LV' will:

. detach the cachevol
. run cache_repair from the cachevol to a temp LV (or file)
. run cache_writeback to copy blocks from the cachevol
  back to the original LV, using the repaired metadata
2021-04-19 16:52:29 -05:00
121 changed files with 1059 additions and 4894 deletions

View File

@@ -1 +1 @@
2.03.13(2)-git (2021-05-07)
2.03.12(2)-git (2021-01-08)

View File

@@ -1 +1 @@
1.02.179-git (2021-05-07)
1.02.177-git (2021-01-08)

View File

@@ -1,18 +1,9 @@
Version 2.03.13 -
===============================
Simplified handling of archive() and backup() internal calls.
Fix load of kvdo target when it is not present in memory (2.03.12).
Version 2.03.12 - 07th May 2021
===============================
Allow attaching cache to thin data volume.
Fix memleak when generating list of outdated pvs.
Version 2.03.12 -
===================================
Better hyphenation usage in man pages.
Replace use of deprecated security_context_t with char*.
Configure supports AIO_LIBS and AIO_CFLAGS.
Improve build process for static builds.
New --setautoactivation option to modify LV or VG auto activation.
New metadata based autoactivation property for LVs and VGs.
Improve signal handling with lvmpolld.
Signal handler can interrupt command also for SIGTERM.
Lvreduce --yes support.
@@ -28,7 +19,6 @@ Version 2.03.12 - 07th May 2021
Merge polling does not fail, when LV is found to be already merged.
Poll volumes with at least 100ms delays.
Do not flush dm cache when cached LV is going to be removed.
New lvmlockctl_kill_command configuration option.
Support interruption while waiting on device close before deactivation.
Flush thin-pool messages before removing more thin volumes.
Improve hash function with less collisions and make it faster.
@@ -42,7 +32,6 @@ Version 2.03.12 - 07th May 2021
Add devices file feature, off by default for now.
Support extension of writecached volumes.
Fix problem with unbound variable usage within fsadm.
Fix IMSM MD RAID detection on 4k devices.
Check for presence of VDO target before starting any conversion.
Support metadata profiles with volume VDO pool conversions.
Support -Zn for conversion of already formatted VDO pools.
@@ -59,11 +48,11 @@ Version 2.03.12 - 07th May 2021
Allocation prints better error when metadata cannot fit on a single PV.
Pvmove can better resolve full thin-pool tree move.
Limit pool metadata spare to 16GiB.
Improves conversion and allocation of pool metadata.
Improves convertsion and allocation of pool metadata.
Support thin pool metadata 15.88GiB, adds 64MiB, thin_pool_crop_metadata=0.
Enhance lvdisplay to report raid available/partial.
Enhance lvdisplay to report raid availiable/partial.
Support online rename of VDO pools.
Improve removal of pmspare when last pool is removed.
Imporove removal of pmspare when last pool is removed.
Fix problem with wiping of converted LVs.
Fix memleak in scanning (2.03.11).
Fix corner case allocation for thin-pools.
@@ -266,6 +255,7 @@ Version 2.03.00 - 10th October 2018
Remove clvmd
Remove lvmlib (api)
Remove lvmetad
lvconvert: provide possible layouts between linear and striped/raid
Use versionsort to fix archive file expiry beyond 100000 files.
Version 2.02.178-rc1 - 24th May 2018

View File

@@ -1,8 +1,5 @@
Version 1.02.179 -
================================
Version 1.02.177 - 07th May 2021
================================
Version 1.02.177 -
====================================
Configure proceeds without libaio to allow build of device-mapper only.
Fix symbol versioning build with -O2 -flto.
Add dm_tree_node_add_thin_pool_target_v1 with crop_metadata support.

View File

@@ -1084,14 +1084,13 @@ global {
# Configuration option global/event_activation.
# Activate LVs based on system-generated device events.
# When a PV appears on the system, a system-generated uevent triggers
# the lvm2-pvscan service which runs the pvscan --cache -aay command.
# If the new PV completes a VG, pvscan autoactivates LVs in the VG.
# When event_activation is disabled, the lvm2-activation services are
# generated and run at fixed points during system startup. These
# services run vgchange -aay to autoactivate LVs in VGs that happen
# to be present at that point in time.
# See the --setautoactivation option or the auto_activation_volume_list
# When a device appears on the system, a system-generated event runs
# the pvscan command to activate LVs if the new PV completes the VG.
# When event_activation is disabled, the system will generally run
# a direct activation command to activate LVs in complete VGs.
# Activation commands that are run by the system, either from events
# or at fixed points during startup, use autoactivation (-aay). See
# the --setautoactivation option or the auto_activation_volume_list
# setting to configure autoactivation for specific VGs or LVs.
# This configuration option has an automatic default value.
# event_activation = 1

173
configure vendored
View File

@@ -747,7 +747,6 @@ BUILD_DMFILEMAPD
BUILD_LOCKDDLM_CONTROL
BUILD_LOCKDDLM
BUILD_LOCKDSANLOCK
BUILD_LOCKDIDM
BUILD_LVMLOCKD
BUILD_LVMPOLLD
BUILD_LVMDBUSD
@@ -783,8 +782,6 @@ LOCKD_DLM_LIBS
LOCKD_DLM_CFLAGS
LOCKD_SANLOCK_LIBS
LOCKD_SANLOCK_CFLAGS
LOCKD_IDM_LIBS
LOCKD_IDM_CFLAGS
VALGRIND_LIBS
VALGRIND_CFLAGS
GENPNG
@@ -949,7 +946,6 @@ enable_lvmpolld
enable_lvmlockd_sanlock
enable_lvmlockd_dlm
enable_lvmlockd_dlmcontrol
enable_lvmlockd_idm
enable_use_lvmlockd
with_lvmlockd_pidfile
enable_use_lvmpolld
@@ -1023,8 +1019,6 @@ LOCKD_DLM_CFLAGS
LOCKD_DLM_LIBS
LOCKD_DLM_CONTROL_CFLAGS
LOCKD_DLM_CONTROL_LIBS
LOCKD_IDM_CFLAGS
LOCKD_IDM_LIBS
NOTIFY_DBUS_CFLAGS
NOTIFY_DBUS_LIBS
BLKID_CFLAGS
@@ -1684,7 +1678,6 @@ Optional Features:
--enable-lvmlockd-dlm enable the LVM lock daemon using dlm
--enable-lvmlockd-dlmcontrol
enable lvmlockd remote refresh using libdlmcontrol
--enable-lvmlockd-idm enable the LVM lock daemon using idm
--disable-use-lvmlockd disable usage of LVM lock daemon
--disable-use-lvmpolld disable usage of LVM Poll Daemon
--enable-dmfilemapd enable the dmstats filemap daemon
@@ -1839,10 +1832,6 @@ Some influential environment variables:
C compiler flags for LOCKD_DLM_CONTROL, overriding pkg-config
LOCKD_DLM_CONTROL_LIBS
linker flags for LOCKD_DLM_CONTROL, overriding pkg-config
LOCKD_IDM_CFLAGS
C compiler flags for LOCKD_IDM, overriding pkg-config
LOCKD_IDM_LIBS
linker flags for LOCKD_IDM, overriding pkg-config
NOTIFY_DBUS_CFLAGS
C compiler flags for NOTIFY_DBUS, overriding pkg-config
NOTIFY_DBUS_LIBS
@@ -3135,7 +3124,6 @@ case "$host_os" in
LOCKDSANLOCK=no
LOCKDDLM=no
LOCKDDLM_CONTROL=no
LOCKDIDM=no
ODIRECT=yes
DM_IOCTLS=yes
SELINUX=yes
@@ -11203,167 +11191,6 @@ $as_echo "#define LOCKDDLM_CONTROL_SUPPORT 1" >>confdefs.h
BUILD_LVMLOCKD=yes
fi
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build lvmlockdidm" >&5
$as_echo_n "checking whether to build lvmlockdidm... " >&6; }
# Check whether --enable-lvmlockd-idm was given.
if test "${enable_lvmlockd_idm+set}" = set; then :
enableval=$enable_lvmlockd_idm; LOCKDIDM=$enableval
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LOCKDIDM" >&5
$as_echo "$LOCKDIDM" >&6; }
BUILD_LOCKDIDM=$LOCKDIDM
if test "$BUILD_LOCKDIDM" = yes; then
pkg_failed=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LOCKD_IDM" >&5
$as_echo_n "checking for LOCKD_IDM... " >&6; }
if test -n "$LOCKD_IDM_CFLAGS"; then
pkg_cv_LOCKD_IDM_CFLAGS="$LOCKD_IDM_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libseagate_ilm >= 0.1.0\""; } >&5
($PKG_CONFIG --exists --print-errors "libseagate_ilm >= 0.1.0") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_LOCKD_IDM_CFLAGS=`$PKG_CONFIG --cflags "libseagate_ilm >= 0.1.0" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test -n "$LOCKD_IDM_LIBS"; then
pkg_cv_LOCKD_IDM_LIBS="$LOCKD_IDM_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libseagate_ilm >= 0.1.0\""; } >&5
($PKG_CONFIG --exists --print-errors "libseagate_ilm >= 0.1.0") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_LOCKD_IDM_LIBS=`$PKG_CONFIG --libs "libseagate_ilm >= 0.1.0" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test $pkg_failed = yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
_pkg_short_errors_supported=yes
else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
LOCKD_IDM_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libseagate_ilm >= 0.1.0" 2>&1`
else
LOCKD_IDM_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libseagate_ilm >= 0.1.0" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$LOCKD_IDM_PKG_ERRORS" >&5
$bailout
elif test $pkg_failed = untried; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
$bailout
else
LOCKD_IDM_CFLAGS=$pkg_cv_LOCKD_IDM_CFLAGS
LOCKD_IDM_LIBS=$pkg_cv_LOCKD_IDM_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
fi
pkg_failed=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BLKID" >&5
$as_echo_n "checking for BLKID... " >&6; }
if test -n "$BLKID_CFLAGS"; then
pkg_cv_BLKID_CFLAGS="$BLKID_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"blkid >= 2.24\""; } >&5
($PKG_CONFIG --exists --print-errors "blkid >= 2.24") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_BLKID_CFLAGS=`$PKG_CONFIG --cflags "blkid >= 2.24" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test -n "$BLKID_LIBS"; then
pkg_cv_BLKID_LIBS="$BLKID_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"blkid >= 2.24\""; } >&5
($PKG_CONFIG --exists --print-errors "blkid >= 2.24") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_BLKID_LIBS=`$PKG_CONFIG --libs "blkid >= 2.24" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test $pkg_failed = yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
_pkg_short_errors_supported=yes
else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
BLKID_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "blkid >= 2.24" 2>&1`
else
BLKID_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "blkid >= 2.24" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$BLKID_PKG_ERRORS" >&5
$bailout
elif test $pkg_failed = untried; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
$bailout
else
BLKID_CFLAGS=$pkg_cv_BLKID_CFLAGS
BLKID_LIBS=$pkg_cv_BLKID_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
HAVE_LOCKD_IDM=yes
fi
$as_echo "#define LOCKDIDM_SUPPORT 1" >>confdefs.h
BUILD_LVMLOCKD=yes
fi
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build lvmlockd" >&5
$as_echo_n "checking whether to build lvmlockd... " >&6; }

View File

@@ -41,7 +41,6 @@ case "$host_os" in
LOCKDSANLOCK=no
LOCKDDLM=no
LOCKDDLM_CONTROL=no
LOCKDIDM=no
ODIRECT=yes
DM_IOCTLS=yes
SELINUX=yes
@@ -990,25 +989,6 @@ if test "$BUILD_LOCKDDLM_CONTROL" = yes; then
BUILD_LVMLOCKD=yes
fi
################################################################################
dnl -- Build lvmlockdidm
AC_MSG_CHECKING(whether to build lvmlockdidm)
AC_ARG_ENABLE(lvmlockd-idm,
AC_HELP_STRING([--enable-lvmlockd-idm],
[enable the LVM lock daemon using idm]),
LOCKDIDM=$enableval)
AC_MSG_RESULT($LOCKDIDM)
BUILD_LOCKDIDM=$LOCKDIDM
dnl -- Look for Seagate IDM libraries
if test "$BUILD_LOCKDIDM" = yes; then
PKG_CHECK_MODULES(LOCKD_IDM, libseagate_ilm >= 0.1.0, [HAVE_LOCKD_IDM=yes], $bailout)
PKG_CHECK_MODULES(BLKID, blkid >= 2.24, [HAVE_LOCKD_IDM=yes], $bailout)
AC_DEFINE([LOCKDIDM_SUPPORT], 1, [Define to 1 to include code that uses lvmlockd IDM option.])
BUILD_LVMLOCKD=yes
fi
################################################################################
dnl -- Build lvmlockd
AC_MSG_CHECKING(whether to build lvmlockd)

View File

@@ -1742,8 +1742,7 @@ static void _init_thread_signals(void)
sigset_t my_sigset;
struct sigaction act = { .sa_handler = _sig_alarm };
if (sigaction(SIGALRM, &act, NULL))
log_sys_debug("sigaction", "SIGLARM");
sigaction(SIGALRM, &act, NULL);
sigfillset(&my_sigset);
/* These are used for exiting */

View File

@@ -30,11 +30,6 @@ ifeq ("@BUILD_LOCKDDLM@", "yes")
LOCK_LIBS += -ldlmcontrol
endif
ifeq ("@BUILD_LOCKDIDM@", "yes")
SOURCES += lvmlockd-idm.c
LOCK_LIBS += -lseagate_ilm -lblkid
endif
SOURCES2 = lvmlockctl.c
TARGETS = lvmlockd lvmlockctl

View File

@@ -987,22 +987,18 @@ static int read_options(int argc, char *argv[])
break;
case 'k':
kill_vg = 1;
free(arg_vg_name);
arg_vg_name = strdup(optarg);
break;
case 'r':
drop_vg = 1;
free(arg_vg_name);
arg_vg_name = strdup(optarg);
break;
case 'E':
gl_enable = 1;
free(arg_vg_name);
arg_vg_name = strdup(optarg);
break;
case 'D':
gl_disable = 1;
free(arg_vg_name);
arg_vg_name = strdup(optarg);
break;
case 'S':

View File

@@ -421,63 +421,6 @@ struct lockspace *alloc_lockspace(void)
return ls;
}
/*
 * Allocate a zeroed array of num path pointers and store it in pvs->path.
 *
 * Returns the allocated array, or NULL when num is zero/negative or the
 * allocation fails.  The caller cannot distinguish "num == 0" from an
 * allocation failure by the return value alone; on failure pvs->path is
 * set to NULL.  Entries are released with free_pvs_path().
 */
static char **alloc_pvs_path(struct pvs *pvs, int num)
{
	/* Reject non-positive counts; a negative num would wrap to a huge
	 * size_t in the allocation below. */
	if (num <= 0)
		return NULL;

	/* calloc zero-initializes the array and checks num * size for
	 * overflow, replacing the original malloc + memset pair. */
	pvs->path = calloc(num, sizeof(char *));

	return pvs->path;
}
/*
 * Free every strdup'd entry in pvs->path (first pvs->num slots) and then
 * the pointer array itself, leaving pvs->path NULL.
 *
 * Fixes two defects in the original:
 *  - the final check was inverted ("if (!pvs->path) free(pvs->path)"),
 *    so the array was only "freed" when it was already NULL (a no-op)
 *    and leaked whenever it was non-NULL;
 *  - the entry loop dereferenced pvs->path before any NULL check, which
 *    could crash if called with pvs->num > 0 but no array allocated.
 */
static void free_pvs_path(struct pvs *pvs)
{
	int i;

	if (!pvs->path)
		return;

	for (i = 0; i < pvs->num; i++) {
		/* free(NULL) is a no-op, so no per-entry NULL guard needed. */
		free((char *)pvs->path[i]);
		pvs->path[i] = NULL;
	}

	free(pvs->path);
	pvs->path = NULL;
}
/*
 * Duplicate the usable PV paths from src into dst.
 *
 * Entries that are NULL or the literal string "none" are skipped, so
 * dst->num may end up smaller than src->num.  On success returns
 * dst->path with dst->num set to the number of copied entries; on
 * allocation failure frees anything copied so far and returns NULL.
 */
static char **alloc_and_copy_pvs_path(struct pvs *dst, struct pvs *src)
{
	int copied = 0;
	int idx;

	if (!alloc_pvs_path(dst, src->num))
		return NULL;

	for (idx = 0; idx < src->num; idx++) {
		const char *p = src->path[idx];

		/* Skip empty slots and the "none" placeholder. */
		if (!p || !strcmp(p, "none"))
			continue;

		if (!(dst->path[copied] = strdup(p))) {
			/* Record how many entries were copied so the
			 * cleanup only walks valid slots. */
			dst->num = copied;
			log_error("out of memory for copying pvs path");
			free_pvs_path(dst);
			return NULL;
		}
		copied++;
	}

	dst->num = copied;
	return dst->path;
}
static struct action *alloc_action(void)
{
struct action *act;
@@ -567,9 +510,6 @@ static void free_action(struct action *act)
free(act->path);
act->path = NULL;
}
free_pvs_path(&act->pvs);
pthread_mutex_lock(&unused_struct_mutex);
if (unused_action_count >= MAX_UNUSED_ACTION) {
free(act);
@@ -624,12 +564,9 @@ static int setup_structs(void)
struct lock *lk;
int data_san = lm_data_size_sanlock();
int data_dlm = lm_data_size_dlm();
int data_idm = lm_data_size_idm();
int i;
resource_lm_data_size = data_san > data_dlm ? data_san : data_dlm;
resource_lm_data_size = resource_lm_data_size > data_idm ?
resource_lm_data_size : data_idm;
pthread_mutex_init(&unused_struct_mutex, NULL);
INIT_LIST_HEAD(&unused_action);
@@ -746,8 +683,6 @@ static const char *lm_str(int x)
return "dlm";
case LD_LM_SANLOCK:
return "sanlock";
case LD_LM_IDM:
return "idm";
default:
return "lm_unknown";
}
@@ -1033,8 +968,6 @@ static int lm_prepare_lockspace(struct lockspace *ls, struct action *act)
rv = lm_prepare_lockspace_dlm(ls);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_prepare_lockspace_sanlock(ls);
else if (ls->lm_type == LD_LM_IDM)
rv = lm_prepare_lockspace_idm(ls);
else
return -1;
@@ -1051,8 +984,6 @@ static int lm_add_lockspace(struct lockspace *ls, struct action *act, int adopt)
rv = lm_add_lockspace_dlm(ls, adopt);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_add_lockspace_sanlock(ls, adopt);
else if (ls->lm_type == LD_LM_IDM)
rv = lm_add_lockspace_idm(ls, adopt);
else
return -1;
@@ -1069,8 +1000,6 @@ static int lm_rem_lockspace(struct lockspace *ls, struct action *act, int free_v
rv = lm_rem_lockspace_dlm(ls, free_vg);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_rem_lockspace_sanlock(ls, free_vg);
else if (ls->lm_type == LD_LM_IDM)
rv = lm_rem_lockspace_idm(ls, free_vg);
else
return -1;
@@ -1088,9 +1017,6 @@ static int lm_lock(struct lockspace *ls, struct resource *r, int mode, struct ac
rv = lm_lock_dlm(ls, r, mode, vb_out, adopt);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_lock_sanlock(ls, r, mode, vb_out, retry, adopt);
else if (ls->lm_type == LD_LM_IDM)
rv = lm_lock_idm(ls, r, mode, vb_out, act->lv_uuid,
&act->pvs, adopt);
else
return -1;
@@ -1108,8 +1034,6 @@ static int lm_convert(struct lockspace *ls, struct resource *r,
rv = lm_convert_dlm(ls, r, mode, r_version);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_convert_sanlock(ls, r, mode, r_version);
else if (ls->lm_type == LD_LM_IDM)
rv = lm_convert_idm(ls, r, mode, r_version);
else
return -1;
@@ -1127,8 +1051,6 @@ static int lm_unlock(struct lockspace *ls, struct resource *r, struct action *ac
rv = lm_unlock_dlm(ls, r, r_version, lmu_flags);
else if (ls->lm_type == LD_LM_SANLOCK)
rv = lm_unlock_sanlock(ls, r, r_version, lmu_flags);
else if (ls->lm_type == LD_LM_IDM)
rv = lm_unlock_idm(ls, r, r_version, lmu_flags);
else
return -1;
@@ -1143,8 +1065,6 @@ static int lm_hosts(struct lockspace *ls, int notify)
return lm_hosts_dlm(ls, notify);
else if (ls->lm_type == LD_LM_SANLOCK)
return lm_hosts_sanlock(ls, notify);
else if (ls->lm_type == LD_LM_IDM)
return lm_hosts_idm(ls, notify);
return -1;
}
@@ -1154,8 +1074,6 @@ static void lm_rem_resource(struct lockspace *ls, struct resource *r)
lm_rem_resource_dlm(ls, r);
else if (ls->lm_type == LD_LM_SANLOCK)
lm_rem_resource_sanlock(ls, r);
else if (ls->lm_type == LD_LM_IDM)
lm_rem_resource_idm(ls, r);
}
static int lm_find_free_lock(struct lockspace *ls, uint64_t *free_offset, int *sector_size, int *align_size)
@@ -1164,8 +1082,6 @@ static int lm_find_free_lock(struct lockspace *ls, uint64_t *free_offset, int *s
return 0;
else if (ls->lm_type == LD_LM_SANLOCK)
return lm_find_free_lock_sanlock(ls, free_offset, sector_size, align_size);
else if (ls->lm_type == LD_LM_IDM)
return 0;
return -1;
}
@@ -1774,8 +1690,8 @@ static int res_update(struct lockspace *ls, struct resource *r,
}
/*
* For DLM and IDM locking scheme, there is nothing to deallocate when freeing a
* LV, the LV will simply be unlocked by rem_resource.
* There is nothing to deallocate when freeing a dlm LV, the LV
* will simply be unlocked by rem_resource.
*/
static int free_lv(struct lockspace *ls, struct resource *r)
@@ -1784,8 +1700,6 @@ static int free_lv(struct lockspace *ls, struct resource *r)
return lm_free_lv_sanlock(ls, r);
else if (ls->lm_type == LD_LM_DLM)
return 0;
else if (ls->lm_type == LD_LM_IDM)
return 0;
else
return -EINVAL;
}
@@ -1886,7 +1800,9 @@ static void res_process(struct lockspace *ls, struct resource *r,
act->result = -EINVAL;
list_del(&act->list);
add_client_result(act);
} else if (act->op == LD_OP_LOCK && act->mode == LD_LK_UN) {
}
if (act->op == LD_OP_LOCK && act->mode == LD_LK_UN) {
rv = res_unlock(ls, r, act);
if (rv == -ENOENT && (act->flags & LD_AF_UNLOCK_CANCEL))
@@ -2844,8 +2760,6 @@ out_act:
ls->drop_vg = drop_vg;
if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm))
global_dlm_lockspace_exists = 0;
if (ls->lm_type == LD_LM_IDM && !strcmp(ls->name, gl_lsname_idm))
global_idm_lockspace_exists = 0;
/*
* Avoid a name collision of the same lockspace is added again before
@@ -2937,8 +2851,6 @@ static void gl_ls_name(char *ls_name)
memcpy(ls_name, gl_lsname_dlm, MAX_NAME);
else if (gl_use_sanlock)
memcpy(ls_name, gl_lsname_sanlock, MAX_NAME);
else if (gl_use_idm)
memcpy(ls_name, gl_lsname_idm, MAX_NAME);
else
memset(ls_name, 0, MAX_NAME);
}
@@ -2967,20 +2879,9 @@ static int add_lockspace_thread(const char *ls_name,
strncpy(ls->name, ls_name, MAX_NAME);
ls->lm_type = lm_type;
if (act) {
if (act)
ls->start_client_id = act->client_id;
/*
* Copy PV list to lockspact structure, so this is
* used for VG locking for idm scheme.
*/
if (lm_type == LD_LM_IDM &&
!alloc_and_copy_pvs_path(&ls->pvs, &act->pvs)) {
free(ls);
return -ENOMEM;
}
}
if (vg_uuid)
strncpy(ls->vg_uuid, vg_uuid, 64);
@@ -3007,18 +2908,6 @@ static int add_lockspace_thread(const char *ls_name,
pthread_mutex_lock(&lockspaces_mutex);
ls2 = find_lockspace_name(ls->name);
if (ls2) {
/*
* If find an existed lockspace, we need to update the PV list
* based on the latest information, and release for the old
* PV list in case it keeps stale information.
*/
free_pvs_path(&ls2->pvs);
if (lm_type == LD_LM_IDM &&
!alloc_and_copy_pvs_path(&ls2->pvs, &ls->pvs)) {
log_debug("add_lockspace_thread %s fails to allocate pvs", ls->name);
rv = -ENOMEM;
}
if (ls2->thread_stop) {
log_debug("add_lockspace_thread %s exists and stopping", ls->name);
rv = -EAGAIN;
@@ -3031,7 +2920,6 @@ static int add_lockspace_thread(const char *ls_name,
}
pthread_mutex_unlock(&lockspaces_mutex);
free_resource(r);
free_pvs_path(&ls->pvs);
free(ls);
return rv;
}
@@ -3045,8 +2933,6 @@ static int add_lockspace_thread(const char *ls_name,
if (ls->lm_type == LD_LM_DLM && !strcmp(ls->name, gl_lsname_dlm))
global_dlm_lockspace_exists = 1;
if (ls->lm_type == LD_LM_IDM && !strcmp(ls->name, gl_lsname_idm))
global_idm_lockspace_exists = 1;
list_add_tail(&ls->list, &lockspaces);
pthread_mutex_unlock(&lockspaces_mutex);
@@ -3057,7 +2943,6 @@ static int add_lockspace_thread(const char *ls_name,
list_del(&ls->list);
pthread_mutex_unlock(&lockspaces_mutex);
free_resource(r);
free_pvs_path(&ls->pvs);
free(ls);
return rv;
}
@@ -3066,15 +2951,16 @@ static int add_lockspace_thread(const char *ls_name,
}
/*
* There is no variant for sanlock because, with sanlock, the global
* lockspace is one of the vg lockspaces.
* There is no add_sanlock_global_lockspace or
* rem_sanlock_global_lockspace because with sanlock,
* the global lockspace is one of the vg lockspaces.
*/
static int add_global_lockspace(char *ls_name, int lm_type,
struct action *act)
static int add_dlm_global_lockspace(struct action *act)
{
int rv;
if (global_dlm_lockspace_exists || global_idm_lockspace_exists)
if (global_dlm_lockspace_exists)
return 0;
/*
@@ -3082,9 +2968,9 @@ static int add_global_lockspace(char *ls_name, int lm_type,
* lock request, insert an internal gl sh lock request?
*/
rv = add_lockspace_thread(ls_name, NULL, NULL, lm_type, NULL, act);
rv = add_lockspace_thread(gl_lsname_dlm, NULL, NULL, LD_LM_DLM, NULL, act);
if (rv < 0)
log_debug("add_global_lockspace add_lockspace_thread %d", rv);
log_debug("add_dlm_global_lockspace add_lockspace_thread %d", rv);
/*
* EAGAIN may be returned for a short period because
@@ -3097,12 +2983,12 @@ static int add_global_lockspace(char *ls_name, int lm_type,
}
/*
* When DLM or IDM locking scheme is used for global lock, if the global
* lockspace is the only one left, then stop it. This is not used for
* an explicit rem_lockspace action from the client, only for auto
* remove.
* If dlm gl lockspace is the only one left, then stop it.
* This is not used for an explicit rem_lockspace action from
* the client, only for auto remove.
*/
static int rem_global_lockspace(char *ls_name)
static int rem_dlm_global_lockspace(void)
{
struct lockspace *ls, *ls_gl = NULL;
int others = 0;
@@ -3110,7 +2996,7 @@ static int rem_global_lockspace(char *ls_name)
pthread_mutex_lock(&lockspaces_mutex);
list_for_each_entry(ls, &lockspaces, list) {
if (!strcmp(ls->name, ls_name)) {
if (!strcmp(ls->name, gl_lsname_dlm)) {
ls_gl = ls;
continue;
}
@@ -3142,26 +3028,6 @@ out:
return rv;
}
static int add_dlm_global_lockspace(struct action *act)
{
return add_global_lockspace(gl_lsname_dlm, LD_LM_DLM, act);
}
static int rem_dlm_global_lockspace(void)
{
return rem_global_lockspace(gl_lsname_dlm);
}
static int add_idm_global_lockspace(struct action *act)
{
return add_global_lockspace(gl_lsname_idm, LD_LM_IDM, act);
}
static int rem_idm_global_lockspace(void)
{
return rem_global_lockspace(gl_lsname_idm);
}
/*
* When the first dlm lockspace is added for a vg, automatically add a separate
* dlm lockspace for the global lock.
@@ -3187,9 +3053,6 @@ static int add_lockspace(struct action *act)
if (gl_use_dlm) {
rv = add_dlm_global_lockspace(act);
return rv;
} else if (gl_use_idm) {
rv = add_idm_global_lockspace(act);
return rv;
} else {
return -EINVAL;
}
@@ -3198,8 +3061,6 @@ static int add_lockspace(struct action *act)
if (act->rt == LD_RT_VG) {
if (gl_use_dlm)
add_dlm_global_lockspace(NULL);
else if (gl_use_idm)
add_idm_global_lockspace(NULL);
vg_ls_name(act->vg_name, ls_name);
@@ -3267,15 +3128,14 @@ static int rem_lockspace(struct action *act)
pthread_mutex_unlock(&lockspaces_mutex);
/*
* For DLM and IDM locking scheme, the global lockspace was
* automatically added when the first vg lockspace was added,
* now reverse that by automatically removing the dlm global
* lockspace when the last vg lockspace is removed.
* The dlm global lockspace was automatically added when
* the first dlm vg lockspace was added, now reverse that
* by automatically removing the dlm global lockspace when
* the last dlm vg lockspace is removed.
*/
if (rt == LD_RT_VG && gl_use_dlm)
rem_dlm_global_lockspace();
else if (rt == LD_RT_VG && gl_use_idm)
rem_idm_global_lockspace();
return 0;
}
@@ -3399,7 +3259,6 @@ static int for_each_lockspace(int do_stop, int do_free, int do_force)
if (ls->free_vg) {
/* In future we may need to free ls->actions here */
free_ls_resources(ls);
free_pvs_path(&ls->pvs);
free(ls);
free_count++;
}
@@ -3413,7 +3272,6 @@ static int for_each_lockspace(int do_stop, int do_free, int do_force)
if (!gl_type_static) {
gl_use_dlm = 0;
gl_use_sanlock = 0;
gl_use_idm = 0;
}
}
pthread_mutex_unlock(&lockspaces_mutex);
@@ -3489,9 +3347,6 @@ static int work_init_vg(struct action *act)
rv = lm_init_vg_sanlock(ls_name, act->vg_name, act->flags, act->vg_args);
else if (act->lm_type == LD_LM_DLM)
rv = lm_init_vg_dlm(ls_name, act->vg_name, act->flags, act->vg_args);
else if (act->lm_type == LD_LM_IDM)
/* Non't do anything for IDM when initialize VG */
rv = 0;
else
rv = -EINVAL;
@@ -3595,8 +3450,6 @@ static int work_init_lv(struct action *act)
} else if (act->lm_type == LD_LM_DLM) {
return 0;
} else if (act->lm_type == LD_LM_IDM) {
return 0;
} else {
log_error("init_lv ls_name %s bad lm_type %d", ls_name, act->lm_type);
return -EINVAL;
@@ -3660,29 +3513,20 @@ static void *worker_thread_main(void *arg_in)
if (act->op == LD_OP_RUNNING_LM) {
int run_sanlock = lm_is_running_sanlock();
int run_dlm = lm_is_running_dlm();
int run_idm = lm_is_running_idm();
if (daemon_test) {
run_sanlock = gl_use_sanlock;
run_dlm = gl_use_dlm;
run_idm = gl_use_idm;
}
/*
* It's not possible to enable multiple locking schemes
* for global lock, otherwise, it must be conflict and
* reports it!
*/
if ((run_sanlock + run_dlm + run_idm) >= 2)
if (run_sanlock && run_dlm)
act->result = -EXFULL;
else if (!run_sanlock && !run_dlm && !run_idm)
else if (!run_sanlock && !run_dlm)
act->result = -ENOLCK;
else if (run_sanlock)
act->result = LD_LM_SANLOCK;
else if (run_dlm)
act->result = LD_LM_DLM;
else if (run_idm)
act->result = LD_LM_IDM;
add_client_result(act);
} else if ((act->op == LD_OP_LOCK) && (act->flags & LD_AF_SEARCH_LS)) {
@@ -3970,9 +3814,6 @@ static int client_send_result(struct client *cl, struct action *act)
} else if (gl_use_dlm) {
if (!gl_lsname_dlm[0])
strcat(result_flags, "NO_GL_LS,");
} else if (gl_use_idm) {
if (!gl_lsname_idm[0])
strcat(result_flags, "NO_GL_LS,");
} else {
int found_lm = 0;
@@ -3980,8 +3821,6 @@ static int client_send_result(struct client *cl, struct action *act)
found_lm++;
if (lm_support_sanlock() && lm_is_running_sanlock())
found_lm++;
if (lm_support_idm() && lm_is_running_idm())
found_lm++;
if (!found_lm)
strcat(result_flags, "NO_GL_LS,NO_LM");
@@ -4157,13 +3996,11 @@ static int add_lock_action(struct action *act)
if (gl_use_sanlock && (act->op == LD_OP_ENABLE || act->op == LD_OP_DISABLE)) {
vg_ls_name(act->vg_name, ls_name);
} else {
if (!gl_use_dlm && !gl_use_sanlock && !gl_use_idm) {
if (!gl_use_dlm && !gl_use_sanlock) {
if (lm_is_running_dlm())
gl_use_dlm = 1;
else if (lm_is_running_sanlock())
gl_use_sanlock = 1;
else if (lm_is_running_idm())
gl_use_idm = 1;
}
gl_ls_name(ls_name);
}
@@ -4211,17 +4048,6 @@ static int add_lock_action(struct action *act)
add_dlm_global_lockspace(NULL);
goto retry;
} else if (act->op == LD_OP_LOCK && act->rt == LD_RT_GL && act->mode != LD_LK_UN && gl_use_idm) {
/*
* Automatically start the idm global lockspace when
* a command tries to acquire the global lock.
*/
log_debug("lockspace \"%s\" not found for idm gl, adding...", ls_name);
act->flags |= LD_AF_SEARCH_LS;
act->flags |= LD_AF_WAIT_STARTING;
add_idm_global_lockspace(NULL);
goto retry;
} else if (act->op == LD_OP_LOCK && act->mode == LD_LK_UN) {
log_debug("lockspace \"%s\" not found for unlock ignored", ls_name);
return -ENOLS;
@@ -4442,8 +4268,6 @@ static int str_to_lm(const char *str)
return LD_LM_SANLOCK;
if (!strcmp(str, "dlm"))
return LD_LM_DLM;
if (!strcmp(str, "idm"))
return LD_LM_IDM;
return -2;
}
@@ -4779,14 +4603,12 @@ static void client_recv_action(struct client *cl)
const char *vg_sysid;
const char *path;
const char *str;
struct pvs pvs;
char buf[17]; /* "path[%d]\0", %d outputs signed integer so max to 10 bytes */
int64_t val;
uint32_t opts = 0;
int result = 0;
int cl_pid;
int op, rt, lm, mode;
int rv, i;
int rv;
buffer_init(&req.buffer);
@@ -4875,13 +4697,11 @@ static void client_recv_action(struct client *cl)
if (!cl->name[0] && cl_name)
strncpy(cl->name, cl_name, MAX_NAME);
if (!gl_use_dlm && !gl_use_sanlock && !gl_use_idm && (lm > 0)) {
if (!gl_use_dlm && !gl_use_sanlock && (lm > 0)) {
if (lm == LD_LM_DLM && lm_support_dlm())
gl_use_dlm = 1;
else if (lm == LD_LM_SANLOCK && lm_support_sanlock())
gl_use_sanlock = 1;
else if (lm == LD_LM_IDM && lm_support_idm())
gl_use_idm = 1;
log_debug("set gl_use_%s", lm_str(lm));
}
@@ -4938,40 +4758,6 @@ static void client_recv_action(struct client *cl)
if (val)
act->host_id = val;
/* Create PV list for idm */
if (lm == LD_LM_IDM) {
memset(&pvs, 0x0, sizeof(pvs));
pvs.num = daemon_request_int(req, "path_num", 0);
log_error("pvs_num = %d", pvs.num);
if (!pvs.num)
goto skip_pvs_path;
/* Receive the pv list which is transferred from LVM command */
if (!alloc_pvs_path(&pvs, pvs.num)) {
log_error("fail to allocate pvs path");
rv = -ENOMEM;
goto out;
}
for (i = 0; i < pvs.num; i++) {
snprintf(buf, sizeof(buf), "path[%d]", i);
pvs.path[i] = (char *)daemon_request_str(req, buf, NULL);
}
if (!alloc_and_copy_pvs_path(&act->pvs, &pvs)) {
log_error("fail to allocate pvs path");
rv = -ENOMEM;
goto out;
}
if (pvs.path)
free(pvs.path);
pvs.path = NULL;
}
skip_pvs_path:
act->max_retries = daemon_request_int(req, "max_retries", DEFAULT_MAX_RETRIES);
dm_config_destroy(req.cft);
@@ -4993,12 +4779,6 @@ skip_pvs_path:
goto out;
}
if (lm == LD_LM_IDM && !lm_support_idm()) {
log_debug("idm not supported");
rv = -EPROTONOSUPPORT;
goto out;
}
if (act->op == LD_OP_LOCK && act->mode != LD_LK_UN)
cl->lock_ops = 1;
@@ -5188,7 +4968,7 @@ static void *client_thread_main(void *arg_in)
}
out:
if (adopt_opt && lock_acquire_written)
(void) unlink(adopt_file);
unlink(adopt_file);
return NULL;
}
@@ -5597,7 +5377,6 @@ static void adopt_locks(void)
}
list_del(&ls->list);
free_pvs_path(&ls->pvs);
free(ls);
}
@@ -5638,7 +5417,6 @@ static void adopt_locks(void)
if (rv < 0) {
log_error("Failed to create lockspace thread for VG %s", ls->vg_name);
list_del(&ls->list);
free_pvs_path(&ls->pvs);
free(ls);
free_action(act);
count_start_fail++;
@@ -5962,13 +5740,13 @@ static void adopt_locks(void)
if (count_start_fail || count_adopt_fail)
goto fail;
(void) unlink(adopt_file);
unlink(adopt_file);
write_adopt_file();
log_debug("adopt_locks done");
return;
fail:
(void) unlink(adopt_file);
unlink(adopt_file);
log_error("adopt_locks failed, reset host");
}
@@ -6081,7 +5859,6 @@ static int main_loop(daemon_state *ds_arg)
}
strcpy(gl_lsname_dlm, S_NAME_GL_DLM);
strcpy(gl_lsname_idm, S_NAME_GL_IDM);
INIT_LIST_HEAD(&lockspaces);
pthread_mutex_init(&lockspaces_mutex, NULL);
@@ -6335,8 +6112,6 @@ int main(int argc, char *argv[])
gl_use_dlm = 1;
else if (lm == LD_LM_SANLOCK && lm_support_sanlock())
gl_use_sanlock = 1;
else if (lm == LD_LM_IDM && lm_support_idm())
gl_use_idm = 1;
else {
fprintf(stderr, "invalid gl-type option\n");
exit(EXIT_FAILURE);

View File

@@ -1,837 +0,0 @@
/*
* Copyright (C) 2020-2021 Seagate Ltd.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*/
#define _XOPEN_SOURCE 500 /* pthread */
#define _ISOC99_SOURCE
#include "tools/tool.h"
#include "daemon-server.h"
#include "lib/mm/xlate.h"
#include "lvmlockd-internal.h"
#include "daemons/lvmlockd/lvmlockd-client.h"
#include "ilm.h"
#include <blkid/blkid.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <poll.h>
#include <regex.h>
#include <stddef.h>
#include <syslog.h>
#include <sys/sysmacros.h>
#include <time.h>
#define IDM_TIMEOUT 60000 /* unit: millisecond, 60 seconds */
/*
* Each lockspace thread has its own In-Drive Mutex (IDM) lock manager's
* connection. After established socket connection, the lockspace has
* been created in IDM lock manager and afterwards use the socket file
* descriptor to send any requests for lock related operations.
*/
/* Per-lockspace IDM state, stored in lockspace->lm_data. */
struct lm_idm {
	int sock; /* IDM lock manager connection */
};

/* Per-resource IDM state, stored in resource->lm_data (see lm_data_size_idm). */
struct rd_idm {
	struct idm_lock_id id;	/* VG/LV uuid pair identifying the lock in the drive firmware */
	struct idm_lock_op op;	/* mode, timeout and drive list used for lock requests */
	uint64_t vb_timestamp;	/* cached copy of the timestamp stored in the drive LVB */
	struct val_blk *vb;	/* cached value block; allocated only for GL/VG locks (see lm_add_resource_idm) */
};
/* Report the size of this lock manager's per-resource state (struct rd_idm). */
int lm_data_size_idm(void)
{
	return sizeof(struct rd_idm);
}
/*
 * Return the current wall-clock (CLOCK_REALTIME) time in microseconds.
 *
 * IDM reserves the MSB in 8 bytes and the low 56 bits are used for the
 * timestamp; 56 bits can represent calendar years up to 2284, so the
 * returned value itself has ample headroom.
 */
static uint64_t read_utc_us(void)
{
	struct timespec cur_time;

	clock_gettime(CLOCK_REALTIME, &cur_time);

	/*
	 * Fix: promote tv_sec to uint64_t *before* multiplying.  On
	 * platforms with a 32-bit time_t the old expression
	 * "tv_sec * 1000000" overflowed long before being converted
	 * to the 64-bit return type.
	 */
	return (uint64_t)cur_time.tv_sec * 1000000 +
	       (uint64_t)(cur_time.tv_nsec / 1000);
}
/*
 * Copy a UUID string into uuid_str with all '-' separators stripped.
 * Exactly 32 hex characters must remain.  Returns 0 on success, -1 when
 * the input is too long or too short.
 */
static int uuid_read_format(char *uuid_str, const char *buffer)
{
	const char *src;
	int len = 0;

	for (src = buffer; *src; src++) {
		/* just strip out any dashes */
		if (*src == '-')
			continue;

		if (len >= 32) {
			log_error("Too many characters to be uuid.");
			return -1;
		}
		uuid_str[len++] = *src;
	}

	if (len != 32) {
		log_error("Couldn't read uuid: incorrect number of "
			  "characters.");
		return -1;
	}
	return 0;
}
#define SYSFS_ROOT "/sys"
#define BUS_SCSI_DEVS "/bus/scsi/devices"
static struct idm_lock_op glb_lock_op;
/* Release a scandir() result: free each entry, then the array itself. */
static void lm_idm_free_dir_list(struct dirent **dir_list, int dir_num)
{
	int idx;

	for (idx = 0; idx < dir_num; idx++)
		free(dir_list[idx]);

	free(dir_list);
}
/*
 * scandir() filter: accept only SCSI device directories named in the
 * host:bus:target:lun form (e.g. "1:0:0:0").  Returns 1 to keep the
 * entry, 0 to skip it.
 */
static int lm_idm_scsi_directory_select(const struct dirent *s)
{
	regex_t regex;
	int keep;

	/* Only select directory with the format x:x:x:x */
	if (regcomp(&regex, "^[0-9]+:[0-9]+:[0-9]+:[0-9]+$", REG_EXTENDED))
		return 0;

	keep = !regexec(&regex, s->d_name, 0, NULL, 0);
	regfree(&regex);

	return keep ? 1 : 0;
}
/*
 * Return 0 when block_path exists and is a directory, -1 otherwise.
 * (Name keeps the original spelling for compatibility with callers.)
 */
static int lm_idm_scsi_find_block_dirctory(const char *block_path)
{
	struct stat st;

	if (stat(block_path, &st) < 0)
		return -1;

	return S_ISDIR(st.st_mode) ? 0 : -1;
}
/*
 * scandir() filter for the sysfs "block" directory: keep symlinks and
 * real subdirectories, skipping "." and "..".  Returns 1 to keep the
 * entry, 0 to skip it.
 */
static int lm_idm_scsi_block_node_select(const struct dirent *s)
{
	switch (s->d_type) {
	case DT_LNK:
		return 1;
	case DT_DIR:
		/* Skip this directory: '.' and parent: '..' */
		if (!strcmp(s->d_name, ".") || !strcmp(s->d_name, ".."))
			return 0;
		return 1;
	default:
		return 0;
	}
}
/*
 * Find the single block device name under a sysfs "block" directory and
 * return it in *blk_dev as a malloc'd string (caller frees).  Returns
 * the number of entries found (expected to be 1) on success, -1 on
 * failure (scandir error, no unique entry, or strdup failure).
 */
static int lm_idm_scsi_find_block_node(const char *blk_path, char **blk_dev)
{
	struct dirent **dir_list;
	int dir_num;

	dir_num = scandir(blk_path, &dir_list, lm_idm_scsi_block_node_select, NULL);
	if (dir_num < 0) {
		log_error("Cannot find valid directory entry in %s", blk_path);
		return -1;
	}

	/*
	 * Should have only one block name under the path, if the dir_num is
	 * not 1 (e.g. 0 or any number bigger than 1), it must be wrong and
	 * should never happen.
	 */
	*blk_dev = (dir_num == 1) ? strdup(dir_list[0]->d_name) : NULL;

	lm_idm_free_dir_list(dir_list, dir_num);

	return *blk_dev ? dir_num : -1;
}
/*
 * Scan the partition table of 'dev' with libblkid and return the
 * partition number of the partition named "propeller", or -1 when no
 * such partition exists or on error.
 */
static int lm_idm_scsi_search_propeller_partition(char *dev)
{
	int i, nparts;
	blkid_probe pr;
	blkid_partlist ls;
	int found = -1;

	pr = blkid_new_probe_from_filename(dev);
	if (!pr) {
		log_error("%s: failed to create a new libblkid probe", dev);
		return -1;
	}

	/* Binary interface */
	ls = blkid_probe_get_partitions(pr);
	if (!ls) {
		log_error("%s: failed to read partitions", dev);
		/*
		 * Fix: previously returned here directly, leaking the
		 * probe.  Fall through to the common cleanup so
		 * blkid_free_probe() always runs; 'found' is still -1.
		 */
		goto done;
	}

	/* List partitions */
	nparts = blkid_partlist_numof_partitions(ls);
	if (!nparts)
		goto done;

	for (i = 0; i < nparts; i++) {
		const char *p;
		blkid_partition par = blkid_partlist_get_partition(ls, i);

		p = blkid_partition_get_name(par);
		if (p) {
			log_debug("partition name='%s'", p);

			if (!strcmp(p, "propeller"))
				found = blkid_partition_get_partno(par);
		}

		if (found >= 0)
			break;
	}

done:
	blkid_free_probe(pr);
	return found;
}
/*
 * Map a sysfs SCSI device directory (e.g. /sys/bus/scsi/devices/1:0:0:0)
 * to its /dev block node, accepting the device only when it carries a
 * "propeller" partition.  Returns a malloc'd path (caller frees), or
 * NULL when the device is unsuitable or an allocation fails.
 */
static char *lm_idm_scsi_get_block_device_node(const char *scsi_path)
{
	char *blk_path = NULL;
	char *blk_dev = NULL;
	char *dev_node = NULL;

	/*
	 * Locate the "block" directory, such like:
	 * /sys/bus/scsi/devices/1:0:0:0/block
	 */
	if (asprintf(&blk_path, "%s/%s", scsi_path, "block") < 0) {
		log_error("Fail to allocate block path for %s", scsi_path);
		goto fail;
	}

	if (lm_idm_scsi_find_block_dirctory(blk_path) < 0) {
		log_error("Fail to find block path %s", blk_path);
		goto fail;
	}

	/*
	 * Locate the block device name, such like:
	 * /sys/bus/scsi/devices/1:0:0:0/block/sdb
	 *
	 * On success blk_dev points to the device name; in this example
	 * it would be the string "sdb".
	 */
	if (lm_idm_scsi_find_block_node(blk_path, &blk_dev) < 0) {
		log_error("Fail to find block node");
		goto fail;
	}

	if (asprintf(&dev_node, "/dev/%s", blk_dev) < 0) {
		log_error("Fail to allocate memory for blk node path");
		goto fail;
	}

	if (lm_idm_scsi_search_propeller_partition(dev_node) < 0)
		goto fail;

	free(blk_path);
	free(blk_dev);
	return dev_node;

fail:
	free(blk_path);
	free(blk_dev);
	free(dev_node);
	return NULL;
}
/*
 * Populate the global-lock drive list (glb_lock_op) by scanning the
 * SCSI devices under /sys/bus/scsi/devices and keeping each device whose
 * block node carries a "propeller" partition.
 *
 * Idempotent: returns 0 immediately when the list was already built.
 * Returns 0 on success; -1 on failure, after releasing any drive paths
 * collected so far.
 */
static int lm_idm_get_gl_lock_pv_list(void)
{
	struct dirent **dir_list;
	char scsi_bus_path[PATH_MAX];
	char *drive_path;
	int i, dir_num, ret;

	/* Already populated by an earlier call. */
	if (glb_lock_op.drive_num)
		return 0;

	snprintf(scsi_bus_path, sizeof(scsi_bus_path), "%s%s",
		 SYSFS_ROOT, BUS_SCSI_DEVS);

	dir_num = scandir(scsi_bus_path, &dir_list,
			  lm_idm_scsi_directory_select, NULL);
	if (dir_num < 0) {  /* scsi mid level may not be loaded */
		log_error("Attached devices: none");
		return -1;
	}

	for (i = 0; i < dir_num; i++) {
		char *scsi_path;

		ret = asprintf(&scsi_path, "%s/%s", scsi_bus_path,
			       dir_list[i]->d_name);
		if (ret < 0) {
			log_error("Fail to allocate memory for scsi directory");
			goto failed;
		}

		if (glb_lock_op.drive_num >= ILM_DRIVE_MAX_NUM) {
			log_error("Global lock: drive number %d exceeds limitation (%d) ?!",
				  glb_lock_op.drive_num, ILM_DRIVE_MAX_NUM);
			free(scsi_path);
			goto failed;
		}

		/* Devices without a "propeller" partition are skipped. */
		drive_path = lm_idm_scsi_get_block_device_node(scsi_path);
		if (!drive_path) {
			free(scsi_path);
			continue;
		}

		glb_lock_op.drives[glb_lock_op.drive_num] = drive_path;
		glb_lock_op.drive_num++;

		free(scsi_path);
	}

	lm_idm_free_dir_list(dir_list, dir_num);
	return 0;

failed:
	lm_idm_free_dir_list(dir_list, dir_num);

	/*
	 * Unwind: free any drive paths collected before the failure.
	 * NOTE(review): glb_lock_op.drive_num is not reset to 0 here, so a
	 * later call would see a non-zero count with NULL drive entries —
	 * confirm whether the daemon treats this failure as fatal.
	 */
	for (i = 0; i < glb_lock_op.drive_num; i++) {
		if (glb_lock_op.drives[i]) {
			free(glb_lock_op.drives[i]);
			glb_lock_op.drives[i] = NULL;
		}
	}
	return -1;
}
/*
 * Advance the cached value-block timestamp to the current UTC time in
 * microseconds, ensuring it is strictly increasing even when the clock
 * appears to go backward.
 */
static void lm_idm_update_vb_timestamp(uint64_t *vb_timestamp)
{
	uint64_t utc_us = read_utc_us();

	/*
	 * Multiple nodes may lack microsecond-precision clock
	 * synchronization, so time can appear to go backward.  In that
	 * case simply bump the existing timestamp instead of adopting
	 * the (older) clock value.
	 */
	*vb_timestamp = (*vb_timestamp >= utc_us) ? *vb_timestamp + 1 : utc_us;
}
/*
 * Allocate the per-lockspace IDM state and attach it to ls->lm_data.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int lm_prepare_lockspace_idm(struct lockspace *ls)
{
	struct lm_idm *lm;

	if (!(lm = malloc(sizeof(struct lm_idm)))) {
		log_error("S %s prepare_lockspace_idm fail to allocate lm_idm for %s",
			  ls->name, ls->vg_name);
		return -ENOMEM;
	}
	memset(lm, 0x0, sizeof(struct lm_idm));

	ls->lm_data = lm;
	log_debug("S %s prepare_lockspace_idm done", ls->name);
	return 0;
}
/*
 * Connect lockspace ls to the IDM lock manager.
 *
 * For the global lockspace the "propeller" drive list is built first.
 * A killpath of "<LVM_PATH>lockctl --kill <vg_name>" is registered with
 * the lock manager so it can fence this host on lease failure.
 *
 * Returns 0 on success, -EIO when the global PV list cannot be built,
 * -EMANAGER on connection/killpath failure.  'adopt' is currently unused.
 */
int lm_add_lockspace_idm(struct lockspace *ls, int adopt)
{
	char killpath[IDM_FAILURE_PATH_LEN];
	char killargs[IDM_FAILURE_ARGS_LEN];
	struct lm_idm *lmi = (struct lm_idm *)ls->lm_data;
	int rv;

	if (daemon_test)
		return 0;

	if (!strcmp(ls->name, S_NAME_GL_IDM)) {
		/*
		 * Prepare the pv list for global lock, if the drive contains
		 * "propeller" partition, then this drive will be considered
		 * as a member of pv list.
		 */
		rv = lm_idm_get_gl_lock_pv_list();
		if (rv < 0) {
			log_error("S %s add_lockspace_idm fail to get pv list for glb lock",
				  ls->name);
			return -EIO;
		} else {
			/* NOTE(review): success path logged at error level —
			 * presumably for visibility; confirm intent. */
			log_error("S %s add_lockspace_idm get pv list for glb lock",
				  ls->name);
		}
	}

	/*
	 * Construct the execution path for command "lvmlockctl" by using the
	 * path to the lvm binary and appending "lockctl".
	 */
	memset(killpath, 0, sizeof(killpath));
	snprintf(killpath, IDM_FAILURE_PATH_LEN, "%slockctl", LVM_PATH);

	/* Pass the argument "--kill vg_name" for killpath */
	memset(killargs, 0, sizeof(killargs));
	snprintf(killargs, IDM_FAILURE_ARGS_LEN, "--kill %s", ls->vg_name);

	/* Connect with IDM lock manager per every lockspace. */
	rv = ilm_connect(&lmi->sock);
	if (rv < 0) {
		log_error("S %s add_lockspace_idm fail to connect the lock manager %d",
			  ls->name, lmi->sock);
		lmi->sock = 0;
		rv = -EMANAGER;
		goto fail;
	}

	rv = ilm_set_killpath(lmi->sock, killpath, killargs);
	if (rv < 0) {
		log_error("S %s add_lockspace_idm fail to set kill path %d",
			  ls->name, rv);
		rv = -EMANAGER;
		goto fail;
	}

	log_debug("S %s add_lockspace_idm kill path is: \"%s %s\"",
		  ls->name, killpath, killargs);

	log_debug("S %s add_lockspace_idm done", ls->name);
	return 0;

fail:
	/*
	 * NOTE(review): this frees lmi but leaves ls->lm_data pointing at
	 * the freed memory — confirm the core never calls
	 * lm_rem_lockspace_idm after a failed add, otherwise this is a
	 * use-after-free / double free.
	 */
	if (lmi && lmi->sock)
		close(lmi->sock);
	if (lmi)
		free(lmi);
	return rv;
}
/*
 * Tear down the IDM connection for lockspace ls and free its lm_data.
 * For the global lockspace the cached "propeller" drive list is released
 * as well.  Returns the ilm_disconnect() result (0 in daemon_test mode).
 */
int lm_rem_lockspace_idm(struct lockspace *ls, int free_vg)
{
	struct lm_idm *lmi = (struct lm_idm *)ls->lm_data;
	int i, rv = 0;

	if (!daemon_test) {
		rv = ilm_disconnect(lmi->sock);
		if (rv < 0)
			log_error("S %s rem_lockspace_idm error %d", ls->name, rv);

		/* Release pv list for global lock */
		if (!strcmp(ls->name, "lvm_global")) {
			for (i = 0; i < glb_lock_op.drive_num; i++) {
				if (glb_lock_op.drives[i]) {
					free(glb_lock_op.drives[i]);
					glb_lock_op.drives[i] = NULL;
				}
			}
		}
	}

	free(lmi);
	ls->lm_data = NULL;
	return rv;
}
/*
 * Lazily initialize per-resource IDM state.  Only GL and VG locks carry
 * a value block; other resource types need no allocation.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int lm_add_resource_idm(struct lockspace *ls, struct resource *r)
{
	struct rd_idm *rdi = (struct rd_idm *)r->lm_data;

	if (r->type != LD_RT_GL && r->type != LD_RT_VG)
		return 0;

	if (!(rdi->vb = zalloc(sizeof(struct val_blk))))
		return -ENOMEM;

	return 0;
}
/*
 * Release per-resource IDM state: free the value block (if any) and
 * reset the rd_idm struct so the resource can be re-initialized later.
 * Always returns 0.
 */
int lm_rem_resource_idm(struct lockspace *ls, struct resource *r)
{
	struct rd_idm *rdi = (struct rd_idm *)r->lm_data;

	/* free(NULL) is a no-op, so no need to test rdi->vb first. */
	free(rdi->vb);

	memset(rdi, 0, sizeof(struct rd_idm));
	r->lm_init = 0;
	return 0;
}
/*
 * Translate an lvmlockd lock mode into the corresponding IDM mode.
 * Returns the IDM mode, or -1 for modes IDM does not support.
 */
static int to_idm_mode(int ld_mode)
{
	if (ld_mode == LD_LK_EX)
		return IDM_MODE_EXCLUSIVE;

	if (ld_mode == LD_LK_SH)
		return IDM_MODE_SHAREABLE;

	return -1;
}
/*
 * Acquire an IDM lock for resource r in mode ld_mode.
 *
 * The drive list used for the request depends on the resource type:
 * GL uses the global "propeller" drive list, VG uses the lockspace's PV
 * list, LV uses the PV list passed in 'pvs'.  On success the cached
 * value block (if any) is copied to vb_out; if the value block cannot be
 * read back, or another host updated it (timestamp mismatch), the cached
 * VB is zeroed so the upper layer invalidates its metadata.
 *
 * Returns 0 on success, -EINVAL for bad arguments, -ELOCKIO when the
 * lock manager fails to acquire the lock.
 */
int lm_lock_idm(struct lockspace *ls, struct resource *r, int ld_mode,
		struct val_blk *vb_out, char *lv_uuid, struct pvs *pvs,
		int adopt)
{
	struct lm_idm *lmi = (struct lm_idm *)ls->lm_data;
	struct rd_idm *rdi = (struct rd_idm *)r->lm_data;
	char **drive_path = NULL;
	uint64_t timestamp;
	int reset_vb = 0;
	int rv, i;

	if (!r->lm_init) {
		rv = lm_add_resource_idm(ls, r);
		if (rv < 0)
			return rv;
		r->lm_init = 1;
	}

	rdi->op.mode = to_idm_mode(ld_mode);
	/*
	 * Fix: check the converted mode, not 'rv'.  The previous code
	 * tested 'rv' here, which is uninitialized when r->lm_init was
	 * already set (undefined behavior).
	 */
	if (rdi->op.mode < 0) {
		log_error("lock_idm invalid mode %d", ld_mode);
		return -EINVAL;
	}

	log_debug("S %s R %s lock_idm", ls->name, r->name);

	if (daemon_test) {
		if (rdi->vb) {
			vb_out->version = le16_to_cpu(rdi->vb->version);
			vb_out->flags = le16_to_cpu(rdi->vb->flags);
			vb_out->r_version = le32_to_cpu(rdi->vb->r_version);
		}
		return 0;
	}

	rdi->op.timeout = IDM_TIMEOUT;

	/*
	 * Generate the UUID string, for RT_VG, it only needs to generate
	 * UUID string for VG level, for RT_LV, it needs to generate
	 * UUID strings for both VG and LV levels.  At the end, these IDs
	 * are used as identifier for IDM in drive firmware.
	 */
	if (r->type == LD_RT_VG || r->type == LD_RT_LV)
		log_debug("S %s R %s VG uuid %s", ls->name, r->name, ls->vg_uuid);
	if (r->type == LD_RT_LV)
		log_debug("S %s R %s LV uuid %s", ls->name, r->name, lv_uuid);

	memset(&rdi->id, 0x0, sizeof(struct idm_lock_id));
	if (r->type == LD_RT_VG) {
		uuid_read_format(rdi->id.vg_uuid, ls->vg_uuid);
	} else if (r->type == LD_RT_LV) {
		uuid_read_format(rdi->id.vg_uuid, ls->vg_uuid);
		uuid_read_format(rdi->id.lv_uuid, lv_uuid);
	}

	/*
	 * Establish the drive path list for lock, since different lock type
	 * has different drive list; the GL lock uses the global pv list,
	 * the VG lock uses the pv list spanned for the whole volume group,
	 * the LV lock uses the pv list for the logical volume.
	 */
	switch (r->type) {
	case LD_RT_GL:
		drive_path = glb_lock_op.drives;
		rdi->op.drive_num = glb_lock_op.drive_num;
		break;
	case LD_RT_VG:
		drive_path = (char **)ls->pvs.path;
		rdi->op.drive_num = ls->pvs.num;
		break;
	case LD_RT_LV:
		drive_path = (char **)pvs->path;
		rdi->op.drive_num = pvs->num;
		break;
	default:
		break;
	}

	if (!drive_path) {
		log_error("S %s R %s cannot find the valid drive path array",
			  ls->name, r->name);
		return -EINVAL;
	}

	if (rdi->op.drive_num >= ILM_DRIVE_MAX_NUM) {
		log_error("S %s R %s exceeds limitation for drive path array",
			  ls->name, r->name);
		return -EINVAL;
	}

	for (i = 0; i < rdi->op.drive_num; i++)
		rdi->op.drives[i] = drive_path[i];

	log_debug("S %s R %s mode %d drive_num %d timeout %d",
		  ls->name, r->name, rdi->op.mode,
		  rdi->op.drive_num, rdi->op.timeout);

	for (i = 0; i < rdi->op.drive_num; i++)
		log_debug("S %s R %s drive path[%d] %s",
			  ls->name, r->name, i, rdi->op.drives[i]);

	rv = ilm_lock(lmi->sock, &rdi->id, &rdi->op);
	if (rv < 0) {
		log_debug("S %s R %s lock_idm acquire mode %d rv %d",
			  ls->name, r->name, ld_mode, rv);
		return -ELOCKIO;
	}

	if (rdi->vb) {
		rv = ilm_read_lvb(lmi->sock, &rdi->id, (char *)&timestamp,
				  sizeof(uint64_t));

		/*
		 * If fail to read value block, which might be caused by drive
		 * failure, notify up layer to invalidate metadata.
		 */
		if (rv < 0) {
			log_error("S %s R %s lock_idm get_lvb error %d",
				  ls->name, r->name, rv);
			reset_vb = 1;

			/* Reset timestamp */
			rdi->vb_timestamp = 0;

		/*
		 * If the cached timestamp mismatches with the stored value
		 * in the IDM, this means another host has updated timestamp
		 * for the new VB. Let's reset VB and notify up layer to
		 * invalidate metadata.
		 */
		} else if (rdi->vb_timestamp != timestamp) {
			log_debug("S %s R %s lock_idm get lvb timestamp %lu:%lu",
				  ls->name, r->name, rdi->vb_timestamp,
				  timestamp);

			rdi->vb_timestamp = timestamp;
			reset_vb = 1;
		}

		if (reset_vb == 1) {
			memset(rdi->vb, 0, sizeof(struct val_blk));
			memset(vb_out, 0, sizeof(struct val_blk));

			/*
			 * The lock is still acquired, but the vb values has
			 * been invalidated.
			 */
			rv = 0;
			goto out;
		}

		/* Otherwise, copy the cached VB to up layer */
		memcpy(vb_out, rdi->vb, sizeof(struct val_blk));
	}

out:
	return rv;
}
/*
 * Convert an already-held IDM lock to a different mode.
 *
 * When the lock is held exclusively and a new r_version is supplied, the
 * cached value block and timestamp are updated and the timestamp is
 * written to the drive LVB before the conversion (mirroring the unlock
 * path in lm_unlock_idm).
 *
 * Returns 0 on success, -EINVAL for an unsupported mode, -ELMERR when
 * writing the LVB fails, or the ilm_convert() error code.
 */
int lm_convert_idm(struct lockspace *ls, struct resource *r,
		   int ld_mode, uint32_t r_version)
{
	struct lm_idm *lmi = (struct lm_idm *)ls->lm_data;
	struct rd_idm *rdi = (struct rd_idm *)r->lm_data;
	int mode, rv;

	if (rdi->vb && r_version && (r->mode == LD_LK_EX)) {
		if (!rdi->vb->version) {
			/* first time vb has been written */
			rdi->vb->version = VAL_BLK_VERSION;
		}
		rdi->vb->r_version = r_version;

		log_debug("S %s R %s convert_idm set r_version %u",
			  ls->name, r->name, r_version);

		lm_idm_update_vb_timestamp(&rdi->vb_timestamp);
		log_debug("S %s R %s convert_idm vb %x %x %u timestamp %lu",
			  ls->name, r->name, rdi->vb->version, rdi->vb->flags,
			  rdi->vb->r_version, rdi->vb_timestamp);
	}

	mode = to_idm_mode(ld_mode);
	if (mode < 0) {
		log_error("S %s R %s convert_idm invalid mode %d",
			  ls->name, r->name, ld_mode);
		return -EINVAL;
	}

	log_debug("S %s R %s convert_idm", ls->name, r->name);

	if (daemon_test)
		return 0;

	if (rdi->vb && r_version && (r->mode == LD_LK_EX)) {
		/*
		 * Fix: pass the *address* of the cached timestamp.  The
		 * previous code cast the timestamp value itself to a
		 * pointer, so ilm_write_lvb() read from a bogus address
		 * (compare the identical write in lm_unlock_idm, which
		 * correctly uses &rdi->vb_timestamp).
		 */
		rv = ilm_write_lvb(lmi->sock, &rdi->id,
				   (char *)&rdi->vb_timestamp, sizeof(uint64_t));
		if (rv < 0) {
			log_error("S %s R %s convert_idm write lvb error %d",
				  ls->name, r->name, rv);
			return -ELMERR;
		}
	}

	rv = ilm_convert(lmi->sock, &rdi->id, mode);
	if (rv < 0)
		log_error("S %s R %s convert_idm convert error %d",
			  ls->name, r->name, rv);

	return rv;
}
/*
 * Release the IDM lock for resource r.
 *
 * When the lock is held exclusively and a new r_version is supplied, the
 * cached value block and timestamp are updated and the timestamp is
 * written to the drive LVB before the unlock.
 *
 * Returns 0 on success, -ELMERR when writing the LVB fails, or the
 * ilm_unlock() error code.  'lmu_flags' is currently unused here.
 */
int lm_unlock_idm(struct lockspace *ls, struct resource *r,
		  uint32_t r_version, uint32_t lmu_flags)
{
	struct lm_idm *lmi = (struct lm_idm *)ls->lm_data;
	struct rd_idm *rdi = (struct rd_idm *)r->lm_data;
	int rv;

	if (rdi->vb && r_version && (r->mode == LD_LK_EX)) {
		if (!rdi->vb->version) {
			/* first time vb has been written */
			rdi->vb->version = VAL_BLK_VERSION;
		}
		/* NOTE(review): r_version is already known non-zero from the
		 * enclosing condition; this inner test is redundant. */
		if (r_version)
			rdi->vb->r_version = r_version;

		lm_idm_update_vb_timestamp(&rdi->vb_timestamp);

		log_debug("S %s R %s unlock_idm vb %x %x %u timestamp %lu",
			  ls->name, r->name, rdi->vb->version, rdi->vb->flags,
			  rdi->vb->r_version, rdi->vb_timestamp);
	}

	log_debug("S %s R %s unlock_idm", ls->name, r->name);

	if (daemon_test)
		return 0;

	if (rdi->vb && r_version && (r->mode == LD_LK_EX)) {
		rv = ilm_write_lvb(lmi->sock, &rdi->id,
				   (char *)&rdi->vb_timestamp, sizeof(uint64_t));
		if (rv < 0) {
			log_error("S %s R %s unlock_idm set_lvb error %d",
				  ls->name, r->name, rv);
			return -ELMERR;
		}
	}

	rv = ilm_unlock(lmi->sock, &rdi->id);
	if (rv < 0)
		log_error("S %s R %s unlock_idm error %d", ls->name, r->name, rv);

	return rv;
}
/*
 * Query the lock manager for the host count on every initialized
 * resource in the lockspace and return the maximum seen, so the caller
 * can tell whether other hosts are using the lockspace.
 *
 * Returns the count (>= 0) or a negative ilm error code.
 */
int lm_hosts_idm(struct lockspace *ls, int notify)
{
	struct resource *r;
	struct lm_idm *lmi = (struct lm_idm *)ls->lm_data;
	struct rd_idm *rdi;
	int count, self, found_others = 0;
	int rv;

	list_for_each_entry(r, &ls->resources, list) {
		if (!r->lm_init)
			continue;

		rdi = (struct rd_idm *)r->lm_data;

		rv = ilm_get_host_count(lmi->sock, &rdi->id, &rdi->op,
					&count, &self);
		if (rv < 0) {
			log_error("S %s lm_hosts_idm error %d", ls->name, rv);
			return rv;
		}

		/* Fixup: need to reduce self count */
		/* NOTE(review): 'count' appears to include this host ('self'
		 * is returned but never used) — confirm whether it should be
		 * subtracted before the comparison below. */
		if (count > found_others)
			found_others = count;
	}

	return found_others;
}
/*
 * Lockspace adoption is not implemented for IDM; always return -1 so the
 * caller knows no lockspaces can be rejoined.
 */
int lm_get_lockspaces_idm(struct list_head *ls_rejoin)
{
	/* TODO: Need to add support for adoption. */
	return -1;
}
/*
 * Probe whether the Seagate IDM lock manager is reachable by opening
 * (and immediately closing) a connection.  Returns 1 when it is running,
 * 0 otherwise.  In daemon_test mode, report the configured gl_use_idm.
 */
int lm_is_running_idm(void)
{
	int sock;
	int rv;

	if (daemon_test)
		return gl_use_idm;

	rv = ilm_connect(&sock);
	if (rv >= 0) {
		ilm_disconnect(sock);
		return 1;
	}

	log_error("Fail to connect seagate IDM lock manager %d", rv);
	return 0;
}

View File

@@ -20,7 +20,6 @@
#define R_NAME_GL "GLLK"
#define R_NAME_VG "VGLK"
#define S_NAME_GL_DLM "lvm_global"
#define S_NAME_GL_IDM "lvm_global"
#define LVM_LS_PREFIX "lvm_" /* ls name is prefix + vg_name */
/* global lockspace name for sanlock is a vg name */
@@ -30,7 +29,6 @@ enum {
LD_LM_UNUSED = 1, /* place holder so values match lib/locking/lvmlockd.h */
LD_LM_DLM = 2,
LD_LM_SANLOCK = 3,
LD_LM_IDM = 4,
};
/* operation types */
@@ -120,11 +118,6 @@ struct client {
*/
#define DEFAULT_MAX_RETRIES 4
struct pvs {
char **path;
int num;
};
struct action {
struct list_head list;
uint32_t client_id;
@@ -147,7 +140,6 @@ struct action {
char vg_args[MAX_ARGS+1];
char lv_args[MAX_ARGS+1];
char vg_sysid[MAX_NAME+1];
struct pvs pvs; /* PV list for idm */
};
struct resource {
@@ -192,7 +184,6 @@ struct lockspace {
uint64_t free_lock_offset; /* for sanlock, start search for free lock here */
int free_lock_sector_size; /* for sanlock */
int free_lock_align_size; /* for sanlock */
struct pvs pvs; /* for idm: PV list */
uint32_t start_client_id; /* client_id that started the lockspace */
pthread_t thread; /* makes synchronous lock requests */
@@ -334,13 +325,10 @@ static inline int list_empty(const struct list_head *head)
EXTERN int gl_type_static;
EXTERN int gl_use_dlm;
EXTERN int gl_use_sanlock;
EXTERN int gl_use_idm;
EXTERN int gl_vg_removed;
EXTERN char gl_lsname_dlm[MAX_NAME+1];
EXTERN char gl_lsname_sanlock[MAX_NAME+1];
EXTERN char gl_lsname_idm[MAX_NAME+1];
EXTERN int global_dlm_lockspace_exists;
EXTERN int global_idm_lockspace_exists;
EXTERN int daemon_test; /* run as much as possible without a live lock manager */
EXTERN int daemon_debug;
@@ -631,102 +619,4 @@ static inline int lm_support_sanlock(void)
#endif /* sanlock support */
#ifdef LOCKDIDM_SUPPORT
int lm_data_size_idm(void);
int lm_init_vg_idm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args);
int lm_prepare_lockspace_idm(struct lockspace *ls);
int lm_add_lockspace_idm(struct lockspace *ls, int adopt);
int lm_rem_lockspace_idm(struct lockspace *ls, int free_vg);
int lm_lock_idm(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out, char *lv_uuid, struct pvs *pvs,
int adopt);
int lm_convert_idm(struct lockspace *ls, struct resource *r,
int ld_mode, uint32_t r_version);
int lm_unlock_idm(struct lockspace *ls, struct resource *r,
uint32_t r_version, uint32_t lmu_flags);
int lm_hosts_idm(struct lockspace *ls, int notify);
int lm_get_lockspaces_idm(struct list_head *ls_rejoin);
int lm_is_running_idm(void);
int lm_rem_resource_idm(struct lockspace *ls, struct resource *r);
static inline int lm_support_idm(void)
{
return 1;
}
#else
static inline int lm_data_size_idm(void)
{
return -1;
}
static inline int lm_init_vg_idm(char *ls_name, char *vg_name, uint32_t flags,
char *vg_args)
{
return -1;
}
static inline int lm_prepare_lockspace_idm(struct lockspace *ls)
{
return -1;
}
static inline int lm_add_lockspace_idm(struct lockspace *ls, int adopt)
{
return -1;
}
static inline int lm_rem_lockspace_idm(struct lockspace *ls, int free_vg)
{
return -1;
}
static inline int lm_lock_idm(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk *vb_out, char *lv_uuid, struct pvs *pvs,
int adopt)
{
return -1;
}
static inline int lm_convert_idm(struct lockspace *ls, struct resource *r,
int ld_mode, uint32_t r_version)
{
return -1;
}
static inline int lm_unlock_idm(struct lockspace *ls, struct resource *r,
uint32_t r_version, uint32_t lmu_flags)
{
return -1;
}
static inline int lm_hosts_idm(struct lockspace *ls, int notify)
{
return -1;
}
static inline int lm_get_lockspaces_idm(struct list_head *ls_rejoin)
{
return -1;
}
static inline int lm_is_running_idm(void)
{
return 0;
}
static inline int lm_rem_resource_idm(struct lockspace *ls, struct resource *r)
{
return -1;
}
static inline int lm_support_idm(void)
{
return 0;
}
#endif /* Seagate IDM support */
#endif /* _LVM_LVMLOCKD_INTERNAL_H */

View File

@@ -3774,7 +3774,7 @@ static struct selection_node *_parse_selection(struct dm_report *rh,
struct field_selection *fs;
struct selection_node *sn;
const char *ws, *we; /* field name */
const char *vs = NULL, *ve = NULL; /* value */
const char *vs, *ve; /* value */
const char *last;
uint32_t flags, field_num;
int implicit;

View File

@@ -561,9 +561,6 @@
/* Define to 1 to include code that uses lvmlockd sanlock option. */
#undef LOCKDSANLOCK_SUPPORT
/* Define to 1 to include code that uses lvmlockd IDM option. */
#undef LOCKDIDM_SUPPORT
/* Define to 1 if `lstat' dereferences a symlink specified with a trailing
slash. */
#undef LSTAT_FOLLOWS_SLASHED_SYMLINK

View File

@@ -574,9 +574,13 @@ int module_present(struct cmd_context *cmd, const char *target_name)
}
#ifdef MODPROBE_CMD
if (strcmp(target_name, TARGET_NAME_VDO) == 0)
argv[1] = MODULE_NAME_VDO; /* ATM kvdo is without dm- prefix */
else if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
if (strcmp(target_name, MODULE_NAME_VDO) == 0) {
argv[1] = target_name; /* ATM kvdo is without dm- prefix */
if ((ret = exec_cmd(cmd, argv, NULL, 0)))
return ret;
}
if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
log_error("module_present module name too long: %s",
target_name);
return 0;

117
lib/cache/lvmcache.c vendored
View File

@@ -88,9 +88,6 @@ static int _vgs_locked = 0;
static int _found_duplicate_vgnames = 0;
static int _outdated_warning = 0;
static const char *_scan_lock_global_file = DEFAULT_RUN_DIR "/scan_lock_global";
static int _scan_lock_global_file_exists = 0;
int lvmcache_init(struct cmd_context *cmd)
{
/*
@@ -2745,115 +2742,19 @@ bool lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const c
return true;
}
/*
* max_size_bytes and max_size_percent may come from different areas and
* different vgs because of different area sizes.
*/
static uint64_t _max_metadata_size_bytes;
static dm_percent_t _max_metadata_size_percent = DM_PERCENT_INVALID;
static uint64_t _max_metadata_size;
void lvmcache_save_metadata_size_bytes(uint64_t val)
void lvmcache_save_metadata_size(uint64_t val)
{
if (!_max_metadata_size_bytes)
_max_metadata_size_bytes = val;
else if (_max_metadata_size_bytes < val)
_max_metadata_size_bytes = val;
if (!_max_metadata_size)
_max_metadata_size = val;
else if (_max_metadata_size < val)
_max_metadata_size = val;
}
uint64_t lvmcache_max_metadata_size_bytes(void)
uint64_t lvmcache_max_metadata_size(void)
{
return _max_metadata_size_bytes;
}
/*
* TODO: enable/disable scan_lock_global with config setting:
* y: always use it
* n: never use it
* auto (default): use based on /run/lvm/scan_lock_global
*/
void lvmcache_save_metadata_size_percent(uint64_t meta_size, uint64_t mdah_size)
{
dm_percent_t pc = dm_make_percent(meta_size, mdah_size);
if (pc == DM_PERCENT_INVALID || pc == DM_PERCENT_FAILED ||
pc == DM_PERCENT_0 || pc == DM_PERCENT_1)
return;
if (_max_metadata_size_percent == DM_PERCENT_INVALID) {
_max_metadata_size_percent = pc;
return;
}
if (_max_metadata_size_percent < pc)
_max_metadata_size_percent = pc;
}
/*
* TODO: make the percent at which scan_lock_global is used
* configurable?
*/
#define SCAN_LOCK_GLOBAL_METADATA_PERCENT (DM_PERCENT_1 * 25)
void set_scan_lock_global(struct cmd_context *cmd)
{
FILE *fp;
if (_max_metadata_size_percent == DM_PERCENT_INVALID)
return;
if (_max_metadata_size_percent >= SCAN_LOCK_GLOBAL_METADATA_PERCENT) {
if (_scan_lock_global_file_exists)
return;
log_debug("Creating %s.", _scan_lock_global_file);
if (!(fp = fopen(_scan_lock_global_file, "w")))
return;
if (fclose(fp))
stack;
} else {
if (_scan_lock_global_file_exists) {
log_debug("Unlinking %s.", _scan_lock_global_file);
if (unlink(_scan_lock_global_file))
stack;
}
}
}
int do_scan_lock_global(struct cmd_context *cmd, int *gl_ex)
{
struct stat buf;
if (cmd->nolocking)
return 0;
/* global lock is already held */
if (cmd->lockf_global_ex)
return 0;
if (!stat(_scan_lock_global_file, &buf)) {
_scan_lock_global_file_exists = 1;
/*
* Tell the caller to use sh or ex. A command that may write
* vg metadata should use ex, otherwise sh.
*
* lockd_vg_default_sh/LOCKD_VG_SH is set for commands that
* do not modify vg metadata.
*
* FIXME: this variable/flag was previously used only for
* lvmlockd locking logic, but is now more general, so
* it should be renamed.
*/
if (cmd->lockd_vg_default_sh)
*gl_ex = 0;
else
*gl_ex = 1;
return 1;
}
return 0;
return _max_metadata_size;
}
int lvmcache_vginfo_has_pvid(struct lvmcache_vginfo *vginfo, char *pvid)
@@ -2917,7 +2818,7 @@ void lvmcache_get_outdated_devs(struct cmd_context *cmd,
}
dm_list_iterate_items(info, &vginfo->outdated_infos) {
if (!(devl = dm_pool_zalloc(cmd->mem, sizeof(*devl))))
if (!(devl = zalloc(sizeof(*devl))))
return;
devl->dev = info->dev;
dm_list_add(devs, &devl->list);

View File

@@ -183,9 +183,8 @@ bool lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const c
int lvmcache_vginfo_has_pvid(struct lvmcache_vginfo *vginfo, char *pvid);
uint64_t lvmcache_max_metadata_size_bytes(void);
void lvmcache_save_metadata_size_bytes(uint64_t val);
void lvmcache_save_metadata_size_percent(uint64_t meta_size, uint64_t mdah_size);
uint64_t lvmcache_max_metadata_size(void);
void lvmcache_save_metadata_size(uint64_t val);
int dev_in_device_list(struct device *dev, struct dm_list *head);
@@ -227,8 +226,4 @@ void lvmcache_extra_md_component_checks(struct cmd_context *cmd);
unsigned int lvmcache_vg_info_count(void);
void set_scan_lock_global(struct cmd_context *cmd);
int do_scan_lock_global(struct cmd_context *cmd, int *gl_ex);
#endif

View File

@@ -966,8 +966,8 @@ static void _destroy_config(struct cmd_context *cmd)
/* CONFIG_FILE/CONFIG_MERGED_FILES */
if ((cft = remove_config_tree_by_source(cmd, CONFIG_MERGED_FILES)))
config_destroy(cft);
else if ((cft = remove_config_tree_by_source(cmd, CONFIG_FILE)))
config_destroy(cft);
else
remove_config_tree_by_source(cmd, CONFIG_FILE);
dm_list_iterate_items(cfl, &cmd->config_files)
config_destroy(cfl->cft);
@@ -1106,7 +1106,7 @@ static struct dev_filter *_init_filter_chain(struct cmd_context *cmd)
/* global regex filter. Optional. */
if ((cn = find_config_tree_node(cmd, devices_global_filter_CFG, NULL))) {
if (!(filters[nr_filt] = regex_filter_create(cn->v, 0, 1))) {
if (!(filters[nr_filt] = regex_filter_create(cn->v))) {
log_error("Failed to create global regex device filter");
goto bad;
}
@@ -1115,7 +1115,7 @@ static struct dev_filter *_init_filter_chain(struct cmd_context *cmd)
/* regex filter. Optional. */
if ((cn = find_config_tree_node(cmd, devices_filter_CFG, NULL))) {
if (!(filters[nr_filt] = regex_filter_create(cn->v, 1, 0))) {
if (!(filters[nr_filt] = regex_filter_create(cn->v))) {
log_error("Failed to create regex device filter");
goto bad;
}

View File

@@ -256,7 +256,6 @@ struct cmd_context {
unsigned rand_seed;
struct dm_list pending_delete; /* list of LVs for removal */
struct dm_pool *pending_delete_mem; /* memory pool for pending deletes */
int early_lock_vg_mode;
};
/*

View File

@@ -293,13 +293,13 @@ static int _compare_paths(const char *path0, const char *path1)
/* We prefer symlinks - they exist for a reason!
* So we prefer a shorter path before the first symlink in the name.
* FIXME Configuration option to invert this? */
while (s0 && s1) {
if ((s0 = strchr(s0, '/')))
while (s0) {
s0 = strchr(s0, '/');
s1 = strchr(s1, '/');
if (s0) {
*s0 = '\0';
if ((s1 = strchr(s1, '/')))
*s1 = '\0';
}
if (lstat(p0, &stat0)) {
log_sys_very_verbose("lstat", p0);
return 1;
@@ -312,10 +312,10 @@ static int _compare_paths(const char *path0, const char *path1)
return 0;
if (!S_ISLNK(stat0.st_mode) && S_ISLNK(stat1.st_mode))
return 1;
if (s0)
if (s0) {
*s0++ = '/';
if (s1)
*s1++ = '/';
}
}
/* ASCII comparison */
@@ -1490,7 +1490,7 @@ struct device *dev_cache_get(struct cmd_context *cmd, const char *name, struct d
* Remove incorrect info and then add new dev-cache entry.
*/
if (dev && (st.st_rdev != dev->dev)) {
log_debug("Device path %s does not match %d:%d %s.",
log_print("Device path %s does not match %d:%d %s.",
name, (int)MAJOR(dev->dev), (int)MINOR(dev->dev), dev_name(dev));
dm_hash_remove(_cache.names, name);
@@ -1510,8 +1510,7 @@ struct device *dev_cache_get(struct cmd_context *cmd, const char *name, struct d
* for st_rdev.
*/
if (!dev) {
if (!_insert_dev(name, st.st_rdev))
return_NULL;
_insert_dev(name, st.st_rdev);
/* Get the struct dev that was just added. */
dev = (struct device *) dm_hash_lookup(_cache.names, name);
@@ -1711,11 +1710,13 @@ static int _setup_devices_list(struct cmd_context *cmd)
*/
dm_list_iterate_items(strl, &cmd->deviceslist) {
if (!(du = dm_pool_zalloc(cmd->mem, sizeof(struct dev_use))))
if (!(du = zalloc(sizeof(struct dev_use))))
return_0;
if (!(du->devname = dm_pool_strdup(cmd->mem, strl->str)))
if (!(du->devname = strdup(strl->str))) {
free(du);
return_0;
}
dm_list_add(&cmd->use_devices, &du->list);
}

View File

@@ -55,7 +55,7 @@ static int _dev_has_md_magic(struct device *dev, uint64_t sb_offset)
static int _dev_has_imsm_magic(struct device *dev, uint64_t devsize_sectors)
{
char imsm_signature[IMSM_SIG_LEN];
uint64_t off;
uint64_t off = (devsize_sectors * 512) - 1024;
unsigned int physical_block_size = 0;
unsigned int logical_block_size = 0;

View File

@@ -701,23 +701,23 @@ out:
}
#ifdef BLKID_WIPING_SUPPORT
int get_fs_block_size(const char *pathname, uint32_t *fs_block_size)
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size)
{
char *block_size_str = NULL;
if ((block_size_str = blkid_get_tag_value(NULL, "BLOCK_SIZE", pathname))) {
if ((block_size_str = blkid_get_tag_value(NULL, "BLOCK_SIZE", dev_name(dev)))) {
*fs_block_size = (uint32_t)atoi(block_size_str);
free(block_size_str);
log_debug("Found blkid BLOCK_SIZE %u for fs on %s", *fs_block_size, pathname);
log_debug("Found blkid BLOCK_SIZE %u for fs on %s", *fs_block_size, dev_name(dev));
return 1;
} else {
log_debug("No blkid BLOCK_SIZE for fs on %s", pathname);
log_debug("No blkid BLOCK_SIZE for fs on %s", dev_name(dev));
*fs_block_size = 0;
return 0;
}
}
#else
int get_fs_block_size(const char *pathname, uint32_t *fs_block_size)
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size)
{
log_debug("Disabled blkid BLOCK_SIZE for fs.");
*fs_block_size = 0;

View File

@@ -100,6 +100,6 @@ int dev_is_nvme(struct dev_types *dt, struct device *dev);
int dev_is_lv(struct device *dev);
int get_fs_block_size(const char *pathname, uint32_t *fs_block_size);
int get_fs_block_size(struct device *dev, uint32_t *fs_block_size);
#endif

View File

@@ -325,12 +325,8 @@ const char *device_id_system_read(struct cmd_context *cmd, struct device *dev, u
else if (idtype == DEV_ID_TYPE_MD_UUID)
_read_sys_block(cmd, dev, "md/uuid", sysbuf, sizeof(sysbuf));
else if (idtype == DEV_ID_TYPE_LOOP_FILE) {
else if (idtype == DEV_ID_TYPE_LOOP_FILE)
_read_sys_block(cmd, dev, "loop/backing_file", sysbuf, sizeof(sysbuf));
/* if backing file is deleted, fall back to devname */
if (strstr(sysbuf, "(deleted)"))
sysbuf[0] = '\0';
}
else if (idtype == DEV_ID_TYPE_DEVNAME) {
if (!(idname = strdup(dev_name(dev))))
@@ -527,7 +523,7 @@ int device_ids_read(struct cmd_context *cmd)
_copy_idline_str(line, _devices_file_systemid, sizeof(_devices_file_systemid));
log_debug("read devices file systemid %s", _devices_file_systemid);
if ((!cmd->system_id && _devices_file_systemid[0]) ||
(cmd->system_id && strcmp(cmd->system_id, _devices_file_systemid))) {
strcmp(cmd->system_id, _devices_file_systemid)) {
log_warn("WARNING: ignoring devices file with wrong system id %s vs local %s.",
_devices_file_systemid[0] ? _devices_file_systemid : "none", cmd->system_id ?: "none");
free_dus(&cmd->use_devices);
@@ -683,7 +679,7 @@ int device_ids_write(struct cmd_context *cmd)
goto out;
}
(void) unlink(tmppath); /* in case a previous file was left */
unlink(tmppath); /* in case a previous file was left */
if (!(fp = fopen(tmppath, "w+"))) {
log_warn("Cannot open tmp devices_file to write.");
@@ -931,7 +927,6 @@ int device_id_add(struct cmd_context *cmd, struct device *dev, const char *pvid_
/*
* Choose the device_id type for the device being added.
*
* 0. use an idtype specified by the user
* 1. use an idtype specific to a special/virtual device type
* e.g. loop, mpath, crypt, lvmlv, md, etc.
* 2. use an idtype specified by user option.
@@ -940,24 +935,6 @@ int device_id_add(struct cmd_context *cmd, struct device *dev, const char *pvid_
* 5. use devname as the last resort.
*/
if (idtype_arg) {
if (!(idtype = idtype_from_str(idtype_arg)))
log_warn("WARNING: ignoring unknown device_id type %s.", idtype_arg);
else {
if (id_arg) {
if ((idname = strdup(id_arg)))
goto id_done;
log_warn("WARNING: ignoring device_id name %s.", id_arg);
}
if ((idname = device_id_system_read(cmd, dev, idtype)))
goto id_done;
log_warn("WARNING: ignoring deviceidtype %s which is not available for device.", idtype_arg);
idtype = 0;
}
}
if (MAJOR(dev->dev) == cmd->dev_types->device_mapper_major) {
if (_dev_has_mpath_uuid(cmd, dev, &idname)) {
idtype = DEV_ID_TYPE_MPATH_UUID;
@@ -991,6 +968,19 @@ int device_id_add(struct cmd_context *cmd, struct device *dev, const char *pvid_
log_warn("Missing support for DRBD idtype");
}
if (idtype_arg) {
if (!(idtype = idtype_from_str(idtype_arg)))
log_warn("WARNING: ignoring unknown device_id type %s.", idtype_arg);
else {
if (id_arg) {
if (!(idname = strdup(id_arg)))
stack;
goto id_done;
}
goto id_name;
}
}
/*
* No device-specific, existing, or user-specified idtypes,
* so use first available of sys_wwid / sys_serial / devname.
@@ -1018,24 +1008,21 @@ id_done:
break;
}
}
if (found_id && idname && strcmp(id->idname, idname)) {
if (found_id && !strcmp(id->idname, idname)) {
free((char *)idname);
} else if (found_id && strcmp(id->idname, idname)) {
dm_list_del(&id->list);
free_did(id);
found_id = 0;
}
if (!found_id) {
if (!(id = zalloc(sizeof(struct dev_id)))) {
free((char *)idname);
if (!(id = zalloc(sizeof(struct dev_id))))
return_0;
}
id->idtype = idtype;
id->idname = (char *)idname;
id->dev = dev;
dm_list_add(&dev->ids, &id->list);
} else
free((char*)idname);
}
dev->id = id;
dev->flags |= DEV_MATCHED_USE_ID;
@@ -1087,8 +1074,7 @@ id_done:
if (du_devid && (du_devid != du_dev)) {
log_warn("WARNING: device %s (%s) and %s (%s) have duplicate device ID.",
dev_name(dev), id->idname,
(du_pvid && du_pvid->dev) ? dev_name(du_pvid->dev) : "none",
du_pvid ? du_pvid->idname : "");
du_pvid->dev ? dev_name(du_pvid->dev) : "none", du_pvid->idname);
}
if (du_pvid && (du_pvid != du_dev)) {
@@ -1922,7 +1908,6 @@ void device_ids_find_renamed_devs(struct cmd_context *cmd, struct dm_list *dev_l
*/
dm_list_iterate_items(devl, &search_devs) {
dev = devl->dev;
int has_pvid;
/*
* We only need to check devs that would use ID_TYPE_DEVNAME
@@ -1946,17 +1931,11 @@ void device_ids_find_renamed_devs(struct cmd_context *cmd, struct dm_list *dev_l
/*
* Reads 4K from the start of the disk.
* Returns 0 if the dev cannot be read.
* Looks for LVM header, and sets dev->pvid if the device is a PV.
* Sets has_pvid=1 if the dev has an lvm PVID.
* Returns 0 if the dev has no lvm label or no PVID.
* This loop may look at and skip many non-LVM devices.
*/
if (!label_read_pvid(dev, &has_pvid)) {
no_pvid++;
continue;
}
if (!has_pvid) {
if (!label_read_pvid(dev)) {
no_pvid++;
continue;
}

View File

@@ -95,8 +95,6 @@ const char *get_lock_type_string(lock_type_t lock_type)
return "dlm";
case LOCK_TYPE_SANLOCK:
return "sanlock";
case LOCK_TYPE_IDM:
return "idm";
}
return "invalid";
}
@@ -113,8 +111,6 @@ lock_type_t get_lock_type_from_string(const char *str)
return LOCK_TYPE_DLM;
if (!strcmp(str, "sanlock"))
return LOCK_TYPE_SANLOCK;
if (!strcmp(str, "idm"))
return LOCK_TYPE_IDM;
return LOCK_TYPE_INVALID;
}

View File

@@ -21,10 +21,6 @@ struct rfilter {
struct dm_pool *mem;
dm_bitset_t accept;
struct dm_regex *engine;
unsigned config_filter:1;
unsigned config_global_filter:1;
unsigned warned_filter:1;
unsigned warned_global_filter:1;
};
static int _extract_pattern(struct dm_pool *mem, const char *pat,
@@ -161,18 +157,8 @@ static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct devic
if (cmd->enable_devices_list)
return 1;
if (cmd->enable_devices_file && !cmd->filter_regex_with_devices_file) {
/* can't warn in create_filter because enable_devices_file is set later */
if (rf->config_filter && !rf->warned_filter) {
log_warn("Please remove the lvm.conf filter, it is ignored with the devices file.");
rf->warned_filter = 1;
}
if (rf->config_global_filter && !rf->warned_global_filter) {
log_warn("Please remove the lvm.conf global_filter, it is ignored with the devices file.");
rf->warned_global_filter = 1;
}
if (cmd->enable_devices_file && !cmd->filter_regex_with_devices_file)
return 1;
}
dm_list_iterate_items(sl, &dev->aliases) {
m = dm_regex_match(rf->engine, sl->str);
@@ -213,7 +199,7 @@ static void _regex_destroy(struct dev_filter *f)
dm_pool_destroy(rf->mem);
}
struct dev_filter *regex_filter_create(const struct dm_config_value *patterns, int config_filter, int config_global_filter)
struct dev_filter *regex_filter_create(const struct dm_config_value *patterns)
{
struct dm_pool *mem = dm_pool_create("filter regex", 10 * 1024);
struct rfilter *rf;
@@ -227,9 +213,6 @@ struct dev_filter *regex_filter_create(const struct dm_config_value *patterns, i
rf->mem = mem;
rf->config_filter = config_filter;
rf->config_global_filter = config_global_filter;
if (!_build_matcher(rf, patterns))
goto_bad;

View File

@@ -44,7 +44,7 @@ void internal_filter_clear(void);
* r|.*| - reject everything else
*/
struct dev_filter *regex_filter_create(const struct dm_config_value *patterns, int config_filter, int config_global_filter);
struct dev_filter *regex_filter_create(const struct dm_config_value *patterns);
typedef enum {
FILTER_MODE_NO_LVMETAD,

View File

@@ -279,6 +279,7 @@ int backup_locally(struct volume_group *vg)
int backup(struct volume_group *vg)
{
vg->needs_backup = 0;
/* Unlock memory if possible */
memlock_unlock(vg->cmd);

View File

@@ -1646,9 +1646,7 @@ int read_metadata_location_summary(const struct format_type *fmt,
vgsummary->mda_size = rlocn->size;
/* Keep track of largest metadata size we find. */
lvmcache_save_metadata_size_bytes(rlocn->size);
/* Keep track of the most full metadata area. */
lvmcache_save_metadata_size_percent(rlocn->size, mdah->size);
lvmcache_save_metadata_size(rlocn->size);
lvmcache_lookup_mda(vgsummary);

View File

@@ -128,7 +128,7 @@ static int _read_id(struct id *id, const struct dm_config_node *cn, const char *
return 1;
}
static int _read_flag_config(const struct dm_config_node *n, uint64_t *status, enum pv_vg_lv_e type)
static int _read_flag_config(const struct dm_config_node *n, uint64_t *status, int type)
{
const struct dm_config_value *cv;
*status = 0;

View File

@@ -1288,14 +1288,12 @@ check:
*/
int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
struct dm_list *devs_in, struct dm_list *devs_out, char **vgname_out)
struct dm_list *devs_in, struct dm_list *devs_out)
{
struct dm_list hints_list;
int needs_refresh = 0;
char *vgname = NULL;
*vgname_out = NULL;
dm_list_init(&hints_list);
/* Decide below if the caller should create new hints. */
@@ -1435,7 +1433,7 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
dm_list_splice(hints_out, &hints_list);
*vgname_out = vgname;
free(vgname);
return 1;
}

View File

@@ -33,7 +33,7 @@ void clear_hint_file(struct cmd_context *cmd);
void invalidate_hints(struct cmd_context *cmd);
int get_hints(struct cmd_context *cmd, struct dm_list *hints, int *newhints,
struct dm_list *devs_in, struct dm_list *devs_out, char **vgname_out);
struct dm_list *devs_in, struct dm_list *devs_out);
int validate_hints(struct cmd_context *cmd, struct dm_list *hints);

View File

@@ -1032,7 +1032,6 @@ int label_scan(struct cmd_context *cmd)
struct dev_iter *iter;
struct device_list *devl, *devl2;
struct device *dev;
char *vgname_hint = NULL;
uint64_t max_metadata_size_bytes;
int device_ids_invalid = 0;
int using_hints;
@@ -1098,7 +1097,7 @@ int label_scan(struct cmd_context *cmd)
* so this will usually do nothing.
*/
label_scan_invalidate(dev);
}
};
dev_iter_destroy(iter);
/*
@@ -1138,54 +1137,21 @@ int label_scan(struct cmd_context *cmd)
* by using hints which tell us which devices are PVs, which
* are the only devices we actually need to scan. Without
* hints we need to scan all devs to find which are PVs.
*/
if (!get_hints(cmd, &hints_list, &create_hints, &all_devs, &scan_devs, &vgname_hint)) {
dm_list_splice(&scan_devs, &all_devs);
dm_list_init(&hints_list);
using_hints = 0;
} else
using_hints = 1;
/*
* If the command is using hints and a single vgname
*
* TODO: if the command is using hints and a single vgname
* arg, we can also take the vg lock here, prior to scanning.
* This means we would not need to rescan the PVs in the VG
* in vg_read (skip lvmcache_label_rescan_vg) after the
* vg lock is usually taken. (Some commands are already
* able to avoid rescan in vg_read, but locking early would
* apply to more cases.)
*
* TODO: we don't know exactly which vg lock mode (read or write)
* the command will use in vg_read() for the normal lock_vol(),
* but we could make a fairly accurate guess of READ/WRITE based
* on looking at the command name. If we guess wrong we can
* just unlock_vg and lock_vol again with the correct mode in
* vg_read().
*/
if (vgname_hint) {
uint32_t lck_type = LCK_VG_WRITE;
log_debug("Early lock vg");
/* FIXME: borrowing this lockd flag which should be
quite close to what we want, based on the command name.
Need to do proper mode selection here, and then check
in case the later lock_vol in vg_read wants different. */
if (cmd->lockd_vg_default_sh)
lck_type = LCK_VG_READ;
if (!lock_vol(cmd, vgname_hint, lck_type, NULL)) {
log_warn("Could not pre-lock VG %s.", vgname_hint);
/* not an error since this is just an optimization */
} else {
/* Save some state indicating that the vg lock
is already held so that the normal lock_vol()
will know. */
cmd->early_lock_vg_mode = lck_type;
}
free(vgname_hint);
}
if (!get_hints(cmd, &hints_list, &create_hints, &all_devs, &scan_devs)) {
dm_list_splice(&scan_devs, &all_devs);
dm_list_init(&hints_list);
using_hints = 0;
} else
using_hints = 1;
/*
* If the total number of devices exceeds the soft open file
@@ -1221,7 +1187,7 @@ int label_scan(struct cmd_context *cmd)
* If the largest metadata is within 1MB of the bcache size, then start
* warning.
*/
max_metadata_size_bytes = lvmcache_max_metadata_size_bytes();
max_metadata_size_bytes = lvmcache_max_metadata_size();
if (max_metadata_size_bytes + (1024 * 1024) > _current_bcache_size_bytes) {
/* we want bcache to be 1MB larger than the max metadata seen */
@@ -1236,14 +1202,6 @@ int label_scan(struct cmd_context *cmd)
(unsigned long long)want_size_kb);
}
/*
* If vg metadata is using a large percentage of a metadata area, then
* create /run/lvm/scan_lock_global to tell future lvm commands to
* begin doing lock_global() prior to scanning to avoid problems due to
* metadata wrapping between label_scan and vg_read.
*/
set_scan_lock_global(cmd);
dm_list_init(&cmd->hints);
/*
@@ -1319,7 +1277,7 @@ int label_scan(struct cmd_context *cmd)
* Read the header of the disk and if it's a PV
* save the pvid in dev->pvid.
*/
int label_read_pvid(struct device *dev, int *has_pvid)
int label_read_pvid(struct device *dev)
{
char buf[4096] __attribute__((aligned(8)));
struct label_header *lh;
@@ -1338,17 +1296,14 @@ int label_read_pvid(struct device *dev, int *has_pvid)
*/
if (!dev_read_bytes(dev, 0, 4096, buf)) {
label_scan_invalidate(dev);
return_0;
return 0;
}
if (has_pvid)
*has_pvid = 0;
lh = (struct label_header *)(buf + 512);
if (memcmp(lh->id, LABEL_ID, sizeof(lh->id))) {
/* Not an lvm deice */
label_scan_invalidate(dev);
return 1;
return 0;
}
/*
@@ -1358,12 +1313,9 @@ int label_read_pvid(struct device *dev, int *has_pvid)
if (memcmp(lh->type, LVM2_LABEL, sizeof(lh->type))) {
/* Not an lvm deice */
label_scan_invalidate(dev);
return 1;
return 0;
}
if (has_pvid)
*has_pvid = 1;
pvh = (struct pv_header *)(buf + 512 + 32);
memcpy(dev->pvid, pvh->pv_uuid, ID_LEN);
return 1;
@@ -1705,7 +1657,7 @@ bool dev_write_bytes(struct device *dev, uint64_t start, size_t len, void *data)
_scan_dev_close(dev);
dev->flags |= DEV_BCACHE_WRITE;
(void) label_scan_open(dev); /* checked later */
label_scan_open(dev);
}
if (dev->bcache_di < 0) {

View File

@@ -117,7 +117,7 @@ int label_scan_open(struct device *dev);
int label_scan_open_excl(struct device *dev);
int label_scan_open_rw(struct device *dev);
int label_scan_reopen_rw(struct device *dev);
int label_read_pvid(struct device *dev, int *has_pvid);
int label_read_pvid(struct device *dev);
int label_scan_for_pvid(struct cmd_context *cmd, char *pvid, struct device **dev_out);

View File

@@ -203,11 +203,6 @@ int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const str
if (is_orphan_vg(vol))
return 1;
if (!is_global && cmd->early_lock_vg_mode && (lck_type != LCK_UNLOCK)) {
log_debug("VG was locked early.");
return 1;
}
if (!_blocking_supported)
flags |= LCK_NONBLOCK;
@@ -359,8 +354,10 @@ static int _lockf_global(struct cmd_context *cmd, const char *mode, int convert,
if (!strcmp(mode, "ex")) {
flags |= LCK_WRITE;
if (cmd->lockf_global_ex)
if (cmd->lockf_global_ex) {
log_warn("global flock already held ex");
return 1;
}
ret = lock_vol(cmd, VG_GLOBAL, flags, NULL);
if (ret)

View File

@@ -56,11 +56,8 @@ int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const str
#define unlock_vg(cmd, vg, vol) \
do { \
if (is_real_vg(vol)) { \
if (!sync_local_dev_names(cmd)) \
stack; \
vg_backup_if_needed(vg); \
} \
if (is_real_vg(vol) && !sync_local_dev_names(cmd)) \
stack; \
if (!lock_vol(cmd, vol, LCK_VG_UNLOCK, NULL)) \
stack; \
} while (0)

View File

@@ -25,11 +25,6 @@ static int _use_lvmlockd = 0; /* is 1 if command is configured to use lv
static int _lvmlockd_connected = 0; /* is 1 if command is connected to lvmlockd */
static int _lvmlockd_init_failed = 0; /* used to suppress further warnings */
struct lvmlockd_pvs {
char **path;
int num;
};
void lvmlockd_set_socket(const char *sock)
{
_lvmlockd_socket = sock;
@@ -183,34 +178,18 @@ static int _lockd_result(daemon_reply reply, int *result, uint32_t *lockd_flags)
return 1;
}
static daemon_reply _lockd_send_with_pvs(const char *req_name,
const struct lvmlockd_pvs *lock_pvs, ...)
static daemon_reply _lockd_send(const char *req_name, ...)
{
va_list ap;
daemon_reply repl;
daemon_request req;
int i;
char key[32];
const char *val;
va_list ap;
req = daemon_request_make(req_name);
va_start(ap, lock_pvs);
va_start(ap, req_name);
daemon_request_extend_v(req, ap);
va_end(ap);
/* Pass PV list */
if (lock_pvs && lock_pvs->num) {
daemon_request_extend(req, "path_num = " FMTd64,
(int64_t)(lock_pvs)->num, NULL);
for (i = 0; i < lock_pvs->num; i++) {
snprintf(key, sizeof(key), "path[%d] = %%s", i);
val = lock_pvs->path[i] ? lock_pvs->path[i] : "none";
daemon_request_extend(req, key, val, NULL);
}
}
repl = daemon_send(_lvmlockd, req);
daemon_request_destroy(req);
@@ -218,166 +197,6 @@ static daemon_reply _lockd_send_with_pvs(const char *req_name,
return repl;
}
#define _lockd_send(req_name, args...) \
_lockd_send_with_pvs(req_name, NULL, ##args)
static int _lockd_retrive_vg_pv_num(struct volume_group *vg)
{
struct pv_list *pvl;
int num = 0;
dm_list_iterate_items(pvl, &vg->pvs)
num++;
return num;
}
static void _lockd_retrive_vg_pv_list(struct volume_group *vg,
struct lvmlockd_pvs *lock_pvs)
{
struct pv_list *pvl;
int pv_num, i;
memset(lock_pvs, 0x0, sizeof(*lock_pvs));
pv_num = _lockd_retrive_vg_pv_num(vg);
if (!pv_num) {
log_error("Fail to any PVs for VG %s", vg->name);
return;
}
/* Allocate buffer for PV list */
lock_pvs->path = zalloc(sizeof(*lock_pvs->path) * pv_num);
if (!lock_pvs->path) {
log_error("Fail to allocate PV list for VG %s", vg->name);
return;
}
i = 0;
dm_list_iterate_items(pvl, &vg->pvs) {
lock_pvs->path[i] = strdup(pv_dev_name(pvl->pv));
if (!lock_pvs->path[i]) {
log_error("Fail to allocate PV path for VG %s", vg->name);
goto fail;
}
log_debug("VG %s find PV device %s", vg->name, lock_pvs->path[i]);
i++;
}
lock_pvs->num = pv_num;
return;
fail:
for (i = 0; i < pv_num; i++) {
if (!lock_pvs->path[i])
continue;
free(lock_pvs->path[i]);
}
free(lock_pvs->path);
return;
}
static int _lockd_retrive_lv_pv_num(struct volume_group *vg,
const char *lv_name)
{
struct logical_volume *lv = find_lv(vg, lv_name);
struct pv_list *pvl;
int num;
if (!lv)
return 0;
num = 0;
dm_list_iterate_items(pvl, &vg->pvs) {
if (lv_is_on_pv(lv, pvl->pv))
num++;
}
return num;
}
static void _lockd_retrive_lv_pv_list(struct volume_group *vg,
const char *lv_name,
struct lvmlockd_pvs *lock_pvs)
{
struct logical_volume *lv = find_lv(vg, lv_name);
struct pv_list *pvl;
int pv_num, i = 0;
memset(lock_pvs, 0x0, sizeof(*lock_pvs));
/* Cannot find any existed LV? */
if (!lv)
return;
pv_num = _lockd_retrive_lv_pv_num(vg, lv_name);
if (!pv_num) {
/*
* Fixup for 'lvcreate --type error -L1 -n $lv1 $vg', in this
* case, the drive path list is empty since it doesn't establish
* the structure 'pvseg->lvseg->lv->name'.
*
* So create drive path list with all drives in the VG.
*/
log_error("Fail to find any PVs for %s/%s, try to find PVs from VG instead",
vg->name, lv_name);
_lockd_retrive_vg_pv_list(vg, lock_pvs);
return;
}
/* Allocate buffer for PV list */
lock_pvs->path = malloc(sizeof(*lock_pvs->path) * pv_num);
if (!lock_pvs->path) {
log_error("Fail to allocate PV list for %s/%s", vg->name, lv_name);
return;
}
dm_list_iterate_items(pvl, &vg->pvs) {
if (lv_is_on_pv(lv, pvl->pv)) {
lock_pvs->path[i] = strdup(pv_dev_name(pvl->pv));
if (!lock_pvs->path[i]) {
log_error("Fail to allocate PV path for LV %s/%s",
vg->name, lv_name);
goto fail;
}
log_debug("Find PV device %s for LV %s/%s",
lock_pvs->path[i], vg->name, lv_name);
i++;
}
}
lock_pvs->num = pv_num;
return;
fail:
for (i = 0; i < pv_num; i++) {
if (!lock_pvs->path[i])
continue;
free(lock_pvs->path[i]);
lock_pvs->path[i] = NULL;
}
free(lock_pvs->path);
lock_pvs->path = NULL;
lock_pvs->num = 0;
return;
}
static void _lockd_free_pv_list(struct lvmlockd_pvs *lock_pvs)
{
int i;
for (i = 0; i < lock_pvs->num; i++) {
free(lock_pvs->path[i]);
lock_pvs->path[i] = NULL;
}
free(lock_pvs->path);
lock_pvs->path = NULL;
lock_pvs->num = 0;
}
/*
* result/lockd_flags are values returned from lvmlockd.
*
@@ -408,7 +227,6 @@ static int _lockd_request(struct cmd_context *cmd,
const char *lv_lock_args,
const char *mode,
const char *opts,
const struct lvmlockd_pvs *lock_pvs,
int *result,
uint32_t *lockd_flags)
{
@@ -433,8 +251,7 @@ static int _lockd_request(struct cmd_context *cmd,
cmd_name = "none";
if (vg_name && lv_name) {
reply = _lockd_send_with_pvs(req_name,
lock_pvs,
reply = _lockd_send(req_name,
"cmd = %s", cmd_name,
"pid = " FMTd64, (int64_t) pid,
"mode = %s", mode,
@@ -454,8 +271,7 @@ static int _lockd_request(struct cmd_context *cmd,
req_name, mode, vg_name, lv_name, *result, *lockd_flags);
} else if (vg_name) {
reply = _lockd_send_with_pvs(req_name,
lock_pvs,
reply = _lockd_send(req_name,
"cmd = %s", cmd_name,
"pid = " FMTd64, (int64_t) pid,
"mode = %s", mode,
@@ -472,8 +288,7 @@ static int _lockd_request(struct cmd_context *cmd,
req_name, mode, vg_name, *result, *lockd_flags);
} else {
reply = _lockd_send_with_pvs(req_name,
lock_pvs,
reply = _lockd_send(req_name,
"cmd = %s", cmd_name,
"pid = " FMTd64, (int64_t) pid,
"mode = %s", mode,
@@ -738,8 +553,7 @@ static int _deactivate_sanlock_lv(struct cmd_context *cmd, struct volume_group *
return 1;
}
static int _init_vg(struct cmd_context *cmd, struct volume_group *vg,
const char *lock_type)
static int _init_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
daemon_reply reply;
const char *reply_str;
@@ -755,7 +569,7 @@ static int _init_vg(struct cmd_context *cmd, struct volume_group *vg,
reply = _lockd_send("init_vg",
"pid = " FMTd64, (int64_t) getpid(),
"vg_name = %s", vg->name,
"vg_lock_type = %s", lock_type,
"vg_lock_type = %s", "dlm",
NULL);
if (!_lockd_result(reply, &result, NULL)) {
@@ -775,12 +589,10 @@ static int _init_vg(struct cmd_context *cmd, struct volume_group *vg,
log_error("VG %s init failed: invalid parameters for dlm", vg->name);
break;
case -EMANAGER:
log_error("VG %s init failed: lock manager %s is not running",
vg->name, lock_type);
log_error("VG %s init failed: lock manager dlm is not running", vg->name);
break;
case -EPROTONOSUPPORT:
log_error("VG %s init failed: lock manager %s is not supported by lvmlockd",
vg->name, lock_type);
log_error("VG %s init failed: lock manager dlm is not supported by lvmlockd", vg->name);
break;
case -EEXIST:
log_error("VG %s init failed: a lockspace with the same name exists", vg->name);
@@ -804,7 +616,7 @@ static int _init_vg(struct cmd_context *cmd, struct volume_group *vg,
goto out;
}
vg->lock_type = lock_type;
vg->lock_type = "dlm";
vg->lock_args = vg_lock_args;
if (!vg_write(vg) || !vg_commit(vg)) {
@@ -819,16 +631,6 @@ out:
return ret;
}
static int _init_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
return _init_vg(cmd, vg, "dlm");
}
static int _init_vg_idm(struct cmd_context *cmd, struct volume_group *vg)
{
return _init_vg(cmd, vg, "idm");
}
static int _init_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg, int lv_lock_count)
{
daemon_reply reply;
@@ -992,7 +794,7 @@ out:
/* called after vg_remove on disk */
static int _free_vg(struct cmd_context *cmd, struct volume_group *vg)
static int _free_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
daemon_reply reply;
uint32_t lockd_flags = 0;
@@ -1018,27 +820,16 @@ static int _free_vg(struct cmd_context *cmd, struct volume_group *vg)
}
if (!ret)
log_error("%s: lock type %s lvmlockd result %d",
__func__, vg->lock_type, result);
log_error("_free_vg_dlm lvmlockd result %d", result);
daemon_reply_destroy(reply);
return 1;
}
static int _free_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
return _free_vg(cmd, vg);
}
static int _free_vg_idm(struct cmd_context *cmd, struct volume_group *vg)
{
return _free_vg(cmd, vg);
}
/* called before vg_remove on disk */
static int _busy_vg(struct cmd_context *cmd, struct volume_group *vg)
static int _busy_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
daemon_reply reply;
uint32_t lockd_flags = 0;
@@ -1073,24 +864,13 @@ static int _busy_vg(struct cmd_context *cmd, struct volume_group *vg)
}
if (!ret)
log_error("%s: lock type %s lvmlockd result %d", __func__,
vg->lock_type, result);
log_error("_busy_vg_dlm lvmlockd result %d", result);
out:
daemon_reply_destroy(reply);
return ret;
}
static int _busy_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
return _busy_vg(cmd, vg);
}
static int _busy_vg_idm(struct cmd_context *cmd, struct volume_group *vg)
{
return _busy_vg(cmd, vg);
}
/* called before vg_remove on disk */
static int _free_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg)
@@ -1196,8 +976,6 @@ int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg,
return _init_vg_dlm(cmd, vg);
case LOCK_TYPE_SANLOCK:
return _init_vg_sanlock(cmd, vg, lv_lock_count);
case LOCK_TYPE_IDM:
return _init_vg_idm(cmd, vg);
default:
log_error("Unknown lock_type.");
return 0;
@@ -1239,8 +1017,7 @@ int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg,
* When removing (not changing), each LV is locked
* when it is removed, they do not need checking here.
*/
if (lock_type_num == LOCK_TYPE_DLM || lock_type_num == LOCK_TYPE_SANLOCK ||
lock_type_num == LOCK_TYPE_IDM) {
if (lock_type_num == LOCK_TYPE_DLM || lock_type_num == LOCK_TYPE_SANLOCK) {
if (changing && !_lockd_all_lvs(cmd, vg)) {
log_error("Cannot change VG %s with active LVs", vg->name);
return 0;
@@ -1264,9 +1041,6 @@ int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg,
case LOCK_TYPE_SANLOCK:
/* returning an error will prevent vg_remove() */
return _free_vg_sanlock(cmd, vg);
case LOCK_TYPE_IDM:
/* returning an error will prevent vg_remove() */
return _busy_vg_idm(cmd, vg);
default:
log_error("Unknown lock_type.");
return 0;
@@ -1285,9 +1059,6 @@ void lockd_free_vg_final(struct cmd_context *cmd, struct volume_group *vg)
case LOCK_TYPE_DLM:
_free_vg_dlm(cmd, vg);
break;
case LOCK_TYPE_IDM:
_free_vg_idm(cmd, vg);
break;
default:
log_error("Unknown lock_type.");
}
@@ -1319,7 +1090,6 @@ int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_i
int host_id = 0;
int result;
int ret;
struct lvmlockd_pvs lock_pvs;
memset(uuid, 0, sizeof(uuid));
@@ -1355,15 +1125,7 @@ int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_i
host_id = find_config_tree_int(cmd, local_host_id_CFG, NULL);
}
/*
* Create the VG's PV list when start the VG, the PV list
* is passed to lvmlockd, and the the PVs path will be used
* to send SCSI commands for idm locking scheme.
*/
if (!strcmp(vg->lock_type, "idm")) {
_lockd_retrive_vg_pv_list(vg, &lock_pvs);
reply = _lockd_send_with_pvs("start_vg",
&lock_pvs,
reply = _lockd_send("start_vg",
"pid = " FMTd64, (int64_t) getpid(),
"vg_name = %s", vg->name,
"vg_lock_type = %s", vg->lock_type,
@@ -1373,20 +1135,6 @@ int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg, int start_i
"host_id = " FMTd64, (int64_t) host_id,
"opts = %s", start_init ? "start_init" : "none",
NULL);
_lockd_free_pv_list(&lock_pvs);
} else {
reply = _lockd_send_with_pvs("start_vg",
NULL,
"pid = " FMTd64, (int64_t) getpid(),
"vg_name = %s", vg->name,
"vg_lock_type = %s", vg->lock_type,
"vg_lock_args = %s", vg->lock_args ?: "none",
"vg_uuid = %s", uuid[0] ? uuid : "none",
"version = " FMTd64, (int64_t) vg->seqno,
"host_id = " FMTd64, (int64_t) host_id,
"opts = %s", start_init ? "start_init" : "none",
NULL);
}
if (!_lockd_result(reply, &result, &lockd_flags)) {
ret = 0;
@@ -1614,7 +1362,7 @@ int lockd_global_create(struct cmd_context *cmd, const char *def_mode, const cha
req:
if (!_lockd_request(cmd, "lock_gl",
NULL, vg_lock_type, NULL, NULL, NULL, NULL, mode, NULL,
NULL, &result, &lockd_flags)) {
&result, &lockd_flags)) {
/* No result from lvmlockd, it is probably not running. */
log_error("Global lock failed: check that lvmlockd is running.");
return 0;
@@ -1850,7 +1598,7 @@ int lockd_global(struct cmd_context *cmd, const char *def_mode)
if (!_lockd_request(cmd, "lock_gl",
NULL, NULL, NULL, NULL, NULL, NULL, mode, opts,
NULL, &result, &lockd_flags)) {
&result, &lockd_flags)) {
/* No result from lvmlockd, it is probably not running. */
/* We don't care if an unlock fails. */
@@ -2118,7 +1866,7 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
if (!_lockd_request(cmd, "lock_vg",
vg_name, NULL, NULL, NULL, NULL, NULL, mode, NULL,
NULL, &result, &lockd_flags)) {
&result, &lockd_flags)) {
/*
* No result from lvmlockd, it is probably not running.
* Decide if it is ok to continue without a lock in
@@ -2378,7 +2126,6 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
uint32_t lockd_flags;
int refreshed = 0;
int result;
struct lvmlockd_pvs lock_pvs;
/*
* Verify that when --readonly is used, no LVs should be activated or used.
@@ -2444,28 +2191,13 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
retry:
log_debug("lockd LV %s/%s mode %s uuid %s", vg->name, lv_name, mode, lv_uuid);
/* Pass PV list for IDM lock type */
if (!strcmp(vg->lock_type, "idm")) {
_lockd_retrive_lv_pv_list(vg, lv_name, &lock_pvs);
if (!_lockd_request(cmd, "lock_lv",
vg->name, vg->lock_type, vg->lock_args,
lv_name, lv_uuid, lock_args, mode, opts,
&lock_pvs, &result, &lockd_flags)) {
_lockd_free_pv_list(&lock_pvs);
/* No result from lvmlockd, it is probably not running. */
log_error("Locking failed for LV %s/%s", vg->name, lv_name);
return 0;
}
_lockd_free_pv_list(&lock_pvs);
} else {
if (!_lockd_request(cmd, "lock_lv",
vg->name, vg->lock_type, vg->lock_args,
lv_name, lv_uuid, lock_args, mode, opts,
NULL, &result, &lockd_flags)) {
/* No result from lvmlockd, it is probably not running. */
log_error("Locking failed for LV %s/%s", vg->name, lv_name);
return 0;
}
if (!_lockd_request(cmd, "lock_lv",
vg->name, vg->lock_type, vg->lock_args,
lv_name, lv_uuid, lock_args, mode, opts,
&result, &lockd_flags)) {
/* No result from lvmlockd, it is probably not running. */
log_error("Locking failed for LV %s/%s", vg->name, lv_name);
return 0;
}
/* The lv was not active/locked. */
@@ -2947,7 +2679,6 @@ int lockd_init_lv(struct cmd_context *cmd, struct volume_group *vg, struct logic
return 1;
case LOCK_TYPE_SANLOCK:
case LOCK_TYPE_DLM:
case LOCK_TYPE_IDM:
break;
default:
log_error("lockd_init_lv: unknown lock_type.");
@@ -3090,8 +2821,6 @@ int lockd_init_lv(struct cmd_context *cmd, struct volume_group *vg, struct logic
lv->lock_args = "pending";
else if (!strcmp(vg->lock_type, "dlm"))
lv->lock_args = "dlm";
else if (!strcmp(vg->lock_type, "idm"))
lv->lock_args = "idm";
return 1;
}
@@ -3107,7 +2836,6 @@ int lockd_free_lv(struct cmd_context *cmd, struct volume_group *vg,
return 1;
case LOCK_TYPE_DLM:
case LOCK_TYPE_SANLOCK:
case LOCK_TYPE_IDM:
if (!lock_args)
return 1;
return _free_lv(cmd, vg, lv_name, lv_id, lock_args);
@@ -3279,10 +3007,6 @@ const char *lockd_running_lock_type(struct cmd_context *cmd, int *found_multiple
log_debug("lvmlockd found dlm");
lock_type = "dlm";
break;
case LOCK_TYPE_IDM:
log_debug("lvmlockd found idm");
lock_type = "idm";
break;
default:
log_error("Failed to find a running lock manager.");
break;

View File

@@ -326,6 +326,7 @@ static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_vol
int lbs_4k, int lbs_512, int pbs_4k, int pbs_512)
{
char pathname[PATH_MAX];
struct device *fs_dev;
uint32_t fs_block_size = 0;
int rv;
@@ -370,6 +371,10 @@ static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_vol
log_error("Path name too long to get LV block size %s", display_lvname(lv));
goto bad;
}
if (!(fs_dev = dev_cache_get(cmd, pathname, NULL))) {
log_error("Device for LV not found to check block size %s", display_lvname(lv));
goto bad;
}
/*
* get_fs_block_size() returns the libblkid BLOCK_SIZE value,
@@ -382,7 +387,7 @@ static int _set_integrity_block_size(struct cmd_context *cmd, struct logical_vol
* value the block size, but it's possible values are not the same
* as xfs's, and do not seem to relate directly to the device LBS.
*/
rv = get_fs_block_size(pathname, &fs_block_size);
rv = get_fs_block_size(fs_dev, &fs_block_size);
if (!rv || !fs_block_size) {
int use_bs;

View File

@@ -4834,6 +4834,9 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
}
}
if (update_mda && !archive(vg))
return_0;
if (old_lv_is_historical) {
/*
* Historical LVs have neither sub LVs nor any
@@ -6143,6 +6146,9 @@ int lv_resize(struct logical_volume *lv,
if (!lockd_lv_resize(cmd, lock_lv, "ex", 0, lp))
return_0;
if (!archive(vg))
return_0;
/* Remove any striped raid reshape space for LV resizing */
if (aux_lv && first_seg(aux_lv)->reshape_len)
if (!lv_raid_free_reshape_space(aux_lv))
@@ -6178,6 +6184,8 @@ int lv_resize(struct logical_volume *lv,
/* Update lvm pool metadata (drop messages). */
if (!update_pool_lv(lock_lv, 0))
goto_bad;
backup(vg);
}
/* Check for over provisioning when extended */
@@ -6550,7 +6558,7 @@ static int _lv_remove_check_in_use(struct logical_volume *lv, force_t force)
{
struct volume_group *vg = lv->vg;
const char *volume_type = "";
char buffer[50 + NAME_LEN * 2] = "";
char buffer[50 * NAME_LEN * 2] = "";
int active;
int issue_discards =
(vg->cmd->current_settings.issue_discards &&
@@ -6684,25 +6692,6 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
return_0;
}
/* if thin pool data lv is writecache, then detach and remove the writecache */
if (lv_is_thin_pool(lv)) {
struct logical_volume *data_lv = data_lv_from_thin_pool(lv);
if (data_lv && lv_is_writecache(data_lv)) {
struct logical_volume *cachevol_lv = first_seg(data_lv)->writecache;
if (!lv_detach_writecache_cachevol(data_lv, 1)) {
log_error("Failed to detach writecache from %s", display_lvname(data_lv));
return 0;
}
if (!lv_remove_single(cmd, cachevol_lv, force, 1)) {
log_error("Failed to remove cachevol %s.", display_lvname(cachevol_lv));
return 0;
}
}
}
if (lv_is_writecache(lv)) {
struct logical_volume *cachevol_lv = first_seg(lv)->writecache;
@@ -6711,6 +6700,9 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
if (!archive(vg))
return_0;
if (!lv_detach_writecache_cachevol(lv, 1)) {
log_error("Failed to detach writecache from %s", display_lvname(lv));
return 0;
@@ -6731,6 +6723,9 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
/* FIXME Review and fix the snapshot error paths! */
return_0;
if (!archive(vg))
return_0;
/* Special case removing a striped raid LV with allocated reshape space */
if (seg && seg->reshape_len) {
if (!(seg->segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
@@ -6826,8 +6821,7 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
display_lvname(pool_lv));
}
if (!lockd_lv(cmd, lv, "un", LDLV_PERSISTENT))
log_warn("WARNING: Failed to unlock %s.", display_lvname(lv));
lockd_lv(cmd, lock_lv, "un", LDLV_PERSISTENT);
lockd_free_lv(cmd, vg, lv->name, &lv->lvid.id[1], lv->lock_args);
if (!suppress_remove_message && (visible || historical)) {
@@ -7022,7 +7016,7 @@ no_remove:
static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
{
struct volume_group *vg = lv->vg;
int r = 0;
int do_backup = 0, r = 0;
const struct logical_volume *lock_lv = lv_lock_holder(lv);
log_very_verbose("Updating logical volume %s on disk(s)%s.",
@@ -7046,6 +7040,8 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
return 0;
} else if (!(r = vg_commit(vg)))
stack; /* !vg_commit() has implict vg_revert() */
else
do_backup = 1;
log_very_verbose("Updating logical volume %s in kernel.",
display_lvname(lock_lv));
@@ -7056,6 +7052,9 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
r = 0;
}
if (do_backup && !critical_section())
backup(vg);
return r;
}
@@ -8439,6 +8438,9 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return NULL;
}
if (!archive(vg))
return_NULL;
if (pool_lv && segtype_is_thin_volume(create_segtype)) {
/* Ensure all stacked messages are submitted */
if ((pool_is_active(pool_lv) || is_change_activating(lp->activate)) &&
@@ -8588,6 +8590,8 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
/* Pool created metadata LV, but better avoid recover when vg_write/commit fails */
return_NULL;
backup(vg);
if (test_mode()) {
log_verbose("Test mode: Skipping activation, zeroing and signature wiping.");
goto out;
@@ -8598,6 +8602,8 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
if (!lv_add_integrity_to_raid(lv, &lp->integrity_settings, lp->pvh, NULL))
goto revert_new_lv;
backup(vg);
}
/* Do not scan this LV until properly zeroed/wiped. */
@@ -8697,6 +8703,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
goto revert_new_lv;
}
}
backup(vg);
if (!lv_active_change(cmd, lv, lp->activate)) {
log_error("Failed to activate thin %s.", lv->name);
@@ -8817,6 +8824,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
if (!vg_write(vg) || !vg_commit(vg))
return_NULL; /* Metadata update fails, deep troubles */
backup(vg);
/*
* FIXME We do not actually need snapshot-origin as an active device,
* as virtual origin is already 'hidden' private device without
@@ -8850,8 +8858,7 @@ deactivate_and_revert_new_lv:
}
revert_new_lv:
if (!lockd_lv(cmd, lv, "un", LDLV_PERSISTENT))
log_warn("WARNING: Failed to unlock %s.", display_lvname(lv));
lockd_lv(cmd, lv, "un", LDLV_PERSISTENT);
lockd_free_lv(vg->cmd, vg, lv->name, &lv->lvid.id[1], lv->lock_args);
/* FIXME Better to revert to backup of metadata? */
@@ -8860,6 +8867,8 @@ revert_new_lv:
!lv_remove(lv) || !vg_write(vg) || !vg_commit(vg))
log_error("Manual intervention may be required to remove "
"abandoned LV(s) before retrying.");
else
backup(vg);
return NULL;
}

View File

@@ -356,7 +356,6 @@ typedef enum {
LOCK_TYPE_CLVM = 1,
LOCK_TYPE_DLM = 2,
LOCK_TYPE_SANLOCK = 3,
LOCK_TYPE_IDM = 4,
} lock_type_t;
struct cmd_context;
@@ -928,8 +927,6 @@ int handle_pool_metadata_spare(struct volume_group *vg, uint32_t extents,
int vg_set_pool_metadata_spare(struct logical_volume *lv);
int vg_remove_pool_metadata_spare(struct volume_group *vg);
struct logical_volume *data_lv_from_thin_pool(struct logical_volume *pool_lv);
int attach_thin_external_origin(struct lv_segment *seg,
struct logical_volume *external_lv);
int detach_thin_external_origin(struct lv_segment *seg);

View File

@@ -614,6 +614,9 @@ int vg_remove_check(struct volume_group *vg)
return 0;
}
if (!archive(vg))
return 0;
return 1;
}
@@ -991,7 +994,6 @@ static void _vg_move_cached_precommitted_to_committed(struct volume_group *vg)
release_vg(vg->vg_committed);
vg->vg_committed = vg->vg_precommitted;
vg->vg_precommitted = NULL;
vg->needs_backup = 1;
}
int lv_has_unknown_segments(const struct logical_volume *lv)
@@ -2233,13 +2235,6 @@ static int _validate_lv_lock_args(struct logical_volume *lv)
lv->vg->name, display_lvname(lv), lv->lock_args);
r = 0;
}
} else if (!strcmp(lv->vg->lock_type, "idm")) {
if (strcmp(lv->lock_args, "idm")) {
log_error(INTERNAL_ERROR "LV %s/%s has invalid lock_args \"%s\"",
lv->vg->name, display_lvname(lv), lv->lock_args);
r = 0;
}
}
return r;
@@ -2574,8 +2569,7 @@ int vg_validate(struct volume_group *vg)
r = 0;
}
if (strcmp(vg->lock_type, "sanlock") && strcmp(vg->lock_type, "dlm") &&
strcmp(vg->lock_type, "idm")) {
if (strcmp(vg->lock_type, "sanlock") && strcmp(vg->lock_type, "dlm")) {
log_error(INTERNAL_ERROR "VG %s has unknown lock_type %s",
vg->name, vg->lock_type);
r = 0;
@@ -2976,9 +2970,6 @@ int vg_write(struct volume_group *vg)
if (vg->cmd->wipe_outdated_pvs)
_wipe_outdated_pvs(vg->cmd, vg);
if (!vg_is_archived(vg) && vg->vg_committed && !archive(vg->vg_committed))
return_0;
if (critical_section())
log_error(INTERNAL_ERROR
"Writing metadata in critical section.");
@@ -3166,7 +3157,8 @@ int vg_commit(struct volume_group *vg)
dm_list_init(&vg->msg_list);
vg->needs_write_and_commit = 0;
}
}
vg->needs_backup = 0;
}
/* If at least one mda commit succeeded, it was committed */
return ret;
@@ -4363,8 +4355,6 @@ int is_lockd_type(const char *lock_type)
return 1;
if (!strcmp(lock_type, "sanlock"))
return 1;
if (!strcmp(lock_type, "idm"))
return 1;
return 0;
}
@@ -4412,9 +4402,6 @@ int lv_on_pmem(struct logical_volume *lv)
dm_list_iterate_items(seg, &lv->segments) {
for (s = 0; s < seg->area_count; s++) {
if (seg_type(seg, s) != AREA_PV)
continue;
pv = seg_pv(seg, s);
if (dev_is_pmem(lv->vg->cmd->dev_types, pv->dev)) {
@@ -4967,8 +4954,7 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name, const
log_very_verbose("Reading orphan VG %s.", vg_name);
vg = vg_read_orphans(cmd, vg_name);
*error_flags = 0;
if (error_vg)
*error_vg = NULL;
*error_vg = NULL;
return vg;
}

View File

@@ -623,6 +623,9 @@ int pv_resize_single(struct cmd_context *cmd,
const char *vg_name = pv->vg_name;
int vg_needs_pv_write = 0;
if (!archive(vg))
goto out;
if (!(pv->fmt->features & FMT_RESIZE_PV)) {
log_error("Physical volume %s format does not support resizing.",
pv_name);
@@ -687,6 +690,7 @@ int pv_resize_single(struct cmd_context *cmd,
"volume group \"%s\"", pv_name, vg_name);
goto out;
}
backup(vg);
}
log_print_unless_silent("Physical volume \"%s\" changed", pv_name);

View File

@@ -2210,6 +2210,9 @@ static int _vg_write_lv_suspend_commit_backup(struct volume_group *vg,
} else if (!(r = vg_commit(vg)))
stack; /* !vg_commit() has implicit vg_revert() */
if (r && do_backup)
backup(vg);
return r;
}
@@ -2218,6 +2221,8 @@ static int _vg_write_commit_backup(struct volume_group *vg)
if (!vg_write(vg) || !vg_commit(vg))
return_0;
backup(vg);
return 1;
}
@@ -2842,6 +2847,7 @@ static int _raid_add_images(struct logical_volume *lv,
display_lvname(lv));
return 0;
}
backup(lv->vg);
}
return 1;
@@ -3166,6 +3172,8 @@ static int _raid_remove_images(struct logical_volume *lv, int yes,
if (!lv_update_and_reload_origin(lv))
return_0;
backup(lv->vg);
return 1;
}
@@ -3423,6 +3431,8 @@ int lv_raid_split(struct logical_volume *lv, int yes, const char *split_name,
if (!vg_write(lv->vg) || !vg_commit(lv->vg))
return_0;
backup(lv->vg);
return 1;
}
@@ -3905,6 +3915,8 @@ static int _eliminate_extracted_lvs_optional_write_vg(struct volume_group *vg,
if (vg_write_requested) {
if (!vg_write(vg) || !vg_commit(vg))
return_0;
backup(vg);
}
/* Wait for events following any deactivation. */

View File

@@ -21,18 +21,6 @@
#include "lib/config/defaults.h"
#include "lib/display/display.h"
struct logical_volume *data_lv_from_thin_pool(struct logical_volume *pool_lv)
{
struct lv_segment *seg_thinpool = first_seg(pool_lv);
if (!seg_thinpool || !seg_is_thin_pool(seg_thinpool)) {
log_error(INTERNAL_ERROR "data_lv_from_thin_pool arg not thin pool %s", pool_lv->name);
return NULL;
}
return seg_thinpool->areas[0].u.lv.lv;
}
/* TODO: drop unused no_update */
int attach_pool_message(struct lv_segment *pool_seg, dm_thin_message_t type,
struct logical_volume *lv, uint32_t delete_id,

View File

@@ -694,6 +694,9 @@ int vgreduce_single(struct cmd_context *cmd, struct volume_group *vg,
pvl = find_pv_in_vg(vg, name);
if (!archive(vg))
goto_bad;
log_verbose("Removing \"%s\" from volume group \"%s\"", name, vg->name);
if (pvl)
@@ -739,6 +742,8 @@ int vgreduce_single(struct cmd_context *cmd, struct volume_group *vg,
goto bad;
}
backup(vg);
log_print_unless_silent("Removed \"%s\" from volume group \"%s\"",
name, vg->name);
}
@@ -750,12 +755,3 @@ bad:
release_vg(orphan_vg);
return r;
}
void vg_backup_if_needed(struct volume_group *vg)
{
if (!vg || !vg->needs_backup)
return;
vg->needs_backup = 0;
backup(vg->vg_committed);
}

View File

@@ -170,7 +170,6 @@ uint32_t vg_mda_used_count(const struct volume_group *vg);
uint32_t vg_mda_copies(const struct volume_group *vg);
int vg_set_mda_copies(struct volume_group *vg, uint32_t mda_copies);
char *vg_profile_dup(const struct volume_group *vg);
void vg_backup_if_needed(struct volume_group *vg);
/*
* Returns visible LV count - number of LVs from user perspective

View File

@@ -653,21 +653,18 @@ static int _daemonise(struct filemap_monitor *fm)
}
if (!_verbose) {
if ((fd = open("/dev/null", O_RDWR)) == -1) {
_early_log("Error opening /dev/null.");
if (close(STDIN_FILENO))
_early_log("Error closing stdin");
if (close(STDOUT_FILENO))
_early_log("Error closing stdout");
if (close(STDERR_FILENO))
_early_log("Error closing stderr");
if ((open("/dev/null", O_RDONLY) < 0) ||
(open("/dev/null", O_WRONLY) < 0) ||
(open("/dev/null", O_WRONLY) < 0)) {
_early_log("Error opening stdio streams.");
return 0;
}
if ((dup2(fd, STDIN_FILENO) == -1) ||
(dup2(fd, STDOUT_FILENO) == -1) ||
(dup2(fd, STDERR_FILENO) == -1)) {
if (fd > STDERR_FILENO)
(void) close(fd);
_early_log("Error redirecting stdin/out/err to null.");
return 0;
}
if (fd > STDERR_FILENO)
(void) close(fd);
}
/* TODO: Use libdaemon/server/daemon-server.c _daemonise() */
for (fd = (int) sysconf(_SC_OPEN_MAX) - 1; fd > STDERR_FILENO; fd--) {

View File

@@ -3773,7 +3773,7 @@ static struct selection_node *_parse_selection(struct dm_report *rh,
struct field_selection *fs;
struct selection_node *sn;
const char *ws, *we; /* field name */
const char *vs = NULL, *ve = NULL; /* value */
const char *vs, *ve; /* value */
const char *last;
uint32_t flags, field_num;
int implicit;

View File

@@ -128,10 +128,6 @@ all_man: man
$(MAN5) $(MAN7) $(MAN8) $(MAN8SO) $(MAN8DM) $(MAN8CLUSTER) $(MAN8SYSTEMD_GENERATORS): Makefile
$(MANGENERATOR):
@echo " [MAKE] $<"
$(Q) $(MAKE) -C $(top_builddir) tools
# Test whether or not the man page generator works
$(TESTMAN): $(MANGENERATOR) Makefile
@echo " [TSTMAN] $@"

View File

@@ -25,14 +25,13 @@ fsadm \(em utility to resize or check filesystem on a device
.
.SH DESCRIPTION
.
fsadm utility checks or resizes the filesystem on a device (can be
also dm-crypt encrypted device).
fsadm utility checks or resizes the filesystem on a device.
It tries to use the same API for
.BR ext2 ,
.BR ext3 ,
.BR ext4 ,
.BR ReiserFS
and
.RB and
.BR XFS
filesystem.
.
@@ -51,12 +50,6 @@ Bypass some sanity checks.
Display the help text.
.
.TP
.BR -l | --lvresize
Resize also given lvm2 logical volume. More volume management
functionality is provided with complementary \fBlvresize\fP(8) and the option
.BR -r | --resizefs.
.
.TP
.BR -n | --dry-run
Print commands without running them.
.

View File

@@ -551,7 +551,7 @@ Attach a writecache to an LV, converts the LV to type writecache.
.RE
.P
.RS 4
LV1 types: linear striped thinpool raid
LV1 types: linear striped raid
.RE
.P
\(em
@@ -605,7 +605,7 @@ Add a writecache to an LV, using a specified cache device.
.RE
.P
.RS 4
LV1 types: linear striped thinpool raid
LV1 types: linear striped raid
.RE
.P
\(em
@@ -1650,22 +1650,23 @@ LV1 types: mirror
.P
\(em
.P
Convert LV to a thin LV, using the original LV as an external origin.
Convert LV to a thin LV, using the original LV as an external origin
.br
(infers --type thin).
.br
.P
\fBlvconvert\fP \fB-T\fP|\fB--thin\fP \fB--thinpool\fP \fILV\fP \fILV1\fP
.br
.RS 4
.ad l
[ \fB--type thin\fP ] (implied)
.br
.br
[ \fB-r\fP|\fB--readahead\fP \fBauto\fP|\fBnone\fP|\fINumber\fP ]
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB-Z\fP|\fB--zero\fP \fBy\fP|\fBn\fP ]
.br
[ \fB--type\fP \fBthin\fP ]
.br
[ \fB--originname\fP \fILV\fP\fI_new\fP ]
.br
[ \fB--poolmetadata\fP \fILV\fP ]
@@ -1686,22 +1687,21 @@ LV1 types: linear striped thin cache raid error zero
.P
\(em
.P
Attach a cache pool to an LV.
Attach a cache pool to an LV (infers --type cache).
.br
.P
\fBlvconvert\fP \fB-H\fP|\fB--cache\fP \fB--cachepool\fP \fILV\fP \fILV1\fP
.br
.RS 4
.ad l
[ \fB--type cache\fP ] (implied)
.br
.br
[ \fB-Z\fP|\fB--zero\fP \fBy\fP|\fBn\fP ]
.br
[ \fB-r\fP|\fB--readahead\fP \fBauto\fP|\fBnone\fP|\fINumber\fP ]
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBcache\fP ]
.br
[ \fB--cachemetadataformat\fP \fBauto\fP|\fB1\fP|\fB2\fP ]
.br
[ \fB--cachemode\fP \fBwritethrough\fP|\fBwriteback\fP|\fBpassthrough\fP ]
@@ -1766,9 +1766,6 @@ Convert LV to type vdopool.
.br
.RS 4
.ad l
[ \fB--type vdo-pool\fP ] (implied)
.br
.br
[ \fB-r\fP|\fB--readahead\fP \fBauto\fP|\fBnone\fP|\fINumber\fP ]
.br
[ \fB-Z\fP|\fB--zero\fP \fBy\fP|\fBn\fP ]
@@ -1777,6 +1774,8 @@ Convert LV to type vdopool.
.br
[ \fB-V\fP|\fB--virtualsize\fP \fISize\fP[m|UNIT] ]
.br
[ \fB--type\fP \fBvdo-pool\fP ]
.br
[ \fB--metadataprofile\fP \fIString\fP ]
.br
[ \fB--compression\fP \fBy\fP|\fBn\fP ]
@@ -1876,13 +1875,12 @@ origin LV (first arg) to reverse a splitsnapshot command.
.br
.RS 4
.ad l
[ \fB--type snapshot\fP ] (implied)
.br
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB-Z\fP|\fB--zero\fP \fBy\fP|\fBn\fP ]
.br
[ \fB--type\fP \fBsnapshot\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
.RE

View File

@@ -225,10 +225,10 @@ Create a linear LV.
.br
.RS 4
.ad l
[ \fB--type linear\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB--type\fP \fBlinear\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
.RE
@@ -238,19 +238,19 @@ Create a linear LV.
.P
\(em
.P
Create a striped LV.
Create a striped LV (infers --type striped).
.br
.P
\fBlvcreate\fP \fB-i\fP|\fB--stripes\fP \fINumber\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] \fIVG\fP
.br
.RS 4
.ad l
[ \fB--type striped\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-I\fP|\fB--stripesize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBstriped\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
.RE
@@ -260,17 +260,17 @@ Create a striped LV.
.P
\(em
.P
Create a raid1 or mirror LV.
Create a raid1 or mirror LV (infers --type raid1|mirror).
.br
.P
\fBlvcreate\fP \fB-m\fP|\fB--mirrors\fP \fINumber\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] \fIVG\fP
.br
.RS 4
.ad l
[ \fB--type raid1\fP|\fBmirror\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
.br
[ \fB-I\fP|\fB--stripesize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB-R\fP|\fB--regionsize\fP \fISize\fP[m|UNIT] ]
@@ -336,8 +336,6 @@ Create a raid10 LV.
.br
.RS 4
.ad l
[ \fB--type raid10\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-I\fP|\fB--stripesize\fP \fISize\fP[k|UNIT] ]
@@ -364,8 +362,6 @@ Create a COW snapshot LV of an origin LV.
.br
.RS 4
.ad l
[ \fB--type snapshot\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -374,6 +370,8 @@ Create a COW snapshot LV of an origin LV.
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBsnapshot\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
.RE
@@ -457,33 +455,33 @@ Create a cache pool.
.P
\(em
.P
Create a thin LV in a thin pool.
Create a thin LV in a thin pool (infers --type thin).
.br
.P
\fBlvcreate\fP \fB-V\fP|\fB--virtualsize\fP \fISize\fP[m|UNIT] \fB--thinpool\fP \fILV\fP \fIVG\fP
.br
.RS 4
.ad l
[ \fB--type thin\fP ] (implied)
.br
.br
[ \fB-T\fP|\fB--thin\fP ]
.br
[ \fB--type\fP \fBthin\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
.RE
.P
\(em
.P
Create a thin LV that is a snapshot of an existing thin LV.
Create a thin LV that is a snapshot of an existing thin LV
.br
(infers --type thin).
.br
.P
\fBlvcreate\fP \fB-s\fP|\fB--snapshot\fP \fILV1\fP
.br
.RS 4
.ad l
[ \fB--type thin\fP ] (implied)
.br
[ \fB--type\fP \fBthin\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
@@ -1687,15 +1685,13 @@ Create a sparse COW snapshot LV of a virtual origin LV
.P
\(em
.P
Create a thin pool.
Create a thin pool (infers --type thin-pool).
.br
.P
\fBlvcreate\fP \fB-T\fP|\fB--thin\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] \fIVG\fP
.br
.RS 4
.ad l
[ \fB--type thin-pool\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -1704,6 +1700,8 @@ Create a thin pool.
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBthin-pool\fP ]
.br
[ \fB--discards\fP \fBpassdown\fP|\fBnopassdown\fP|\fBignore\fP ]
.br
[ \fB--errorwhenfull\fP \fBy\fP|\fBn\fP ]
@@ -1721,15 +1719,15 @@ Create a thin pool.
.P
\(em
.P
Create a thin pool named in --thinpool.
Create a thin pool named by the --thinpool arg
.br
(infers --type thin-pool).
.br
.P
\fBlvcreate\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] \fB--thinpool\fP \fILV\fP\fI_new\fP \fIVG\fP
.br
.RS 4
.ad l
[ \fB--type thin-pool\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -1740,6 +1738,8 @@ Create a thin pool named in --thinpool.
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBthin-pool\fP ]
.br
[ \fB--discards\fP \fBpassdown\fP|\fBnopassdown\fP|\fBignore\fP ]
.br
[ \fB--errorwhenfull\fP \fBy\fP|\fBn\fP ]
@@ -1841,18 +1841,19 @@ LV1 types: thinpool
.P
Create a thin LV in the thin pool named in the first arg
.br
(also see --thinpool for naming pool.)
(variant, infers --type thin, also see --thinpool for
.br
naming pool.)
.br
.P
\fBlvcreate\fP \fB-V\fP|\fB--virtualsize\fP \fISize\fP[m|UNIT] \fILV1\fP
.br
.RS 4
.ad l
[ \fB--type thin\fP ] (implied)
.br
.br
[ \fB-T\fP|\fB--thin\fP ]
.br
[ \fB--type\fP \fBthin\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
.RE
@@ -1882,15 +1883,16 @@ LV1 types: thin
.P
\(em
.P
Create a thin LV that is a snapshot of an existing thin LV.
Create a thin LV that is a snapshot of an existing thin LV
.br
(infers --type thin).
.br
.P
\fBlvcreate\fP \fB-T\fP|\fB--thin\fP \fILV1\fP
.br
.RS 4
.ad l
[ \fB--type thin\fP ] (implied)
.br
[ \fB--type\fP \fBthin\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
@@ -1902,15 +1904,16 @@ LV1 types: thin
.P
\(em
.P
Create a thin LV that is a snapshot of an external origin LV.
Create a thin LV that is a snapshot of an external origin LV
.br
(infers --type thin).
.br
.P
\fBlvcreate\fP \fB-s\fP|\fB--snapshot\fP \fB--thinpool\fP \fILV\fP \fILV\fP
.br
.RS 4
.ad l
[ \fB--type thin\fP ] (implied)
.br
[ \fB--type\fP \fBthin\fP ]
.br
[ COMMON_OPTIONS ]
.ad b
@@ -1925,8 +1928,6 @@ Create a VDO LV with VDO pool.
.br
.RS 4
.ad l
[ \fB--type vdo\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -1935,6 +1936,8 @@ Create a VDO LV with VDO pool.
.br
[ \fB-V\fP|\fB--virtualsize\fP \fISize\fP[m|UNIT] ]
.br
[ \fB--type\fP \fBvdo\fP ]
.br
[ \fB--vdopool\fP \fILV\fP\fI_new\fP ]
.br
[ \fB--compression\fP \fBy\fP|\fBn\fP ]
@@ -1957,8 +1960,6 @@ Create a VDO LV with VDO pool.
.br
.RS 4
.ad l
[ \fB--type vdo\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -1967,6 +1968,10 @@ Create a VDO LV with VDO pool.
.br
[ \fB-V\fP|\fB--virtualsize\fP \fISize\fP[m|UNIT] ]
.br
[ \fB--vdo\fP ]
.br
[ \fB--type\fP \fBvdo\fP ]
.br
[ \fB--compression\fP \fBy\fP|\fBn\fP ]
.br
[ \fB--deduplication\fP \fBy\fP|\fBn\fP ]
@@ -2021,7 +2026,9 @@ where the new thin pool is named by the --thinpool arg.
.P
Create a thin LV, first creating a thin pool for it,
.br
where the new thin pool is named by --thinpool.
where the new thin pool is named by the --thinpool arg
.br
(variant, infers --type thin).
.br
.P
\fBlvcreate\fP \fB-V\fP|\fB--virtualsize\fP \fISize\fP[m|UNIT] \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT]
@@ -2031,8 +2038,6 @@ where the new thin pool is named by --thinpool.
.br
.RS 4
.ad l
[ \fB--type thin\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -2043,6 +2048,8 @@ where the new thin pool is named by --thinpool.
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBthin\fP ]
.br
[ \fB--discards\fP \fBpassdown\fP|\fBnopassdown\fP|\fBignore\fP ]
.br
[ \fB--errorwhenfull\fP \fBy\fP|\fBn\fP ]
@@ -2109,7 +2116,7 @@ where the new thin pool is named in the first arg,
.br
or the new thin pool name is generated when the first
.br
arg is a VG name.
arg is a VG name (variant, infers --type thin).
.br
.P
\fBlvcreate\fP \fB-T\fP|\fB--thin\fP \fB-V\fP|\fB--virtualsize\fP \fISize\fP[m|UNIT]
@@ -2119,8 +2126,6 @@ arg is a VG name.
.br
.RS 4
.ad l
[ \fB--type thin\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -2129,6 +2134,8 @@ arg is a VG name.
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBthin\fP ]
.br
[ \fB--discards\fP \fBpassdown\fP|\fBnopassdown\fP|\fBignore\fP ]
.br
[ \fB--errorwhenfull\fP \fBy\fP|\fBn\fP ]
@@ -2146,11 +2153,15 @@ arg is a VG name.
.P
\(em
.P
Create a thin LV, first creating a thin pool for it.
Create a thin LV, first creating a thin pool for it
.br
(infers --type thin).
.br
Create a sparse snapshot of a virtual origin LV
.br
Chooses type thin or snapshot according to
(infers --type snapshot).
.br
Chooses --type thin or --type snapshot according to
.br
config setting sparse_segtype_default.
.br
@@ -2159,8 +2170,6 @@ config setting sparse_segtype_default.
.br
.RS 4
.ad l
[ \fB--type thin\fP|\fBsnapshot\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -2173,6 +2182,8 @@ config setting sparse_segtype_default.
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fIString\fP ]
.br
[ \fB--discards\fP \fBpassdown\fP|\fBnopassdown\fP|\fBignore\fP ]
.br
[ \fB--errorwhenfull\fP \fBy\fP|\fBn\fP ]
@@ -2192,15 +2203,15 @@ config setting sparse_segtype_default.
.P
Create a new LV, then attach the specified cachepool
.br
which converts the new LV to type cache.
which converts the new LV to type cache
.br
(variant, infers --type cache.)
.br
.P
\fBlvcreate\fP \fB-L\fP|\fB--size\fP \fISize\fP[m|UNIT] \fB--cachepool\fP \fILV\fP \fIVG\fP
.br
.RS 4
.ad l
[ \fB--type cache\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-i\fP|\fB--stripes\fP \fINumber\fP ]
@@ -2211,6 +2222,8 @@ which converts the new LV to type cache.
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBcache\fP ]
.br
[ \fB--cachemode\fP \fBwritethrough\fP|\fBwriteback\fP|\fBpassthrough\fP ]
.br
[ \fB--cachepolicy\fP \fIString\fP ]
@@ -2289,8 +2302,6 @@ and attach it to the LV arg (alternative, use lvconvert.)
.br
.RS 4
.ad l
[ \fB--type cache\fP ] (implied)
.br
[ \fB-l\fP|\fB--extents\fP \fINumber\fP[PERCENT] ]
.br
[ \fB-c\fP|\fB--chunksize\fP \fISize\fP[k|UNIT] ]
@@ -2299,6 +2310,8 @@ and attach it to the LV arg (alternative, use lvconvert.)
.br
[ \fB-I\fP|\fB--stripesize\fP \fISize\fP[k|UNIT] ]
.br
[ \fB--type\fP \fBcache\fP ]
.br
[ \fB--cachemode\fP \fBwritethrough\fP|\fBwriteback\fP|\fBpassthrough\fP ]
.br
[ \fB--cachepolicy\fP \fIString\fP ]

View File

@@ -14,9 +14,3 @@ space on PV /dev/sdk3. This is equivalent to specifying
Extend an LV by 16MiB using specific physical extents.
.br
.B lvextend -L+16m vg01/lvol01 /dev/sda:8-9 /dev/sdb:8-9
.P
Extend an LV to use all remaining free space in volume group
and all resize its filesystem with
.BR fsadm (8).
.br
.B lvextend -l+100%FREE -r vg01/lvol01

View File

@@ -456,7 +456,7 @@ output in JSON format. See \fBlvmreport\fP(7) for more information.
.HP
\fB-r\fP|\fB--resizefs\fP
.br
Resize underlying filesystem together with the LV using \fBfsadm\fP(8).
Resize underlying filesystem together with the LV using fsadm(8).
.
.HP
\fB-L\fP|\fB--size\fP [\fB+\fP]\fISize\fP[m|UNIT]

View File

@@ -288,7 +288,6 @@ The LV name may also not contain any of the following strings:
.RB ' _cdata ',
.RB ' _cmeta ',
.RB ' _corig ',
.RB ' _iorig ',
.RB ' _mimage ',
.RB ' _mlog ',
.RB ' _pmspare ',

View File

@@ -9,18 +9,18 @@ remove it from the devices file with lvmdevices --deldev. The
vgimportdevices(8) command adds all PVs from a VG to the devices file,
and updates the VG metadata to include device IDs of the PVs.
.P
Commands that add new devices to the devices file necessarily look outside
the existing devices file to find the devices being added. pvcreate,
vgcreate, and vgextend also look outside the devices file to create new
PVs and add those PVs to the devices file.
Commands adding new devices to the devices file necessarily look outside
the existing devices file to find the devices to add. pvcreate, vgcreate,
and vgextend also look outside the devices file to create new PVs and add
them to the devices file.
.P
LVM records devices in the devices file using hardware-specific IDs, such
as the WWID, and attempts to use subsystem-specific IDs for virtual device
types (which also aim to be as unique and stable as possible.) These
device IDs are also written in the VG metadata. When no hardware or
types (which also aim to be as unique and stable as possible.)
These device IDs are also written in the VG metadata. When no hardware or
virtual ID is available, lvm falls back using the unstable device name as
the device ID. When devnames are used as IDs, lvm performs extra scanning
to find devices if their devname changes, e.g. after reboot.
the device ID. When devnames are used, lvm performs extra scanning to
find devices if their devname changes, e.g. after reboot.
.P
When proper device IDs are used, an lvm command will not look at devices
outside the devices file, but when devnames are used as a fallback, lvm
@@ -34,13 +34,12 @@ overriding the devices file. The listed devices act as a sort of devices
file in terms of limiting which devices lvm will see and use. Devices
that are not listed will appear to be missing to the lvm command.
.P
Multiple devices files can be kept \fI#DEFAULT_SYS_DIR#/devices\fP, which
allows lvm to be used with different sets of devices. For example, system
devices do not need to be exposed to a specific application, and the
application can use lvm on its own devices that are not exposed to the
system. The option --devicesfile <filename> is used to select the devices
file to use with the command. Without the option set, the default system
devices file is used.
Multiple devices files can be kept in \fI#DEFAULT_SYS_DIR#/devices\fP, which allows lvm
to be used with different sets of devices, e.g. system devices do not need
to be exposed to a specific application, and the application can use lvm on
its own devices that are not exposed to the system. The option
--devicesfile <filename> is used to select the devices file to use with the
command. Without the option set, the default system devices file is used.
.P
Setting --devicesfile "" causes lvm to not use a devices file.
.P
@@ -60,42 +59,3 @@ if it does not yet exist.
.P
It is recommended to use lvm commands to make changes to the devices file to
ensure proper updates.
.P
The device ID and device ID type are included in the VG metadata and can
be reported with pvs -o deviceid,deviceidtype. (Note that the lvmdevices
command does not update VG metadata, but subsequent lvm commands modifying
the metadata will include the device ID.)
.P
Possible device ID types are:
.br
.IP \[bu] 2
.B sys_wwid
uses the wwid reported by sysfs. This is the first choice for non-virtual
devices.
.IP \[bu] 2
.B sys_serial
uses the serial number reported by sysfs. This is the second choice for
non-virtual devices.
.IP \[bu] 2
.B mpath_uuid
is used for dm multipath devices, reported by sysfs.
.IP \[bu] 2
.B crypt_uuid
is used for dm crypt devices, reported by sysfs.
.IP \[bu] 2
.B md_uuid
is used for md devices, reported by sysfs.
.B lvmlv_uuid
is used if a PV is placed on top of an lvm LV, reported by sysfs.
.IP \[bu] 2
.B loop_file
is used for loop devices, the backing file name repored by sysfs.
.IP \[bu] 2
.B devname
the device name is used if no other type applies.
.P
The default choice for device ID type can be overriden using lvmdevices
--addev --deviceidtype <type>. If the specified type is available for the
device it will be used, otherwise the device will be added using the type
that would otherwise be chosen.

View File

@@ -234,7 +234,7 @@ output in JSON format. See \fBlvmreport\fP(7) for more information.
.HP
\fB-r\fP|\fB--resizefs\fP
.br
Resize underlying filesystem together with the LV using \fBfsadm\fP(8).
Resize underlying filesystem together with the LV using fsadm(8).
.
.HP
\fB-L\fP|\fB--size\fP [\fB-\fP]\fISize\fP[m|UNIT]

View File

@@ -1,10 +1,6 @@
.
.SH EXAMPLES
.
Extend an LV by 16MB using specific physical extents.
Extend an LV by 16MB using specific physical extents:
.br
.B lvresize -L+16M vg1/lv1 /dev/sda:0-1 /dev/sdb:0-1
.P
Resize an LV to use 50% of the size volume group.
.br
.B lvresize -l50%VG vg1/lv1

View File

@@ -409,7 +409,7 @@ output in JSON format. See \fBlvmreport\fP(7) for more information.
.HP
\fB-r\fP|\fB--resizefs\fP
.br
Resize underlying filesystem together with the LV using \fBfsadm\fP(8).
Resize underlying filesystem together with the LV using fsadm(8).
.
.HP
\fB-L\fP|\fB--size\fP [\fB+\fP|\fB-\fP]\fISize\fP[m|UNIT]

View File

@@ -63,7 +63,6 @@
.BR lvmlockctl (8),
.BR cmirrord (8),
.BR lvmdbusd (8),
.BR fsadm (8),
.P
.BR lvmsystemid (7),
.BR lvmreport (7),

View File

@@ -383,7 +383,7 @@ detect_device_size() {
test -n "$DEVSIZE" || error "Cannot read size of device \"$VOLUME\"."
SSSIZE=$("$BLOCKDEV" --getss "$VOLUME" || true)
test -n "$SSSIZE" || error "Cannot read sector size of device \"$VOLUME\"."
DEVSIZE=$(( DEVSIZE * SSSIZE ))
DEVSIZE=$(( $DEVSIZE * $SSSIZE ))
fi
}

View File

@@ -163,7 +163,7 @@ if (( clustered )); then
{
for i in nodes status services; do
cap_i=$(echo "$i"|tr "[:lower:]" "[:upper:]")
cap_i=$(echo "$i"|tr a-z A-Z)
echo "$cap_i:"
echo "----------------------------------"
log "cman_tool $i 2>> \"$log\""

View File

@@ -18,11 +18,11 @@
# following external commands are used throughout the script
# echo and test are internal in bash at least
RM="rm"
BASENAME="basename"
MKTEMP="mktemp"
READLINK="readlink"
GETOPT="getopt"
RM=rm
BASENAME=basename
MKTEMP=mktemp
READLINK=readlink
GETOPT=getopt
# user may override lvm location by setting LVM_BINARY
LVM=${LVM_BINARY:-lvm}
@@ -145,7 +145,7 @@ do
shift
;;
-v|--verbose)
VERBOSE_COUNT=$(( VERBOSE_COUNT + 1 ))
let VERBOSE_COUNT=VERBOSE_COUNT+1
if [ -z "$VERBOSE" ]
then
VERBOSE="-v"

View File

@@ -85,7 +85,6 @@ help:
@echo " check_all_lvmpolld Run all tests with lvmpolld daemon."
@echo " check_lvmlockd_sanlock Run tests with lvmlockd and sanlock."
@echo " check_lvmlockd_dlm Run tests with lvmlockd and dlm."
@echo " check_lvmlockd_idm Run tests with lvmlockd and idm."
@echo " check_lvmlockd_test Run tests with lvmlockd --test."
@echo " run-unit-test Run only unit tests (root not needed)."
@echo " clean Clean dir."
@@ -93,7 +92,6 @@ help:
@echo -e "\nSupported variables:"
@echo " LVM_TEST_AUX_TRACE Set for verbose messages for aux scripts []."
@echo " LVM_TEST_BACKING_DEVICE Set device used for testing (see also LVM_TEST_DIR)."
@echo " LVM_TEST_MULTI_HOST Set multiple hosts used for testing."
@echo " LVM_TEST_CAN_CLOBBER_DMESG Allow to clobber dmesg buffer without /dev/kmsg. (1)"
@echo " LVM_TEST_DEVDIR Set to '/dev' to run on real /dev."
@echo " LVM_TEST_PREFER_BRD Prefer using brd (ramdisk) over loop for testing [1]."
@@ -170,14 +168,6 @@ check_lvmlockd_dlm: .tests-stamp
--flavours udev-lvmlockd-dlm --only shell/aa-lvmlockd-dlm-prepare.sh,$(T),shell/zz-lvmlockd-dlm-remove.sh --skip $(S)
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
check_lvmlockd_idm: .tests-stamp
$(INSTALL_PROGRAM) lib/idm_inject_failure $(EXECDIR)
VERBOSE=$(VERBOSE) ./lib/runner \
--testdir . --outdir $(LVM_TEST_RESULTS) \
--flavours udev-lvmlockd-idm --only shell/aa-lvmlockd-idm-prepare.sh,$(T),shell/zz-lvmlockd-idm-remove.sh --skip $(S)
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
check_lvmlockd_test: .tests-stamp
VERBOSE=$(VERBOSE) ./lib/runner \
@@ -199,7 +189,6 @@ LIB_FLAVOURS = \
flavour-udev-lvmpolld\
flavour-udev-lvmlockd-sanlock\
flavour-udev-lvmlockd-dlm\
flavour-udev-lvmlockd-idm\
flavour-udev-lvmlockd-test\
flavour-udev-vanilla
@@ -271,10 +260,6 @@ lib/securetest: lib/dmsecuretest.o .lib-dir-stamp
@echo " [CC] $@"
$(Q) $(CC) -g $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) -o $@ $< -L$(interfacebuilddir) -ldevmapper $(LIBS)
lib/idm_inject_failure: lib/idm_inject_failure.o .lib-dir-stamp
@echo " [CC] $@"
$(Q) $(CC) -g $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) -o $@ $< $(INTERNAL_LIBS) $(LIBS) -lseagate_ilm
lib/runner.o: $(wildcard $(srcdir)/lib/*.h)
CFLAGS_runner.o += $(EXTRA_EXEC_CFLAGS)

View File

@@ -18,7 +18,7 @@ SKIP_WITH_CLVMD=1
. lib/inittest
# Unsupported with valgrid testing
test "${LVM_VALGRIND:-0}" -eq 0 || skip "Unsupported with valgrind"
test ${LVM_VALGRIND:-0} -eq 0 || skip "Unsupported with valgrind"
# NOTE: Some tests, namely anything with vdo, and
# api/dbus_test_lv_interface_cache_lv.sh, require larger PVs

View File

@@ -119,20 +119,6 @@ prepare_sanlock() {
fi
}
prepare_idm() {
if pgrep seagate_ilm; then
echo "Cannot run while existing seagate_ilm process exists"
exit 1
fi
seagate_ilm -D 0 -l 0 -L 7 -E 7 -S 7
if ! pgrep seagate_ilm; then
echo "Failed to start seagate_ilm"
exit 1
fi
}
prepare_lvmlockd() {
if pgrep lvmlockd ; then
echo "Cannot run while existing lvmlockd process exists"
@@ -149,11 +135,6 @@ prepare_lvmlockd() {
echo "starting lvmlockd for dlm"
lvmlockd
elif test -n "$LVM_TEST_LOCK_TYPE_IDM"; then
# make check_lvmlockd_idm
echo "starting lvmlockd for idm"
lvmlockd -g idm
elif test -n "$LVM_TEST_LVMLOCKD_TEST_DLM"; then
# make check_lvmlockd_test
echo "starting lvmlockd --test (dlm)"
@@ -163,12 +144,6 @@ prepare_lvmlockd() {
# FIXME: add option for this combination of --test and sanlock
echo "starting lvmlockd --test (sanlock)"
lvmlockd --test -g sanlock -o 2
elif test -n "$LVM_TEST_LVMLOCKD_TEST_IDM"; then
# make check_lvmlockd_test
echo "starting lvmlockd --test (idm)"
lvmlockd --test -g idm
else
echo "not starting lvmlockd"
exit 0
@@ -611,7 +586,7 @@ teardown() {
}
# Remove any metadata archives and backups from this test on system
rm -f /etc/lvm/archive/"${PREFIX}"* /etc/lvm/backup/"${PREFIX}"*
rm -f /etc/lvm/archive/${PREFIX}* /etc/lvm/backup/${PREFIX}*
echo "ok"
}
@@ -694,7 +669,7 @@ prepare_real_devs() {
aux extend_filter "a|$path|"
dd if=/dev/zero of="$path" bs=32k count=1
wipefs -a "$path" 2>/dev/null || true
done < "$LVM_TEST_DEVICE_LIST"
done < $LVM_TEST_DEVICE_LIST
fi
printf "%s\\n" "${REAL_DEVICES[@]}" > REAL_DEVICES
}
@@ -835,9 +810,9 @@ cleanup_md_dev() {
# try to find and remove any DM device on top of cleaned MD
# assume /dev/mdXXX is 9:MINOR
local minor=${mddev##/dev/md}
for i in $(dmsetup table | grep 9:"$minor" | cut -d: -f1) ; do
dmsetup remove "$i" || {
dmsetup --force remove "$i" || true
for i in $(dmsetup table | grep 9:$minor | cut -d: -f1) ; do
dmsetup remove $i || {
dmsetup --force remove $i || true
}
done
@@ -867,7 +842,7 @@ wipefs_a() {
shift
if test -n "$LVM_TEST_DEVICES_FILE"; then
lvmdevices --deldev "$dev" || true
lvmdevices --deldev $dev || true
fi
if test -f HAVE_WIPEFS ; then
@@ -891,46 +866,21 @@ wipefs_a() {
fi
if test -n "$LVM_TEST_DEVICES_FILE"; then
lvmdevices --adddev "$dev" || true
lvmdevices --adddev $dev || true
fi
udev_wait
}
cleanup_idm_context() {
local dev=$1
if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
sg_dev=`sg_map26 ${dev}`
echo "Cleanup IDM context for drive ${dev} ($sg_dev)"
sg_raw -v -r 512 -o /tmp/idm_tmp_data.bin $sg_dev \
88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
sg_raw -v -s 512 -i /tmp/idm_tmp_data.bin $sg_dev \
8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
rm /tmp/idm_tmp_data.bin
fi
}
prepare_backing_dev() {
local size=${1=32}
shift
if test -n "$LVM_TEST_BACKING_DEVICE"; then
IFS=',' read -r -a BACKING_DEVICE_ARRAY <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BACKING_DEVICE_ARRAY[@]}"; do
if test ! -b "$d"; then
echo "Device $d doesn't exist!"
return 1
fi
done
fi
if test -f BACKING_DEV; then
BACKING_DEV=$(< BACKING_DEV)
return 0
elif test -n "$LVM_TEST_BACKING_DEVICE"; then
BACKING_DEV=${BACKING_DEVICE_ARRAY[0]}
elif test -b "$LVM_TEST_BACKING_DEVICE"; then
BACKING_DEV=$LVM_TEST_BACKING_DEVICE
echo "$BACKING_DEV" > BACKING_DEV
return 0
elif test "${LVM_TEST_PREFER_BRD-1}" = "1" && \
@@ -978,14 +928,7 @@ prepare_devs() {
local dev="$DM_DEV_DIR/mapper/$name"
DEVICES[$count]=$dev
count=$(( count + 1 ))
# If the backing device number can meet the requirement for PV devices,
# then allocate a dedicated backing device for PV; otherwise, rollback
# to use single backing device for device-mapper.
if [ -n "$LVM_TEST_BACKING_DEVICE" ] && [ $n -le ${#BACKING_DEVICE_ARRAY[@]} ]; then
echo 0 $size linear "${BACKING_DEVICE_ARRAY[$(( count - 1 ))]}" $(( header_shift * 2048 )) > "$name.table"
else
echo 0 $size linear "$BACKING_DEV" $(( ( i - 1 ) * size + ( header_shift * 2048 ) )) > "$name.table"
fi
echo 0 $size linear "$BACKING_DEV" $(( ( i - 1 ) * size + ( header_shift * 2048 ) )) > "$name.table"
dmsetup create -u "TEST-$name" "$name" "$name.table" || touch CREATE_FAILED &
test -f CREATE_FAILED && break;
done
@@ -1003,16 +946,6 @@ prepare_devs() {
return $?
fi
if [ -n "$LVM_TEST_BACKING_DEVICE" ]; then
for d in "${BACKING_DEVICE_ARRAY[@]}"; do
cnt=$((`blockdev --getsize64 $d` / 1024 / 1024))
cnt=$(( cnt < 1000 ? cnt : 1000 ))
dd if=/dev/zero of="$d" bs=1MB count=$cnt
wipefs -a "$d" 2>/dev/null || true
cleanup_idm_context "$d"
done
fi
# non-ephemeral devices need to be cleared between tests
test -f LOOP -o -f RAMDISK || for d in "${DEVICES[@]}"; do
# ensure disk header is always zeroed
@@ -1021,10 +954,10 @@ prepare_devs() {
done
if test -n "$LVM_TEST_DEVICES_FILE"; then
mkdir -p "$TESTDIR/etc/lvm/devices" || true
rm "$TESTDIR/etc/lvm/devices/system.devices" || true
mkdir -p $TESTDIR/etc/lvm/devices || true
rm $TESTDIR/etc/lvm/devices/system.devices || true
for d in "${DEVICES[@]}"; do
lvmdevices --adddev "$dev" || true
lvmdevices --adddev $dev || true
done
fi

View File

@@ -157,7 +157,7 @@ mirror_nonredundant() {
attr=$(get lv_field "$lv" attr)
(echo "$attr" | grep "^......m...$" >/dev/null) || {
if (echo "$attr" | grep "^o.........$" >/dev/null) &&
lvs -a "$1" | grep -F "[${2}_mimage" >/dev/null; then
lvs -a $1 | grep -F "[${2}_mimage" >/dev/null; then
echo "TEST WARNING: $lv is a snapshot origin and looks like a mirror,"
echo "assuming it is actually a mirror"
else
@@ -439,7 +439,7 @@ raid_leg_status() {
# Ignore inconsisten raid status 0/xxxxx idle
for i in {100..0} ; do
st=( $(dmsetup status "$1-$2") ) || die "Unable to get status of $vg/$lv1"
st=( $(dmsetup status $1-$2) ) || die "Unable to get status of $vg/$lv1"
b=( $(echo "${st[6]}" | sed s:/:' ':) )
[ "${b[0]}" = "0" ] || {
test "${st[5]}" = "$3" || break
@@ -448,7 +448,7 @@ raid_leg_status() {
sleep .1
done
die "$1-$2 status ${st[5]} != $3 (${st[*]})"
die "$1-$2 status ${st[5]} != $3 ($st)"
}
grep_dmsetup() {
@@ -456,11 +456,6 @@ grep_dmsetup() {
grep -q "${@:3}" out || die "Expected output \"" "${@:3}" "\" from dmsetup $1 not found!"
}
grep_lvmlockd_dump() {
lvmlockctl --dump | tee out
grep -q "${@:1}" out || die "Expected output \"" "${@:1}" "\" from lvmlockctl --dump not found!"
}
#set -x
unset LVM_VALGRIND
"$@"

View File

@@ -1,5 +0,0 @@
export LVM_TEST_LOCKING=1
export LVM_TEST_LVMPOLLD=1
export LVM_TEST_LVMLOCKD=1
export LVM_TEST_LOCK_TYPE_IDM=1
export LVM_TEST_DEVDIR=/dev

View File

@@ -1,55 +0,0 @@
/*
* Copyright (C) 2020-2021 Seagate Ltd.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*/
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/inotify.h>
#include <uuid/uuid.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <ilm.h>
int main(int argc, char *argv[])
{
int pecent = atoi(argv[1]);
int ret, s;
ret = ilm_connect(&s);
if (ret == 0) {
printf("ilm_connect: SUCCESS\n");
} else {
printf("ilm_connect: FAIL\n");
exit(-1);
}
ret = ilm_inject_fault(s, pecent);
if (ret == 0) {
printf("ilm_inject_fault (100): SUCCESS\n");
} else {
printf("ilm_inject_fault (100): FAIL\n");
exit(-1);
}
ret = ilm_disconnect(s);
if (ret == 0) {
printf("ilm_disconnect: SUCCESS\n");
} else {
printf("ilm_disconnect: FAIL\n");
exit(-1);
}
return 0;
}

View File

@@ -31,8 +31,6 @@ LVM_TEST_BACKING_DEVICE=${LVM_TEST_BACKING_DEVICE-}
LVM_TEST_DEVDIR=${LVM_TEST_DEVDIR-}
LVM_TEST_NODEBUG=${LVM_TEST_NODEBUG-}
LVM_TEST_LVM1=${LVM_TEST_LVM1-}
LVM_TEST_FAILURE=${LVM_TEST_FAILURE-}
LVM_TEST_MULTI_HOST=${LVM_TEST_MULTI_HOST-}
# TODO: LVM_TEST_SHARED
SHARED=${SHARED-}
@@ -42,7 +40,6 @@ LVM_TEST_LVMPOLLD=${LVM_TEST_LVMPOLLD-}
LVM_TEST_DEVICES_FILE=${LVM_TEST_DEVICES_FILE-}
LVM_TEST_LOCK_TYPE_DLM=${LVM_TEST_LOCK_TYPE_DLM-}
LVM_TEST_LOCK_TYPE_SANLOCK=${LVM_TEST_LOCK_TYPE_SANLOCK-}
LVM_TEST_LOCK_TYPE_IDM=${LVM_TEST_LOCK_TYPE_IDM-}
SKIP_WITHOUT_CLVMD=${SKIP_WITHOUT_CLVMD-}
SKIP_WITH_CLVMD=${SKIP_WITH_CLVMD-}
@@ -65,10 +62,9 @@ test -n "$SKIP_WITH_LVMLOCKD" && test -n "$LVM_TEST_LVMLOCKD" && initskip
unset CDPATH
export LVM_TEST_BACKING_DEVICE LVM_TEST_DEVDIR LVM_TEST_NODEBUG LVM_TEST_FAILURE
export LVM_TEST_MULTI_HOST
export LVM_TEST_BACKING_DEVICE LVM_TEST_DEVDIR LVM_TEST_NODEBUG
export LVM_TEST_LVMLOCKD LVM_TEST_LVMLOCKD_TEST
export LVM_TEST_LVMPOLLD LVM_TEST_LOCK_TYPE_DLM LVM_TEST_LOCK_TYPE_SANLOCK LVM_TEST_LOCK_TYPE_IDM
export LVM_TEST_LVMPOLLD LVM_TEST_LOCK_TYPE_DLM LVM_TEST_LOCK_TYPE_SANLOCK
export LVM_TEST_DEVICES_FILE
# grab some common utilities
. lib/utils

View File

@@ -279,7 +279,7 @@ prepare_test_vars() {
while read path; do
count=$(( count + 1 ))
eval "dev$count=\"$path\""
done < "$LVM_TEST_DEVICE_LIST"
done < $LVM_TEST_DEVICE_LIST
else
for i in {1..16}; do
eval "dev$i=\"$DM_DEV_DIR/mapper/${PREFIX}pv$i\""

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
test_description='Set up things to run tests with idm'
. lib/inittest
[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
aux prepare_idm
aux prepare_lvmlockd

View File

@@ -1,58 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_FAILURE" ] && skip;
aux prepare_devs 3
aux extend_filter_LVMTEST
vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3"
# Create new logic volume
lvcreate -a ey --zero n -l 50%FREE -n $lv1 $vg
DRIVE1=`dmsetup deps -o devname $dev1 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
DRIVE2=`dmsetup deps -o devname $dev2 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
DRIVE3=`dmsetup deps -o devname $dev3 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
HOST1=`readlink /sys/block/$DRIVE1 | awk -F'/' '{print $6}'`
HOST2=`readlink /sys/block/$DRIVE2 | awk -F'/' '{print $6}'`
HOST3=`readlink /sys/block/$DRIVE3 | awk -F'/' '{print $6}'`
# Emulate fabric failure
echo 1 > /sys/block/$DRIVE1/device/delete
[ -f /sys/block/$DRIVE2/device/delete ] && echo 1 > /sys/block/$DRIVE2/device/delete
[ -f /sys/block/$DRIVE3/device/delete ] && echo 1 > /sys/block/$DRIVE3/device/delete
# Wait for 10s and will not lead to timeout
sleep 10
# Rescan drives so can probe the deleted drives and join back them
echo "- - -" > /sys/class/scsi_host/${HOST1}/scan
echo "- - -" > /sys/class/scsi_host/${HOST2}/scan
echo "- - -" > /sys/class/scsi_host/${HOST3}/scan
not check grep_lvmlockd_dump "S lvm_$vg kill_vg"
# The previous device-mapper are removed, but LVM still can directly
# access VGs from the specified physical drives. So enable drives
# for these drives.
aux extend_filter_LVMTEST "a|/dev/$DRIVE1*|" "a|/dev/$DRIVE2*|" "a|/dev/$DRIVE3*|"
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
lvcreate -a n --zero n -l 10 -n $lv2 $vg
vgremove -ff $vg

View File

@@ -1,78 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
[ -z "$LVM_TEST_FAILURE" ] && skip;
aux prepare_devs 2
aux extend_filter_LVMTEST
DRIVE1=`dmsetup deps -o devname $dev1 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
DRIVE2=`dmsetup deps -o devname $dev2 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
[ "$(basename -- $DRIVE1)" = "$(basename -- $DRIVE2)" ] && die "Need to pass two different drives!?"
# The previous device-mapper are removed, but LVM still can directly
# access VGs from the specified physical drives. So enable drives
# for these drives.
aux extend_filter_LVMTEST "a|/dev/$DRIVE1*|" "a|/dev/$DRIVE2*|"
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
vgcreate $SHARED $vg "$dev1" "$dev2"
# Create new logic volume
lvcreate -a ey --zero n -l 100%FREE -n $lv1 $vg
drive_list=($DRIVE1)
# Find all drives with the same WWN and delete them from system,
# so that we can emulate the same drive with multiple paths are
# disconnected with system.
drive_wwn=`udevadm info /dev/${DRIVE1} | awk -F= '/E: ID_WWN=/ {print $2}'`
for dev in /dev/*; do
if [ -b "$dev" ] && [[ ! "$dev" =~ [0-9] ]]; then
wwn=`udevadm info "${dev}" | awk -F= '/E: ID_WWN=/ {print $2}'`
if [ "$wwn" = "$drive_wwn" ]; then
base_name="$(basename -- ${dev})"
drive_list+=("$base_name")
host_list+=(`readlink /sys/block/$base_name | awk -F'/' '{print $6}'`)
fi
fi
done
for d in "${drive_list[@]}"; do
[ -f /sys/block/$d/device/delete ] && echo 1 > /sys/block/$d/device/delete
done
# Fail to create new logic volume
not lvcreate -a n --zero n -l 1 -n $lv2 $vg
# Wait for lock time out caused by drive failure
sleep 70
not check grep_lvmlockd_dump "S lvm_$vg kill_vg"
# Rescan drives so can probe the deleted drives and join back them
for h in "${host_list[@]}"; do
[ -f /sys/class/scsi_host/${h}/scan ] && echo "- - -" > /sys/class/scsi_host/${h}/scan
done
# After the drive is reconnected, $vg should be visible again.
vgchange --lock-start
lvremove -f $vg/$lv1
lvcreate -a ey --zero n -l 1 -n $lv2 $vg
vgremove -ff $vg

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
[ -z "$LVM_TEST_FAILURE" ] && skip;
aux prepare_devs 1
aux extend_filter_LVMTEST
DRIVE1=`dmsetup deps -o devname $dev1 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
# The previous device-mapper are removed, but LVM still can directly
# access VGs from the specified physical drives. So enable drives
# for these drives.
aux extend_filter_LVMTEST "a|/dev/$DRIVE1*|"
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
vgcreate $SHARED $vg "$dev1"
# Create new logic volume
lvcreate -a ey --zero n -l 1 -n $lv1 $vg
drive_list=($DRIVE1)
# Find all drives with the same WWN and delete them from system,
# so that we can emulate the same drive with multiple paths are
# disconnected with system.
drive_wwn=`udevadm info /dev/${DRIVE1} | awk -F= '/E: ID_WWN=/ {print $2}'`
for dev in /dev/*; do
if [ -b "$dev" ] && [[ ! "$dev" =~ [0-9] ]]; then
wwn=`udevadm info "${dev}" | awk -F= '/E: ID_WWN=/ {print $2}'`
if [ "$wwn" = "$drive_wwn" ]; then
base_name="$(basename -- ${dev})"
drive_list+=("$base_name")
host_list+=(`readlink /sys/block/$base_name | awk -F'/' '{print $6}'`)
fi
fi
done
for d in "${drive_list[@]}"; do
[ -f /sys/block/$d/device/delete ] && echo 1 > /sys/block/$d/device/delete
done
# Fail to create new logic volume
not lvcreate -a n --zero n -l 1 -n $lv2 $vg
# Wait for lock time out caused by drive failure
sleep 70
check grep_lvmlockd_dump "S lvm_$vg kill_vg"
lvmlockctl --drop $vg
# Rescan drives so can probe the deleted drives and join back them
for h in "${host_list[@]}"; do
[ -f /sys/class/scsi_host/${h}/scan ] && echo "- - -" > /sys/class/scsi_host/${h}/scan
done
# After the drive is reconnected, $vg should be visible again.
vgchange --lock-start
vgremove -ff $vg

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
[ -z "$LVM_TEST_FAILURE" ] && skip;
aux prepare_devs 3
aux extend_filter_LVMTEST
DRIVE1=`dmsetup deps -o devname $dev1 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
DRIVE2=`dmsetup deps -o devname $dev2 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
DRIVE3=`dmsetup deps -o devname $dev3 | awk '{gsub(/[()]/,""); print $4;}' | sed 's/[0-9]*$//'`
if [ "$DRIVE1" = "$DRIVE2" ] || [ "$DRIVE1" = "$DRIVE3" ] || [ "$DRIVE2" = "$DRIVE3" ]; then
die "Need to pass three different drives!?"
fi
# The previous device-mapper are removed, but LVM still can directly
# access VGs from the specified physical drives. So enable drives
# for these drives.
aux extend_filter_LVMTEST "a|/dev/$DRIVE1*|" "a|/dev/$DRIVE2*|" "a|/dev/$DRIVE3*|"
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3"
# Create new logic volume and deactivate it
lvcreate -a y --zero n -l 1 -n $lv1 $vg
# Inject failure 40% so cannot send partially request to drives
idm_inject_failure 40
# Wait for 40s, but the lock will not be time out
sleep 40
# Inject failure with 0% so can access drives
idm_inject_failure 0
# Deactivate logic volume due to locking failure
lvchange $vg/$lv1 -a n
# Inject failure 100% so cannot send request to drives
idm_inject_failure 100
# Wait for 70s but should have no any alive locks
sleep 70
# Inject failure with 0% so can access drives
idm_inject_failure 0
# Activate logic volume
lvchange $vg/$lv1 -a y
# Inject failure so cannot send request to drives
idm_inject_failure 100
# Wait for 70s but will not time out
sleep 70
# Inject failure with 0% so can access drives
idm_inject_failure 0
check grep_lvmlockd_dump "S lvm_$vg kill_vg"
lvmlockctl --drop $vg
vgchange --lock-start
vgremove -f $vg

View File

@@ -60,7 +60,7 @@ done
# raid1 supports resynchronization
lvcreate --type raid1 -m 2 -Zn -l 4 -n $lv1 $vg
should check raid_leg_status $vg $lv1 "aaa"
check raid_leg_status $vg $lv1 "aaa"
_sync "AAA"
# raid1 supports --nosync
@@ -72,7 +72,7 @@ for r in $segtypes
do
# raid4/5 support resynchronization
lvcreate --type $r -Zn -i 3 -L10 -n $lv1 $vg
should check raid_leg_status $vg $lv1 "aaaa"
check raid_leg_status $vg $lv1 "aaaa"
_sync "AAAA"
# raid4/5 support --nosync
@@ -83,7 +83,7 @@ done
# raid6 supports resynchronization
lvcreate --type raid6 -Zn -i 3 -l 4 -n $lv1 $vg
should check raid_leg_status $vg $lv1 "aaaaa"
check raid_leg_status $vg $lv1 "aaaaa"
_sync "AAAAA"
# raid6 rejects --nosync; it has to initialize P- and Q-Syndromes
@@ -91,7 +91,7 @@ not lvcreate --type raid6 --nosync -Zn -i 3 -l 1 -n $lv1 $vg
# raid10 supports resynchronization
lvcreate --type raid10 -m 1 -Zn -i 3 -L10 -n $lv1 $vg
should check raid_leg_status $vg $lv1 "aaaaaa"
check raid_leg_status $vg $lv1 "aaaaaa"
_sync "AAAAAA"
# raid10 supports --nosync

View File

@@ -1,170 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2017-2020 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
# lvextend thin pool data that has cache|writecache attached
. lib/inittest
do_test()
{
local tp=$1
local lvt=$2
# create some initial data
lvchange -ay $vg/$lvt
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lvt"
mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir"
cp pattern "$mount_dir/pattern1"
dd if=/dev/urandom of="$mount_dir/rand100M" bs=1M count=100 conv=fdatasync
cp pattern "$mount_dir/pattern2"
# extend while mounted
lvextend -L+64M $vg/${tp}_tdata "$dev4"
lvs -a $vg -o+devices
# verify initial data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
# add more data
cp pattern "$mount_dir/pattern3"
dd if=/dev/urandom of="$mount_dir/rand8M" bs=1M count=8 conv=fdatasync
# restart the LV
umount "$mount_dir"
lvchange -an $vg/$lvt
lvchange -an $vg/$tp
lvchange -ay $vg/$lvt
mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir"
# verify all data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
diff pattern "$mount_dir/pattern3"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
dd of=/dev/null if="$mount_dir/rand8M" bs=1M count=8
# extend again while inactive
umount "$mount_dir"
lvchange -an $vg/$lvt
lvchange -an $vg/$tp
lvextend -L+64M $vg/${tp}_tdata "$dev5"
lvs -a $vg -o+devices
lvchange -ay $vg/$lvt
mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir"
# verify all data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
diff pattern "$mount_dir/pattern3"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
dd of=/dev/null if="$mount_dir/rand8M" bs=1M count=8
# add more data
cp pattern "$mount_dir/pattern4"
# remove the cache
lvconvert --splitcache $vg/${tp}_tdata
# verify all data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
diff pattern "$mount_dir/pattern3"
diff pattern "$mount_dir/pattern4"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
dd of=/dev/null if="$mount_dir/rand8M" bs=1M count=8
umount "$mount_dir"
lvchange -an $vg/$lvt
lvchange -an $vg/$tp
lvchange -ay $vg/$lvt
mount "$DM_DEV_DIR/$vg/$lvt" "$mount_dir"
# verify all data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
diff pattern "$mount_dir/pattern3"
diff pattern "$mount_dir/pattern4"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
dd of=/dev/null if="$mount_dir/rand8M" bs=1M count=8
umount "$mount_dir"
lvchange -an $vg/$lvt
lvchange -an $vg/$tp
lvremove $vg/$lvt
lvremove $vg/$tp
lvremove -y $vg
}
aux have_cache 1 10 0 || skip
aux have_writecache 1 0 0 || skip
which mkfs.xfs || skip
mount_dir="mnt"
mkdir -p "$mount_dir"
aux prepare_devs 6 70 # want 64M of usable space from each dev
# generate random data
dd if=/dev/urandom of=pattern bs=512K count=1
vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"
# test extending cache|writecache on thin pool data
# test type cache|writecache
# cache with cachepool|cachevol
# cache with writeback|writethrough
# lv1 is thinpool LV: 128M
# lv2 is fast LV: 64M
# lv3 is thin LV: 1G
# attach writecache to thinpool data
lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
lvs -a $vg -o+devices
do_test $lv1 $lv3
# attach cache/writeback (cachevol) to thinpool data
lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type cache --cachevol $lv2 --cachemode writeback $vg/$lv1
lvs -a $vg -o+devices
do_test $lv1 $lv3
# attach cache/writethrough (cachevol) to thinpool data
lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type cache --cachevol $lv2 --cachemode writethrough $vg/$lv1
lvs -a $vg -o+devices
do_test $lv1 $lv3
# attach cache (cachepool) to thinpool data
lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
lvcreate -y --type cache-pool -n $lv2 -L64M --poolmetadataspare n $vg "$dev3" "$dev6"
lvconvert -y --type cache --cachepool $lv2 --poolmetadataspare n $vg/$lv1
lvs -a $vg -o+devices
do_test $lv1 $lv3
# FIXME: test these thin pool data extensions done by dmeventd
vgremove -f $vg

View File

@@ -1,150 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2017-2020 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
# lvextend LV with cache|writecache
. lib/inittest
# do_test: exercise lvextend of a cached LV $lv1 (globals: $vg, $lv1, $dev4,
# $dev5, $mount_dir, and the 512K "pattern" reference file).
# Sequence: populate an xfs fs on lv1, extend it while mounted ($dev4), verify,
# extend again while inactive ($dev5), verify, split the cache off, verify
# again across a deactivate/reactivate cycle, then remove everything.
# Every diff/dd verification aborts the test on mismatch (inittest sets -e).
do_test()
{
# create some initial data
lvchange -ay $vg/$lv1
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
mount "$DM_DEV_DIR/$vg/$lv1" "$mount_dir"
cp pattern "$mount_dir/pattern1"
dd if=/dev/urandom of="$mount_dir/rand100M" bs=1M count=100 conv=fdatasync
cp pattern "$mount_dir/pattern2"
# extend while mounted
lvextend -L+64M $vg/$lv1 "$dev4"
lvs -a $vg -o+devices
# verify initial data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
# add more data
cp pattern "$mount_dir/pattern3"
dd if=/dev/urandom of="$mount_dir/rand8M" bs=1M count=8 conv=fdatasync
# restart the LV
umount "$mount_dir"
lvchange -an $vg/$lv1
lvchange -ay $vg/$lv1
mount "$DM_DEV_DIR/$vg/$lv1" "$mount_dir"
# verify all data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
diff pattern "$mount_dir/pattern3"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
dd of=/dev/null if="$mount_dir/rand8M" bs=1M count=8
# extend again while inactive
umount "$mount_dir"
lvchange -an $vg/$lv1
lvextend -L+64M $vg/$lv1 "$dev5"
lvs -a $vg -o+devices
lvchange -ay $vg/$lv1
mount "$DM_DEV_DIR/$vg/$lv1" "$mount_dir"
# verify all data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
diff pattern "$mount_dir/pattern3"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
dd of=/dev/null if="$mount_dir/rand8M" bs=1M count=8
# add more data
cp pattern "$mount_dir/pattern4"
# remove the cache
lvconvert --splitcache $vg/$lv1
# verify all data
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
diff pattern "$mount_dir/pattern3"
diff pattern "$mount_dir/pattern4"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
dd of=/dev/null if="$mount_dir/rand8M" bs=1M count=8
umount "$mount_dir"
lvchange -an $vg/$lv1
lvchange -ay $vg/$lv1
mount "$DM_DEV_DIR/$vg/$lv1" "$mount_dir"
# verify data survives a restart after the cache was split off
diff pattern "$mount_dir/pattern1"
diff pattern "$mount_dir/pattern2"
diff pattern "$mount_dir/pattern3"
diff pattern "$mount_dir/pattern4"
dd of=/dev/null if="$mount_dir/rand100M" bs=1M count=100
dd of=/dev/null if="$mount_dir/rand8M" bs=1M count=8
umount "$mount_dir"
lvchange -an $vg/$lv1
lvremove $vg/$lv1
lvremove -y $vg
}
# Require dm-cache and dm-writecache kernel support plus mkfs.xfs.
aux have_cache 1 10 0 || skip
aux have_writecache 1 0 0 || skip
which mkfs.xfs || skip
mount_dir="mnt"
mkdir -p "$mount_dir"
aux prepare_devs 6 66 # want 64M of usable space from each dev
# generate random data
dd if=/dev/urandom of=pattern bs=512K count=1
vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"
# test type cache|writecache
# cache with cachepool|cachevol
# cache with writeback|writethrough
# lv1 is main LV: 128M
# lv2 is fast LV: 64M
# case 1: writecache with cachevol
lvcreate -n $lv1 -L128M -an $vg "$dev1" "$dev2"
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
lvs -a $vg -o+devices
do_test
# case 2: cache with cachevol, writeback mode
lvcreate -n $lv1 -L128M -an $vg "$dev1" "$dev2"
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type cache --cachevol $lv2 --cachemode writeback $vg/$lv1
lvs -a $vg -o+devices
do_test
# case 3: cache with cachevol, writethrough mode
lvcreate -n $lv1 -L128M -an $vg "$dev1" "$dev2"
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type cache --cachevol $lv2 --cachemode writethrough $vg/$lv1
lvs -a $vg -o+devices
do_test
# case 4: cache with cachepool
lvcreate -n $lv1 -L128M -an $vg "$dev1" "$dev2"
lvcreate -y --type cache-pool -n $lv2 -L64M --poolmetadataspare n $vg "$dev3" "$dev6"
lvconvert -y --type cache --cachepool $lv2 --poolmetadataspare n $vg/$lv1
lvs -a $vg -o+devices
do_test
vgremove -f $vg

View File

@@ -36,12 +36,6 @@ LOCKARGS2="dlm"
LOCKARGS3="dlm"
fi
if test -n "$LVM_TEST_LOCK_TYPE_IDM" ; then
LOCKARGS1="idm"
LOCKARGS2="idm"
LOCKARGS3="idm"
fi
aux prepare_devs 5
vgcreate --shared $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"

View File

@@ -1,37 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020~2021 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
# Verify lvm recovers after the lvmlockd daemon dies uncleanly.
. lib/inittest
[ -z "$LVM_TEST_FAILURE" ] && skip;
aux prepare_vg 3
# Create new logic volume
lvcreate -a ey --zero n -l 1 -n $lv1 $vg
# Emulate lvmlockd abnormally exiting
killall -9 lvmlockd
# Restart the daemon and re-start the VG lockspace so locking can resume.
systemctl start lvm2-lvmlockd
vgchange --lock-start $vg
# Locking should work again: cycle activation modes and create/remove LVs.
lvchange -a n $vg/$lv1
lvchange -a sy $vg/$lv1
lvcreate -a ey --zero n -l 1 -n $lv2 $vg
lvchange -a n $vg/$lv2
vgremove -ff $vg

View File

@@ -1,71 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2017-2020 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
# Require dm-cache and dm-writecache kernel support plus mkfs.xfs.
aux have_cache 1 10 0 || skip
aux have_writecache 1 0 0 || skip
which mkfs.xfs || skip
aux prepare_devs 6 70 # want 64M of usable space from each dev
vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"
# lv1 is thinpool LV: 128M
# lv2 is fast LV: 64M
# lv3 is thin LV: 1G
#
# Test lvremove of a thinpool that uses cache|writecache on data
#
# attach writecache to thinpool data
lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type writecache --cachevol $lv2 $vg/$lv1
lvchange -ay $vg/$lv1
lvs -a $vg
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv3"
# removing the pool removes its cached data and the thin LV with it,
# so each case below recreates lv1/lv2/lv3 from scratch
lvremove -y $vg/$lv1
# attach cache/writeback (cachevol) to thinpool data
lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type cache --cachevol $lv2 --cachemode writeback $vg/$lv1
lvchange -ay $vg/$lv1
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv3"
lvremove -y $vg/$lv1
# attach cache/writethrough (cachevol) to thinpool data
lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
lvcreate -n $lv2 -L64M -an $vg "$dev3"
lvconvert -y --type cache --cachevol $lv2 --cachemode writethrough $vg/$lv1
lvchange -ay $vg/$lv1
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv3"
lvremove -y $vg/$lv1
# attach cache (cachepool) to thinpool data
lvcreate --type thin-pool -n $lv1 -L128M --poolmetadataspare n $vg "$dev1" "$dev2"
lvcreate --type thin -n $lv3 -V1G --thinpool $lv1 $vg
lvcreate -y --type cache-pool -n $lv2 -L64M --poolmetadataspare n $vg "$dev3" "$dev6"
lvconvert -y --type cache --cachepool $lv2 --poolmetadataspare n $vg/$lv1
lvchange -ay $vg/$lv1
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv3"
lvremove -y $vg/$lv1
vgremove -f $vg

View File

@@ -1,87 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# This testing script is for multi-hosts testing.
#
# On the host A:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hosta.sh
# On the host B:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hostb.sh
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
# Shared backing devices are passed as a comma-separated list.
IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BLKDEVS[@]}"; do
aux extend_filter_LVMTEST "a|$d|"
done
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
# Wipe each backing device and, for IDM locking, clear any stale lock
# state on the drive via raw SCSI commands.
for d in "${BLKDEVS[@]}"; do
dd if=/dev/zero of="$d" bs=32k count=1
wipefs -a "$d" 2>/dev/null || true
sg_dev=`sg_map26 ${d}`
if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
echo "Cleanup IDM context for drive ${d} ($sg_dev)"
sg_raw -v -r 512 -o /tmp/idm_tmp_data.bin $sg_dev \
88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
sg_raw -v -s 512 -i /tmp/idm_tmp_data.bin $sg_dev \
8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
rm /tmp/idm_tmp_data.bin
fi
done
# One shared VG per backing device, each with an exclusively activated LV.
for i in $(seq 1 ${#BLKDEVS[@]}); do
vgcreate $SHARED TESTVG$i ${BLKDEVS[$(( i - 1 ))]}
lvcreate -a n --zero n -l 1 -n foo TESTVG$i
lvchange -a ey TESTVG$i/foo
done
# Map each backing device (possibly a partition) to its whole-disk node
# and SCSI host by matching WWNs.
for d in "${BLKDEVS[@]}"; do
drive_wwn=`udevadm info $d | awk -F= '/E: ID_WWN=/ {print $2}'`
for dev in /dev/*; do
if [ -b "$dev" ] && [[ ! "$dev" =~ [0-9] ]]; then
wwn=`udevadm info "${dev}" | awk -F= '/E: ID_WWN=/ {print $2}'`
if [ "$wwn" = "$drive_wwn" ]; then
base_name="$(basename -- ${dev})"
drive_list+=("$base_name")
host_list+=(`readlink /sys/block/$base_name | awk -F'/' '{print $6}'`)
fi
fi
done
done
# Simulate drive failure: delete the SCSI devices from the system.
for d in "${drive_list[@]}"; do
[ -f /sys/block/$d/device/delete ] && echo 1 > /sys/block/$d/device/delete
done
# Wait past the lease period; lvmlockd should flag every VG as kill_vg,
# after which the VGs are dropped.
sleep 100
for i in $(seq 1 ${#BLKDEVS[@]}); do
check grep_lvmlockd_dump "S lvm_TESTVG$i kill_vg"
lvmlockctl --drop TESTVG$i
done
# Rescan drives so can probe the deleted drives and join back them
for h in "${host_list[@]}"; do
[ -f /sys/class/scsi_host/${h}/scan ] && echo "- - -" > /sys/class/scsi_host/${h}/scan
done

View File

@@ -1,56 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# This testing script is for multi-hosts testing.
#
# On the host A:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hosta.sh
# On the host B:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hostb.sh
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BLKDEVS[@]}"; do
aux extend_filter_LVMTEST "a|$d|"
done
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
# Join the lockspaces for the VGs created by the host A script.
vgchange --lock-start
vgdisplay
# Host A still holds the exclusive LV locks, so -a ey must fail here.
for i in $(seq 1 ${#BLKDEVS[@]}); do
not lvchange -a ey TESTVG$i/foo
done
# Sleep for 70 seconds so the previous lease is expired
sleep 70
# After host A's lease expired, this host can take the exclusive locks.
for i in $(seq 1 ${#BLKDEVS[@]}); do
lvchange -a ey TESTVG$i/foo
lvchange -a n TESTVG$i/foo
done
for i in $(seq 1 ${#BLKDEVS[@]}); do
vgremove -f TESTVG$i
done

View File

@@ -1,78 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# This testing script is for multi-hosts testing, the paired scripts
# are: multi_hosts_lv_hosta.sh / multi_hosts_lv_hostb.sh
#
# On the host A:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hosta.sh
# On the host B:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hostb.sh
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BLKDEVS[@]}"; do
aux extend_filter_LVMTEST "a|$d|"
done
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
BLKDEVS_NUM=${#BLKDEVS[@]}
# Wipe each backing device and, for IDM locking, clear any stale lock
# state on the drive via raw SCSI commands.
for d in "${BLKDEVS[@]}"; do
dd if=/dev/zero of="$d" bs=32k count=1
wipefs -a "$d" 2>/dev/null || true
sg_dev=`sg_map26 ${d}`
if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
echo "Cleanup IDM context for drive ${d} ($sg_dev)"
sg_raw -v -r 512 -o /tmp/idm_tmp_data.bin $sg_dev \
88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
sg_raw -v -s 512 -i /tmp/idm_tmp_data.bin $sg_dev \
8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
rm /tmp/idm_tmp_data.bin
fi
done
#aux prepare_pvs $BLKDEVS_NUM 6400
# Create one shared VG per backing device with 20 small LVs each.
for i in $(seq 1 ${#BLKDEVS[@]}); do
echo $i
d="dev$i"
vgcreate $SHARED TESTVG$i ${BLKDEVS[$(( i - 1 ))]}
for j in {1..20}; do
lvcreate -a n --zero n -l 1 -n foo$j TESTVG$i
done
done
# Activate every LV exclusively on this host, then deactivate, leaving
# the VGs for the host B script to lock-start and exercise.
for i in $(seq 1 ${#BLKDEVS[@]}); do
for j in {1..20}; do
lvchange -a ey TESTVG$i/foo$j
done
done
for i in $(seq 1 ${#BLKDEVS[@]}); do
for j in {1..20}; do
lvchange -a n TESTVG$i/foo$j
done
done

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# This testing script is for multi-hosts testing, the paired scripts
# are: multi_hosts_lv_hosta.sh / multi_hosts_lv_hostb.sh
#
# On the host A:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hosta.sh
# On the host B:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_hostb.sh
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BLKDEVS[@]}"; do
aux extend_filter_LVMTEST "a|$d|"
done
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
# Join the lockspaces for the VGs created by the host A script.
vgchange --lock-start
# Cycle each LV through shared, exclusive, and deactivated states.
for i in $(seq 1 ${#BLKDEVS[@]}); do
for j in {1..20}; do
lvchange -a sy TESTVG$i/foo$j
done
done
for i in $(seq 1 ${#BLKDEVS[@]}); do
for j in {1..20}; do
lvchange -a ey TESTVG$i/foo$j
done
done
for i in $(seq 1 ${#BLKDEVS[@]}); do
for j in {1..20}; do
lvchange -a n TESTVG$i/foo$j
done
done
# Final cleanup of the shared VGs.
for i in $(seq 1 ${#BLKDEVS[@]}); do
vgremove -f TESTVG$i
done

View File

@@ -1,87 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# This testing script is for multi-hosts testing.
#
# On the host A:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_sh_timeout_hosta.sh
# On the host B:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_sh_timeout_hostb.sh
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BLKDEVS[@]}"; do
aux extend_filter_LVMTEST "a|$d|"
done
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
# Wipe each backing device and, for IDM locking, clear any stale lock
# state on the drive via raw SCSI commands.
for d in "${BLKDEVS[@]}"; do
dd if=/dev/zero of="$d" bs=32k count=1
wipefs -a "$d" 2>/dev/null || true
sg_dev=`sg_map26 ${d}`
if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
echo "Cleanup IDM context for drive ${d} ($sg_dev)"
sg_raw -v -r 512 -o /tmp/idm_tmp_data.bin $sg_dev \
88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
sg_raw -v -s 512 -i /tmp/idm_tmp_data.bin $sg_dev \
8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
rm /tmp/idm_tmp_data.bin
fi
done
# One shared VG per backing device; LV activated in *shared* mode here
# (contrast with the ex_timeout variant, which uses exclusive mode).
for i in $(seq 1 ${#BLKDEVS[@]}); do
vgcreate $SHARED TESTVG$i ${BLKDEVS[$(( i - 1 ))]}
lvcreate -a n --zero n -l 1 -n foo TESTVG$i
lvchange -a sy TESTVG$i/foo
done
# Map each backing device to its whole-disk node and SCSI host via WWN.
for d in "${BLKDEVS[@]}"; do
drive_wwn=`udevadm info $d | awk -F= '/E: ID_WWN=/ {print $2}'`
for dev in /dev/*; do
if [ -b "$dev" ] && [[ ! "$dev" =~ [0-9] ]]; then
wwn=`udevadm info "${dev}" | awk -F= '/E: ID_WWN=/ {print $2}'`
if [ "$wwn" = "$drive_wwn" ]; then
base_name="$(basename -- ${dev})"
drive_list+=("$base_name")
host_list+=(`readlink /sys/block/$base_name | awk -F'/' '{print $6}'`)
fi
fi
done
done
# Simulate drive failure: delete the SCSI devices from the system.
for d in "${drive_list[@]}"; do
[ -f /sys/block/$d/device/delete ] && echo 1 > /sys/block/$d/device/delete
done
# Wait past the lease period; lvmlockd should flag every VG as kill_vg.
sleep 100
for i in $(seq 1 ${#BLKDEVS[@]}); do
check grep_lvmlockd_dump "S lvm_TESTVG$i kill_vg"
lvmlockctl --drop TESTVG$i
done
# Rescan drives so can probe the deleted drives and join back them
for h in "${host_list[@]}"; do
[ -f /sys/class/scsi_host/${h}/scan ] && echo "- - -" > /sys/class/scsi_host/${h}/scan
done

View File

@@ -1,56 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# This testing script is for multi-hosts testing.
#
# On the host A:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hosta.sh
# On the host B:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_lv_ex_timeout_hostb.sh
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BLKDEVS[@]}"; do
aux extend_filter_LVMTEST "a|$d|"
done
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
# Join the lockspaces for the VGs created by the host A script.
vgchange --lock-start
vgdisplay
# Shared activation succeeds alongside host A's shared locks.
for i in $(seq 1 ${#BLKDEVS[@]}); do
lvchange -a sy TESTVG$i/foo
done
# Sleep for 70 seconds so the previous lease is expired
sleep 70
# With host A's lease expired, exclusive activation should now succeed.
for i in $(seq 1 ${#BLKDEVS[@]}); do
lvchange -a ey TESTVG$i/foo
lvchange -a n TESTVG$i/foo
done
for i in $(seq 1 ${#BLKDEVS[@]}); do
vgremove -f TESTVG$i
done

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# This testing script is for multi-hosts testing, the paired scripts
# are: multi_hosts_vg_hosta.sh / multi_hosts_vg_hostb.sh
#
# On the host A:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hosta.sh
# On the host B:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hostb.sh
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BLKDEVS[@]}"; do
aux extend_filter_LVMTEST "a|$d|"
done
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
# Create one shared VG per backing device (TESTVG1..TESTVGn) and leave
# them deactivated for the host B script to pick up.
i=0
for d in "${BLKDEVS[@]}"; do
echo $i
i=$((i+1))
vgcreate $SHARED TESTVG$i $d
vgchange -a n TESTVG$i
done

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2020 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# This testing script is for multi-hosts testing, the paired scripts
# are: multi_hosts_vg_hosta.sh / multi_hosts_vg_hostb.sh
#
# On the host A:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hosta.sh
# On the host B:
# make check_lvmlockd_idm \
# LVM_TEST_BACKING_DEVICE=/dev/sdj3,/dev/sdk3,/dev/sdl3 \
# LVM_TEST_MULTI_HOST=1 T=multi_hosts_vg_hostb.sh
SKIP_WITH_LVMPOLLD=1
. lib/inittest
[ -z "$LVM_TEST_MULTI_HOST" ] && skip;
IFS=',' read -r -a BLKDEVS <<< "$LVM_TEST_BACKING_DEVICE"
for d in "${BLKDEVS[@]}"; do
aux extend_filter_LVMTEST "a|$d|"
done
aux lvmconf "devices/allow_changes_with_duplicate_pvs = 1"
# Join the lockspaces for the VGs created by the host A script.
vgchange --lock-start
# Each VG created on host A should be visible here and contain no LVs.
i=0
for d in "${BLKDEVS[@]}"; do
i=$((i+1))
check vg_field TESTVG$i lv_count 0
done
# Activate exclusively and remove each VG from this host.
i=0
for d in "${BLKDEVS[@]}"; do
i=$((i+1))
vgchange -a ey TESTVG$i
vgremove -ff TESTVG$i
done

View File

@@ -1,111 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux prepare_devs 6
get_devs
pvcreate -M2 "${DEVICES[@]}"
vgcreate --shared -M2 "$vg1" "$dev1" "$dev2" "$dev3"
vgcreate --shared -M2 "$vg2" "$dev4" "$dev5" "$dev6"
# Stress loop over $vg1: repeatedly create an LV, cycle it and the VG
# through exclusive/shared activation modes, resize, and remove.
# Runs concurrently with test_vg_thread2 (which uses $vg2) to exercise
# parallel lockd operations on independent VGs.
test_vg_thread1()
{
for i in {1..1000}
do
# Create new logic volume and deactivate it
lvcreate -a n --zero n -l 1 -n foo $vg1
# Set minor number
lvchange $vg1/foo -My --major=255 --minor=123
# Activate logic volume
lvchange $vg1/foo -a y
# Extend logic volume with 10%
lvextend -l+10 $vg1/foo
# Deactivate logic volume
lvchange $vg1/foo -a n
# Deactivate volume group
vgchange $vg1 -a n
# Activate volume group with shareable mode
vgchange $vg1 -a sy
# lvextend fails due to mismatched lock mode
not lvextend -l+10 $vg1/foo
# Promote volume group to exclusive mode
vgchange $vg1 -a ey
# Shrink, deactivate and delete the LV before the next iteration
lvreduce -f -l-4 $vg1/foo
lvchange -an $vg1/foo
lvremove $vg1/foo
done
}
# Same stress loop as test_vg_thread1 but over $vg2 with a different
# minor number, so both threads can run concurrently without colliding.
test_vg_thread2()
{
for i in {1..1000}
do
# Create new logic volume and deactivate it
lvcreate -a n --zero n -l 1 -n foo $vg2
# Set minor number
lvchange $vg2/foo -My --major=255 --minor=124
# Activate logic volume
lvchange $vg2/foo -a y
# Extend logic volume with 10%
lvextend -l+10 $vg2/foo
# Deactivate logic volume
lvchange $vg2/foo -a n
# Deactivate volume group
vgchange $vg2 -a n
# Activate volume group with shareable mode
vgchange $vg2 -a sy
# lvextend fails due to mismatched lock mode
not lvextend -l+10 $vg2/foo
# Promote volume group to exclusive mode
vgchange $vg2 -a ey
# Shrink, deactivate and delete the LV before the next iteration
lvreduce -f -l-4 $vg2/foo
lvchange -an $vg2/foo
lvremove $vg2/foo
done
}
# Run both stress loops concurrently and wait for both to finish,
# then clean up the two VGs.
test_vg_thread1 &
WAITPID=$!
test_vg_thread2 &
WAITPID="$WAITPID "$!
wait $WAITPID
vgremove -ff $vg1
vgremove -ff $vg2

View File

@@ -1,93 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux prepare_devs 8
get_devs
pvcreate -M2 "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"
# Stress VG create/remove: repeatedly build and tear down a shared VG
# on the first three devices, concurrently with the other threads.
test_vg_thread1()
{
	local iter
	for (( iter = 0; iter < 1000; iter++ )); do
		vgcreate --shared -M2 "$vg1" "$dev1" "$dev2" "$dev3"
		vgremove -ff $vg1
	done
}
# Long-lived VG stress: create $vg2 once, then repeatedly cycle an LV
# through create/activate/resize/lock-mode changes/remove, concurrently
# with the VG create/remove and PV create/remove threads.
test_vg_thread2()
{
vgcreate --shared -M2 "$vg2" "$dev4" "$dev5" "$dev6"
for i in {1..1000}
do
# Create new logic volume and deactivate it
lvcreate -a n --zero n -l 1 -n foo $vg2
# Set minor number
lvchange $vg2/foo -My --major=255 --minor=124
# Activate logic volume
lvchange $vg2/foo -a y
# Extend logic volume with 10%
lvextend -l+10 $vg2/foo
# Deactivate logic volume
lvchange $vg2/foo -a n
# Deactivate volume group
vgchange $vg2 -a n
# Activate volume group with shareable mode
vgchange $vg2 -a sy
# lvextend fails due to mismatched lock mode
not lvextend -l+10 $vg2/foo
# Promote volume group to exclusive mode
vgchange $vg2 -a ey
# Shrink, deactivate and delete the LV before the next iteration
lvreduce -f -l-4 $vg2/foo
lvchange -an $vg2/foo
lvremove $vg2/foo
done
vgremove -ff $vg2
}
# PV metadata stress: repeatedly create and remove PVs on the two spare
# devices while the VG/LV threads run on the other devices.
test_vg_thread3()
{
	local iter
	for (( iter = 0; iter < 1000; iter++ )); do
		pvcreate -M2 "$dev7" "$dev8"
		pvremove "$dev7"
		pvremove "$dev8"
	done
}
# Run all three stress loops concurrently and wait for every one.
test_vg_thread1 &
WAITPID=$!
test_vg_thread2 &
WAITPID="$WAITPID "$!
test_vg_thread3 &
WAITPID="$WAITPID "$!
wait $WAITPID

View File

@@ -1,59 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMPOLLD=1
# Single-host stress: 1000 iterations of LV create/activate/resize and
# VG lock-mode cycling, verifying the persistent minor number each time.
. lib/inittest
aux prepare_vg 3
for i in {1..1000}
do
# Create new logic volume and deactivate it
lvcreate -a n --zero n -l 1 -n foo $vg
# Set minor number
lvchange $vg/foo -My --major=255 --minor=123
# Activate logic volume
lvchange $vg/foo -a y
# Check device mapper
dmsetup info $vg-foo | tee info
grep -E "^Major, minor: *[0-9]+, 123" info
# Extend logic volume with 10%
lvextend -l+10 $vg/foo
# Deactivate logic volume
lvchange $vg/foo -a n
# Deactivate volume group
vgchange $vg -a n
# Activate volume group with shareable mode
vgchange $vg -a sy
# lvextend fails due to mismatched lock mode
not lvextend -l+10 $vg/foo
# Promote volume group to exclusive mode
vgchange $vg -a ey
# Shrink, deactivate and delete the LV before the next iteration
lvreduce -f -l-4 $vg/foo
lvchange -an $vg/foo
lvremove $vg/foo
done
vgremove -ff $vg

View File

@@ -10,7 +10,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Test dm-writecache and dm-cache with different block size combinations
# Test writecache usage
SKIP_WITH_LVMPOLLD=1
@@ -94,7 +94,6 @@ _verify_data_on_lv() {
lvchange -an $vg/$lv1
}
# Check that the LBS/PBS that were set up is accurately reported for the devs.
_check_env() {
check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "$1"
@@ -106,33 +105,24 @@ _check_env() {
blockdev --getpbsz "$dev2"
}
#
# _run_test $BS1 $BS2 $type $optname "..."
#
# $BS1: the xfs sectsz is verified to match $BS1, after mkfs
# $BS2: the lv1 LBS is verified to match $BS2, after cache is added to lv1
# $type is cache or writecache to use in lvconvert --type $type
# $optname is either --cachevol or --cachepool to use in lvconvert
# "..." a sector size option to use in mkfs.xfs
#
_run_test() {
vgcreate $SHARED $vg "$dev1"
vgextend $vg "$dev2"
lvcreate -n $lv1 -l 8 -an $vg "$dev1"
lvcreate -n $lv2 -l 4 -an $vg "$dev2"
lvchange -ay $vg/$lv1
mkfs.xfs -f $5 "$DM_DEV_DIR/$vg/$lv1" |tee out
mkfs.xfs -f $2 "$DM_DEV_DIR/$vg/$lv1" |tee out
grep "sectsz=$1" out
_add_new_data_to_mnt
lvconvert --yes --type $3 $4 $lv2 $vg/$lv1
lvconvert --yes --type writecache --cachevol $lv2 $vg/$lv1
blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
grep "$2" out
grep "$1" out
blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
_add_more_data_to_mnt
_verify_data_on_mnt
lvconvert --splitcache $vg/$lv1
check lv_field $vg/$lv1 segtype linear
check lv_field $vg/$lv2 segtype linear
blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
_verify_data_on_mnt
@@ -146,7 +136,7 @@ _run_test() {
vgremove $vg
}
# Setup: LBS 512, PBS 512
# scsi_debug devices with 512 LBS 512 PBS
aux prepare_scsi_debug_dev 256 || skip
aux prepare_devs 2 64
@@ -160,58 +150,43 @@ vgremove -ff $vg
_check_env "512" "512"
# lbs 512, pbs 512, xfs 512, wc bs 512
_run_test 512 512 "writecache" "--cachevol" ""
# lbs 512, pbs 512, xfs 512, cache bs 512
_run_test 512 512 "cache" "--cachevol" ""
_run_test 512 512 "cache" "--cachepool" ""
# lbs 512, pbs 512, xfs 512, wc 512
_run_test 512 ""
# lbs 512, pbs 512, xfs -s 4096, wc bs 4096
_run_test 4096 4096 "writecache" "--cachevol" "-s size=4096"
# lbs 512, pbs 512, xfs -s 4096, cache bs 512
_run_test 4096 512 "cache" "--cachevol" "-s size=4096"
_run_test 4096 512 "cache" "--cachepool" "-s size=4096"
# lbs 512, pbs 512, xfs -s 4096, wc 4096
_run_test 4096 "-s size=4096"
aux cleanup_scsi_debug_dev
# Setup: LBS 512, PBS 4096
# lbs=512, pbs=4096
aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
aux prepare_devs 2 64
_check_env "512" "4096"
# lbs 512, pbs 4k, xfs 4k, wc bs 4k
_run_test 4096 4096 "writecache" "--cachevol" ""
# lbs 512, pbs 4k, xfs 4k, cache bs 512
_run_test 4096 512 "cache" "--cachevol" ""
_run_test 4096 512 "cache" "--cachepool" ""
# lbs 512, pbs 4k, xfs 4k, wc 4k
_run_test 4096 ""
# lbs 512, pbs 4k, xfs -s 512, wc bs 512
_run_test 512 512 "writecache" "--cachevol" "-s size=512"
# lbs 512, pbs 4k, xfs -s 512, cache bs 512
_run_test 512 512 "cache" "--cachevol" "-s size=512"
_run_test 512 512 "cache" "--cachepool" "-s size=512"
# lbs 512, pbs 4k, xfs -s 512, wc 512
_run_test 512 "-s size=512"
aux cleanup_scsi_debug_dev
# Setup: LBS 4096, PBS 4096
# scsi_debug devices with 4K LBS and 4K PBS
aux prepare_scsi_debug_dev 256 sector_size=4096
aux prepare_devs 2 64
_check_env "4096" "4096"
# lbs 4k, pbs 4k, xfs 4k, wc bs 4k
_run_test 4096 4096 "writecache" "--cachevol" ""
# lbs 4k, pbs 4k, xfs 4k, cache bs 4k
_run_test 4096 4096 "cache" "--cachevol" ""
_run_test 4096 4096 "cache" "--cachepool" ""
# lbs 4k, pbs 4k, xfs 4k, wc 4k
_run_test 4096 ""
aux cleanup_scsi_debug_dev
# Setup: LBS 512, PBS 512
# scsi_debug devices with 512 LBS 512 PBS
aux prepare_scsi_debug_dev 256 || skip
aux prepare_devs 2 64
@@ -247,4 +222,3 @@ lvremove $vg/$lv2
vgremove $vg
aux cleanup_scsi_debug_dev

View File

@@ -1,232 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Test dm-writecache and dm-cache with different block size combinations
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux have_writecache 1 0 0 || skip
which mkfs.xfs || skip
mnt="mnt"
mkdir -p $mnt
awk 'BEGIN { while (z++ < 16384) printf "A" }' > fileA
awk 'BEGIN { while (z++ < 16384) printf "B" }' > fileB
awk 'BEGIN { while (z++ < 16384) printf "C" }' > fileC
# generate random data
dd if=/dev/urandom of=randA bs=512K count=2
dd if=/dev/urandom of=randB bs=512K count=3
dd if=/dev/urandom of=randC bs=512K count=4
# Mount $lv1 and populate it with the initial data set: the three random
# files at the top level and the three pattern files in subdirs 1 and 2.
_add_new_data_to_mnt() {
	local d f

	mount "$DM_DEV_DIR/$vg/$lv1" $mnt

	# add original data
	for f in randA randB randC; do
		cp "$f" $mnt
	done

	for d in 1 2; do
		mkdir $mnt/$d
		for f in fileA fileB fileC; do
			cp "$f" $mnt/$d
		done
	done

	sync
}
# Create $mnt/more and copy all six source files into it.
_add_more_data_to_mnt() {
	local f

	mkdir $mnt/more
	for f in fileA fileB fileC randA randB randC; do
		cp "$f" $mnt/more
	done
	sync
}
# Compare every file of the initial data set on the mounted fs against
# its source copy; any difference fails the test (diff exits nonzero).
_verify_data_on_mnt() {
	local d f

	for f in randA randB randC; do
		diff "$f" $mnt/$f
	done

	for d in 1 2; do
		for f in fileA fileB fileC; do
			diff "$f" $mnt/$d/$f
		done
	done
}
# Compare every file under $mnt/more against its source copy.
_verify_more_data_on_mnt() {
	local f

	for f in randA randB randC fileA fileB fileC; do
		diff "$f" $mnt/more/$f
	done
}
# Activate $lv1, mount it, and verify the initial data set written by
# _add_new_data_to_mnt is still intact; then remove that data, unmount,
# and deactivate the LV again so the caller gets it back in a clean state.
_verify_data_on_lv() {
	lvchange -ay $vg/$lv1
	mount "$DM_DEV_DIR/$vg/$lv1" $mnt
	_verify_data_on_mnt
	# drop the verified files so the fs is left empty for later use
	rm $mnt/randA
	rm $mnt/randB
	rm $mnt/randC
	rm -rf $mnt/1
	rm -rf $mnt/2
	umount $mnt
	lvchange -an $vg/$lv1
}
# Check that the LBS ($1) and PBS ($2) are accurately reported.
_check_env() {
	# the scsi_debug device must report the expected logical/physical
	# block sizes via sysfs, otherwise the test setup is wrong
	check sysfs "$(< SCSI_DEBUG_DEV)" queue/logical_block_size "$1"
	check sysfs "$(< SCSI_DEBUG_DEV)" queue/physical_block_size "$2"
	# informational only: log what blockdev reports for the test devices
	blockdev --getss "$dev1"
	blockdev --getpbsz "$dev1"
	blockdev --getss "$dev2"
	blockdev --getpbsz "$dev2"
}
#
# _run_test $BD1 $BD2 $type $optname "..."
#
# $BD1: device to place the main LV on
# $BD2: device to place the cache on
# $type is cache or writecache to use in lvconvert --type $type
# $optname is either --cachevol or --cachepool to use in lvconvert
# "..." a sector size option to use in mkfs.xfs
#
# Full attach/detach cycle for one cache configuration:
#   $1: device for the main LV, $2: device for the cache LV,
#   $3: lvconvert --type (cache|writecache),
#   $4: attachment form (--cachevol|--cachepool),
#   $5: mkfs.xfs sector-size option (e.g. "-s size=4096"), may be empty.
# Data written before and after attaching the cache must survive
# --splitcache both on the still-mounted fs and after a remount.
_run_test() {
	vgcreate $SHARED $vg "$1"
	vgextend $vg "$2"
	# main LV on $1, cache LV on $2
	lvcreate -n $lv1 -l 8 -an $vg "$1"
	lvcreate -n $lv2 -l 4 -an $vg "$2"
	lvchange -ay $vg/$lv1
	# $5 intentionally unquoted: it expands to zero or more mkfs options
	mkfs.xfs -f $5 "$DM_DEV_DIR/$vg/$lv1" |tee out
	_add_new_data_to_mnt
	# attach $lv2 to $lv1 as the requested cache type/form, while mounted
	lvconvert --yes --type $3 $4 $lv2 $vg/$lv1
	# TODO: check expected LBS of LV1
	# blockdev --getss "$DM_DEV_DIR/$vg/$lv1" |tee out
	# grep "$N" out
	# TODO: check expected PBS of LV1
	# blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1" |tee out
	# grep "$N" out
	_add_more_data_to_mnt
	_verify_data_on_mnt
	# detach the cache; $lv1 must revert to a plain linear LV
	lvconvert --splitcache $vg/$lv1
	check lv_field $vg/$lv1 segtype linear
	blockdev --getss "$DM_DEV_DIR/$vg/$lv1"
	blockdev --getpbsz "$DM_DEV_DIR/$vg/$lv1"
	# both data sets must still be readable on the mounted fs
	_verify_data_on_mnt
	_verify_more_data_on_mnt
	umount $mnt
	lvchange -an $vg/$lv1
	lvchange -an $vg/$lv2
	# and the initial data set must survive a fresh activate+mount cycle
	_verify_data_on_lv
	lvremove $vg/$lv1
	lvremove $vg/$lv2
	vgremove $vg
}
# Setup: dev1 LBS 512, PBS 4096 (using scsi-debug)
# dev2 LBS 512, PBS 4096 (using scsi-debug)
# dev3 LBS 512, PBS 512 (using loop)
# dev4 LBS 512, PBS 512 (using loop)
#
aux prepare_scsi_debug_dev 256 sector_size=512 physblk_exp=3
aux prepare_devs 2 64
# loopa/loopb have LBS 512 PBS 512
which fallocate || skip
fallocate -l 64M loopa
fallocate -l 64M loopb
for i in {1..5}; do
LOOP1=$(losetup -f loopa --show || true)
test -n "$LOOP1" && break
done
for i in {1..5} ; do
LOOP2=$(losetup -f loopb --show || true)
test -n "$LOOP2" && break
done
# prepare devX mapping so it works for real & fake dev dir
d=3
for i in "$LOOP1" "$LOOP2"; do
echo "$i"
m=${i##*loop}
test -e "$DM_DEV_DIR/loop$m" || mknod "$DM_DEV_DIR/loop$m" b 7 "$m"
eval "dev$d=\"$DM_DEV_DIR/loop$m\""
d=$(( d + 1 ))
done
# verify dev1/dev2 have LBS 512 PBS 4096
_check_env "512" "4096"
# verify dev3/dev4 have LBS 512 PBS 512
blockdev --getss "$LOOP1" | grep 512
blockdev --getss "$LOOP2" | grep 512
blockdev --getpbsz "$LOOP1" | grep 512
blockdev --getpbsz "$LOOP2" | grep 512
aux extend_filter "a|$dev3|" "a|$dev4|"
# place main LV on dev1 with LBS 512, PBS 4096
# and the cache on dev3 with LBS 512, PBS 512
_run_test "$dev1" "$dev3" "writecache" "--cachevol" ""
_run_test "$dev1" "$dev3" "cache" "--cachevol" ""
_run_test "$dev1" "$dev3" "cache" "--cachepool" ""
# place main LV on dev3 with LBS 512, PBS 512
# and the cache on dev1 with LBS 512, PBS 4096
_run_test "$dev3" "$dev1" "writecache" "--cachevol" ""
_run_test "$dev3" "$dev1" "cache" "--cachevol" ""
_run_test "$dev3" "$dev1" "cache" "--cachepool" ""
# place main LV on dev1 with LBS 512, PBS 4096
# and the cache on dev3 with LBS 512, PBS 512
# and force xfs sectsz 512
_run_test "$dev1" "$dev3" "writecache" "--cachevol" "-s size=512"
_run_test "$dev1" "$dev3" "cache" "--cachevol" "-s size=512"
_run_test "$dev1" "$dev3" "cache" "--cachepool" "-s size=512"
# place main LV on dev3 with LBS 512, PBS 512
# and the cache on dev1 with LBS 512, PBS 4096
# and force xfs sectsz 4096
_run_test "$dev3" "$dev1" "writecache" "--cachevol" "-s size=4096"
_run_test "$dev3" "$dev1" "cache" "--cachevol" "-s size=4096"
_run_test "$dev3" "$dev1" "cache" "--cachepool" "-s size=4096"
losetup -d "$LOOP1" || true
losetup -d "$LOOP2" || true
rm loopa loopb
aux cleanup_scsi_debug_dev

View File

@@ -1,29 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2021 Seagate. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
test_description='Remove the idm test setup'
. lib/inittest
[ -z "$LVM_TEST_LOCK_TYPE_IDM" ] && skip;
# FIXME: collect debug logs (only if a test failed?)
# lvmlockctl -d > lvmlockd-debug.txt
# dlm_tool dump > dlm-debug.txt
lvmlockctl --stop-lockspaces
sleep 1
killall lvmlockd
sleep 1
killall lvmlockd || true
sleep 1
killall seagate_ilm

View File

@@ -228,11 +228,6 @@ arg(detachprofile_ARG, '\0', "detachprofile", 0, 0, 0,
"Detaches a metadata profile from a VG or LV.\n"
"See \\fBlvm.conf\\fP(5) for more information about profiles.\n")
arg(deviceidtype_ARG, '\0', "deviceidtype", string_VAL, 0, 0,
"The type of device ID to use for the device.\n"
"If the specified type is available for the device,\n"
"then it will override the default type that lvm would use.\n")
arg(devices_ARG, '\0', "devices", pv_VAL, ARG_GROUPABLE, 0,
"Devices that the command can use. This option can be repeated\n"
"or accepts a comma separated list of devices. This overrides\n"
@@ -278,6 +273,9 @@ arg(errorwhenfull_ARG, '\0', "errorwhenfull", bool_VAL, 0, 0,
"(Also see dm-thin-pool kernel module option no_space_timeout.)\n"
"See \\fBlvmthin\\fP(7) for more information.\n")
arg(file_long_ARG, '\0', "file", string_VAL, 0, 0,
"File name.\n")
arg(force_long_ARG, '\0', "force", 0, ARG_COUNTABLE, 0,
"Force metadata restore even with thin pool LVs.\n"
"Use with extreme caution. Most changes to thin metadata\n"
@@ -1391,7 +1389,7 @@ arg(readahead_ARG, 'r', "readahead", readahead_VAL, 0, 0,
"\\fBnone\\fP is equivalent to zero.\n")
arg(resizefs_ARG, 'r', "resizefs", 0, 0, 0,
"Resize underlying filesystem together with the LV using \\fBfsadm\\fP(8).\n")
"Resize underlying filesystem together with the LV using fsadm(8).\n")
/* Not used */
arg(reset_ARG, 'R', "reset", 0, 0, 0, NULL)

View File

@@ -163,22 +163,6 @@
# RULE: LV_type1 and lv_is_prop1
#
#
# AUTOTYPE: <segtype>
# The cmd def implies the type. Optionally using --type foo
# is not wrong, but it's redundant. If --type is specified
# it is not used in matching a user command to the cmd def,
# but once a user cmd is matched to the cmd def, a specified
# type is compared to the AUTOTYPE to ensure they match.
# We avoid including --type foo in the OO list because doing
# so often makes the cmd def redundant with another cmd def
# that has --type foo in its required_options. We want a user
# command to only match a single cmd def.
# Usually, a user command with --type foo will match a cmd def
# that includes --type foo in its required_options.
#
# For lvcreate cmd defs, each should either include --type foo
# in required_options, or it should include AUTOTYPE foo
# (and not include --type in OO).
#
# For efficiency, sets of options can be defined and reused
@@ -461,11 +445,11 @@ RULE: --poolmetadata not --readahead --stripesize --stripes_long
# alternate form of lvconvert --type thin
lvconvert --thin --thinpool LV LV_linear_striped_raid_cache_thin_error_zero
OO: --originname LV_new, OO_LVCONVERT_POOL, OO_LVCONVERT
OO: --type thin, --originname LV_new, OO_LVCONVERT_POOL, OO_LVCONVERT
ID: lvconvert_to_thin_with_external
DESC: Convert LV to a thin LV, using the original LV as an external origin.
DESC: Convert LV to a thin LV, using the original LV as an external origin
DESC: (infers --type thin).
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin
RULE: all and lv_is_visible
RULE: all not lv_is_locked lv_is_raid_with_integrity
RULE: --poolmetadata not --readahead --stripesize --stripes_long
@@ -482,18 +466,17 @@ RULE: --poolmetadata not --readahead --stripesize --stripes_long
# alternate form of lvconvert --type cache
lvconvert --cache --cachepool LV LV_linear_striped_raid_thinpool_vdo_vdopool_vdopooldata
OO: OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
OO: --type cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
ID: lvconvert_to_cache_with_cachepool
DESC: Attach a cache pool to an LV.
DESC: Attach a cache pool to an LV (infers --type cache).
RULE: all and lv_is_visible
RULE: all not lv_is_raid_with_integrity
RULE: --poolmetadata not --readahead --stripesize --stripes_long
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: cache
---
lvconvert --type writecache --cachevol LV LV_linear_striped_raid_thinpool
lvconvert --type writecache --cachevol LV LV_linear_striped_raid
OO: OO_LVCONVERT, --cachesettings String
ID: lvconvert_to_writecache
DESC: Attach a writecache to an LV, converts the LV to type writecache.
@@ -520,7 +503,7 @@ FLAGS: SECONDARY_SYNTAX
---
lvconvert --type writecache --cachedevice PV LV_linear_striped_raid_thinpool
lvconvert --type writecache --cachedevice PV LV_linear_striped_raid
OO: OO_LVCONVERT, --cachesize SizeMB, --cachesettings String
ID: lvconvert_to_writecache_with_device
DESC: Add a writecache to an LV, using a specified cache device.
@@ -534,7 +517,7 @@ RULE: all and lv_is_visible
---
lvconvert --type thin-pool LV_linear_striped_raid_cache_writecache_error_zero
lvconvert --type thin-pool LV_linear_striped_raid_cache_error_zero
OO: --stripes_long Number, --stripesize SizeKB,
OO_LVCONVERT_THINPOOL, OO_LVCONVERT_POOL, OO_LVCONVERT
OP: PV ...
@@ -566,8 +549,8 @@ RULE: --poolmetadata not --readahead --stripesize --stripes_long
# This command syntax is deprecated, and the primary forms
# of creating a pool or swapping metadata should be used.
lvconvert --thinpool LV_linear_striped_raid_cache_writecache_thinpool
OO: --stripes_long Number, --stripesize SizeKB,
lvconvert --thinpool LV_linear_striped_raid_cache_thinpool
OO: --type thin-pool, --stripes_long Number, --stripesize SizeKB,
OO_LVCONVERT_THINPOOL, OO_LVCONVERT_POOL, OO_LVCONVERT
OP: PV ...
ID: lvconvert_to_thinpool_or_swap_metadata
@@ -577,7 +560,6 @@ FLAGS: PREVIOUS_SYNTAX
RULE: all and lv_is_visible
RULE: all not lv_is_raid_with_integrity
RULE: --poolmetadata not --readahead --stripesize --stripes_long
AUTOTYPE: thin-pool
---
@@ -611,17 +593,6 @@ RULE: all not lv_is_raid_with_integrity
# This command syntax is deprecated, and the primary forms
# of creating a pool or swapping metadata should be used.
# FIXME
# AUTOTYPE: cache-pool doesn't work here.
# A strange command matches this cmd def:
# lvconvert --type cache-pool --cachepool LV
# where the LV is already a cache pool. That command
# seems to be used to change properties on an existing cache pool.
# The command lvconvert --type cache-pool LV will also change
# properties on an existing cache pool.
# Neither seems like a logical command to change properties
# of an LV, wouldn't lvchange do that?
lvconvert --cachepool LV_linear_striped_raid_cachepool_error_zero
OO: --type cache-pool, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
OP: PV ...
@@ -643,13 +614,12 @@ RULE: all and lv_is_visible
RULE: all not lv_is_locked lv_is_origin lv_is_merging_origin lv_is_external_origin lv_is_virtual lv_is_raid_with_integrity
lvconvert --vdopool LV_linear_striped_raid_cache
OO: OO_LVCONVERT_VDO, OO_LVCONVERT, --name LV_new, --virtualsize SizeMB,
OO: --type vdo-pool, OO_LVCONVERT_VDO, OO_LVCONVERT, --name LV_new, --virtualsize SizeMB,
ID: lvconvert_to_vdopool_param
DESC: Convert LV to type vdopool.
RULE: all and lv_is_visible
RULE: all not lv_is_locked lv_is_origin lv_is_merging_origin lv_is_external_origin lv_is_virtual lv_is_raid_with_integrity
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: vdo-pool
---
@@ -742,14 +712,13 @@ RULE: all not lv_is_locked lv_is_pvmove
RULE: all and lv_is_visible
lvconvert --snapshot LV LV_linear_striped
OO: --chunksize SizeKB, --zero Bool, OO_LVCONVERT
OO: --type snapshot, --chunksize SizeKB, --zero Bool, OO_LVCONVERT
ID: lvconvert_combine_split_snapshot
DESC: Combine a former COW snapshot (second arg) with a former
DESC: origin LV (first arg) to reverse a splitsnapshot command.
RULE: all not lv_is_locked lv_is_pvmove
RULE: all and lv_is_visible
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: snapshot
---
@@ -777,12 +746,12 @@ AUTOTYPE: snapshot
# and the LV type is known.
lvconvert --repair LV_cache_cachepool_mirror_raid_thinpool
OO: --usepolicies, --interval Number, --poolmetadataspare Bool, OO_LVCONVERT
OO: --usepolicies, --interval Number, --poolmetadataspare Bool, --file_long String, OO_LVCONVERT
OP: PV ...
ID: lvconvert_repair
DESC: Replace failed PVs in a raid or mirror LV.
DESC: Repair a thin pool.
DESC: Repair a cache pool.
DESC: Repair a cache pool or cache.
RULE: all not lv_is_locked lv_is_pvmove
RULE: --poolmetadataspare and LV_cache LV_cachepool LV_thinpool
@@ -867,12 +836,11 @@ DESC: Create a linear LV.
FLAGS: SECONDARY_SYNTAX
lvcreate --size SizeMB VG
OO: OO_LVCREATE
OO: --type linear, OO_LVCREATE
OP: PV ...
IO: --mirrors 0, --stripes 1
ID: lvcreate_linear
DESC: Create a linear LV.
AUTOTYPE: linear
---
@@ -912,11 +880,10 @@ FLAGS: SECONDARY_SYNTAX
# R13 (just --stripes)
lvcreate --stripes Number --size SizeMB VG
OO: --stripesize SizeKB, OO_LVCREATE
OO: --stripesize SizeKB, --type striped, OO_LVCREATE
OP: PV ...
ID: lvcreate_striped
DESC: Create a striped LV.
AUTOTYPE: striped
DESC: Create a striped LV (infers --type striped).
# R5,R7 (--type mirror with or without --mirrors)
lvcreate --type mirror --size SizeMB VG
@@ -930,13 +897,11 @@ FLAGS: SECONDARY_SYNTAX
# R14 (just --mirrors)
# alternate form of lvcreate --type raid1|mirror
lvcreate --mirrors PNumber --size SizeMB VG
OO: --stripesize SizeKB, --mirrorlog MirrorLog, --regionsize RegionSize, --minrecoveryrate SizeKB, --maxrecoveryrate SizeKB, OO_LVCREATE
OO: --stripes Number, --stripesize SizeKB,
--mirrorlog MirrorLog, --regionsize RegionSize, --minrecoveryrate SizeKB, --maxrecoveryrate SizeKB, OO_LVCREATE
OP: PV ...
IO: --stripes 1
ID: lvcreate_mirror_or_raid1
DESC: Create a raid1 or mirror LV.
AUTOTYPE: raid1
AUTOTYPE: mirror
DESC: Create a raid1 or mirror LV (infers --type raid1|mirror).
# R9,R10,R11,R12 (--type raid with any use of --stripes/--mirrors)
lvcreate --type raid --size SizeMB VG
@@ -948,14 +913,11 @@ ID: lvcreate_raid_any
DESC: Create a raid LV (a specific raid level must be used, e.g. raid1).
# R15 (--stripes and --mirrors which implies raid10)
# FIXME: --mirrors N --stripes 1 is raid1|mirror and should only
# match the cmd def above for raid1|mirror with IO: --stripes 1
lvcreate --mirrors PNumber --stripes Number --size SizeMB VG
OO: --stripesize SizeKB, --regionsize RegionSize, --minrecoveryrate SizeKB, --maxrecoveryrate SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_raid_any
DESC: Create a raid10 LV.
AUTOTYPE: raid10
---
@@ -975,12 +937,11 @@ DESC: (also see --snapshot).
FLAGS: SECONDARY_SYNTAX
lvcreate --snapshot --size SizeMB LV
OO: --stripes Number, --stripesize SizeKB,
OO: --type snapshot, --stripes Number, --stripesize SizeKB,
--chunksize SizeKB, OO_LVCREATE
OP: PV ...
ID: lvcreate_cow_snapshot
DESC: Create a COW snapshot LV of an origin LV.
AUTOTYPE: snapshot
---
@@ -1005,24 +966,24 @@ DESC: Create a thin pool.
# alternate form of lvcreate --type thin-pool
lvcreate --thin --size SizeMB VG
OO: --stripes Number, --stripesize SizeKB, OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
OO: --stripes Number, --stripesize SizeKB,
--type thin-pool, OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
OP: PV ...
IO: --mirrors 0
ID: lvcreate_thinpool
DESC: Create a thin pool.
DESC: Create a thin pool (infers --type thin-pool).
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin-pool
# alternate form of lvcreate --type thin-pool
lvcreate --size SizeMB --thinpool LV_new VG
OO: --stripes Number, --stripesize SizeKB,
--thin, OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
--thin, --type thin-pool, OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
OP: PV ...
IO: --mirrors 0
ID: lvcreate_thinpool
DESC: Create a thin pool named in --thinpool.
DESC: Create a thin pool named by the --thinpool arg
DESC: (infers --type thin-pool).
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin-pool
---
@@ -1071,21 +1032,20 @@ FLAGS: SECONDARY_SYNTAX
# alternate form of lvcreate --type thin
lvcreate --virtualsize SizeMB --thinpool LV_thinpool VG
OO: --thin, OO_LVCREATE
OO: --type thin, --thin, OO_LVCREATE
IO: --mirrors 0
ID: lvcreate_thin_vol
DESC: Create a thin LV in a thin pool.
AUTOTYPE: thin
DESC: Create a thin LV in a thin pool (infers --type thin).
# alternate form of lvcreate --type thin
lvcreate --virtualsize SizeMB LV_thinpool
OO: --thin, OO_LVCREATE
OO: --type thin, --thin, OO_LVCREATE
IO: --mirrors 0
ID: lvcreate_thin_vol
DESC: Create a thin LV in the thin pool named in the first arg
DESC: (also see --thinpool for naming pool.)
DESC: (variant, infers --type thin, also see --thinpool for
DESC: naming pool.)
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin
---
@@ -1098,20 +1058,20 @@ FLAGS: SECONDARY_SYNTAX
# alternate form of lvcreate --type thin
lvcreate --thin LV_thin
OO: OO_LVCREATE
OO: --type thin, OO_LVCREATE
IO: --mirrors 0
ID: lvcreate_thin_snapshot
DESC: Create a thin LV that is a snapshot of an existing thin LV.
DESC: Create a thin LV that is a snapshot of an existing thin LV
DESC: (infers --type thin).
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin
# alternate form of lvcreate --type thin
lvcreate --snapshot LV_thin
OO: OO_LVCREATE
OO: --type thin, OO_LVCREATE
IO: --mirrors 0
ID: lvcreate_thin_snapshot
DESC: Create a thin LV that is a snapshot of an existing thin LV.
AUTOTYPE: thin
DESC: Create a thin LV that is a snapshot of an existing thin LV
DESC: (infers --type thin).
lvcreate --type thin --thinpool LV_thinpool LV
OO: --thin, OO_LVCREATE
@@ -1121,12 +1081,12 @@ DESC: Create a thin LV that is a snapshot of an external origin LV.
# alternate form of lvcreate --type thin --thinpool LV_thinpool LV
lvcreate --snapshot --thinpool LV_thinpool LV
OO: OO_LVCREATE
OO: --type thin, OO_LVCREATE
IO: --mirrors 0
ID: lvcreate_thin_snapshot_of_external
DESC: Create a thin LV that is a snapshot of an external origin LV.
DESC: Create a thin LV that is a snapshot of an external origin LV
DESC: (infers --type thin).
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin
---
@@ -1140,23 +1100,21 @@ DESC: Create a LV that returns VDO when used.
lvcreate --vdo --size SizeMB VG
OO: --stripes Number, --stripesize SizeKB,
--virtualsize SizeMB, --vdopool LV_new, OO_LVCREATE_VDO, OO_LVCREATE
--type vdo, --virtualsize SizeMB, --vdopool LV_new, OO_LVCREATE_VDO, OO_LVCREATE
OP: PV ...
IO: --mirrors 0
ID: lvcreate_vdo_vol
DESC: Create a VDO LV with VDO pool.
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: vdo
lvcreate --vdopool LV_new --size SizeMB VG
OO: --stripes Number, --stripesize SizeKB,
--virtualsize SizeMB, OO_LVCREATE_VDO, OO_LVCREATE
--vdo, --type vdo, --virtualsize SizeMB, OO_LVCREATE_VDO, OO_LVCREATE
OP: PV ...
IO: --mirrors 0
ID: lvcreate_vdo_vol
DESC: Create a VDO LV with VDO pool.
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: vdo
---
@@ -1189,14 +1147,14 @@ FLAGS: SECONDARY_SYNTAX
# alternate form of lvcreate --type thin
lvcreate --virtualsize SizeMB --size SizeMB --thinpool LV_new VG
OO: --stripes Number, --stripesize SizeKB,
--thin, OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
--type thin, --thin, OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
OP: PV ...
IO: --mirrors 0
ID: lvcreate_thin_vol_and_thinpool
DESC: Create a thin LV, first creating a thin pool for it,
DESC: where the new thin pool is named by --thinpool.
DESC: where the new thin pool is named by the --thinpool arg
DESC: (variant, infers --type thin).
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin
# alternate form of lvcreate --type thin
lvcreate --type thin --virtualsize SizeMB --size SizeMB LV_new|VG
@@ -1214,32 +1172,32 @@ FLAGS: SECONDARY_SYNTAX
# alternate form of lvcreate --type thin
lvcreate --thin --virtualsize SizeMB --size SizeMB LV_new|VG
OO: --stripes Number, --stripesize SizeKB,
OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
--type thin, OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
OP: PV ...
IO: --mirrors 0
ID: lvcreate_thin_vol_and_thinpool
DESC: Create a thin LV, first creating a thin pool for it,
DESC: where the new thin pool is named in the first arg,
DESC: or the new thin pool name is generated when the first
DESC: arg is a VG name.
DESC: arg is a VG name (variant, infers --type thin).
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin
---
lvcreate --size SizeMB --virtualsize SizeMB VG
OO: --stripes Number, --stripesize SizeKB, --snapshot, --thin,
OO: --stripes Number, --stripesize SizeKB,
--type String, --snapshot, --thin,
OO_LVCREATE_THINPOOL, OO_LVCREATE_POOL, OO_LVCREATE
OP: PV ...
IO: --mirrors 0
ID: lvcreate_thin_vol_with_thinpool_or_sparse_snapshot
DESC: Create a thin LV, first creating a thin pool for it.
DESC: Create a thin LV, first creating a thin pool for it
DESC: (infers --type thin).
DESC: Create a sparse snapshot of a virtual origin LV
DESC: Chooses type thin or snapshot according to
DESC: (infers --type snapshot).
DESC: Chooses --type thin or --type snapshot according to
DESC: config setting sparse_segtype_default.
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: thin
AUTOTYPE: snapshot
---
@@ -1259,13 +1217,13 @@ DESC: which converts the new LV to type cache.
# (omits the --type cache option which is inferred)
lvcreate --size SizeMB --cachepool LV_cachepool VG
OO: --stripes Number, --stripesize SizeKB,
--cache, OO_LVCREATE_CACHE, OO_LVCREATE
--cache, --type cache, OO_LVCREATE_CACHE, OO_LVCREATE
OP: PV ...
ID: lvcreate_and_attach_cachepool_v2
DESC: Create a new LV, then attach the specified cachepool
DESC: which converts the new LV to type cache.
DESC: which converts the new LV to type cache
DESC: (variant, infers --type cache.)
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: cache
# alternate form of lvcreate --type cache
# (moves cachepool from option arg to position arg,
@@ -1302,7 +1260,7 @@ FLAGS: SECONDARY_SYNTAX
# the LV type is known.
lvcreate --cache --size SizeMB LV
OO: OO_LVCREATE_CACHE, OO_LVCREATE_POOL, OO_LVCREATE,
OO: --type cache, OO_LVCREATE_CACHE, OO_LVCREATE_POOL, OO_LVCREATE,
--stripes Number, --stripesize SizeKB
OP: PV ...
ID: lvcreate_new_plus_old_cachepool_or_lvconvert_old_plus_new_cachepool
@@ -1312,7 +1270,6 @@ DESC: (variant, use --type cache and --cachepool.)
DESC: When the LV arg is not a cachepool, then create a new cachepool
DESC: and attach it to the LV arg (alternative, use lvconvert.)
FLAGS: SECONDARY_SYNTAX
AUTOTYPE: cache
---
@@ -1430,7 +1387,6 @@ ID: lvmdevices_update
DESC: Update the devices file to fix incorrect values.
lvmdevices --adddev PV
OO: --deviceidtype String
ID: lvmdevices_edit
DESC: Add a device to the devices file.

View File

@@ -645,13 +645,6 @@ static int _is_desc_line(char *str)
return 0;
}
/*
 * Return 1 if the given cmd def line starts with the "AUTOTYPE:" keyword,
 * otherwise 0.
 *
 * Fix: compare the full 9-character keyword.  The previous length of 6
 * (apparently copied from the 6-char "FLAGS:" check) compared only
 * "AUTOTY", so any line beginning with those six characters would have
 * been misclassified as an AUTOTYPE line.
 */
static int _is_autotype_line(char *str)
{
	if (!strncmp(str, "AUTOTYPE:", 9))
		return 1;
	return 0;
}
static int _is_flags_line(char *str)
{
if (!strncmp(str, "FLAGS:", 6))
@@ -1216,19 +1209,6 @@ static void _add_flags(struct command *cmd, char *line)
cmd->cmd_flags |= CMD_FLAG_PREVIOUS_SYNTAX;
}
/*
 * Parse an "AUTOTYPE: <segtype>" cmd def line and record the segtype on
 * the command.  The first AUTOTYPE line fills cmd->autotype; a second
 * line fills cmd->autotype2 (at most two are supported).
 *
 * Fix: guard against a malformed bare "AUTOTYPE:" line; previously
 * line_argv[1] was read without checking line_argc, an out-of-bounds
 * read of uninitialized stack memory.
 */
static void _add_autotype(struct cmd_context *cmdtool, struct command *cmd, char *line)
{
	int line_argc;
	char *line_argv[MAX_LINE_ARGC];

	_split_line(line, &line_argc, line_argv, ' ');

	if (line_argc < 2)	/* keyword with no segtype arg */
		return;

	/* NOTE(review): dm_pool_strdup failure (NULL) is not checked here —
	   TODO confirm this matches the surrounding parsers' conventions */
	if (cmd->autotype)
		cmd->autotype2 = dm_pool_strdup(cmdtool->libmem, line_argv[1]);
	else
		cmd->autotype = dm_pool_strdup(cmdtool->libmem, line_argv[1]);
}
#define MAX_RULE_OPTS 64
static void _add_rule(struct cmd_context *cmdtool, struct command *cmd, char *line)
@@ -1555,11 +1535,6 @@ int define_commands(struct cmd_context *cmdtool, const char *run_name)
continue;
}
if (_is_autotype_line(line_argv[0]) && !skip && cmd) {
_add_autotype(cmdtool, cmd, line_orig);
continue;
}
if (_is_flags_line(line_argv[0]) && !skip && cmd) {
_add_flags(cmd, line_orig);
continue;
@@ -1976,14 +1951,6 @@ void print_usage(struct command *cmd, int longhelp, int desc_first)
goto op_count;
if (cmd->oo_count) {
if (cmd->autotype) {
printf("\n\t");
if (!cmd->autotype2)
printf("[ --type %s ] (implied)", cmd->autotype);
else
printf("[ --type %s|%s ] (implied)", cmd->autotype, cmd->autotype2);
}
if (include_extents) {
printf("\n\t[ -l|--extents ");
_print_val_usage(cmd, extents_ARG, opt_names[extents_ARG].val_enum);
@@ -2146,7 +2113,6 @@ void print_usage_common_lvm(struct command_name *cname, struct command *cmd)
void print_usage_common_cmd(struct command_name *cname, struct command *cmd)
{
int oo, opt_enum;
int found_common_command = 0;
/*
* when there's more than one variant, options that
@@ -2156,18 +2122,6 @@ void print_usage_common_cmd(struct command_name *cname, struct command *cmd)
if (cname->variants < 2)
return;
for (opt_enum = 0; opt_enum < ARG_COUNT; opt_enum++) {
if (!cname->common_options[opt_enum])
continue;
if (_is_lvm_all_opt(opt_enum))
continue;
found_common_command = 1;
break;
}
if (!found_common_command)
return;
printf(" Common options for command:");
/* print options with short opts */
@@ -2226,7 +2180,7 @@ void print_usage_common_cmd(struct command_name *cname, struct command *cmd)
printf(" ]");
}
printf("\n\n");
printf(".P\n");
}
void print_usage_notes(struct command_name *cname)
@@ -2535,7 +2489,7 @@ static const char *_man_long_opt_name(const char *cmdname, int opt_enum)
}
if (strchr(long_opt, '[')) {
for (i = 0; i < sizeof(long_opt_name) - 1; ++long_opt, ++i) {
for (i = 0; i < sizeof(long_opt_name); ++long_opt, ++i) {
if (i < (sizeof(long_opt_name) - 8))
switch(*long_opt) {
case '[':
@@ -2773,15 +2727,6 @@ static void _print_man_usage(char *lvmname, struct command *cmd)
printf(".RS 4\n");
printf(".ad l\n");
if (cmd->autotype) {
if (!cmd->autotype2)
printf("[ \\fB--type %s\\fP ] (implied)\n", cmd->autotype);
else
printf("[ \\fB--type %s\\fP|\\fB%s\\fP ] (implied)\n", cmd->autotype, cmd->autotype2);
printf(".br\n");
sep = 1;
}
if (include_extents) {
/*
* NB we don't just pass extents_VAL here because the
@@ -3007,7 +2952,6 @@ static void _print_man_usage_common_cmd(struct command *cmd)
{
struct command_name *cname;
int i, sep, oo, opt_enum;
int found_common_command = 0;
if (!(cname = _find_command_name(cmd->name)))
return;
@@ -3015,18 +2959,6 @@ static void _print_man_usage_common_cmd(struct command *cmd)
if (cname->variants < 2)
return;
for (opt_enum = 0; opt_enum < ARG_COUNT; opt_enum++) {
if (!cname->common_options[opt_enum])
continue;
if (_is_lvm_all_opt(opt_enum))
continue;
found_common_command = 1;
break;
}
if (!found_common_command)
return;
printf("Common options for command:\n");
printf(".\n");
@@ -3722,211 +3654,6 @@ static void _print_man_secondary(char *name)
}
}
/*
 * Print one line to stdout: the given prefix followed by the long
 * option name of every option enum in opt_list.
 */
static void _print_opt_list(const char *prefix, int *opt_list, int opt_count)
{
	int i;

	printf("%s ", prefix);

	for (i = 0; i < opt_count; i++)
		printf(" %s", opt_names[opt_list[i]].long_opt);

	printf("\n");
}
/*
 * Compare two lists of option enums as unordered sets.
 *
 * Return 0 when the lists match, 1 when they differ.  As a special
 * case, two --type options only match when their segtype strings
 * (type1_str/type2_str, either may be NULL) are equal, so "--type foo"
 * and "--type bar" are treated as different.
 */
static int _compare_opt_lists(int *list1, int count1, int *list2, int count2,
			      const char *type1_str, const char *type2_str)
{
	int i, j, found;

	if (count1 != count2)
		return 1;

	for (i = 0; i < count1; i++) {
		found = 0;

		for (j = 0; j < count2; j++) {
			/* lists do not match if one has --type foo and the other --type bar */
			if ((list1[i] == type_ARG) && (list2[j] == type_ARG) &&
			    type1_str && type2_str && strcmp(type1_str, type2_str))
				return 1;

			if (list1[i] == list2[j]) {
				found = 1;
				break;
			}
		}

		/* list1[i] has no counterpart in list2 */
		if (!found)
			return 1;
	}

	return 0;
}
/*
 * Check whether two cmd defs (which share a command name) are ambiguous,
 * i.e. whether a single user command line could match both of them.
 *
 * Returns 1 when the cmd defs are distinguishable, 0 when a conflict was
 * found (conflicts are also logged).  all_req_opts[opt] is nonzero for
 * any option that appears among the required options of some command;
 * only those options are considered when building the comparison lists.
 */
static int _compare_cmds(struct command *cmd1, struct command *cmd2, int *all_req_opts)
{
	const char *cmd1_type_str = NULL;
	const char *cmd2_type_str = NULL;
	int opt_list_1[ARG_COUNT] = { 0 };
	int opt_list_2[ARG_COUNT] = { 0 };
	int opt_count_1 = 0;
	int opt_count_2 = 0;
	int i, j;
	int r = 1;

	/* different number of required pos items means different cmds */
	if (cmd1->rp_count != cmd2->rp_count)
		return 1;

	/* different types of required pos items means different cmds */
	for (i = 0; i < cmd1->rp_count; i++) {
		if (cmd1->required_pos_args[i].def.val_bits != cmd2->required_pos_args[i].def.val_bits)
			return 1;
	}

	/* create opt list from cmd1 */
	for (i = 0; i < cmd1->ro_count; i++) {
		if (!all_req_opts[cmd1->required_opt_args[i].opt])
			continue;
		opt_list_1[opt_count_1++] = cmd1->required_opt_args[i].opt;
		/* remember a required --type's segtype string for comparison below */
		if (cmd1->required_opt_args[i].opt == type_ARG)
			cmd1_type_str = cmd1->required_opt_args[i].def.str;
	}

	/* create opt list from cmd2 */
	for (i = 0; i < cmd2->ro_count; i++) {
		if (!all_req_opts[cmd2->required_opt_args[i].opt])
			continue;
		opt_list_2[opt_count_2++] = cmd2->required_opt_args[i].opt;
		if (cmd2->required_opt_args[i].opt == type_ARG)
			cmd2_type_str = cmd2->required_opt_args[i].def.str;
	}

	/* "--type foo" and "--type bar" are different */
	if (cmd1_type_str && cmd2_type_str && strcmp(cmd1_type_str, cmd2_type_str))
		return 1;

	/* compare opt_list_1 and opt_list_2 */
	if (!_compare_opt_lists(opt_list_1, opt_count_1, opt_list_2, opt_count_2, NULL, NULL)) {
		log_error("Repeated commands %s %s", cmd1->command_id, cmd2->command_id);
		log_error("cmd1: %s", cmd1->desc);
		log_error("cmd2: %s", cmd2->desc);
		_print_opt_list("cmd1 options: ", opt_list_1, opt_count_1);
		_print_opt_list("cmd2 options: ", opt_list_2, opt_count_2);
		printf("\n");
		r = 0;
	}

	/* check if cmd1 matches cmd2 + one of its oo */
	for (i = 0; i < cmd2->oo_count; i++) {
		/* for each cmd2 optional_opt_arg, add it to opt_list_2
		   and compare opt_list_1 and opt_list_2 again */

		/* cmd1 "--type foo" and cmd2 OO "--type bar" are different */
		if (cmd2->optional_opt_args[i].opt == type_ARG) {
			if (cmd2->optional_opt_args[i].def.str && cmd1_type_str &&
			    strcmp(cmd2->optional_opt_args[i].def.str, cmd1_type_str))
				return 1;
		}

		/* the spare slot past opt_count_2 holds the extra OO only for
		   this iteration; opt_count_2 itself is not incremented */
		opt_list_2[opt_count_2] = cmd2->optional_opt_args[i].opt;

		if (!_compare_opt_lists(opt_list_1, opt_count_1, opt_list_2, opt_count_2+1, NULL, NULL)) {
			log_error("Repeated commands %s %s", cmd1->command_id, cmd2->command_id);
			log_error("cmd1: %s", cmd1->desc);
			log_error("cmd2: %s", cmd2->desc);
			log_error("Included cmd2 OO: %s", opt_names[cmd2->optional_opt_args[i].opt].long_opt);
			_print_opt_list("cmd1 options: ", opt_list_1, opt_count_1);
			_print_opt_list("cmd2 options: ", opt_list_2, opt_count_2+1);
			printf("\n");
			r = 0;
		}
	}

	/* check if cmd1 + an oo matches cmd2 + an oo */

	/* if --type was not required, pick up its segtype from the optional opts */
	if (!cmd1_type_str) {
		for (i = 0; i < cmd1->oo_count; i++) {
			if (cmd1->optional_opt_args[i].opt == type_ARG)
				cmd1_type_str = cmd1->optional_opt_args[i].def.str;
		}
	}
	if (!cmd2_type_str) {
		for (j = 0; j < cmd2->oo_count; j++) {
			if (cmd2->optional_opt_args[j].opt == type_ARG)
				cmd2_type_str = cmd2->optional_opt_args[j].def.str;
		}
	}

	for (i = 0; i < cmd1->oo_count; i++) {
		for (j = 0; j < cmd2->oo_count; j++) {
			/* adding the same option to both lists cannot create a new conflict */
			if (cmd1->optional_opt_args[i].opt == cmd2->optional_opt_args[j].opt)
				continue;

			opt_list_1[opt_count_1] = cmd1->optional_opt_args[i].opt;
			opt_list_2[opt_count_2] = cmd2->optional_opt_args[j].opt;

			if (!_compare_opt_lists(opt_list_1, opt_count_1+1, opt_list_2, opt_count_2+1, cmd1_type_str, cmd2_type_str)) {
				log_error("Repeated commands %s %s", cmd1->command_id, cmd2->command_id);
				log_error("cmd1: %s", cmd1->desc);
				log_error("cmd2: %s", cmd2->desc);
				log_error("Included cmd1 OO: %s and cmd2 OO: %s",
					  opt_names[cmd1->optional_opt_args[i].opt].long_opt,
					  opt_names[cmd2->optional_opt_args[j].opt].long_opt);
				_print_opt_list("cmd1 options: ", opt_list_1, opt_count_1+1);
				_print_opt_list("cmd2 options: ", opt_list_2, opt_count_2+1);
				printf("\n");
				r = 0;
			}
		}
	}

	return r;
}
/*
 * Detect ambiguous command definitions: two entries in commands[] that
 * share the same command name and whose option specs could match the
 * same command line.  Each same-named pair (skipping entries that use
 * any_ro_count) is handed to _compare_cmds() together with a table of
 * every option that is required by at least one command.
 *
 * Returns 1 when no overlap is found, 0 when any pair overlaps.
 */
static int _check_overlap(void)
{
	int all_req_opts[ARG_COUNT] = { 0 };
	struct command *ca, *cb;
	int ci, cj, oi;
	int ok = 1;

	/* First pass: mark every option that appears as a required opt arg
	   in any command definition. */
	for (ci = 0; ci < COMMAND_COUNT; ci++) {
		ca = &commands[ci];
		for (oi = 0; oi < ca->ro_count; oi++)
			all_req_opts[ca->required_opt_args[oi].opt] = 1;
	}

	/* Second pass: compare every ordered pair of distinct, same-named
	   commands.  Commands using "any required opts" semantics are
	   excluded from the check. */
	for (ci = 0; ci < COMMAND_COUNT; ci++) {
		ca = &commands[ci];

		if (ca->any_ro_count)
			continue;

		for (cj = 0; cj < COMMAND_COUNT; cj++) {
			if (cj == ci)
				continue;

			cb = &commands[cj];

			if (cb->any_ro_count)
				continue;

			if (strcmp(ca->name, cb->name) != 0)
				continue;

			if (!_compare_cmds(ca, cb, all_req_opts))
				ok = 0;
		}
	}

	return ok;
}
#define STDOUT_BUF_SIZE (MAX_MAN_DESC + 4 * 1024)
int main(int argc, char *argv[])
@@ -3937,14 +3664,12 @@ int main(int argc, char *argv[])
char *stdout_buf;
int primary = 0;
int secondary = 0;
int check = 0;
int r = 0;
size_t sz = STDOUT_BUF_SIZE;
static struct option long_options[] = {
{"primary", no_argument, 0, 'p' },
{"secondary", no_argument, 0, 's' },
{"check", no_argument, 0, 'c' },
{0, 0, 0, 0 }
};
@@ -3959,7 +3684,7 @@ int main(int argc, char *argv[])
int c;
int option_index = 0;
c = getopt_long(argc, argv, "psc", long_options, &option_index);
c = getopt_long(argc, argv, "ps", long_options, &option_index);
if (c == -1)
break;
@@ -3972,14 +3697,11 @@ int main(int argc, char *argv[])
case 's':
secondary = 1;
break;
case 'c':
check = 1;
break;
}
}
if (!primary && !secondary && !check) {
log_error("Usage: %s --primary|--secondary|--check <command> [/path/to/description-file].", argv[0]);
if (!primary && !secondary) {
log_error("Usage: %s --primary|--secondary <command> [/path/to/description-file].", argv[0]);
goto out_free;
}
@@ -3988,7 +3710,7 @@ int main(int argc, char *argv[])
log_error("Out of memory.");
goto out_free;
}
} else if (!check) {
} else {
log_error("Missing command name.");
goto out_free;
}
@@ -3998,8 +3720,7 @@ int main(int argc, char *argv[])
define_commands(&cmdtool, NULL);
if (!check)
configure_command_option_values(cmdname);
configure_command_option_values(cmdname);
factor_common_options();
@@ -4008,8 +3729,6 @@ int main(int argc, char *argv[])
else if (secondary) {
r = 1;
_print_man_secondary(cmdname);
} else if (check) {
r = _check_overlap();
}
out_free:

Some files were not shown because too many files have changed in this diff Show More