1
0
mirror of git://sourceware.org/git/lvm2.git synced 2024-12-21 13:34:40 +03:00

Add lvmlockd

This commit is contained in:
David Teigland 2015-03-05 14:00:44 -06:00
parent a32d5a4afc
commit fe70b03de2
94 changed files with 15680 additions and 441 deletions

246
configure vendored
View File

@ -636,6 +636,7 @@ kerneldir
interface
CMIRRORD_PIDFILE
CLVMD_PIDFILE
LVMLOCKD_PIDFILE
LVMPOLLD_PIDFILE
LVMETAD_PIDFILE
DMEVENTD_PIDFILE
@ -693,6 +694,7 @@ DMEVENTD_PATH
DMEVENTD
DL_LIBS
DEVMAPPER
DEFAULT_USE_LVMLOCKD
DEFAULT_USE_LVMPOLLD
DEFAULT_USE_LVMETAD
DEFAULT_USE_BLKID_WIPING
@ -722,6 +724,7 @@ CLDWHOLEARCHIVE
CLDNOWHOLEARCHIVE
CLDFLAGS
CACHE
BUILD_LVMLOCKD
BUILD_LVMPOLLD
BUILD_LVMETAD
BUILD_DMEVENTD
@ -740,6 +743,10 @@ SYSTEMD_LIBS
SYSTEMD_CFLAGS
BLKID_LIBS
BLKID_CFLAGS
LOCKD_DLM_LIBS
LOCKD_DLM_CFLAGS
LOCKD_SANLOCK_LIBS
LOCKD_SANLOCK_CFLAGS
VALGRIND_LIBS
VALGRIND_CFLAGS
CUNIT_LIBS
@ -916,6 +923,9 @@ with_lvmetad_pidfile
enable_lvmpolld
enable_use_lvmpolld
with_lvmpolld_pidfile
enable_lvmlockd
enable_use_lvmlockd
with_lvmlockd_pidfile
enable_blkid_wiping
enable_udev_systemd_background_jobs
enable_udev_sync
@ -994,6 +1004,10 @@ CUNIT_CFLAGS
CUNIT_LIBS
VALGRIND_CFLAGS
VALGRIND_LIBS
LOCKD_SANLOCK_CFLAGS
LOCKD_SANLOCK_LIBS
LOCKD_DLM_CFLAGS
LOCKD_DLM_LIBS
BLKID_CFLAGS
BLKID_LIBS
SYSTEMD_CFLAGS
@ -1632,6 +1646,8 @@ Optional Features:
--disable-use-lvmetad disable usage of LVM Metadata Daemon
--enable-lvmpolld enable the LVM Polling Daemon
--disable-use-lvmpolld disable usage of LVM Poll Daemon
--enable-lvmlockd enable the LVM lock daemon
--disable-use-lvmlockd disable usage of LVM lock daemon
--disable-blkid_wiping disable libblkid detection of signatures when wiping
and use native code instead
--disable-udev-systemd-background-jobs
@ -1725,6 +1741,8 @@ Optional Packages:
lvmetad pidfile [PID_DIR/lvmetad.pid]
--with-lvmpolld-pidfile=PATH
lvmpolld pidfile [PID_DIR/lvmpolld.pid]
--with-lvmlockd-pidfile=PATH
lvmlockd pidfile [PID_DIR/lvmlockd.pid]
--with-localedir=DIR locale-dependent data [DATAROOTDIR/locale]
--with-confdir=DIR configuration files in DIR [/etc]
--with-staticdir=DIR static binaries in DIR [EPREFIX/sbin]
@ -1809,6 +1827,14 @@ Some influential environment variables:
C compiler flags for VALGRIND, overriding pkg-config
VALGRIND_LIBS
linker flags for VALGRIND, overriding pkg-config
LOCKD_SANLOCK_CFLAGS
C compiler flags for LOCKD_SANLOCK, overriding pkg-config
LOCKD_SANLOCK_LIBS
linker flags for LOCKD_SANLOCK, overriding pkg-config
LOCKD_DLM_CFLAGS
C compiler flags for LOCKD_DLM, overriding pkg-config
LOCKD_DLM_LIBS
linker flags for LOCKD_DLM, overriding pkg-config
BLKID_CFLAGS
C compiler flags for BLKID, overriding pkg-config
BLKID_LIBS linker flags for BLKID, overriding pkg-config
@ -3042,6 +3068,7 @@ case "$host_os" in
DEVMAPPER=yes
LVMETAD=no
LVMPOLLD=no
LVMLOCKD=no
ODIRECT=yes
DM_IOCTLS=yes
SELINUX=yes
@ -10966,6 +10993,207 @@ _ACEOF
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build lvmlockd" >&5
$as_echo_n "checking whether to build lvmlockd... " >&6; }
# Check whether --enable-lvmlockd was given.
if test "${enable_lvmlockd+set}" = set; then :
enableval=$enable_lvmlockd; LVMLOCKD=$enableval
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LVMLOCKD" >&5
$as_echo "$LVMLOCKD" >&6; }
BUILD_LVMLOCKD=$LVMLOCKD
if test "$BUILD_LVMLOCKD" = yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking defaults for use_lvmlockd" >&5
$as_echo_n "checking defaults for use_lvmlockd... " >&6; }
# Check whether --enable-use_lvmlockd was given.
if test "${enable_use_lvmlockd+set}" = set; then :
enableval=$enable_use_lvmlockd; case ${enableval} in
yes) DEFAULT_USE_LVMLOCKD=1 ;;
*) DEFAULT_USE_LVMLOCKD=0 ;;
esac
else
DEFAULT_USE_LVMLOCKD=1
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $DEFAULT_USE_LVMLOCKD" >&5
$as_echo "$DEFAULT_USE_LVMLOCKD" >&6; }
$as_echo "#define LVMLOCKD_SUPPORT 1" >>confdefs.h
# Check whether --with-lvmlockd-pidfile was given.
if test "${with_lvmlockd_pidfile+set}" = set; then :
withval=$with_lvmlockd_pidfile; LVMLOCKD_PIDFILE=$withval
else
LVMLOCKD_PIDFILE="$DEFAULT_PID_DIR/lvmlockd.pid"
fi
cat >>confdefs.h <<_ACEOF
#define LVMLOCKD_PIDFILE "$LVMLOCKD_PIDFILE"
_ACEOF
else
DEFAULT_USE_LVMLOCKD=0
fi
cat >>confdefs.h <<_ACEOF
#define DEFAULT_USE_LVMLOCKD $DEFAULT_USE_LVMLOCKD
_ACEOF
################################################################################
if test "$BUILD_LVMLOCKD" = yes; then
pkg_failed=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LOCKD_SANLOCK" >&5
$as_echo_n "checking for LOCKD_SANLOCK... " >&6; }
if test -n "$LOCKD_SANLOCK_CFLAGS"; then
pkg_cv_LOCKD_SANLOCK_CFLAGS="$LOCKD_SANLOCK_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libsanlock_client\""; } >&5
($PKG_CONFIG --exists --print-errors "libsanlock_client") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_LOCKD_SANLOCK_CFLAGS=`$PKG_CONFIG --cflags "libsanlock_client" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test -n "$LOCKD_SANLOCK_LIBS"; then
pkg_cv_LOCKD_SANLOCK_LIBS="$LOCKD_SANLOCK_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libsanlock_client\""; } >&5
($PKG_CONFIG --exists --print-errors "libsanlock_client") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_LOCKD_SANLOCK_LIBS=`$PKG_CONFIG --libs "libsanlock_client" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test $pkg_failed = yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
_pkg_short_errors_supported=yes
else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
LOCKD_SANLOCK_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libsanlock_client" 2>&1`
else
LOCKD_SANLOCK_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libsanlock_client" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$LOCKD_SANLOCK_PKG_ERRORS" >&5
$bailout
elif test $pkg_failed = untried; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
$bailout
else
LOCKD_SANLOCK_CFLAGS=$pkg_cv_LOCKD_SANLOCK_CFLAGS
LOCKD_SANLOCK_LIBS=$pkg_cv_LOCKD_SANLOCK_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
HAVE_LOCKD_SANLOCK=yes
fi
pkg_failed=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for LOCKD_DLM" >&5
$as_echo_n "checking for LOCKD_DLM... " >&6; }
if test -n "$LOCKD_DLM_CFLAGS"; then
pkg_cv_LOCKD_DLM_CFLAGS="$LOCKD_DLM_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libdlm\""; } >&5
($PKG_CONFIG --exists --print-errors "libdlm") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_LOCKD_DLM_CFLAGS=`$PKG_CONFIG --cflags "libdlm" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test -n "$LOCKD_DLM_LIBS"; then
pkg_cv_LOCKD_DLM_LIBS="$LOCKD_DLM_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libdlm\""; } >&5
($PKG_CONFIG --exists --print-errors "libdlm") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_LOCKD_DLM_LIBS=`$PKG_CONFIG --libs "libdlm" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test $pkg_failed = yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
_pkg_short_errors_supported=yes
else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
LOCKD_DLM_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libdlm" 2>&1`
else
LOCKD_DLM_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libdlm" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$LOCKD_DLM_PKG_ERRORS" >&5
$bailout
elif test $pkg_failed = untried; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
$bailout
else
LOCKD_DLM_CFLAGS=$pkg_cv_LOCKD_DLM_CFLAGS
LOCKD_DLM_LIBS=$pkg_cv_LOCKD_DLM_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
HAVE_LOCKD_DLM=yes
fi
fi
################################################################################
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable libblkid detection of signatures when wiping" >&5
$as_echo_n "checking whether to enable libblkid detection of signatures when wiping... " >&6; }
# Check whether --enable-blkid_wiping was given.
@ -13282,12 +13510,15 @@ LVM_LIBAPI=`echo "$VER" | $AWK -F '[()]' '{print $2}'`
################################################################################
ac_config_files="$ac_config_files Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/lvmetad/Makefile daemons/lvmpolld/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/format1/Makefile lib/format_pool/Makefile lib/locking/Makefile lib/mirror/Makefile lib/replicator/Makefile lib/misc/lvm-version.h lib/raid/Makefile lib/snapshot/Makefile lib/thin/Makefile lib/cache_segtype/Makefile libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service scripts/lvm2_pvscan_systemd_red_hat@.service scripts/lvm2_tmpfiles_red_hat.conf scripts/Makefile test/Makefile test/api/Makefile test/unit/Makefile 
tools/Makefile udev/Makefile unit-tests/datastruct/Makefile unit-tests/regex/Makefile unit-tests/mm/Makefile"
ac_config_files="$ac_config_files Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/lvmetad/Makefile daemons/lvmpolld/Makefile daemons/lvmlockd/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/format1/Makefile lib/format_pool/Makefile lib/locking/Makefile lib/mirror/Makefile lib/replicator/Makefile lib/misc/lvm-version.h lib/raid/Makefile lib/snapshot/Makefile lib/thin/Makefile lib/cache_segtype/Makefile libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_lvmlockd_systemd_red_hat.service scripts/lvm2_lvmlocking_systemd_red_hat.service scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service 
scripts/lvm2_pvscan_systemd_red_hat@.service scripts/lvm2_tmpfiles_red_hat.conf scripts/Makefile test/Makefile test/api/Makefile test/unit/Makefile tools/Makefile udev/Makefile unit-tests/datastruct/Makefile unit-tests/regex/Makefile unit-tests/mm/Makefile"
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
@ -13997,6 +14228,7 @@ do
"daemons/dmeventd/plugins/thin/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/dmeventd/plugins/thin/Makefile" ;;
"daemons/lvmetad/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/lvmetad/Makefile" ;;
"daemons/lvmpolld/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/lvmpolld/Makefile" ;;
"daemons/lvmlockd/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/lvmlockd/Makefile" ;;
"conf/Makefile") CONFIG_FILES="$CONFIG_FILES conf/Makefile" ;;
"conf/example.conf") CONFIG_FILES="$CONFIG_FILES conf/example.conf" ;;
"conf/lvmlocal.conf") CONFIG_FILES="$CONFIG_FILES conf/lvmlocal.conf" ;;
@ -14043,6 +14275,8 @@ do
"scripts/lvm2_lvmpolld_init_red_hat") CONFIG_FILES="$CONFIG_FILES scripts/lvm2_lvmpolld_init_red_hat" ;;
"scripts/lvm2_lvmpolld_systemd_red_hat.service") CONFIG_FILES="$CONFIG_FILES scripts/lvm2_lvmpolld_systemd_red_hat.service" ;;
"scripts/lvm2_lvmpolld_systemd_red_hat.socket") CONFIG_FILES="$CONFIG_FILES scripts/lvm2_lvmpolld_systemd_red_hat.socket" ;;
"scripts/lvm2_lvmlockd_systemd_red_hat.service") CONFIG_FILES="$CONFIG_FILES scripts/lvm2_lvmlockd_systemd_red_hat.service" ;;
"scripts/lvm2_lvmlocking_systemd_red_hat.service") CONFIG_FILES="$CONFIG_FILES scripts/lvm2_lvmlocking_systemd_red_hat.service" ;;
"scripts/lvm2_monitoring_init_red_hat") CONFIG_FILES="$CONFIG_FILES scripts/lvm2_monitoring_init_red_hat" ;;
"scripts/lvm2_monitoring_systemd_red_hat.service") CONFIG_FILES="$CONFIG_FILES scripts/lvm2_monitoring_systemd_red_hat.service" ;;
"scripts/lvm2_pvscan_systemd_red_hat@.service") CONFIG_FILES="$CONFIG_FILES scripts/lvm2_pvscan_systemd_red_hat@.service" ;;
@ -14668,3 +14902,13 @@ if test "$ODIRECT" != yes; then :
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: O_DIRECT disabled: low-memory pvmove may lock up" >&5
$as_echo "$as_me: WARNING: O_DIRECT disabled: low-memory pvmove may lock up" >&2;}
fi
# Warn (but do not fail) when lvmlockd is enabled without the companion
# daemons it depends on.  Note: POSIX test(1) uses "=", not "==" — the
# "==" form is a bashism that breaks on non-bash /bin/sh (dash, posh).
if test "$BUILD_LVMLOCKD" = yes && test "$BUILD_LVMPOLLD" = no; then :
  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: lvmlockd requires lvmpolld" >&5
$as_echo "$as_me: WARNING: lvmlockd requires lvmpolld" >&2;}
fi

if test "$BUILD_LVMLOCKD" = yes && test "$BUILD_LVMETAD" = no; then :
  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: lvmlockd requires lvmetad" >&5
$as_echo "$as_me: WARNING: lvmlockd requires lvmetad" >&2;}
fi

View File

@ -39,6 +39,7 @@ case "$host_os" in
DEVMAPPER=yes
LVMETAD=no
LVMPOLLD=no
LVMLOCKD=no
ODIRECT=yes
DM_IOCTLS=yes
SELINUX=yes
@ -1138,6 +1139,50 @@ AC_DEFINE_UNQUOTED(DEFAULT_USE_LVMPOLLD, [$DEFAULT_USE_LVMPOLLD],
[Use lvmpolld by default.])
################################################################################
dnl -- Build lvmlockd
AC_MSG_CHECKING(whether to build lvmlockd)
AC_ARG_ENABLE(lvmlockd,
AC_HELP_STRING([--enable-lvmlockd],
[enable the LVM lock daemon]),
LVMLOCKD=$enableval)
AC_MSG_RESULT($LVMLOCKD)
BUILD_LVMLOCKD=$LVMLOCKD
if test "$BUILD_LVMLOCKD" = yes; then
AC_MSG_CHECKING([defaults for use_lvmlockd])
AC_ARG_ENABLE(use_lvmlockd,
AC_HELP_STRING([--disable-use-lvmlockd],
[disable usage of LVM lock daemon]),
[case ${enableval} in
yes) DEFAULT_USE_LVMLOCKD=1 ;;
*) DEFAULT_USE_LVMLOCKD=0 ;;
esac], DEFAULT_USE_LVMLOCKD=1)
AC_MSG_RESULT($DEFAULT_USE_LVMLOCKD)
AC_DEFINE([LVMLOCKD_SUPPORT], 1, [Define to 1 to include code that uses lvmlockd.])
AC_ARG_WITH(lvmlockd-pidfile,
AC_HELP_STRING([--with-lvmlockd-pidfile=PATH],
[lvmlockd pidfile [PID_DIR/lvmlockd.pid]]),
LVMLOCKD_PIDFILE=$withval,
LVMLOCKD_PIDFILE="$DEFAULT_PID_DIR/lvmlockd.pid")
AC_DEFINE_UNQUOTED(LVMLOCKD_PIDFILE, ["$LVMLOCKD_PIDFILE"],
[Path to lvmlockd pidfile.])
else
DEFAULT_USE_LVMLOCKD=0
fi
AC_DEFINE_UNQUOTED(DEFAULT_USE_LVMLOCKD, [$DEFAULT_USE_LVMLOCKD],
[Use lvmlockd by default.])
################################################################################
dnl -- Look for sanlock and dlm libraries
if test "$BUILD_LVMLOCKD" = yes; then
PKG_CHECK_MODULES(LOCKD_SANLOCK, libsanlock_client, [HAVE_LOCKD_SANLOCK=yes], $bailout)
PKG_CHECK_MODULES(LOCKD_DLM, libdlm, [HAVE_LOCKD_DLM=yes], $bailout)
fi
################################################################################
dnl -- Enable blkid wiping functionality
AC_MSG_CHECKING(whether to enable libblkid detection of signatures when wiping)
AC_ARG_ENABLE(blkid_wiping,
@ -1758,6 +1803,7 @@ AC_SUBST(BUILD_CMIRRORD)
AC_SUBST(BUILD_DMEVENTD)
AC_SUBST(BUILD_LVMETAD)
AC_SUBST(BUILD_LVMPOLLD)
AC_SUBST(BUILD_LVMLOCKD)
AC_SUBST(CACHE)
AC_SUBST(CFLAGS)
AC_SUBST(CFLOW_CMD)
@ -1798,6 +1844,7 @@ AC_SUBST(DEFAULT_SYS_DIR)
AC_SUBST(DEFAULT_USE_BLKID_WIPING)
AC_SUBST(DEFAULT_USE_LVMETAD)
AC_SUBST(DEFAULT_USE_LVMPOLLD)
AC_SUBST(DEFAULT_USE_LVMLOCKD)
AC_SUBST(DEVMAPPER)
AC_SUBST(DLM_CFLAGS)
AC_SUBST(DLM_LIBS)
@ -1875,6 +1922,7 @@ AC_SUBST(WRITE_INSTALL)
AC_SUBST(DMEVENTD_PIDFILE)
AC_SUBST(LVMETAD_PIDFILE)
AC_SUBST(LVMPOLLD_PIDFILE)
AC_SUBST(LVMLOCKD_PIDFILE)
AC_SUBST(CLVMD_PIDFILE)
AC_SUBST(CMIRRORD_PIDFILE)
AC_SUBST(interface)
@ -1909,6 +1957,7 @@ daemons/dmeventd/plugins/snapshot/Makefile
daemons/dmeventd/plugins/thin/Makefile
daemons/lvmetad/Makefile
daemons/lvmpolld/Makefile
daemons/lvmlockd/Makefile
conf/Makefile
conf/example.conf
conf/lvmlocal.conf
@ -1955,6 +2004,8 @@ scripts/lvm2_lvmetad_systemd_red_hat.socket
scripts/lvm2_lvmpolld_init_red_hat
scripts/lvm2_lvmpolld_systemd_red_hat.service
scripts/lvm2_lvmpolld_systemd_red_hat.socket
scripts/lvm2_lvmlockd_systemd_red_hat.service
scripts/lvm2_lvmlocking_systemd_red_hat.service
scripts/lvm2_monitoring_init_red_hat
scripts/lvm2_monitoring_systemd_red_hat.service
scripts/lvm2_pvscan_systemd_red_hat@.service
@ -1982,3 +2033,9 @@ AS_IF([test -n "$CACHE_CONFIGURE_WARN"],
AS_IF([test "$ODIRECT" != yes],
[AC_MSG_WARN([O_DIRECT disabled: low-memory pvmove may lock up])])
dnl -- Warn when lvmlockd is enabled without the daemons it relies on.
dnl    Use the portable "=" operator of test(1); "==" is a bashism and
dnl    fails under strictly POSIX /bin/sh implementations.
AS_IF([test "$BUILD_LVMLOCKD" = yes && test "$BUILD_LVMPOLLD" = no],
	[AC_MSG_WARN([lvmlockd requires lvmpolld])])

AS_IF([test "$BUILD_LVMLOCKD" = yes && test "$BUILD_LVMETAD" = no],
	[AC_MSG_WARN([lvmlockd requires lvmetad])])

View File

@ -15,7 +15,7 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
.PHONY: dmeventd clvmd cmirrord lvmetad lvmpolld
.PHONY: dmeventd clvmd cmirrord lvmetad lvmpolld lvmlockd
ifneq ("@CLVMD@", "none")
SUBDIRS += clvmd
@ -40,8 +40,12 @@ ifeq ("@BUILD_LVMPOLLD@", "yes")
SUBDIRS += lvmpolld
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
SUBDIRS += lvmlockd
endif
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS = clvmd cmirrord dmeventd lvmetad lvmpolld
SUBDIRS = clvmd cmirrord dmeventd lvmetad lvmpolld lvmlockd
endif
include $(top_builddir)/make.tmpl

View File

@ -0,0 +1,53 @@
#
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU Lesser General Public License v.2.1.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@

SOURCES = \
	lvmlockd-core.c \
	lvmlockd-sanlock.c \
	lvmlockd-dlm.c

TARGETS = lvmlockd lvmlockctl

# All install targets are phony, not just install_lvmlockd; listing them
# keeps make from being confused by files of the same name.
.PHONY: install_lvmlockd install_lvmlockctl install_lvm2 install

include $(top_builddir)/make.tmpl

INCLUDES += -I$(top_srcdir)/libdaemon/server
LVMLIBS = -ldaemonserver $(LVMINTERNAL_LIBS) -ldevmapper

LIBS += $(PTHREAD_LIBS) -ldlm_lt -lsanlock_client -lrt

LDFLAGS += -L$(top_builddir)/libdaemon/server
CLDFLAGS += -L$(top_builddir)/libdaemon/server

lvmlockd: $(OBJECTS) $(top_builddir)/libdaemon/client/libdaemonclient.a \
	  $(top_builddir)/libdaemon/server/libdaemonserver.a
	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJECTS) $(LVMLIBS) $(LIBS)

lvmlockctl: lvmlockctl.o $(top_builddir)/libdaemon/client/libdaemonclient.a \
	  $(top_builddir)/libdaemon/server/libdaemonserver.a
	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ lvmlockctl.o $(LVMLIBS)

install_lvmlockd: lvmlockd
	$(INSTALL_PROGRAM) -D $< $(sbindir)/$(<F)

install_lvmlockctl: lvmlockctl
	$(INSTALL_PROGRAM) -D $< $(sbindir)/$(<F)

install_lvm2: install_lvmlockd install_lvmlockctl

install: install_lvm2

View File

@ -0,0 +1,635 @@
#define _GNU_SOURCE
#include "configure.h"
#include "lvmlockd-client.h"
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <sys/un.h>
/* Flags and values set from the command line in read_options(). */
static int quit;
static int info;
static int dump;
static int wait_opt;
static int force_opt;
static int gl_enable;
static int gl_disable;
static int stop_lockspaces;
static char *able_vg_name; /* VG name argument of --gl-enable / --gl-disable */

/* Abstract-namespace datagram socket on which the daemon's dump is received. */
#define DUMP_SOCKET_NAME "lvmlockd-dump.sock"
#define DUMP_BUF_SIZE (1024 * 1024)
static char dump_buf[DUMP_BUF_SIZE];
static int dump_len;
static struct sockaddr_un dump_addr;
static socklen_t dump_addrlen;

/* Handle for the connection to the lvmlockd daemon. */
daemon_handle _lvmlockd;

/* In this tool both debug and error messages go to stdout. */
#define log_debug(fmt, args...) \
do { \
printf(fmt "\n", ##args); \
} while (0)

#define log_error(fmt, args...) \
do { \
printf(fmt "\n", ##args); \
} while (0)

/* Maximum length of one line of the daemon's dump output. */
#define MAX_LINE 512

/* copied from lvmlockd-internal.h */
#define MAX_NAME 64
#define MAX_ARGS 64

/*
 * lvmlockd dumps the client info before the lockspaces,
 * so we can look up client info when printing lockspace info.
 */
#define MAX_CLIENTS 100

/* One record per "info=client" line parsed from the dump. */
struct client_info {
uint32_t client_id;
int pid;
char name[MAX_NAME+1];
};

static struct client_info clients[MAX_CLIENTS];
static int num_clients;
static void save_client_info(char *line)
{
uint32_t pid = 0;
int fd = 0;
int pi = 0;
uint32_t client_id = 0;
char name[MAX_NAME+1] = { 0 };
sscanf(line, "info=client pid=%u fd=%d pi=%d id=%u name=%s",
&pid, &fd, &pi, &client_id, name);
clients[num_clients].client_id = client_id;
clients[num_clients].pid = pid;
strcpy(clients[num_clients].name, name);
num_clients++;
}
/*
 * Look up a client saved by save_client_info() and copy its pid and
 * name into the caller's buffers.  The outputs are left untouched
 * when no entry matches the id.
 */
static void find_client_info(uint32_t client_id, uint32_t *pid, char *cl_name)
{
	int idx = 0;

	while (idx < num_clients) {
		if (clients[idx].client_id == client_id) {
			*pid = clients[idx].pid;
			strcpy(cl_name, clients[idx].name);
			break;
		}
		idx++;
	}
}
/*
 * Parse an "info=ls" (lockspace) dump line and print its VG/lockspace
 * summary.
 */
static void format_info_ls(char *line)
{
	char ls_name[MAX_NAME+1] = { 0 };
	char vg_name[MAX_NAME+1] = { 0 };
	char vg_uuid[MAX_NAME+1] = { 0 };
	char vg_sysid[MAX_NAME+1] = { 0 };
	char lock_args[MAX_ARGS+1] = { 0 };
	char lock_type[MAX_NAME+1] = { 0 };

	/* Field widths bound each copy to its 65-byte buffer; the previous
	   bare %s conversions could overflow on long daemon tokens. */
	sscanf(line, "info=ls ls_name=%64s vg_name=%64s vg_uuid=%64s vg_sysid=%64s vg_args=%64s lm_type=%64s",
	       ls_name, vg_name, vg_uuid, vg_sysid, lock_args, lock_type);

	printf("\n");

	printf("VG %s lock_type=%s %s\n", vg_name, lock_type, vg_uuid);

	printf("LS %s %s\n", lock_type, ls_name);
}
/*
 * Parse an "info=ls_action" dump line (a pending operation on a
 * lockspace) and print it with the owning client's pid/name.
 */
static void format_info_ls_action(char *line)
{
	uint32_t client_id = 0;
	char flags[MAX_NAME+1] = { 0 };
	char version[MAX_NAME+1] = { 0 };
	char op[MAX_NAME+1] = { 0 };
	uint32_t pid = 0;
	char cl_name[MAX_NAME+1] = { 0 };

	/* %64s bounds each string conversion to its 65-byte buffer. */
	sscanf(line, "info=ls_action client_id=%u %64s %64s op=%64s",
	       &client_id, flags, version, op);

	find_client_info(client_id, &pid, cl_name);

	/* NOTE(review): no trailing '\n' here, unlike the other print
	   paths — looks like an omission, confirm before changing output. */
	printf("OP %s pid %u (%s)", op, pid, cl_name);
}
/*
 * Parse an "info=r" (resource) dump line.  Held resources have their
 * name/type saved into the caller's buffers for the "info=lk" lines
 * that follow; unlocked resources are printed immediately.
 */
static void format_info_r(char *line, char *r_name_out, char *r_type_out)
{
	char r_name[MAX_NAME+1] = { 0 };
	char r_type[4] = { 0 };
	char mode[4] = { 0 };
	char sh_count[MAX_NAME+1] = { 0 };
	uint32_t ver = 0;

	/* %3s / %64s bound the conversions; bare %s into the 4-byte
	   r_type/mode buffers overflowed on any token longer than 3. */
	sscanf(line, "info=r name=%64s type=%3s mode=%3s %64s version=%u",
	       r_name, r_type, mode, sh_count, &ver);

	/* when mode is not un, wait and print each lk line */

	if (strcmp(mode, "un")) {
		strcpy(r_name_out, r_name);
		strcpy(r_type_out, r_type);
		return;
	}

	/* when mode is un, there will be no lk lines, so print now */

	if (!strcmp(r_type, "gl")) {
		printf("LK GL un ver %4u\n", ver);

	} else if (!strcmp(r_type, "vg")) {
		printf("LK VG un ver %4u\n", ver);

	} else if (!strcmp(r_type, "lv")) {
		printf("LK LV un %s\n", r_name);
	}
}
/*
 * Parse an "info=lk" (held lock) dump line and print it, using the
 * resource name/type captured from the preceding "info=r" line.
 */
static void format_info_lk(char *line, char *r_name, char *r_type)
{
	char mode[4] = { 0 };
	uint32_t ver = 0;
	char flags[MAX_NAME+1] = { 0 };
	uint32_t client_id = 0;
	uint32_t pid = 0;
	char cl_name[MAX_NAME+1] = { 0 };

	if (!r_name[0] || !r_type[0]) {
		printf("format_info_lk error r_name %s r_type %s\n", r_name, r_type);
		printf("%s\n", line);
		return;
	}

	/* %3s / %64s bound the conversions to mode[4] / flags[65]. */
	sscanf(line, "info=lk mode=%3s version=%u %64s client_id=%u",
	       mode, &ver, flags, &client_id);

	find_client_info(client_id, &pid, cl_name);

	if (!strcmp(r_type, "gl")) {
		printf("LK GL %s ver %4u pid %u (%s)\n", mode, ver, pid, cl_name);

	} else if (!strcmp(r_type, "vg")) {
		printf("LK VG %s ver %4u pid %u (%s)\n", mode, ver, pid, cl_name);

	} else if (!strcmp(r_type, "lv")) {
		printf("LK LV %s %s\n", mode, r_name);
	}
}
/*
 * Parse an "info=r_action" (pending lock operation) dump line and print
 * it, using the resource name/type from the preceding "info=r" line.
 */
static void format_info_r_action(char *line, char *r_name, char *r_type)
{
	uint32_t client_id = 0;
	char flags[MAX_NAME+1] = { 0 };
	char version[MAX_NAME+1] = { 0 };
	char op[MAX_NAME+1] = { 0 };
	char rt[4] = { 0 };
	char mode[4] = { 0 };
	char lm[MAX_NAME+1] = { 0 };
	char result[MAX_NAME+1] = { 0 };
	char lm_rv[MAX_NAME+1] = { 0 };
	uint32_t pid = 0;
	char cl_name[MAX_NAME+1] = { 0 };

	if (!r_name[0] || !r_type[0]) {
		printf("format_info_r_action error r_name %s r_type %s\n", r_name, r_type);
		printf("%s\n", line);
		return;
	}

	/* %3s / %64s bound the conversions; bare %s into rt[4]/mode[4]
	   overflowed on any token longer than 3 characters. */
	sscanf(line, "info=r_action client_id=%u %64s %64s op=%64s rt=%3s mode=%3s %64s %64s %64s",
	       &client_id, flags, version, op, rt, mode, lm, result, lm_rv);

	find_client_info(client_id, &pid, cl_name);

	if (strcmp(op, "lock")) {
		/* NOTE(review): no trailing '\n' here — confirm intent. */
		printf("OP %s pid %u (%s)", op, pid, cl_name);
		return;
	}

	if (!strcmp(r_type, "gl")) {
		printf("LW GL %s ver %4u pid %u (%s)\n", mode, 0, pid, cl_name);

	} else if (!strcmp(r_type, "vg")) {
		printf("LW VG %s ver %4u pid %u (%s)\n", mode, 0, pid, cl_name);

	} else if (!strcmp(r_type, "lv")) {
		printf("LW LV %s %s\n", mode, r_name);
	}
}
/*
 * Dispatch one dump line to the matching formatter.  The resource
 * name/type parsed from an "info=r" line must survive until the
 * following "info=lk"/"info=r_action" lines, which the daemon emits
 * without repeating the resource.
 */
static void format_info_line(char *line)
{
	/*
	 * Static so the values persist across calls: as plain locals
	 * (the original code) they were read uninitialized when an
	 * "info=lk" or "info=r_action" line arrived in a later call.
	 */
	static char r_name[MAX_NAME+1];
	static char r_type[MAX_NAME+1];

	if (!strncmp(line, "info=structs ", strlen("info=structs "))) {
		printf("%s\n", line);

	} else if (!strncmp(line, "info=client ", strlen("info=client "))) {
		save_client_info(line);

	} else if (!strncmp(line, "info=ls ", strlen("info=ls "))) {
		format_info_ls(line);

	} else if (!strncmp(line, "info=ls_action ", strlen("info=ls_action "))) {
		format_info_ls_action(line);

	} else if (!strncmp(line, "info=r ", strlen("info=r "))) {
		memset(r_name, 0, sizeof(r_name));
		memset(r_type, 0, sizeof(r_type));
		format_info_r(line, r_name, r_type);

	} else if (!strncmp(line, "info=lk ", strlen("info=lk "))) {
		/* will use info from previous r */
		format_info_lk(line, r_name, r_type);

	} else if (!strncmp(line, "info=r_action ", strlen("info=r_action "))) {
		/* will use info from previous r */
		format_info_r_action(line, r_name, r_type);

	} else {
		printf("UN %s\n", line);
	}
}
/*
 * Split dump_buf (dump_len bytes) into NUL/newline-terminated lines
 * and feed each to format_info_line().
 */
static void format_info(void)
{
	char line[MAX_LINE];
	int i, j;

	j = 0;
	memset(line, 0, sizeof(line));

	for (i = 0; i < dump_len; i++) {
		line[j++] = dump_buf[i];

		/*
		 * Flush on end-of-line, and also when the buffer is full:
		 * without the j bound, a dump line longer than MAX_LINE
		 * overflowed line[].  The last slot stays NUL so the
		 * flushed string is always terminated.
		 */
		if ((line[j-1] == '\n') || (line[j-1] == '\0') || (j == MAX_LINE - 1)) {
			format_info_line(line);
			j = 0;
			memset(line, 0, sizeof(line));
		}
	}
}
/*
 * Build a request named req_name, extend it with the NULL-terminated
 * "key = value" varargs, send it to lvmlockd, and return the reply.
 * The request object is destroyed before returning.
 */
static daemon_reply _lvmlockd_send(const char *req_name, ...)
{
	daemon_request rq;
	daemon_reply rp;
	va_list args;

	rq = daemon_request_make(req_name);

	va_start(args, req_name);
	daemon_request_extend_v(rq, args);
	va_end(args);

	rp = daemon_send(_lvmlockd, rq);
	daemon_request_destroy(rq);

	return rp;
}
/* See the same in lib/locking/lvmlockd.c */

#define NO_LOCKD_RESULT -1000

/*
 * Extract the daemon's op_result from a reply.  Returns 1 and stores
 * the result on success; returns 0 on a transport/response error.
 * *result is always written so callers that log it on failure do not
 * read an uninitialized value.
 */
static int _lvmlockd_result(daemon_reply reply, int *result)
{
	int reply_result;
	const char *reply_flags;
	const char *lock_type;

	*result = NO_LOCKD_RESULT;

	if (reply.error) {
		log_error("lvmlockd_result reply error %d", reply.error);
		return 0;
	}

	if (strcmp(daemon_reply_str(reply, "response", ""), "OK")) {
		log_error("lvmlockd_result bad response");
		return 0;
	}

	reply_result = daemon_reply_int(reply, "op_result", NO_LOCKD_RESULT);
	/* Compare against the named sentinel, not the magic -1000 literal. */
	if (reply_result == NO_LOCKD_RESULT) {
		log_error("lvmlockd_result no op_result");
		return 0;
	}

	/* The lock_type that lvmlockd used for locking. */
	lock_type = daemon_reply_str(reply, "lock_type", "none");

	*result = reply_result;

	reply_flags = daemon_reply_str(reply, "result_flags", NULL);

	log_debug("lvmlockd_result %d %s lm %s", reply_result, reply_flags, lock_type);
	return 1;
}
static int do_quit(void)
{
daemon_reply reply;
int rv = 0;
reply = daemon_send_simple(_lvmlockd, "quit", NULL);
if (reply.error) {
log_error("reply error %d", reply.error);
rv = reply.error;
}
daemon_reply_destroy(reply);
return rv;
}
/*
 * Create and bind the abstract-namespace datagram socket on which the
 * daemon's dump is received.  Returns the socket fd, or a negative
 * value (with errno set by socket/bind) on failure.
 */
static int setup_dump_socket(void)
{
	int s, rv;

	s = socket(AF_LOCAL, SOCK_DGRAM, 0);
	if (s < 0)
		return s;

	memset(&dump_addr, 0, sizeof(dump_addr));
	dump_addr.sun_family = AF_LOCAL;
	/* leading NUL in sun_path selects the abstract namespace */
	strcpy(&dump_addr.sun_path[1], DUMP_SOCKET_NAME);
	dump_addrlen = sizeof(sa_family_t) + strlen(dump_addr.sun_path+1) + 1;

	rv = bind(s, (struct sockaddr *) &dump_addr, dump_addrlen);
	if (rv < 0) {
		/* don't leak the fd on bind failure; preserve bind's errno */
		int err = errno;
		close(s);
		errno = err;
		return rv;
	}

	return s;
}
/*
 * Request the daemon's dump/info buffer and print it: raw for "dump",
 * formatted via format_info() otherwise.  Returns 0 on success or a
 * negative error.
 */
static int do_dump(const char *req_name)
{
	daemon_reply reply;
	int result;
	int fd, rv = 0;

	fd = setup_dump_socket();
	if (fd < 0) {
		log_error("socket error %d", fd);
		return fd;
	}

	reply = daemon_send_simple(_lvmlockd, req_name, NULL);

	if (reply.error) {
		log_error("reply error %d", reply.error);
		rv = reply.error;
		/* destroy the reply on this path too; it was leaked before */
		daemon_reply_destroy(reply);
		goto out;
	}

	result = daemon_reply_int(reply, "result", 0);
	dump_len = daemon_reply_int(reply, "dump_len", 0);

	daemon_reply_destroy(reply);

	if (result < 0) {
		rv = result;
		log_error("result %d", result);
	}

	/* reject non-positive lengths and clamp so recvfrom cannot be asked
	   to write past the end of dump_buf (dump_len comes from the daemon) */
	if (dump_len <= 0)
		goto out;
	if ((size_t)dump_len > sizeof(dump_buf))
		dump_len = (int)sizeof(dump_buf);

	memset(dump_buf, 0, sizeof(dump_buf));

	rv = recvfrom(fd, dump_buf, dump_len, MSG_WAITALL,
		      (struct sockaddr *)&dump_addr, &dump_addrlen);
	if (rv < 0) {
		log_error("recvfrom error %d %d", rv, errno);
		rv = -errno;
		goto out;
	}
	rv = 0;

	if ((info && dump) || !strcmp(req_name, "dump"))
		printf("%s\n", dump_buf);
	else
		format_info();
out:
	close(fd);
	return rv;
}
/*
 * Send a gl_enable/gl_disable request (req_name) for able_vg_name.
 * Returns 0 on success, or the daemon's failure result.
 */
static int do_able(const char *req_name)
{
	daemon_reply rep;
	int result;
	int ret = 0;

	rep = _lvmlockd_send(req_name,
			     "cmd = %s", "lvmlock",
			     "pid = %d", getpid(),
			     "vg_name = %s", able_vg_name,
			     NULL);

	if (!_lvmlockd_result(rep, &result)) {
		log_error("lvmlockd result %d", result);
		ret = result;
	}

	daemon_reply_destroy(rep);
	return ret;
}
/*
 * Ask the daemon to stop all lockspaces, passing the wait/force
 * options when set.  Returns 0 on success, or the daemon's failure
 * result.
 */
static int do_stop_lockspaces(void)
{
	daemon_reply rep;
	char opt_buf[32] = { 0 };
	int result;
	int ret = 0;

	if (wait_opt)
		strcat(opt_buf, "wait ");
	if (force_opt)
		strcat(opt_buf, "force ");

	rep = _lvmlockd_send("stop_all",
			     "cmd = %s", "lvmlock",
			     "pid = %d", getpid(),
			     "opts = %s", opt_buf[0] ? opt_buf : "none",
			     NULL);

	if (!_lvmlockd_result(rep, &result)) {
		log_error("lvmlockd result %d", result);
		ret = result;
	}

	daemon_reply_destroy(rep);
	return ret;
}
/* Print command-line help for lvmlockctl. */
static void print_usage(void)
{
	printf("lvmlockctl options\n");
	printf("Options:\n");
	printf("--help | -h\n");
	printf("      Show this help information.\n");
	printf("--quit | -q\n");
	printf("      Tell lvmlockd to quit.\n");
	printf("--info | -i\n");
	printf("      Print lock state information from lvmlockd.\n");
	printf("--dump | -d\n");
	printf("      Print log buffer from lvmlockd.\n");
	printf("--wait | -w 0|1\n");
	printf("      Wait option for other commands.\n");
	/* was "0|1>": stray '>' left over from an editing placeholder */
	printf("--force | -f 0|1\n");
	printf("      Force option for other commands.\n");
	printf("--stop-lockspaces | -S\n");
	printf("      Stop all lockspaces.\n");
	printf("--gl-enable <vg_name>\n");
	printf("      Tell lvmlockd to enable the global lock in a sanlock vg.\n");
	printf("--gl-disable <vg_name>\n");
	printf("      Tell lvmlockd to disable the global lock in a sanlock vg.\n");
}
static int read_options(int argc, char *argv[])
{
int option_index = 0;
int c;
static struct option long_options[] = {
{"help", no_argument, 0, 'h' },
{"quit", no_argument, 0, 'q' },
{"info", no_argument, 0, 'i' },
{"dump", no_argument, 0, 'd' },
{"wait", required_argument, 0, 'w' },
{"force", required_argument, 0, 'f' },
{"gl-enable", required_argument, 0, 'E' },
{"gl-disable", required_argument, 0, 'D' },
{"stop-lockspaces", no_argument, 0, 'S' },
{0, 0, 0, 0 }
};
if (argc == 1) {
print_usage();
exit(0);
}
while (1) {
c = getopt_long(argc, argv, "hqidE:D:w:S", long_options, &option_index);
if (c == -1)
break;
switch (c) {
case 'h':
/* --help */
print_usage();
exit(0);
case 'q':
/* --quit */
quit = 1;
break;
case 'i':
/* --info */
info = 1;
break;
case 'd':
/* --dump */
dump = 1;
break;
case 'w':
wait_opt = atoi(optarg);
break;
case 'E':
gl_enable = 1;
able_vg_name = strdup(optarg);
break;
case 'D':
gl_disable = 1;
able_vg_name = strdup(optarg);
break;
case 'S':
stop_lockspaces = 1;
break;
default:
print_usage();
exit(1);
}
}
return 0;
}
/*
 * Parse options, connect to lvmlockd, run the single requested
 * action, and close the connection.  Only the first action flag
 * that is set is executed (options are mutually exclusive).
 */
int main(int argc, char **argv)
{
	int rv;

	rv = read_options(argc, argv);
	if (rv < 0)
		return rv;

	_lvmlockd = lvmlockd_open(NULL);

	if (_lvmlockd.socket_fd < 0 || _lvmlockd.error) {
		log_error("lvmlockd open error %d", _lvmlockd.error);
		return -1;
	}

	if (quit)
		rv = do_quit();
	else if (info)
		rv = do_dump("info");
	else if (dump)
		rv = do_dump("dump");
	else if (gl_enable)
		rv = do_able("enable_gl");
	else if (gl_disable)
		rv = do_able("disable_gl");
	else if (stop_lockspaces)
		rv = do_stop_lockspaces();

	lvmlockd_close(_lvmlockd);
	return rv;
}

View File

@ -0,0 +1,49 @@
/*
* Copyright (C) 2014 Red Hat, Inc.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*/
#ifndef _LVM_LVMLOCKD_CLIENT_H
#define _LVM_LVMLOCKD_CLIENT_H
#include "daemon-client.h"
#define LVMLOCKD_SOCKET DEFAULT_RUN_DIR "/lvmlockd.socket"
/* Wrappers to open/close connection */
static inline daemon_handle lvmlockd_open(const char *sock)
{
daemon_info lvmlockd_info = {
.path = "lvmlockd",
.socket = sock ?: LVMLOCKD_SOCKET,
.protocol = "lvmlockd",
.protocol_version = 1,
.autostart = 0
};
return daemon_open(lvmlockd_info);
}
/* Close a connection opened by lvmlockd_open(). */
static inline void lvmlockd_close(daemon_handle h)
{
	/*
	 * Was "return daemon_close(h);" — returning an expression from a
	 * void function is a constraint violation in standard C (C99
	 * 6.8.6.4), even though daemon_close() itself returns void.
	 */
	daemon_close(h);
}
/*
* Errors returned as the lvmlockd result value.
*/
#define ENOLS 210 /* lockspace not found */
#define ESTARTING 211 /* lockspace is starting */
#define EARGS 212
#define EHOSTID 213
#define EMANAGER 214
#define EPREPARE 215
#define ELOCKD 216
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,666 @@
/*
* Copyright (C) 2014 Red Hat, Inc.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*/
#define _XOPEN_SOURCE 500 /* pthread */
#define _ISOC99_SOURCE
#define _GNU_SOURCE
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <poll.h>
#include <errno.h>
#include <string.h>
#include <endian.h>
#include <fcntl.h>
#include <byteswap.h>
#include <syslog.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include "configure.h"
#include "daemon-server.h"
#include "daemon-log.h"
#include "xlate.h"
#include "lvmlockd-internal.h"
#include "lvmlockd-client.h"
/*
* Using synchronous _wait dlm apis so do not define _REENTRANT and
* link with non-threaded version of library, libdlm_lt.
*/
#include "libdlm.h"
struct lm_dlm {
dlm_lshandle_t *dh;
};
struct rd_dlm {
struct dlm_lksb lksb;
struct val_blk *vb;
};
/*
 * Size of the dlm-specific per-resource state; the core daemon
 * appends this many bytes to each struct resource (its trailing
 * lm_data[] member) and the dlm code casts that area to struct rd_dlm.
 */
int lm_data_size_dlm(void)
{
	return sizeof(struct rd_dlm);
}
/*
* lock_args format
*
* vg_lock_args format for dlm is
* vg_version_string:undefined:cluster_name
*
* lv_lock_args are not used for dlm
*
* version_string is MAJOR.MINOR.PATCH
* undefined may contain ":"
*/
#define VG_LOCK_ARGS_MAJOR 1
#define VG_LOCK_ARGS_MINOR 0
#define VG_LOCK_ARGS_PATCH 0
/*
 * Extract the cluster name from vg_lock_args, whose dlm format is
 * "version:undefined:cluster_name" (see the comment above).  The
 * middle field may itself contain ':', so the cluster name is taken
 * as the last colon-separated field.
 */
static int cluster_name_from_args(char *vg_args, char *clustername)
{
	return last_string_from_args(vg_args, clustername);
}
/*
 * Verify that the version prefix of vg_lock_args is one this code
 * understands.  Only the major number matters: a larger major than
 * VG_LOCK_ARGS_MAJOR means an incompatible args format.
 * Returns 0 if compatible, negative on parse error or mismatch.
 */
static int check_args_version(char *vg_args)
{
	unsigned int args_major = 0;
	int rv;

	rv = version_from_args(vg_args, &args_major, NULL, NULL);
	if (rv < 0) {
		log_error("check_args_version %s error %d", vg_args, rv);
		return rv;
	}

	if (args_major > VG_LOCK_ARGS_MAJOR) {
		log_error("check_args_version %s major %d %d", vg_args, args_major, VG_LOCK_ARGS_MAJOR);
		return -1;
	}

	return 0;
}
/* This will be set after dlm_controld is started. */
#define DLM_CLUSTER_NAME_PATH "/sys/kernel/config/dlm/cluster/cluster_name"
static int read_cluster_name(char *clustername)
{
char *n;
int fd;
int rv;
if (daemon_test) {
sprintf(clustername, "%s", "test");
return 0;
}
fd = open(DLM_CLUSTER_NAME_PATH, O_RDONLY);
if (fd < 0) {
log_debug("read_cluster_name: open error %d, check dlm_controld", fd);
return fd;
}
rv = read(fd, clustername, MAX_ARGS - 1);
if (rv < 0) {
log_error("read_cluster_name: cluster name read error %d, check dlm_controld", fd);
close(fd);
return rv;
}
n = strstr(clustername, "\n");
if (n)
*n = '\0';
close(fd);
return 0;
}
/*
 * Build the dlm vg_lock_args string ("MAJOR.MINOR.PATCH:cluster_name")
 * stored in the VG metadata by vgcreate.  The cluster name is read
 * from the running dlm, so dlm_controld must be up.
 * Returns 0 on success, -EMANAGER if the cluster name cannot be read,
 * -EARGS if the result would not fit in MAX_ARGS.
 */
int lm_init_vg_dlm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args)
{
	char clustername[MAX_ARGS] = { 0 };
	char lock_args_version[MAX_ARGS] = { 0 };

	snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u",
		 VG_LOCK_ARGS_MAJOR, VG_LOCK_ARGS_MINOR, VG_LOCK_ARGS_PATCH);

	if (read_cluster_name(clustername) < 0)
		return -EMANAGER;

	/* "+ 2" covers the ':' separator and the terminating NUL */
	if (strlen(clustername) + strlen(lock_args_version) + 2 > MAX_ARGS) {
		log_error("init_vg_dlm args too long");
		return -EARGS;
	}

	snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, clustername);

	log_debug("init_vg_dlm done %s vg_args %s", ls_name, vg_args);
	return 0;
}
/*
 * Validate that this host's dlm cluster matches the cluster recorded
 * in the VG's lock_args, then allocate the dlm-specific lockspace
 * state (ls->lm_data).  The global lockspace has no vg_args and skips
 * the checks.  Returns 0, or -EMANAGER/-EARGS/-ENOMEM on error.
 */
int lm_prepare_lockspace_dlm(struct lockspace *ls)
{
	char sys_clustername[MAX_ARGS];
	char arg_clustername[MAX_ARGS];
	struct lm_dlm *lmd;
	int rv;

	memset(sys_clustername, 0, sizeof(sys_clustername));
	memset(arg_clustername, 0, sizeof(arg_clustername));

	rv = read_cluster_name(sys_clustername);
	if (rv < 0)
		return -EMANAGER;

	if (!ls->vg_args[0]) {
		/* global lockspace has no vg args */
		goto skip_args;
	}

	rv = check_args_version(ls->vg_args);
	if (rv < 0)
		return -EARGS;

	rv = cluster_name_from_args(ls->vg_args, arg_clustername);
	if (rv < 0) {
		log_error("prepare_lockspace_dlm %s no cluster name from args %s", ls->name, ls->vg_args);
		return -EARGS;
	}

	if (strcmp(sys_clustername, arg_clustername)) {
		log_error("prepare_lockspace_dlm %s mismatching cluster names sys %s arg %s",
			  ls->name, sys_clustername, arg_clustername);
		return -EARGS;
	}

 skip_args:
	lmd = malloc(sizeof(struct lm_dlm));
	if (!lmd)
		return -ENOMEM;

	/*
	 * Zero the struct so lmd->dh is NULL until lm_add_lockspace_dlm()
	 * sets it; lm_rem_lockspace_dlm() dereferences lmd->dh and
	 * previously could read an uninitialized pointer if add was
	 * never reached.
	 */
	memset(lmd, 0, sizeof(struct lm_dlm));

	ls->lm_data = lmd;
	return 0;
}
/*
 * Join the dlm lockspace for ls.  With adopt set, attach to a
 * lockspace that already exists (left over from a previous lvmlockd);
 * otherwise create a new one.  On failure the lm_data allocated by
 * lm_prepare_lockspace_dlm() is released.
 */
int lm_add_lockspace_dlm(struct lockspace *ls, int adopt)
{
	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;

	if (daemon_test)
		return 0;

	lmd->dh = adopt ? dlm_open_lockspace(ls->name)
			: dlm_new_lockspace(ls->name, 0600, DLM_LSFL_NEWEXCL);

	if (lmd->dh)
		return 0;

	log_error("add_lockspace_dlm %s adopt %d error", ls->name, adopt);
	free(lmd);
	ls->lm_data = NULL;
	return -1;
}
/*
 * Leave the dlm lockspace for ls and free the dlm-specific state.
 * Clears the dlm global-lock bookkeeping when this was the global
 * lockspace.
 *
 * free_vg indicates a vgremove, where we might want to tell other
 * nodes to leave the lockspace too.  That is not done: an unused
 * lockspace sitting around is harmless, and doing it would need a
 * new "notification lock" with a callback.  The parameter is kept
 * for interface symmetry with the sanlock backend.
 */
int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg)
{
	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
	int rv;

	if (!daemon_test) {
		rv = dlm_release_lockspace(ls->name, lmd->dh, 1);
		if (rv < 0) {
			log_error("rem_lockspace_dlm error %d", rv);
			return rv;
		}
	}

	free(lmd);
	ls->lm_data = NULL;

	if (!strcmp(ls->name, gl_lsname_dlm)) {
		gl_running_dlm = 0;
		gl_auto_dlm = 0;
	}

	return 0;
}
/*
 * Initialize the dlm state for a resource.  GL/VG resources get a
 * value-block buffer (struct val_blk followed by the raw dlm lvb).
 * When with_lock_nl is set, also take the initial NL lock that later
 * lock requests convert from.  Returns 0 or a negative error.
 */
static int lm_add_resource_dlm(struct lockspace *ls, struct resource *r, int with_lock_nl)
{
	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
	struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
	uint32_t flags = 0;
	char *buf;
	int rv;

	if (r->type == LD_RT_GL || r->type == LD_RT_VG) {
		buf = malloc(sizeof(struct val_blk) + DLM_LVB_LEN);
		if (!buf)
			return -ENOMEM;
		memset(buf, 0, sizeof(struct val_blk) + DLM_LVB_LEN);

		rdd->vb = (struct val_blk *)buf;
		rdd->lksb.sb_lvbptr = buf + sizeof(struct val_blk);
		flags |= LKF_VALBLK;
	}

	if (!with_lock_nl)
		goto out;

	/* because this is a new NL lock request */
	flags |= LKF_EXPEDITE;

	if (daemon_test)
		goto out;

	rv = dlm_ls_lock_wait(lmd->dh, LKM_NLMODE, &rdd->lksb, flags,
			      r->name, strlen(r->name),
			      0, NULL, NULL, NULL);
	if (rv < 0) {
		log_error("S %s R %s add_resource_dlm lock error %d", ls->name, r->name, rv);
		/*
		 * The caller leaves r->lm_init at 0 on failure and will
		 * call us again on a retry, which would malloc a new vb;
		 * free this one now to avoid leaking it.
		 */
		if (rdd->vb) {
			free(rdd->vb);
			rdd->vb = NULL;
			rdd->lksb.sb_lvbptr = NULL;
		}
		return rv;
	}
 out:
	return 0;
}
/*
 * Tear down the dlm state for a resource: drop any lock still held
 * (sb_lkid non-zero), free the value-block buffer, and zero the
 * per-resource state so it can be re-initialized later.
 */
int lm_rem_resource_dlm(struct lockspace *ls, struct resource *r)
{
	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
	struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
	int rv = 0;

	if (!daemon_test && rdd->lksb.sb_lkid) {
		rv = dlm_ls_unlock_wait(lmd->dh, rdd->lksb.sb_lkid, 0, &rdd->lksb);
		if (rv < 0)
			log_error("S %s R %s rem_resource_dlm unlock error %d", ls->name, r->name, rv);
	}

	free(rdd->vb);	/* free(NULL) is a no-op */
	memset(rdd, 0, sizeof(struct rd_dlm));
	r->lm_init = 0;
	return rv;
}
/*
 * Map an lvmlockd lock mode onto the equivalent dlm mode.
 * EX maps to dlm exclusive, SH to protected-read; every other mode
 * has no dlm equivalent and yields -1.
 */
static int to_dlm_mode(int ld_mode)
{
	if (ld_mode == LD_LK_EX)
		return LKM_EXMODE;
	if (ld_mode == LD_LK_SH)
		return LKM_PRMODE;
	return -1;
}
/*
 * Reattach to an orphaned dlm lock left behind by a previous lvmlockd
 * (LKF_ORPHAN + LKF_PERSISTENT), requesting it in ld_mode.
 *
 * Returns 0 on success, -EUCLEAN if an orphan exists but in a
 * different mode (dlm reports -EAGAIN for that), a negative error
 * otherwise.  *r_version is always set to 0 here; see the FIXME
 * below about reading the lvb.  On failure the resource's dlm state
 * is torn down again via lm_rem_resource_dlm().
 */
static int lm_adopt_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
			uint32_t *r_version)
{
	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
	struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
	struct dlm_lksb *lksb;
	uint32_t flags = 0;
	int mode;
	int rv;

	*r_version = 0;

	/* set up lksb/vb state, but without the initial NL lock request */
	if (!r->lm_init) {
		rv = lm_add_resource_dlm(ls, r, 0);
		if (rv < 0)
			return rv;
		r->lm_init = 1;
	}

	lksb = &rdd->lksb;

	flags |= LKF_PERSISTENT;
	flags |= LKF_ORPHAN;

	if (rdd->vb)
		flags |= LKF_VALBLK;

	mode = to_dlm_mode(ld_mode);
	if (mode < 0) {
		log_error("adopt_dlm invalid mode %d", ld_mode);
		rv = -EINVAL;
		goto fail;
	}

	log_debug("S %s R %s adopt_dlm", ls->name, r->name);

	if (daemon_test)
		return 0;

	/*
	 * dlm returns 0 for success, -EAGAIN if an orphan is
	 * found with another mode, and -ENOENT if no orphan.
	 *
	 * cast/bast/param are (void *)1 because the kernel
	 * returns errors if some are null.
	 */

	rv = dlm_ls_lockx(lmd->dh, mode, lksb, flags,
			  r->name, strlen(r->name), 0,
			  (void *)1, (void *)1, (void *)1,
			  NULL, NULL);

	if (rv == -EAGAIN) {
		log_debug("S %s R %s adopt_dlm adopt mode %d try other mode",
			  ls->name, r->name, ld_mode);
		rv = -EUCLEAN;
		goto fail;
	}
	if (rv < 0) {
		log_debug("S %s R %s adopt_dlm mode %d flags %x error %d errno %d",
			  ls->name, r->name, mode, flags, rv, errno);
		goto fail;
	}

	/*
	 * FIXME: For GL/VG locks we probably want to read the lvb,
	 * especially if adopting an ex lock, because when we
	 * release this adopted ex lock we may want to write new
	 * lvb values based on the current lvb values (at lease
	 * in the GL case where we increment the current values.)
	 *
	 * It should be possible to read the lvb by requesting
	 * this lock in the same mode it's already in.
	 */

	return rv;

 fail:
	lm_rem_resource_dlm(ls, r);
	return rv;
}
/*
 * Use PERSISTENT so that if lvmlockd exits while holding locks,
 * the locks will remain orphaned in the dlm, still protecting what
 * they were acquired to protect.
 */

/*
 * Acquire (convert) the dlm lock for resource r to ld_mode.
 *
 * Normal path: the NL lock taken in lm_add_resource_dlm() is
 * converted to the requested mode with NOQUEUE, so a conflicting
 * holder yields -EAGAIN rather than blocking.  With adopt set, the
 * request is redirected to lm_adopt_dlm() instead.
 *
 * For GL/VG resources the dlm value block is read after a successful
 * lock and *r_version is set from it; a zeroed/invalid lvb yields
 * *r_version = 0.  Returns 0, -EAGAIN on lock conflict, or another
 * negative error.
 */
int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
		uint32_t *r_version, int adopt)
{
	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
	struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
	struct dlm_lksb *lksb;
	struct val_blk vb;
	uint32_t flags = 0;
	uint16_t vb_version;
	int mode;
	int rv;

	if (adopt) {
		/* When adopting, we don't follow the normal method
		   of acquiring a NL lock then converting it to the
		   desired mode. */
		return lm_adopt_dlm(ls, r, ld_mode, r_version);
	}

	if (!r->lm_init) {
		rv = lm_add_resource_dlm(ls, r, 1);
		if (rv < 0)
			return rv;
		r->lm_init = 1;
	}

	lksb = &rdd->lksb;

	flags |= LKF_CONVERT;
	flags |= LKF_NOQUEUE;
	flags |= LKF_PERSISTENT;

	if (rdd->vb)
		flags |= LKF_VALBLK;

	mode = to_dlm_mode(ld_mode);
	if (mode < 0) {
		log_error("lock_dlm invalid mode %d", ld_mode);
		return -EINVAL;
	}

	log_debug("S %s R %s lock_dlm", ls->name, r->name);

	if (daemon_test) {
		*r_version = 0;
		return 0;
	}

	rv = dlm_ls_lock_wait(lmd->dh, mode, lksb, flags,
			      r->name, strlen(r->name),
			      0, NULL, NULL, NULL);
	if (rv == -EAGAIN) {
		/* NOQUEUE: another host holds the lock in a conflicting mode */
		log_error("S %s R %s lock_dlm mode %d rv EAGAIN", ls->name, r->name, mode);
		return -EAGAIN;
	}
	if (rv < 0) {
		log_error("S %s R %s lock_dlm error %d", ls->name, r->name, rv);
		return rv;
	}

	if (rdd->vb) {
		/* dlm invalidates the lvb e.g. after a holder crashed */
		if (lksb->sb_flags & DLM_SBF_VALNOTVALID) {
			log_debug("S %s R %s lock_dlm VALNOTVALID", ls->name, r->name);
			memset(rdd->vb, 0, sizeof(struct val_blk));
			*r_version = 0;
			goto out;
		}

		memcpy(&vb, lksb->sb_lvbptr, sizeof(struct val_blk));
		vb_version = le16_to_cpu(vb.version);

		/* a newer major vb format (high byte) than ours: ignore the vb */
		if (vb_version && ((vb_version & 0xFF00) > (VAL_BLK_VERSION & 0xFF00))) {
			log_error("S %s R %s lock_dlm ignore vb_version %x",
				  ls->name, r->name, vb_version);
			*r_version = 0;
			free(rdd->vb);
			rdd->vb = NULL;
			lksb->sb_lvbptr = NULL;
			goto out;
		}

		*r_version = le32_to_cpu(vb.r_version);
		memcpy(rdd->vb, &vb, sizeof(vb)); /* rdd->vb saved as le */

		log_debug("S %s R %s lock_dlm get r_version %u",
			  ls->name, r->name, *r_version);
	}
out:
	return 0;
}
/*
 * Convert an existing dlm lock on r to ld_mode (e.g. sh <-> ex).
 * When dropping from EX with a value block, the new r_version is
 * written into the lvb as part of the conversion.  Returns 0,
 * -EAGAIN on conflict (NOQUEUE), or another negative error.
 */
int lm_convert_dlm(struct lockspace *ls, struct resource *r,
		   int ld_mode, uint32_t r_version)
{
	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
	struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
	struct dlm_lksb *lksb = &rdd->lksb;
	uint32_t flags = 0;
	int mode;
	int rv;

	log_debug("S %s R %s convert_dlm", ls->name, r->name);

	/*
	 * Validate the mode up front, like lm_lock_dlm does.  mode was
	 * previously uint32_t and to_dlm_mode()'s -1 error return was
	 * never checked, so an invalid mode was passed to the dlm.
	 */
	mode = to_dlm_mode(ld_mode);
	if (mode < 0) {
		log_error("convert_dlm invalid mode %d", ld_mode);
		return -EINVAL;
	}

	flags |= LKF_CONVERT;
	flags |= LKF_NOQUEUE;
	flags |= LKF_PERSISTENT;

	if (rdd->vb && r_version && (r->mode == LD_LK_EX)) {
		if (!rdd->vb->version) {
			/* first time vb has been written */
			rdd->vb->version = cpu_to_le16(VAL_BLK_VERSION);
		}
		rdd->vb->r_version = cpu_to_le32(r_version);
		memcpy(lksb->sb_lvbptr, rdd->vb, sizeof(struct val_blk));

		log_debug("S %s R %s convert_dlm set r_version %u",
			  ls->name, r->name, r_version);

		flags |= LKF_VALBLK;
	}

	if (daemon_test)
		return 0;

	rv = dlm_ls_lock_wait(lmd->dh, mode, lksb, flags,
			      r->name, strlen(r->name),
			      0, NULL, NULL, NULL);
	if (rv == -EAGAIN) {
		/* FIXME: When does this happen?  Should something different be done? */
		log_error("S %s R %s convert_dlm mode %d rv EAGAIN", ls->name, r->name, mode);
		return -EAGAIN;
	}
	if (rv < 0) {
		log_error("S %s R %s convert_dlm error %d", ls->name, r->name, rv);
	}
	return rv;
}
/*
 * Release the lock on r by converting it back to NL.  When releasing
 * an EX lock that carries a value block, the new r_version is written
 * into the lvb first.  PERSISTENT is deliberately NOT set here: an
 * orphaned NL lock would not protect anything.
 */
int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
		  uint32_t r_version, uint32_t lmuf_flags)
{
	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
	struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
	struct dlm_lksb *lksb = &rdd->lksb;
	uint32_t flags = LKF_CONVERT;
	int rv;

	log_debug("S %s R %s unlock_dlm r_version %u flags %x",
		  ls->name, r->name, r_version, lmuf_flags);

	if (rdd->vb && r_version && (r->mode == LD_LK_EX)) {
		if (!rdd->vb->version) {
			/* first time vb has been written */
			rdd->vb->version = cpu_to_le16(VAL_BLK_VERSION);
		}
		rdd->vb->r_version = cpu_to_le32(r_version);
		memcpy(lksb->sb_lvbptr, rdd->vb, sizeof(struct val_blk));

		log_debug("S %s R %s unlock_dlm set r_version %u",
			  ls->name, r->name, r_version);

		flags |= LKF_VALBLK;
	}

	if (daemon_test)
		return 0;

	rv = dlm_ls_lock_wait(lmd->dh, LKM_NLMODE, lksb, flags,
			      r->name, strlen(r->name),
			      0, NULL, NULL, NULL);
	if (rv < 0)
		log_error("S %s R %s unlock_dlm error %d", ls->name, r->name, rv);

	return rv;
}
/*
* This list could be read from dlm_controld via libdlmcontrol,
* but it's simpler to get it from sysfs.
*/
#define DLM_LOCKSPACES_PATH "/sys/kernel/config/dlm/cluster/spaces"
/*
 * Scan the dlm configfs spaces directory for existing lvm lockspaces
 * (entries named LVM_LS_PREFIX<vg_name>) and append a new struct
 * lockspace for each to ls_rejoin, used when adopting state from a
 * previous lvmlockd.  Returns 0, -ECONNREFUSED if the directory is
 * missing (dlm not running), or -ENOMEM.
 */
int lm_get_lockspaces_dlm(struct list_head *ls_rejoin)
{
	struct lockspace *ls;
	struct dirent *de;
	DIR *ls_dir;

	if (!(ls_dir = opendir(DLM_LOCKSPACES_PATH)))
		return -ECONNREFUSED;

	while ((de = readdir(ls_dir))) {
		if (de->d_name[0] == '.')
			continue;

		if (strncmp(de->d_name, LVM_LS_PREFIX, strlen(LVM_LS_PREFIX)))
			continue;

		if (!(ls = alloc_lockspace())) {
			closedir(ls_dir);
			return -ENOMEM;
		}

		ls->lm_type = LD_LM_DLM;
		/*
		 * strncpy with MAX_NAME does not NUL-terminate a maximal
		 * name; the fields are char[MAX_NAME+1], so terminate
		 * explicitly rather than relying on alloc_lockspace()
		 * having zeroed the struct.
		 */
		strncpy(ls->name, de->d_name, MAX_NAME);
		ls->name[MAX_NAME] = '\0';
		strncpy(ls->vg_name, ls->name + strlen(LVM_LS_PREFIX), MAX_NAME);
		ls->vg_name[MAX_NAME] = '\0';
		list_add_tail(&ls->list, ls_rejoin);
	}

	closedir(ls_dir);
	return 0;
}
/*
 * Report whether the dlm is usable on this host.  The configfs
 * cluster name is only readable once dlm_controld is running, so a
 * successful read means the dlm is up.  Returns 1 if running, else 0.
 */
int lm_is_running_dlm(void)
{
	char sys_clustername[MAX_ARGS] = { 0 };

	return read_cluster_name(sys_clustername) < 0 ? 0 : 1;
}

View File

@ -0,0 +1,373 @@
/*
* Copyright (C) 2014 Red Hat, Inc.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*/
#ifndef _LVM_LVMLOCKD_INTERNAL_H
#define _LVM_LVMLOCKD_INTERNAL_H
#define MAX_NAME 64
#define MAX_ARGS 64
#define R_NAME_GL_DISABLED "_GLLK_disabled"
#define R_NAME_GL "GLLK"
#define R_NAME_VG "VGLK"
#define S_NAME_GL_DLM "lvm_global"
#define LVM_LS_PREFIX "lvm_" /* ls name is prefix + vg_name */
/* global lockspace name for sanlock is a vg name */
/* lock manager types */
enum {
LD_LM_NONE = 0,
LD_LM_UNUSED = 1, /* place holder so values match lib/locking/lvmlockd.h */
LD_LM_DLM = 2,
LD_LM_SANLOCK = 3,
};
/* operation types */
enum {
LD_OP_HELLO = 1,
LD_OP_QUIT,
LD_OP_INIT,
LD_OP_FREE,
LD_OP_START,
LD_OP_STOP,
LD_OP_LOCK,
LD_OP_UPDATE,
LD_OP_CLOSE,
LD_OP_ENABLE,
LD_OP_DISABLE,
LD_OP_START_WAIT,
LD_OP_STOP_ALL,
LD_OP_DUMP_INFO,
LD_OP_DUMP_LOG,
LD_OP_RENAME_BEFORE,
LD_OP_RENAME_FINAL,
LD_OP_RUNNING_LM,
LD_OP_FIND_FREE_LOCK,
LD_OP_FORGET_VG_NAME,
};
/* resource types */
enum {
LD_RT_GL = 1,
LD_RT_VG,
LD_RT_LV,
};
/* lock modes, more restrictive must be larger value */
enum {
LD_LK_IV = -1,
LD_LK_UN = 0,
LD_LK_NL = 1,
LD_LK_SH = 2,
LD_LK_EX = 3,
};
struct list_head {
struct list_head *next, *prev;
};
struct client {
struct list_head list;
pthread_mutex_t mutex;
int pid;
int fd;
int pi;
uint32_t id;
unsigned int recv : 1;
unsigned int dead : 1;
unsigned int poll_ignore : 1;
char name[MAX_NAME+1];
};
#define LD_AF_PERSISTENT 0x00000001
#define LD_AF_UNUSED 0x00000002 /* use me */
#define LD_AF_UNLOCK_CANCEL 0x00000004
#define LD_AF_NEXT_VERSION 0x00000008
#define LD_AF_WAIT 0x00000010
#define LD_AF_FORCE 0x00000020
#define LD_AF_EX_DISABLE 0x00000040
#define LD_AF_ENABLE 0x00000080
#define LD_AF_DISABLE 0x00000100
#define LD_AF_SEARCH_LS 0x00000200
#define LD_AF_WAIT_STARTING 0x00001000
#define LD_AF_DUP_GL_LS 0x00002000
#define LD_AF_INACTIVE_LS 0x00004000
#define LD_AF_ADD_LS_ERROR 0x00008000
#define LD_AF_ADOPT 0x00010000
/*
* Number of times to repeat a lock request after
* a lock conflict (-EAGAIN) if unspecified in the
* request.
*/
#define DEFAULT_MAX_RETRIES 4
struct action {
struct list_head list;
uint32_t client_id;
uint32_t flags; /* LD_AF_ */
uint32_t version;
uint64_t host_id;
int8_t op; /* operation type LD_OP_ */
int8_t rt; /* resource type LD_RT_ */
int8_t mode; /* lock mode LD_LK_ */
int8_t lm_type; /* lock manager: LM_DLM, LM_SANLOCK */
int retries;
int max_retries;
int result;
int lm_rv; /* return value from lm_ function */
char vg_uuid[64];
char vg_name[MAX_NAME+1];
char lv_name[MAX_NAME+1];
char lv_uuid[MAX_NAME+1];
char vg_args[MAX_ARGS];
char lv_args[MAX_ARGS];
char vg_sysid[MAX_NAME+1];
};
struct resource {
struct list_head list; /* lockspace.resources */
char name[MAX_NAME+1]; /* vg name or lv name */
int8_t type; /* resource type LD_RT_ */
int8_t mode;
unsigned int sh_count; /* number of sh locks on locks list */
uint32_t version;
unsigned int lm_init : 1; /* lm_data is initialized */
unsigned int adopt : 1; /* temp flag in remove_inactive_lvs */
unsigned int version_zero_valid : 1;
struct list_head locks;
struct list_head actions;
struct val_blk *vb;
char lv_args[MAX_ARGS];
char lm_data[0]; /* lock manager specific data */
};
#define LD_LF_PERSISTENT 0x00000001
struct lock {
struct list_head list; /* resource.locks */
int8_t mode; /* lock mode LD_LK_ */
uint32_t version;
uint32_t flags; /* LD_LF_ */
uint32_t client_id; /* may be 0 for persistent or internal locks */
};
struct lockspace {
struct list_head list; /* lockspaces */
char name[MAX_NAME+1];
char vg_name[MAX_NAME+1];
char vg_uuid[64];
char vg_args[MAX_ARGS]; /* lock manager specific args */
char vg_sysid[MAX_NAME+1];
int8_t lm_type; /* lock manager: LM_DLM, LM_SANLOCK */
void *lm_data;
uint64_t host_id;
uint64_t free_lock_offset; /* start search for free lock here */
uint32_t start_client_id; /* client_id that started the lockspace */
pthread_t thread; /* makes synchronous lock requests */
pthread_cond_t cond;
pthread_mutex_t mutex;
unsigned int create_fail : 1;
unsigned int create_done : 1;
unsigned int thread_work : 1;
unsigned int thread_stop : 1;
unsigned int thread_done : 1;
unsigned int sanlock_gl_enabled: 1;
unsigned int sanlock_gl_dup: 1;
struct list_head actions; /* new client actions */
struct list_head resources; /* resource/lock state for gl/vg/lv */
};
#define VAL_BLK_VERSION 0x0101

/*
 * Structure copied to/from the lock manager's value block for GL/VG
 * locks (see lm_lock_dlm/lm_unlock_dlm).  Fields are stored
 * little-endian in the lvb; the version high byte is treated as a
 * major format number that, when newer than ours, causes the vb to
 * be ignored.
 */
struct val_blk {
	uint16_t version;	/* VAL_BLK_VERSION of the writer, le16 */
	uint16_t flags;		/* unused in the code visible here */
	uint32_t r_version;	/* resource version counter, le32 */
};
/* lm_unlock flags */
#define LMUF_FREE_VG 0x00000001
struct lockspace *alloc_lockspace(void);
int lockspaces_empty(void);
int last_string_from_args(char *args_in, char *last);
int version_from_args(char *args, unsigned int *major, unsigned int *minor, unsigned int *patch);
int lm_init_vg_dlm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args);
int lm_prepare_lockspace_dlm(struct lockspace *ls);
int lm_add_lockspace_dlm(struct lockspace *ls, int adopt);
int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg);
int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
uint32_t *r_version, int adopt);
int lm_convert_dlm(struct lockspace *ls, struct resource *r,
int ld_mode, uint32_t r_version);
int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
uint32_t r_version, uint32_t lmu_flags);
int lm_rem_resource_dlm(struct lockspace *ls, struct resource *r);
int lm_get_lockspaces_dlm(struct list_head *ls_rejoin);
int lm_data_size_dlm(void);
int lm_is_running_dlm(void);
int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args);
int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name, char *vg_args, char *lv_args, uint64_t free_offset);
int lm_free_lv_sanlock(struct lockspace *ls, struct resource *r);
int lm_rename_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args);
int lm_prepare_lockspace_sanlock(struct lockspace *ls);
int lm_add_lockspace_sanlock(struct lockspace *ls, int adopt);
int lm_rem_lockspace_sanlock(struct lockspace *ls, int free_vg);
int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
uint32_t *r_version, int *retry, int adopt);
int lm_convert_sanlock(struct lockspace *ls, struct resource *r,
int ld_mode, uint32_t r_version);
int lm_unlock_sanlock(struct lockspace *ls, struct resource *r,
uint32_t r_version, uint32_t lmu_flags);
int lm_able_gl_sanlock(struct lockspace *ls, int enable);
int lm_ex_disable_gl_sanlock(struct lockspace *ls);
int lm_hosts_sanlock(struct lockspace *ls, int notify);
int lm_rem_resource_sanlock(struct lockspace *ls, struct resource *r);
int lm_gl_is_enabled(struct lockspace *ls);
int lm_get_lockspaces_sanlock(struct list_head *ls_rejoin);
int lm_data_size_sanlock(void);
int lm_is_running_sanlock(void);
int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset);
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
/* Initialize a list head to an empty (self-referencing) list. */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
/* Splice @new between two known-adjacent entries @prev and @next. */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
/* Unlink the entry between @prev and @next by joining them directly. */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}
/* Insert @new immediately after @head (stack-like ordering). */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}
/* Insert @new before @head, i.e. at the end of the list (queue order). */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
/*
 * Remove @entry from its list.  The entry's own next/prev pointers
 * are left untouched (still pointing into the old list).
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
}
/* Return nonzero if the list headed by @head contains no entries. */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/* to improve readability */
#define WAIT 1
#define NO_WAIT 0
#define FORCE 1
#define NO_FORCE 0
/*
* global variables
*/
#ifndef EXTERN
#define EXTERN extern
#define INIT(X)
#else
#undef EXTERN
#define EXTERN
#define INIT(X) =X
#endif
/*
* gl_type_static and gl_use_ are set by command line or config file
* to specify whether the global lock comes from dlm or sanlock.
* Without a static setting, lvmlockd will figure out where the
* global lock should be (but it could get mixed up in cases where
* both sanlock and dlm vgs exist.)
*
* gl_use_dlm means that the gl should come from lockspace gl_lsname_dlm
* gl_use_sanlock means that the gl should come from lockspace gl_lsname_sanlock
*
* gl_use_dlm has precedence over gl_use_sanlock, so if a node sees both
* dlm and sanlock vgs, it will use the dlm gl.
*
* gl_use_ is set when the first evidence of that lm_type is seen
* in any command.
*
* gl_lsname_sanlock is set when the first vg is seen in which an
* enabled gl is exists, or when init_vg creates a vg with gl enabled,
* or when enable_gl is used.
*
* gl_lsname_sanlock is cleared when free_vg deletes a vg with gl enabled
* or when disable_gl matches.
*/
EXTERN int gl_type_static;
EXTERN int gl_use_dlm;
EXTERN int gl_use_sanlock;
EXTERN pthread_mutex_t gl_type_mutex;
EXTERN char gl_lsname_dlm[MAX_NAME+1];
EXTERN char gl_lsname_sanlock[MAX_NAME+1];
EXTERN int gl_running_dlm;
EXTERN int gl_auto_dlm;
EXTERN int daemon_test; /* run as much as possible without a live lock manager */
EXTERN int daemon_debug;
EXTERN int daemon_host_id;
EXTERN const char *daemon_host_id_file;
EXTERN int sanlock_io_timeout;
void log_level(int level, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
#define log_debug(fmt, args...) log_level(LOG_DEBUG, fmt, ##args)
#define log_error(fmt, args...) log_level(LOG_ERR, fmt, ##args)
#define log_warn(fmt, args...) log_level(LOG_WARNING, fmt, ##args)
#endif

File diff suppressed because it is too large Load Diff

View File

@ -3,11 +3,13 @@
@top_srcdir@/daemons/lvmetad/lvmetad-client.h
@top_srcdir@/daemons/lvmpolld/lvmpolld-protocol.h
@top_srcdir@/daemons/lvmpolld/polling_ops.h
@top_srcdir@/daemons/lvmlockd/lvmlockd-client.h
@top_srcdir@/liblvm/lvm2app.h
@top_srcdir@/lib/activate/activate.h
@top_srcdir@/lib/activate/targets.h
@top_srcdir@/lib/cache/lvmcache.h
@top_srcdir@/lib/cache/lvmetad.h
@top_srcdir@/lib/locking/lvmlockd.h
@top_srcdir@/lib/commands/toolcontext.h
@top_srcdir@/lib/config/config.h
@top_srcdir@/lib/config/config_settings.h

View File

@ -200,6 +200,11 @@ ifeq ("@BUILD_LVMPOLLD@", "yes")
lvmpolld/lvmpolld-client.c
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
SOURCES +=\
locking/lvmlockd.c
endif
ifeq ("@DMEVENTD@", "yes")
CLDFLAGS += -L$(top_builddir)/daemons/dmeventd
LIBS += -ldevmapper-event

8
lib/cache/lvmetad.c vendored
View File

@ -22,6 +22,7 @@
#include "format-text.h" // TODO for disk_locn, used as a DA representation
#include "crc.h"
#include "lvm-signal.h"
#include "lvmlockd.h"
#define SCAN_TIMEOUT_SECONDS 80
#define MAX_RESCANS 10 /* Maximum number of times to scan all PVs and retry if the daemon returns a token mismatch error */
@ -1494,9 +1495,16 @@ void lvmetad_validate_global_cache(struct cmd_context *cmd, int force)
dm_list_init(&pvc_before);
dm_list_init(&pvc_after);
if (!lvmlockd_use()) {
log_error(INTERNAL_ERROR "validate global cache without lvmlockd");
return;
}
if (!lvmetad_used())
return;
log_debug_lvmetad("Validating global lvmetad cache");
if (force)
goto do_scan;

View File

@ -99,9 +99,14 @@ struct cmd_context {
unsigned independent_metadata_areas:1; /* Active formats have MDAs outside PVs */
unsigned unknown_system_id:1;
unsigned include_foreign_vgs:1;
unsigned include_active_foreign_vgs:1;
unsigned error_foreign_vgs:1;
unsigned include_foreign_vgs:1; /* report/display cmds can reveal foreign VGs */
unsigned include_shared_vgs:1; /* report/display cmds can reveal lockd VGs */
unsigned include_active_foreign_vgs:1; /* cmd should process foreign VGs with active LVs */
unsigned vg_read_print_access_error:1; /* print access errors from vg_read */
unsigned lockd_gl_disable:1;
unsigned lockd_vg_disable:1;
unsigned lockd_lv_disable:1;
unsigned lockd_vg_default_sh:1;
struct dev_types *dev_types;
@ -144,6 +149,11 @@ struct cmd_context {
const char *report_list_item_separator;
int hosttags;
/* Locking */
const char *lock_gl_mode; /* gl mode, from --lock-gl */
const char *lock_vg_mode; /* vg mode, from --lock-vg */
const char *lock_lv_mode; /* lv mode, from --lock-lv */
const char *lib_dir; /* Cache value global/library_dir */
char system_dir[PATH_MAX];
char dev_dir[PATH_MAX];

View File

@ -831,6 +831,27 @@ cfg(global_use_lvmetad_CFG, "use_lvmetad", global_CFG_SECTION, 0, CFG_TYPE_BOOL,
"LVM prints warnings and ignores lvmetad if this combination\n"
"is seen.\n")
cfg(global_use_lvmlockd_CFG, "use_lvmlockd", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 2, 124), NULL, 0, NULL,
"Use lvmlockd for locking among hosts using LVM on shared storage.\n")
cfg(global_lock_retries_CFG, "lock_retries", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_LOCK_RETRIES, vsn(2, 2, 124), NULL, 0, NULL,
"Retry lvmlockd lock requests this many times.\n")
cfg(global_sanlock_lv_extend_CFG, "sanlock_lv_extend", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_SANLOCK_LV_EXTEND_MB, vsn(2, 2, 124), NULL, 0, NULL,
"Size in MiB to extend the internal LV holding sanlock locks.\n"
"The internal LV holds locks for each LV in the VG, and after\n"
"enough LVs have been created, the internal LV needs to be extended.\n"
"lvcreate will automatically extend the internal LV when needed by\n"
"the amount specified here. Setting this to 0 disables the\n"
"automatic extension and can cause lvcreate to fail.\n")
cfg(global_allow_override_lock_modes_CFG, "allow_override_lock_modes", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 2, 124), NULL, 0, NULL,
"Allow command options to override normal locking.\n")
cfg(global_read_only_lock_modes_CFG, "read_only_lock_modes", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 2, 124), NULL, 0, NULL,
"Limit commands to actions that use read locks.\n"
"This disallows any actions that require a write (exclusive) lock.\n")
cfg(global_thin_check_executable_CFG, "thin_check_executable", global_CFG_SECTION, CFG_ALLOW_EMPTY | CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, THIN_CHECK_CMD, vsn(2, 2, 94), "@THIN_CHECK_CMD@", 0, NULL,
"The full path to the thin_check command.\n"
"LVM uses this command to check that a thin metadata\n"
@ -1256,6 +1277,14 @@ cfg(activation_mode_CFG, "activation_mode", activation_CFG_SECTION, 0, CFG_TYPE_
"sometimes assist with data recovery.\n"
"The '--activationmode' option overrides this setting.\n")
cfg_array(activation_lock_start_list_CFG, "lock_start_list", activation_CFG_SECTION, CFG_ALLOW_EMPTY|CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(2, 2, 124), NULL, 0, NULL,
"Locking is started only for VGs selected by this list.\n"
"The rules are the same as those for LVs in volume_list.\n")
cfg_array(activation_auto_lock_start_list_CFG, "auto_lock_start_list", activation_CFG_SECTION, CFG_ALLOW_EMPTY|CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(2, 2, 124), NULL, 0, NULL,
"Locking is auto-started only for VGs selected by this list.\n"
"The rules are the same as those for LVs in auto_activation_volume_list.\n")
cfg(metadata_pvmetadatacopies_CFG, "pvmetadatacopies", metadata_CFG_SECTION, CFG_ADVANCED | CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_PVMETADATACOPIES, vsn(1, 0, 0), NULL, 0, NULL,
"Number of copies of metadata to store on each PV.\n"
"Possible options are: 0, 1, 2.\n"
@ -1580,4 +1609,9 @@ cfg_array(local_extra_system_ids_CFG, "extra_system_ids", local_CFG_SECTION, CFG
"Use this only after consulting 'man lvmsystemid'\n"
"to be certain of correct usage and possible dangers.\n")
cfg(local_host_id_CFG, "host_id", local_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, 0, vsn(2, 2, 124), NULL, 0, NULL,
"The lvmlockd sanlock host_id.\n"
"This must be unique among all hosts,\n"
"and must be between 1 and 2000.\n")
cfg(CFG_COUNT, NULL, root_CFG_SECTION, 0, CFG_TYPE_INT, 0, vsn(0, 0, 0), NULL, 0, NULL, NULL)

View File

@ -51,11 +51,14 @@
#define DEFAULT_FALLBACK_TO_LOCAL_LOCKING 1
#define DEFAULT_FALLBACK_TO_CLUSTERED_LOCKING 1
#define DEFAULT_WAIT_FOR_LOCKS 1
#define DEFAULT_LOCK_RETRIES 3
#define DEFAULT_PRIORITISE_WRITE_LOCKS 1
#define DEFAULT_USE_MLOCKALL 0
#define DEFAULT_METADATA_READ_ONLY 0
#define DEFAULT_LVDISPLAY_SHOWS_FULL_DEVICE_PATH 0
#define DEFAULT_SANLOCK_LV_EXTEND_MB 256
#define DEFAULT_MIRRORLOG MIRROR_LOG_DISK
#define DEFAULT_MIRROR_LOG_FAULT_POLICY "allocate"
#define DEFAULT_MIRROR_IMAGE_FAULT_POLICY "remove"
@ -221,4 +224,6 @@
#define DEFAULT_THIN_POOL_AUTOEXTEND_THRESHOLD 100
#define DEFAULT_THIN_POOL_AUTOEXTEND_PERCENT 20
#define DEFAULT_CY_LOCK_TYPE "sanlock"
#endif /* _LVM_DEFAULTS_H */

View File

@ -86,6 +86,38 @@ alloc_policy_t get_alloc_from_string(const char *str)
return ALLOC_INVALID;
}
/*
 * Map a lock_type_t value to its canonical name.
 * Any value outside the known set (including LOCK_TYPE_INVALID)
 * maps to "invalid".
 */
const char *get_lock_type_string(lock_type_t lock_type)
{
	const char *name = "invalid";

	if (lock_type == LOCK_TYPE_NONE)
		name = "none";
	else if (lock_type == LOCK_TYPE_CLVM)
		name = "clvm";
	else if (lock_type == LOCK_TYPE_DLM)
		name = "dlm";
	else if (lock_type == LOCK_TYPE_SANLOCK)
		name = "sanlock";

	return name;
}
/*
 * Parse a lock_type name into its lock_type_t value.
 * A NULL string means no locking (LOCK_TYPE_NONE); an unrecognized
 * name yields LOCK_TYPE_INVALID.
 */
lock_type_t get_lock_type_from_string(const char *str)
{
	static const struct {
		const char *name;
		lock_type_t type;
	} _lock_types[] = {
		{ "none",    LOCK_TYPE_NONE },
		{ "clvm",    LOCK_TYPE_CLVM },
		{ "dlm",     LOCK_TYPE_DLM },
		{ "sanlock", LOCK_TYPE_SANLOCK },
	};
	unsigned i;

	/* A missing lock_type is treated as "no locking". */
	if (!str)
		return LOCK_TYPE_NONE;

	for (i = 0; i < sizeof(_lock_types) / sizeof(_lock_types[0]); i++)
		if (!strcmp(str, _lock_types[i].name))
			return _lock_types[i].type;

	return LOCK_TYPE_INVALID;
}
static const char *_percent_types[7] = { "NONE", "VG", "FREE", "LV", "PVS", "ORIGIN" };
const char *get_percent_string(percent_type_t def)

View File

@ -64,6 +64,9 @@ const char *get_alloc_string(alloc_policy_t alloc);
char alloc_policy_char(alloc_policy_t alloc);
alloc_policy_t get_alloc_from_string(const char *str);
const char *get_lock_type_string(lock_type_t lock_type);
lock_type_t get_lock_type_from_string(const char *str);
const char *get_percent_string(percent_type_t def);
char yes_no_prompt(const char *prompt, ...) __attribute__ ((format(printf, 1, 2)));

View File

@ -472,8 +472,11 @@ static int _print_vg(struct formatter *f, struct volume_group *vg)
else if (vg->lvm1_system_id && *vg->lvm1_system_id)
outf(f, "system_id = \"%s\"", vg->lvm1_system_id);
if (vg->lock_type)
if (vg->lock_type) {
outf(f, "lock_type = \"%s\"", vg->lock_type);
if (vg->lock_args)
outf(f, "lock_args = \"%s\"", vg->lock_args);
}
outsize(f, (uint64_t) vg->extent_size, "extent_size = %u",
vg->extent_size);
@ -699,6 +702,9 @@ static int _print_lv(struct formatter *f, struct logical_volume *lv)
lv->timestamp);
}
if (lv->lock_args)
outf(f, "lock_args = \"%s\"", lv->lock_args);
if (lv->alloc != ALLOC_INHERIT)
outf(f, "allocation_policy = \"%s\"",
get_alloc_string(lv->alloc));

View File

@ -67,6 +67,7 @@ static const struct flag _lv_flags[] = {
{LV_NOSCAN, NULL, 0},
{LV_TEMPORARY, NULL, 0},
{POOL_METADATA_SPARE, NULL, 0},
{LOCKD_SANLOCK_LV, NULL, 0},
{RAID, NULL, 0},
{RAID_META, NULL, 0},
{RAID_IMAGE, NULL, 0},

View File

@ -20,6 +20,7 @@
#include "toolcontext.h"
#include "lvmcache.h"
#include "lvmetad.h"
#include "lvmlockd.h"
#include "lv_alloc.h"
#include "pv_alloc.h"
#include "segtype.h"
@ -599,6 +600,11 @@ static int _read_lvnames(struct format_instance *fid __attribute__((unused)),
return 0;
}
if (dm_config_get_str(lvn, "lock_args", &str)) {
if (!(lv->lock_args = dm_pool_strdup(mem, str)))
return_0;
}
lv->alloc = ALLOC_INHERIT;
if (dm_config_get_str(lvn, "allocation_policy", &str)) {
lv->alloc = get_alloc_from_string(str);
@ -664,6 +670,12 @@ static int _read_lvnames(struct format_instance *fid __attribute__((unused)),
vg->pool_metadata_spare_lv = lv;
}
if (!lv_is_visible(lv) && !strcmp(lv->name, LOCKD_SANLOCK_LV_NAME)) {
log_debug_metadata("Logical volume %s is sanlock lv.", lv->name);
lv->status |= LOCKD_SANLOCK_LV;
vg->sanlock_lv = lv;
}
return 1;
}
@ -816,6 +828,11 @@ static struct volume_group *_read_vg(struct format_instance *fid,
goto bad;
}
if (dm_config_get_str(vgn, "lock_args", &str)) {
if (!(vg->lock_args = dm_pool_strdup(vg->vgmem, str)))
goto bad;
}
if (!_read_id(&vg->id, vgn, "id")) {
log_error("Couldn't read uuid for volume group %s.", vg->name);
goto bad;

2588
lib/locking/lvmlockd.c Normal file

File diff suppressed because it is too large Load Diff

239
lib/locking/lvmlockd.h Normal file
View File

@ -0,0 +1,239 @@
/*
* Copyright (C) 2014 Red Hat, Inc.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*/
#ifndef _LVMLOCKD_H
#define _LVMLOCKD_H
#include "config-util.h"
#include "daemon-client.h"
#define LOCKD_SANLOCK_LV_NAME "lvmlock"
/* lockd_gl flags */
#define LDGL_SKIP_CACHE_VALIDATE 0x00000001
#define LDGL_UPDATE_NAMES 0x00000002
/* lockd_lv flags */
#define LDLV_MODE_NO_SH 0x00000001
#define LDLV_PERSISTENT 0x00000002
/* lvmlockd result flags */
#define LD_RF_NO_LOCKSPACES 0x00000001
#define LD_RF_NO_GL_LS 0x00000002
#define LD_RF_LOCAL_LS 0x00000004
#define LD_RF_DUP_GL_LS 0x00000008
#define LD_RF_INACTIVE_LS 0x00000010
#define LD_RF_ADD_LS_ERROR 0x00000020
/* lockd_state flags */
#define LDST_EX 0x00000001
#define LDST_SH 0x00000002
#define LDST_FAIL_REQUEST 0x00000004
#define LDST_FAIL_NOLS 0x00000008
#define LDST_FAIL_STARTING 0x00000010
#define LDST_FAIL_OTHER 0x00000020
#define LDST_FAIL (LDST_FAIL_REQUEST | LDST_FAIL_NOLS | LDST_FAIL_STARTING | LDST_FAIL_OTHER)
#ifdef LVMLOCKD_SUPPORT
/* lvmlockd connection and communication */
void lvmlockd_set_socket(const char *sock);
void lvmlockd_set_use(int use);
int lvmlockd_use(void);
void lvmlockd_init(struct cmd_context *cmd);
void lvmlockd_connect(void);
void lvmlockd_disconnect(void);
/* vgcreate/vgremove use init/free */
int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg, const char *lock_type);
int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg);
void lockd_free_vg_final(struct cmd_context *cmd, struct volume_group *vg);
/* vgrename */
int lockd_rename_vg_before(struct cmd_context *cmd, struct volume_group *vg);
int lockd_rename_vg_final(struct cmd_context *cmd, struct volume_group *vg, int success);
/* start and stop the lockspace for a vg */
int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg);
int lockd_stop_vg(struct cmd_context *cmd, struct volume_group *vg);
int lockd_start_wait(struct cmd_context *cmd);
/* locking */
int lockd_gl_create(struct cmd_context *cmd, const char *def_mode, const char *vg_lock_type);
int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags);
int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
uint32_t flags, uint32_t *lockd_state);
int lockd_vg_update(struct volume_group *vg);
int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
const char *lv_name, struct id *lv_id,
const char *lock_args, const char *def_mode, uint32_t flags);
int lockd_lv(struct cmd_context *cmd, struct logical_volume *lv,
const char *def_mode, uint32_t flags);
/* lvcreate/lvremove use init/free */
int lockd_init_lv(struct cmd_context *cmd, struct volume_group *vg, struct logical_volume *lv,
struct lvcreate_params *lp);
int lockd_init_lv_args(struct cmd_context *cmd, struct volume_group *vg,
struct logical_volume *lv, const char *lock_type, const char **lock_args);
int lockd_free_lv(struct cmd_context *cmd, struct volume_group *vg,
const char *lv_name, struct id *lv_id, const char *lock_args);
const char *lockd_running_lock_type(struct cmd_context *cmd);
int handle_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg);
int lockd_lv_uses_lock(struct logical_volume *lv);
#else /* LVMLOCKD_SUPPORT */
/*
 * Build without lvmlockd support: every entry point collapses to a
 * no-op stub so callers need no #ifdefs of their own.
 *
 * Return-value convention visible in the stubs below:
 * - lvmlockd_use() returns 0: lvmlockd is never "in use".
 * - lockd_* operations that gate a command's progress return 1
 *   ("success") so the command continues as if locking succeeded.
 * - lockd_start_vg/lockd_stop_vg/lockd_start_wait and the LV
 *   init/free helpers return 0.
 * NOTE(review): the mix of 0 and 1 returns looks deliberate, but the
 * exact caller expectations cannot be confirmed from this header
 * alone — verify against the call sites before changing any of them.
 */
static inline void lvmlockd_set_socket(const char *sock)
{
}
static inline void lvmlockd_set_use(int use)
{
}
static inline void lvmlockd_init(struct cmd_context *cmd)
{
}
static inline void lvmlockd_disconnect(void)
{
}
static inline void lvmlockd_connect(void)
{
}
/* Without lvmlockd compiled in, it can never be in use. */
static inline int lvmlockd_use(void)
{
	return 0;
}
static inline int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg, const char *lock_type)
{
	return 1;
}
static inline int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg)
{
	return 1;
}
static inline void lockd_free_vg_final(struct cmd_context *cmd, struct volume_group *vg)
{
	return;
}
static inline int lockd_rename_vg_before(struct cmd_context *cmd, struct volume_group *vg)
{
	return 1;
}
static inline int lockd_rename_vg_final(struct cmd_context *cmd, struct volume_group *vg, int success)
{
	return 1;
}
static inline int lockd_start_vg(struct cmd_context *cmd, struct volume_group *vg)
{
	return 0;
}
static inline int lockd_stop_vg(struct cmd_context *cmd, struct volume_group *vg)
{
	return 0;
}
static inline int lockd_start_wait(struct cmd_context *cmd)
{
	return 0;
}
static inline int lockd_gl_create(struct cmd_context *cmd, const char *def_mode, const char *vg_lock_type)
{
	return 1;
}
static inline int lockd_gl(struct cmd_context *cmd, const char *def_mode, uint32_t flags)
{
	return 1;
}
/* Report a clean (zero) lock state and success. */
static inline int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
			   uint32_t flags, uint32_t *lockd_state)
{
	*lockd_state = 0;
	return 1;
}
static inline int lockd_vg_update(struct volume_group *vg)
{
	return 1;
}
static inline int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
				const char *lv_name, struct id *lv_id,
				const char *lock_args, const char *def_mode, uint32_t flags)
{
	return 1;
}
static inline int lockd_lv(struct cmd_context *cmd, struct logical_volume *lv,
			   const char *def_mode, uint32_t flags)
{
	return 1;
}
static inline int lockd_init_lv(struct cmd_context *cmd, struct volume_group *vg,
				struct logical_volume *lv, struct lvcreate_params *lp)
{
	return 0;
}
static inline int lockd_init_lv_args(struct cmd_context *cmd, struct volume_group *vg,
				     struct logical_volume *lv, const char *lock_type, const char **lock_args)
{
	return 0;
}
static inline int lockd_free_lv(struct cmd_context *cmd, struct volume_group *vg,
				const char *lv_name, struct id *lv_id, const char *lock_args)
{
	return 0;
}
/* No lock manager can be running when support is compiled out. */
static inline const char *lockd_running_lock_type(struct cmd_context *cmd)
{
	return NULL;
}
static inline int handle_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg)
{
	return 0;
}
/* Without lvmlockd no LV ever holds an lvmlockd lock. */
static inline int lockd_lv_uses_lock(struct logical_volume *lv)
{
	return 0;
}
#endif /* LVMLOCKD_SUPPORT */
#endif

View File

@ -20,6 +20,7 @@
#include "toolcontext.h"
#include "segtype.h"
#include "str_list.h"
#include "lvmlockd.h"
#include <time.h>
#include <sys/utsname.h>
@ -910,6 +911,19 @@ static int _lv_is_exclusive(struct logical_volume *lv)
int lv_active_change(struct cmd_context *cmd, struct logical_volume *lv,
enum activation_change activate, int needs_exclusive)
{
const char *ay_with_mode = NULL;
if (activate == CHANGE_ASY)
ay_with_mode = "sh";
if (activate == CHANGE_AEY)
ay_with_mode = "ex";
if (is_change_activating(activate) &&
!lockd_lv(cmd, lv, ay_with_mode, LDLV_PERSISTENT)) {
log_error("Failed to lock logical volume %s/%s", lv->vg->name, lv->name);
return 0;
}
switch (activate) {
case CHANGE_AN:
deactivate:
@ -962,6 +976,10 @@ exclusive:
return_0;
}
if (!is_change_activating(activate) &&
!lockd_lv(cmd, lv, "un", LDLV_PERSISTENT))
log_error("Failed to unlock logical volume %s/%s", lv->vg->name, lv->name);
return 1;
}
@ -1001,6 +1019,12 @@ char *lv_profile_dup(struct dm_pool *mem, const struct logical_volume *lv)
return dm_pool_strdup(mem, profile_name);
}
/*
 * Return a pool-allocated copy of the LV's lock_args,
 * or of "" when the LV has no lock_args set.
 */
char *lv_lock_args_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
	return dm_pool_strdup(mem, lv->lock_args ? lv->lock_args : "");
}
/* For given LV find recursively the LV which holds lock for it */
const struct logical_volume *lv_lock_holder(const struct logical_volume *lv)
{

View File

@ -51,7 +51,9 @@ struct logical_volume {
struct dm_list segs_using_this_lv;
uint64_t timestamp;
unsigned new_lock_args:1;
const char *hostname;
const char *lock_args;
};
struct lv_with_info_and_seg_status;
@ -103,6 +105,7 @@ const struct logical_volume *lv_lock_holder(const struct logical_volume *lv);
const struct logical_volume *lv_ondisk(const struct logical_volume *lv);
struct profile *lv_config_profile(const struct logical_volume *lv);
char *lv_profile_dup(struct dm_pool *mem, const struct logical_volume *lv);
char *lv_lock_args_dup(struct dm_pool *mem, const struct logical_volume *lv);
int lv_mirror_image_in_sync(const struct logical_volume *lv);
int lv_raid_image_in_sync(const struct logical_volume *lv);
int lv_raid_healthy(const struct logical_volume *lv);

View File

@ -30,6 +30,7 @@
#include "lvm-exec.h"
#include "lvm-signal.h"
#include "memlock.h"
#include "lvmlockd.h"
typedef enum {
PREFERRED,
@ -4588,7 +4589,9 @@ static int _lvresize_check_lv(struct cmd_context *cmd, struct logical_volume *lv
return 0;
}
if (!lv_is_visible(lv) && !lv_is_thin_pool_metadata(lv)) {
/* FIXME: use a status flag instead of the name "lvmlock". */
if (!lv_is_visible(lv) && !lv_is_thin_pool_metadata(lv) && strcmp(lv->name, "lvmlock")) {
log_error("Can't resize internal logical volume %s", lv->name);
return 0;
}
@ -5238,6 +5241,13 @@ int lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
/*
* If the LV is locked from activation, this lock call is a no-op.
* Otherwise, this acquires a transient lock on the lv (not PERSISTENT).
*/
if (!lockd_lv(cmd, lv, "ex", 0))
return_0;
if (lp->sizeargs &&
!(lock_lv = _lvresize_volume(cmd, lv, lp, pvh)))
return_0;
@ -5586,6 +5596,7 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
int format1_reload_required = 0;
int visible;
struct logical_volume *pool_lv = NULL;
struct logical_volume *lock_lv = lv;
struct lv_segment *cache_seg = NULL;
int ask_discard;
struct lv_list *lvl;
@ -5632,14 +5643,19 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
log_error("Can't remove logical volume %s used by a pool.",
lv->name);
return 0;
} else if (lv_is_thin_volume(lv))
} else if (lv_is_thin_volume(lv)) {
pool_lv = first_seg(lv)->pool_lv;
lock_lv = pool_lv;
}
if (lv_is_locked(lv)) {
log_error("Can't remove locked LV %s", lv->name);
return 0;
}
if (!lockd_lv(cmd, lock_lv, "ex", LDLV_PERSISTENT))
return_0;
/* FIXME Ensure not referred to by another existing LVs */
ask_discard = find_config_tree_bool(cmd, devices_issue_discards_CFG, NULL);
@ -5814,6 +5830,9 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
backup(vg);
lockd_lv(cmd, lock_lv, "un", LDLV_PERSISTENT);
lockd_free_lv(cmd, vg, lv->name, &lv->lvid.id[1], lv->lock_args);
if (!suppress_remove_message && visible)
log_print_unless_silent("Logical volume \"%s\" successfully removed", lv->name);
@ -7201,6 +7220,14 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
lv->major, lv->minor);
}
/*
* The specific LV may not use a lock. lockd_init_lv() sets
* lv->lock_args to NULL if this LV does not use its own lock.
*/
if (!lockd_init_lv(vg->cmd, vg, lv, lp))
return_NULL;
dm_list_splice(&lv->tags, &lp->tags);
if (!lv_extend(lv, create_segtype,
@ -7515,6 +7542,8 @@ deactivate_and_revert_new_lv:
}
revert_new_lv:
lockd_free_lv(vg->cmd, vg, lp->lv_name, &lv->lvid.id[1], lp->lock_args);
/* FIXME Better to revert to backup of metadata? */
if (!lv_remove(lv) || !vg_write(vg) || !vg_commit(vg))
log_error("Manual intervention may be required to remove "

View File

@ -101,6 +101,7 @@
#define THIN_POOL_DATA UINT64_C(0x0000004000000000) /* LV - Internal use only */
#define THIN_POOL_METADATA UINT64_C(0x0000008000000000) /* LV - Internal use only */
#define POOL_METADATA_SPARE UINT64_C(0x0000010000000000) /* LV - Internal use only */
/* NOTE(review): LOCKD_SANLOCK_LV below carries the same bit value as
 * LV_WRITEMOSTLY (0x0000020000000000).  Both are LV status flags, so if
 * they can appear in the same status field this is a flag collision —
 * confirm an unused bit (e.g. 0x0000002000000000) was intended. */
#define LOCKD_SANLOCK_LV UINT64_C(0x0000020000000000) /* LV - Internal use only */
#define LV_WRITEMOSTLY UINT64_C(0x0000020000000000) /* LV (RAID1) */
@ -228,6 +229,7 @@
#define lv_is_pool_data(lv) (((lv)->status & (CACHE_POOL_DATA | THIN_POOL_DATA)) ? 1 : 0)
#define lv_is_pool_metadata(lv) (((lv)->status & (CACHE_POOL_METADATA | THIN_POOL_METADATA)) ? 1 : 0)
#define lv_is_pool_metadata_spare(lv) (((lv)->status & POOL_METADATA_SPARE) ? 1 : 0)
#define lv_is_lockd_sanlock_lv(lv) (((lv)->status & LOCKD_SANLOCK_LV) ? 1 : 0)
#define lv_is_rlog(lv) (((lv)->status & REPLICATOR_LOG) ? 1 : 0)
@ -262,6 +264,14 @@ typedef enum {
THIN_DISCARDS_PASSDOWN,
} thin_discards_t;
typedef enum {
LOCK_TYPE_INVALID = -1,
LOCK_TYPE_NONE = 0,
LOCK_TYPE_CLVM = 1,
LOCK_TYPE_DLM = 2,
LOCK_TYPE_SANLOCK = 3,
} lock_type_t;
struct cmd_context;
struct format_handler;
struct labeller;
@ -640,9 +650,9 @@ int lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
* Return a handle to VG metadata.
*/
struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t flags);
const char *vgid, uint32_t flags, uint32_t lockd_state);
struct volume_group *vg_read_for_update(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t flags);
const char *vgid, uint32_t flags, uint32_t lockd_state);
/*
* Test validity of a VG handle.
@ -685,6 +695,7 @@ struct volume_group *vg_create(struct cmd_context *cmd, const char *vg_name);
int vg_remove_mdas(struct volume_group *vg);
int vg_remove_check(struct volume_group *vg);
void vg_remove_pvs(struct volume_group *vg);
int vg_remove_direct(struct volume_group *vg);
int vg_remove(struct volume_group *vg);
int vg_rename(struct cmd_context *cmd, struct volume_group *vg,
const char *new_name);
@ -863,12 +874,15 @@ struct lvcreate_params {
#define THIN_CHUNK_SIZE_CALC_METHOD_GENERIC 0x01
#define THIN_CHUNK_SIZE_CALC_METHOD_PERFORMANCE 0x02
int thin_chunk_size_calc_policy;
unsigned needs_lockd_init : 1;
const char *vg_name; /* only-used when VG is not yet opened (in /tools) */
const char *lv_name; /* all */
const char *origin_name; /* snap */
const char *pool_name; /* thin */
const char *lock_args;
/* Keep args given by the user on command line */
/* FIXME: create some more universal solution here */
#define PASS_ARG_CHUNK_SIZE 0x01
@ -1211,6 +1225,8 @@ struct vgcreate_params {
int clustered; /* FIXME: put this into a 'status' variable instead? */
uint32_t vgmetadatacopies;
const char *system_id;
const char *lock_type;
const char *lock_args;
};
int validate_major_minor(const struct cmd_context *cmd,
@ -1222,4 +1238,7 @@ int vgcreate_params_validate(struct cmd_context *cmd,
int validate_vg_rename_params(struct cmd_context *cmd,
const char *vg_name_old,
const char *vg_name_new);
int is_lockd_type(const char *lock_type);
#endif

View File

@ -31,6 +31,7 @@
#include "locking.h"
#include "archiver.h"
#include "defaults.h"
#include "lvmlockd.h"
#include <math.h>
#include <sys/param.h>
@ -557,20 +558,14 @@ void vg_remove_pvs(struct volume_group *vg)
}
}
int vg_remove(struct volume_group *vg)
int vg_remove_direct(struct volume_group *vg)
{
struct physical_volume *pv;
struct pv_list *pvl;
int ret = 1;
if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
return 0;
}
if (!vg_remove_mdas(vg)) {
log_error("vg_remove_mdas %s failed", vg->name);
unlock_vg(vg->cmd, VG_ORPHANS);
return 0;
}
@ -604,6 +599,8 @@ int vg_remove(struct volume_group *vg)
if (!lvmetad_vg_remove(vg))
stack;
lockd_vg_update(vg);
if (!backup_remove(vg->cmd, vg->name))
stack;
@ -612,6 +609,20 @@ int vg_remove(struct volume_group *vg)
else
log_error("Volume group \"%s\" not properly removed", vg->name);
return ret;
}
/*
 * Remove a VG: take the global orphan-PV write lock, delegate the
 * actual removal to vg_remove_direct(), then drop the lock.
 * Returns 1 on success, 0 on failure (including failure to take
 * the orphan lock).
 */
int vg_remove(struct volume_group *vg)
{
	int ret;
	if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
		log_error("Can't get lock for orphan PVs");
		return 0;
	}
	/* Removal result is returned after the lock is released. */
	ret = vg_remove_direct(vg);
	unlock_vg(vg->cmd, VG_ORPHANS);
	return ret;
}
@ -2428,6 +2439,7 @@ struct validate_hash {
struct dm_hash_table *lvname;
struct dm_hash_table *lvid;
struct dm_hash_table *pvid;
struct dm_hash_table *lv_lock_args;
};
/*
@ -2786,6 +2798,87 @@ int vg_validate(struct volume_group *vg)
if (vg_max_lv_reached(vg))
stack;
if (!(vhash.lv_lock_args = dm_hash_create(lv_count))) {
log_error("Failed to allocate lv_lock_args hash");
r = 0;
goto out;
}
if (is_lockd_type(vg->lock_type)) {
if (!vg->lock_args) {
log_error(INTERNAL_ERROR "VG %s with lock_type %s without lock_args",
vg->name, vg->lock_type);
r = 0;
}
if (vg_is_clustered(vg)) {
log_error(INTERNAL_ERROR "VG %s with lock_type %s is clustered",
vg->name, vg->lock_type);
r = 0;
}
if (vg->system_id && vg->system_id[0]) {
log_error(INTERNAL_ERROR "VG %s with lock_type %s has system_id %s",
vg->name, vg->lock_type, vg->system_id);
r = 0;
}
if (strcmp(vg->lock_type, "sanlock") && strcmp(vg->lock_type, "dlm")) {
log_error(INTERNAL_ERROR "VG %s has unknown lock_type %s",
vg->name, vg->lock_type);
r = 0;
}
} else {
if (vg->lock_args) {
log_error(INTERNAL_ERROR "VG %s has lock_args %s without lock_type",
vg->name, vg->lock_args);
r = 0;
}
}
dm_list_iterate_items(lvl, &vg->lvs) {
if (is_lockd_type(vg->lock_type)) {
if (lockd_lv_uses_lock(lvl->lv)) {
if (vg->skip_validate_lock_args) {
continue;
} else if (!lvl->lv->lock_args) {
log_error(INTERNAL_ERROR "LV %s/%s missing lock_args",
vg->name, lvl->lv->name);
r = 0;
} else if (!strcmp(vg->lock_type, "sanlock")) {
if (dm_hash_lookup(vhash.lv_lock_args, lvl->lv->lock_args)) {
log_error(INTERNAL_ERROR "LV %s/%s has duplicate lock_args %s.",
vg->name, lvl->lv->name, lvl->lv->lock_args);
r = 0;
}
if (!dm_hash_insert(vhash.lv_lock_args, lvl->lv->lock_args, lvl)) {
log_error("Failed to hash lvname.");
r = 0;
}
} else if (!strcmp(vg->lock_type, "dlm") && strcmp(lvl->lv->lock_args, "dlm")) {
log_error(INTERNAL_ERROR "LV %s/%s bad dlm lock_args %s",
vg->name, lvl->lv->name, lvl->lv->lock_args);
r = 0;
}
} else {
if (lvl->lv->lock_args) {
log_error(INTERNAL_ERROR "LV %s/%s shouldn't have lock_args",
vg->name, lvl->lv->name);
r = 0;
}
}
} else {
if (lvl->lv->lock_args) {
log_error(INTERNAL_ERROR "LV %s/%s with no lock_type has lock_args %s",
vg->name, lvl->lv->name, lvl->lv->lock_args);
r = 0;
}
}
}
out:
if (vhash.lvid)
dm_hash_destroy(vhash.lvid);
@ -2793,6 +2886,8 @@ out:
dm_hash_destroy(vhash.lvname);
if (vhash.pvid)
dm_hash_destroy(vhash.pvid);
if (vhash.lv_lock_args)
dm_hash_destroy(vhash.lv_lock_args);
return r;
}
@ -2806,8 +2901,19 @@ int vg_write(struct volume_group *vg)
struct dm_list *mdah;
struct pv_to_create *pv_to_create;
struct metadata_area *mda;
struct lv_list *lvl;
int revert = 0, wrote = 0;
dm_list_iterate_items(lvl, &vg->lvs) {
if (lvl->lv->lock_args && !strcmp(lvl->lv->lock_args, "pending")) {
if (!lockd_init_lv_args(vg->cmd, vg, lvl->lv, vg->lock_type, &lvl->lv->lock_args)) {
log_error("Cannot allocate lock for new LV.");
return 0;
}
lvl->lv->new_lock_args = 1;
}
}
if (!vg_validate(vg))
return_0;
@ -2974,6 +3080,8 @@ int vg_commit(struct volume_group *vg)
cache_updated = _vg_commit_mdas(vg);
lockd_vg_update(vg);
if (cache_updated) {
/* Instruct remote nodes to upgrade cached metadata. */
if (!remote_commit_cached_metadata(vg))
@ -3007,6 +3115,14 @@ int vg_commit(struct volume_group *vg)
void vg_revert(struct volume_group *vg)
{
struct metadata_area *mda;
struct lv_list *lvl;
dm_list_iterate_items(lvl, &vg->lvs) {
if (lvl->lv->new_lock_args) {
lockd_free_lv(vg->cmd, vg, lvl->lv->name, &lvl->lv->lvid.id[1], lvl->lv->lock_args);
lvl->lv->new_lock_args = 0;
}
}
release_vg(vg->vg_precommitted); /* VG is no longer needed */
vg->vg_precommitted = NULL;
@ -3821,6 +3937,16 @@ static struct volume_group *_vg_read_by_vgid(struct cmd_context *cmd,
release_vg(vg);
}
/*
* When using lvmlockd we should never reach this point.
* The VG is locked, then vg_read() is done, which gets
* the latest VG from lvmetad, or disk if lvmetad has
* been invalidated. When we get here the VG should
* always be cached and returned above.
*/
if (lvmlockd_use())
log_error(INTERNAL_ERROR "vg_read_by_vgid failed with lvmlockd");
/* Mustn't scan if memory locked: ensure cache gets pre-populated! */
if (critical_section())
return_NULL;
@ -4509,20 +4635,71 @@ static int _access_vg_clustered(struct cmd_context *cmd, struct volume_group *vg
return 1;
}
static int _access_vg_lock_type(struct cmd_context *cmd, struct volume_group *vg)
static int _access_vg_lock_type(struct cmd_context *cmd, struct volume_group *vg,
uint32_t lockd_state)
{
if (!is_real_vg(vg->name))
return 1;
if (cmd->lockd_vg_disable)
return 1;
/*
* Until lock_type support is added, reject any VG that has a lock_type.
* Local VG requires no lock from lvmlockd.
*/
if (vg->lock_type && vg->lock_type[0] && strcmp(vg->lock_type, "none")) {
log_error("Cannot access VG %s with unsupported lock_type %s.",
if (!is_lockd_type(vg->lock_type))
return 1;
/*
* When lvmlockd is not used, lockd VGs are ignored by lvm
* and cannot be used, with two exceptions:
*
* . The --shared option allows them to be revealed with
* reporting/display commands.
*
* . If a command asks to operate on one specifically
* by name, then an error is printed.
*/
if (!lvmlockd_use()) {
/*
* Some reporting/display commands have the --shared option
* (like --foreign) to allow them to reveal lockd VGs that
* are otherwise ignored. The --shared option must only be
* permitted in commands that read the VG for report or display,
* not any that write the VG or activate LVs.
*/
if (cmd->include_shared_vgs)
return 1;
/*
* Some commands want the error printed by vg_read, others by ignore_vg.
* Those using ignore_vg may choose to skip the error.
*/
if (cmd->vg_read_print_access_error) {
log_error("Cannot access VG %s with lock type %s that requires lvmlockd.",
vg->name, vg->lock_type);
}
return 0;
}
/*
* The lock request from lvmlockd failed. If the lock was ex,
* we cannot continue. If the lock was sh, we could also fail
* to continue but since the lock was sh, it means the VG is
* only being read, and it doesn't hurt to allow reading with
* no lock.
*/
if (lockd_state & LDST_FAIL) {
if (lockd_state & LDST_EX) {
log_error("Cannot access VG %s due to failed lock.", vg->name);
return 0;
} else {
log_warn("Reading VG %s without a lock.", vg->name);
return 1;
}
}
return 1;
}
@ -4582,18 +4759,16 @@ static int _access_vg_systemid(struct cmd_context *cmd, struct volume_group *vg)
}
/*
* Some commands always produce an error when accessing foreign VG.
* Some commands want the error printed by vg_read, others by ignore_vg.
* Those using ignore_vg may choose to skip the error.
*/
if (cmd->error_foreign_vgs) {
if (cmd->vg_read_print_access_error) {
log_error("Cannot access VG %s with system ID %s with local system ID %s.",
vg->name, vg->system_id, cmd->system_id);
return 0;
}
/*
* When include_foreign_vgs is 0 and error_foreign_vgs is 0,
* the result is to silently ignore foreign vgs.
*/
/* Silently ignore foreign vgs. */
return 0;
}
@ -4601,7 +4776,8 @@ static int _access_vg_systemid(struct cmd_context *cmd, struct volume_group *vg)
/*
* FIXME: move _vg_bad_status_bits() checks in here.
*/
static int _vg_access_permitted(struct cmd_context *cmd, struct volume_group *vg, uint32_t *failure)
static int _vg_access_permitted(struct cmd_context *cmd, struct volume_group *vg,
uint32_t lockd_state, uint32_t *failure)
{
if (!is_real_vg(vg->name)) {
/* Disallow use of LVM1 orphans when a host system ID is set. */
@ -4617,7 +4793,7 @@ static int _vg_access_permitted(struct cmd_context *cmd, struct volume_group *vg
return 0;
}
if (!_access_vg_lock_type(cmd, vg)) {
if (!_access_vg_lock_type(cmd, vg, lockd_state)) {
*failure |= FAILED_LOCK_TYPE;
return 0;
}
@ -4643,7 +4819,8 @@ static int _vg_access_permitted(struct cmd_context *cmd, struct volume_group *vg
*/
static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t lock_flags,
uint64_t status_flags, uint32_t misc_flags)
uint64_t status_flags, uint32_t misc_flags,
uint32_t lockd_state)
{
struct volume_group *vg = NULL;
int consistent = 1;
@ -4689,7 +4866,7 @@ static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const cha
goto bad;
}
if (!_vg_access_permitted(cmd, vg, &failure))
if (!_vg_access_permitted(cmd, vg, lockd_state, &failure))
goto bad;
/* consistent == 0 when VG is not found, but failed == FAILED_NOTFOUND */
@ -4765,7 +4942,7 @@ bad_no_unlock:
* *consistent = 1.
*/
struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t flags)
const char *vgid, uint32_t flags, uint32_t lockd_state)
{
uint64_t status = UINT64_C(0);
uint32_t lock_flags = LCK_VG_READ;
@ -4778,7 +4955,7 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name,
if (flags & READ_ALLOW_EXPORTED)
status &= ~EXPORTED_VG;
return _vg_lock_and_read(cmd, vg_name, vgid, lock_flags, status, flags);
return _vg_lock_and_read(cmd, vg_name, vgid, lock_flags, status, flags, lockd_state);
}
/*
@ -4787,9 +4964,9 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name,
* request the new metadata to be written and committed).
*/
struct volume_group *vg_read_for_update(struct cmd_context *cmd, const char *vg_name,
const char *vgid, uint32_t flags)
const char *vgid, uint32_t flags, uint32_t lockd_state)
{
return vg_read(cmd, vg_name, vgid, flags | READ_FOR_UPDATE);
return vg_read(cmd, vg_name, vgid, flags | READ_FOR_UPDATE, lockd_state);
}
/*
@ -5221,3 +5398,21 @@ const struct logical_volume *lv_ondisk(const struct logical_volume *lv)
return lvl->lv;
}
/*
 * Check if a lock_type uses lvmlockd.
 * If not (none, clvm), return 0.
 * If so (dlm, sanlock), return 1.
 */
int is_lockd_type(const char *lock_type)
{
	/* No lock_type at all means no lvmlockd involvement. */
	if (!lock_type)
		return 0;

	/* Only the dlm and sanlock lock managers go through lvmlockd. */
	return !strcmp(lock_type, "dlm") || !strcmp(lock_type, "sanlock");
}

View File

@ -21,6 +21,7 @@
#include "activate.h"
#include "lv_alloc.h"
#include "lvm-string.h"
#include "lvmlockd.h"
static int _lv_is_raid_with_tracking(const struct logical_volume *lv,
struct logical_volume **tracking)
@ -1087,6 +1088,12 @@ int lv_raid_split(struct logical_volume *lv, const char *split_name,
dm_list_init(&removal_list);
dm_list_init(&data_list);
if (is_lockd_type(lv->vg->lock_type)) {
log_error("Splitting raid image is not allowed with lock_type %s",
lv->vg->lock_type);
return 0;
}
if ((old_count - new_count) != 1) {
log_error("Unable to split more than one image from %s/%s",
lv->vg->name, lv->name);

View File

@ -566,7 +566,7 @@ int cmd_vg_read(struct cmd_context *cmd, struct dm_list *cmd_vgs)
/* Iterate through alphabetically ordered cmd_vg list */
dm_list_iterate_items(cvl, cmd_vgs) {
cvl->vg = vg_read(cmd, cvl->vg_name, cvl->vgid, cvl->flags);
cvl->vg = vg_read(cmd, cvl->vg_name, cvl->vgid, cvl->flags, 0);
if (vg_read_error(cvl->vg)) {
log_debug_metadata("Failed to vg_read %s", cvl->vg_name);
return 0;
@ -644,7 +644,7 @@ int lv_read_replicator_vgs(const struct logical_volume *lv)
dm_list_iterate_items(rsite, &first_seg(lv)->replicator->rsites) {
if (!rsite->vg_name)
continue;
vg = vg_read(lv->vg->cmd, rsite->vg_name, 0, 0); // READ_WITHOUT_LOCK
vg = vg_read(lv->vg->cmd, rsite->vg_name, 0, 0, 0); // READ_WITHOUT_LOCK
if (vg_read_error(vg)) {
log_error("Unable to read volume group %s",
rsite->vg_name);

View File

@ -20,6 +20,7 @@
#include "toolcontext.h"
#include "lvmcache.h"
#include "archiver.h"
#include "lvmlockd.h"
struct volume_group *alloc_vg(const char *pool_name, struct cmd_context *cmd,
const char *vg_name)
@ -134,6 +135,16 @@ char *vg_system_id_dup(const struct volume_group *vg)
return dm_pool_strdup(vg->vgmem, vg->system_id ? : vg->lvm1_system_id ? : "");
}
/*
 * Duplicate the VG lock_type string into the VG's memory pool.
 * Returns an empty string when no lock_type is set, or NULL on
 * allocation failure (dm_pool_strdup result).
 *
 * Fix: the original expression "vg->lock_type ? : vg->lock_type ? : """
 * tested the same field twice — a copy-paste of vg_system_id_dup's
 * two-level fallback.  A single fallback to "" is equivalent and clear.
 */
char *vg_lock_type_dup(const struct volume_group *vg)
{
	return dm_pool_strdup(vg->vgmem, vg->lock_type ? : "");
}
/*
 * Duplicate the VG lock_args string into the VG's memory pool.
 * Returns an empty string when no lock_args is set, or NULL on
 * allocation failure (dm_pool_strdup result).
 *
 * Fix: the original expression "vg->lock_args ? : vg->lock_args ? : """
 * tested the same field twice; a single fallback to "" is equivalent.
 */
char *vg_lock_args_dup(const struct volume_group *vg)
{
	return dm_pool_strdup(vg->vgmem, vg->lock_args ? : "");
}
char *vg_uuid_dup(const struct volume_group *vg)
{
return id_format_and_copy(vg->vgmem, &vg->id);
@ -637,6 +648,19 @@ int vg_set_system_id(struct volume_group *vg, const char *system_id)
return 1;
}
/*
 * Store the lock_type string in the VG (pool-allocated copy).
 * A NULL argument is treated as lock type "none".
 * Returns 1 on success, 0 when the pool allocation fails.
 */
int vg_set_lock_type(struct volume_group *vg, const char *lock_type)
{
	const char *type = lock_type ? lock_type : "none";

	vg->lock_type = dm_pool_strdup(vg->vgmem, type);
	if (!vg->lock_type) {
		log_error("vg_set_lock_type %s no mem", type);
		return 0;
	}

	return 1;
}
char *vg_attr_dup(struct dm_pool *mem, const struct volume_group *vg)
{
char *repstr;
@ -651,7 +675,14 @@ char *vg_attr_dup(struct dm_pool *mem, const struct volume_group *vg)
repstr[2] = (vg_is_exported(vg)) ? 'x' : '-';
repstr[3] = (vg_missing_pv_count(vg)) ? 'p' : '-';
repstr[4] = alloc_policy_char(vg->alloc);
repstr[5] = (vg_is_clustered(vg)) ? 'c' : '-';
if (vg_is_clustered(vg))
repstr[5] = 'c';
else if (is_lockd_type(vg->lock_type))
repstr[5] = 's';
else
repstr[5] = '-';
return repstr;
}
@ -706,7 +737,7 @@ int vgreduce_single(struct cmd_context *cmd, struct volume_group *vg,
vg->extent_count -= pv_pe_count(pv);
orphan_vg = vg_read_for_update(cmd, vg->fid->fmt->orphan_vg_name,
NULL, 0);
NULL, 0, 0);
if (vg_read_error(orphan_vg))
goto bad;

View File

@ -49,6 +49,7 @@ struct volume_group {
struct dm_list *cmd_vgs;/* List of wanted/locked and opened VGs */
uint32_t cmd_missing_vgs;/* Flag marks missing VG */
uint32_t seqno; /* Metadata sequence number */
unsigned skip_validate_lock_args : 1;
/*
* The parsed on-disk copy of this VG; is NULL if this is the on-disk
@ -71,6 +72,7 @@ struct volume_group {
const char *system_id;
char *lvm1_system_id;
const char *lock_type;
const char *lock_args;
uint32_t extent_size;
uint32_t extent_count;
@ -151,6 +153,7 @@ struct volume_group {
struct dm_hash_table *hostnames; /* map of creation hostnames */
struct logical_volume *pool_metadata_spare_lv; /* one per VG */
struct logical_volume *sanlock_lv; /* one per VG */
};
struct volume_group *alloc_vg(const char *pool_name, struct cmd_context *cmd,
@ -166,11 +169,14 @@ void free_orphan_vg(struct volume_group *vg);
char *vg_fmt_dup(const struct volume_group *vg);
char *vg_name_dup(const struct volume_group *vg);
char *vg_system_id_dup(const struct volume_group *vg);
char *vg_lock_type_dup(const struct volume_group *vg);
char *vg_lock_args_dup(const struct volume_group *vg);
uint32_t vg_seqno(const struct volume_group *vg);
uint64_t vg_status(const struct volume_group *vg);
int vg_set_alloc_policy(struct volume_group *vg, alloc_policy_t alloc);
int vg_set_clustered(struct volume_group *vg, int clustered);
int vg_set_system_id(struct volume_group *vg, const char *system_id);
int vg_set_lock_type(struct volume_group *vg, const char *lock_type);
uint64_t vg_size(const struct volume_group *vg);
uint64_t vg_free(const struct volume_group *vg);
uint64_t vg_extent_size(const struct volume_group *vg);

View File

@ -105,6 +105,9 @@
/* Use lvmetad by default. */
#undef DEFAULT_USE_LVMETAD
/* Use lvmlockd by default. */
#undef DEFAULT_USE_LVMLOCKD
/* Use lvmpolld by default. */
#undef DEFAULT_USE_LVMPOLLD
@ -534,6 +537,12 @@
/* Define to 1 to include code that uses lvmetad. */
#undef LVMETAD_SUPPORT
/* Path to lvmlockd pidfile. */
#undef LVMLOCKD_PIDFILE
/* Define to 1 to include code that uses lvmlockd. */
#undef LVMLOCKD_SUPPORT
/* Path to lvmpolld pidfile. */
#undef LVMPOLLD_PIDFILE

View File

@ -84,7 +84,8 @@ FIELD(LVS, lv, STR, "Meta", lvid, 4, metadatalv, metadata_lv, "For thin and cach
FIELD(LVS, lv, STR, "Pool", lvid, 4, poollv, pool_lv, "For thin volumes, the thin pool LV for this volume.", 0)
FIELD(LVS, lv, STR_LIST, "LV Tags", tags, 7, tags, lv_tags, "Tags, if any.", 0)
FIELD(LVS, lv, STR, "LProfile", lvid, 8, lvprofile, lv_profile, "Configuration profile attached to this LV.", 0)
FIELD(LVS, lv, TIM, "Time", lvid, 26, lvtime, lv_time, "Creation time of the LV, if known", 0)
FIELD(LVS, lv, STR, "Lock Args", lvid, 9, lvlockargs, lv_lockargs, "Lock args of the LV used by lvmlockd.", 0)
FIELD(LVS, lv, STR, "Time", lvid, 26, lvtime, lv_time, "Creation time of the LV, if known", 0)
FIELD(LVS, lv, STR, "Host", lvid, 10, lvhost, lv_host, "Creation host of the LV, if known.", 0)
FIELD(LVS, lv, STR_LIST, "Modules", lvid, 7, modules, lv_modules, "Kernel device-mapper modules required for this LV.", 0)
@ -143,6 +144,8 @@ FIELD(VGS, vg, SIZ, "VSize", cmd, 5, vgsize, vg_size, "Total size of VG in curre
FIELD(VGS, vg, SIZ, "VFree", cmd, 5, vgfree, vg_free, "Total amount of free space in current units.", 0)
FIELD(VGS, vg, STR, "SYS ID", cmd, 6, vgsystemid, vg_sysid, "System ID of the VG indicating which host owns it.", 0)
FIELD(VGS, vg, STR, "System ID", cmd, 9, vgsystemid, vg_systemid, "System ID of the VG indicating which host owns it.", 0)
FIELD(VGS, vg, STR, "Lock Type", cmd, 9, vglocktype, vg_locktype, "Lock type of the VG used by lvmlockd.", 0)
FIELD(VGS, vg, STR, "Lock Args", cmd, 9, vglockargs, vg_lockargs, "Lock args of the VG used by lvmlockd.", 0)
FIELD(VGS, vg, SIZ, "Ext", extent_size, 3, size32, vg_extent_size, "Size of Physical Extents in current units.", 0)
FIELD(VGS, vg, NUM, "#Ext", extent_count, 4, uint32, vg_extent_count, "Total number of Physical Extents.", 0)
FIELD(VGS, vg, NUM, "Free", free_count, 4, uint32, vg_free_count, "Total number of unallocated Physical Extents.", 0)

View File

@ -350,6 +350,8 @@ GET_LV_STR_PROPERTY_FN(lv_active, lv_active_dup(lv->vg->vgmem, lv))
#define _lv_active_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_profile, lv_profile_dup(lv->vg->vgmem, lv))
#define _lv_profile_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_lockargs, lv_lock_args_dup(lv->vg->vgmem, lv))
#define _lv_lockargs_set prop_not_implemented_set
/* VG */
GET_VG_STR_PROPERTY_FN(vg_fmt, vg_fmt_dup(vg))
@ -368,6 +370,10 @@ GET_VG_STR_PROPERTY_FN(vg_sysid, vg_system_id_dup(vg))
#define _vg_sysid_set prop_not_implemented_set
GET_VG_STR_PROPERTY_FN(vg_systemid, vg_system_id_dup(vg))
#define _vg_systemid_set prop_not_implemented_set
GET_VG_STR_PROPERTY_FN(vg_locktype, vg_lock_type_dup(vg))
#define _vg_locktype_set prop_not_implemented_set
GET_VG_STR_PROPERTY_FN(vg_lockargs, vg_lock_args_dup(vg))
#define _vg_lockargs_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_extent_size, (SECTOR_SIZE * vg->extent_size))
#define _vg_extent_size_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_extent_count, vg->extent_count)

View File

@ -377,6 +377,16 @@ static int _lvprofile_disp(struct dm_report *rh, struct dm_pool *mem,
return _field_set_value(field, "", NULL);
}
/* Report field: the LV lock_args string used by lvmlockd ("" when unset). */
static int _lvlockargs_disp(struct dm_report *rh, struct dm_pool *mem,
			    struct dm_report_field *field,
			    const void *data, void *private)
{
	const struct logical_volume *lv = (const struct logical_volume *) data;
	const char *str = lv->lock_args;

	if (!str)
		str = "";

	return _string_disp(rh, mem, field, &str, private);
}
static int _vgfmt_disp(struct dm_report *rh, struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private)
@ -1108,6 +1118,26 @@ static int _vgsystemid_disp(struct dm_report *rh, struct dm_pool *mem,
return _string_disp(rh, mem, field, &repstr, private);
}
/* Report field: the VG lock_type string used by lvmlockd ("" when unset). */
static int _vglocktype_disp(struct dm_report *rh, struct dm_pool *mem,
			    struct dm_report_field *field,
			    const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	const char *str = vg->lock_type;

	if (!str)
		str = "";

	return _string_disp(rh, mem, field, &str, private);
}
/* Report field: the VG lock_args string used by lvmlockd ("" when unset). */
static int _vglockargs_disp(struct dm_report *rh, struct dm_pool *mem,
			    struct dm_report_field *field,
			    const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	const char *str = vg->lock_args;

	if (!str)
		str = "";

	return _string_disp(rh, mem, field, &str, private);
}
static int _uuid_disp(struct dm_report *rh __attribute__((unused)), struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private __attribute__((unused)))

View File

@ -21,7 +21,6 @@
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h> // ENOMEM
daemon_handle daemon_open(daemon_info i)
@ -100,7 +99,13 @@ daemon_reply daemon_send(daemon_handle h, daemon_request rq)
{
struct buffer buffer;
daemon_reply reply = { 0 };
assert(h.socket_fd >= 0);
if (h.socket_fd < 0) {
log_error(INTERNAL_ERROR "Daemon send: socket fd cannot be negative %d", h.socket_fd);
reply.error = EINVAL;
return reply;
}
buffer = rq.buffer;
if (!buffer.mem)
@ -109,7 +114,12 @@ daemon_reply daemon_send(daemon_handle h, daemon_request rq)
return reply;
}
assert(buffer.mem);
if (!buffer.mem) {
log_error(INTERNAL_ERROR "Daemon send: no memory available");
reply.error = ENOMEM;
return reply;
}
if (!buffer_write(h.socket_fd, &buffer))
reply.error = errno;

View File

@ -1,7 +1,6 @@
#include "daemon-server.h"
#include "daemon-log.h"
#include <syslog.h>
#include <assert.h>
struct backend {
int id;
@ -129,7 +128,9 @@ void daemon_log_multi(log_state *s, int type, const char *prefix, const char *ms
void daemon_log_enable(log_state *s, int outlet, int type, int enable)
{
assert(type < 32);
if (type >= 32)
return;
if (enable)
s->log_config[type] |= outlet;
else

View File

@ -218,7 +218,7 @@ static vg_t _lvm_vg_open(lvm_t libh, const char *vgname, const char *mode,
return NULL;
}
vg = vg_read((struct cmd_context *)libh, vgname, NULL, internal_flags);
vg = vg_read((struct cmd_context *)libh, vgname, NULL, internal_flags, 0);
if (vg_read_error(vg)) {
/* FIXME: use log_errno either here in inside vg_read */
release_vg(vg);

View File

@ -46,6 +46,12 @@ else
LVMPOLLD =
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
LVMLOCKD = lvmlockd.8
else
LVMLOCKD =
endif
MAN5=lvm.conf.5
MAN7=lvmsystemid.7
MAN8=lvm-config.8 lvm-dumpconfig.8 lvm-lvpoll.8 \
@ -56,7 +62,8 @@ MAN8=lvm-config.8 lvm-dumpconfig.8 lvm-lvpoll.8 \
pvresize.8 pvs.8 pvscan.8 vgcfgbackup.8 vgcfgrestore.8 vgchange.8 \
vgck.8 vgcreate.8 vgconvert.8 vgdisplay.8 vgexport.8 vgextend.8 \
vgimport.8 vgimportclone.8 vgmerge.8 vgmknodes.8 vgreduce.8 vgremove.8 \
vgrename.8 vgs.8 vgscan.8 vgsplit.8 $(FSADMMAN) $(LVMETAD) $(LVMPOLLD)
vgrename.8 vgs.8 vgscan.8 vgsplit.8 $(FSADMMAN) $(LVMETAD) $(LVMPOLLD) \
$(LVMLOCKD)
ifneq ("@CLVMD@", "none")
MAN8CLUSTER=clvmd.8

755
man/lvmlockd.8.in Normal file
View File

@ -0,0 +1,755 @@
.TH "LVMLOCKD" "8" "LVM TOOLS #VERSION#" "Red Hat, Inc" "\""
.SH NAME
lvmlockd \(em LVM locking daemon
.SH DESCRIPTION
LVM commands use lvmlockd to coordinate access to shared storage.
.br
When LVM is used on devices shared by multiple hosts, locks will:
.IP \[bu] 2
coordinate reading and writing of LVM metadata
.IP \[bu] 2
validate caching of LVM metadata
.IP \[bu] 2
prevent concurrent activation of logical volumes
.P
lvmlockd uses an external lock manager to perform basic locking.
.br
Lock manager (lock type) options are:
.IP \[bu] 2
sanlock: places locks on disk within LVM storage.
.IP \[bu] 2
dlm: uses network communication and a cluster manager.
.P
.SH OPTIONS
lvmlockd [options]
For default settings, see lvmlockd -h.
.B --help | -h
Show this help information.
.B --version | -V
Show version of lvmlockd.
.B --test | -T
Test mode, do not call lock manager.
.B --foreground | -f
Don't fork.
.B --daemon-debug | -D
Don't fork and print debugging to stdout.
.B --pid-file | -p
.I path
Set path to the pid file.
.B --socket-path | -s
.I path
Set path to the socket to listen on.
.B --syslog-priority | -S err|warning|debug
Write log messages from this level up to syslog.
.B --gl-type | -g
.I str
Set global lock type to be sanlock|dlm.
.B --host-id | -i
.I num
Set the local sanlock host id.
.B --host-id-file | -F
.I path
A file containing the local sanlock host_id.
.B --adopt | -A 0|1
Adopt locks from a previous instance of lvmlockd.
.SH USAGE
.SS Initial set up
Using LVM with lvmlockd for the first time includes some one-time set up
steps:
.SS 1. choose a lock manager
.I dlm
.br
If dlm (or corosync) are already being used by other cluster
software, then select dlm. dlm uses corosync which requires additional
configuration beyond the scope of this document. See corosync and dlm
documentation for instructions on configuration, setup and usage.
.I sanlock
.br
Choose sanlock if dlm/corosync are not otherwise required.
sanlock does not depend on any clustering software or configuration.
.SS 2. configure hosts to use lvmlockd
On all hosts running lvmlockd, configure lvm.conf:
.nf
locking_type = 1
use_lvmlockd = 1
use_lvmetad = 1
.fi
.I sanlock
.br
Assign each host a unique host_id in the range 1-2000 by setting
.br
/etc/lvm/lvmlocal.conf local/host_id = <num>
.SS 3. start lvmlockd
Use a service/init file if available, or just run "lvmlockd".
.SS 4. start lock manager
.I sanlock
.br
systemctl start wdmd sanlock
.I dlm
.br
Follow external clustering documentation when applicable, otherwise:
.br
systemctl start corosync dlm
.SS 5. create VGs on shared devices
vgcreate --shared <vg_name> <devices>
The vgcreate --shared option sets the VG lock type to sanlock or dlm
depending on which lock manager is running. LVM commands will perform
locking for the VG using lvmlockd.
.SS 6. start VGs on all hosts
vgchange --lock-start
lvmlockd requires shared VGs to be "started" before they are used. This
is a lock manager operation to start/join the VG lockspace, and it may
take some time. Until the start completes, locks for the VG are not
available. LVM commands are allowed to read the VG while start is in
progress. (A service/init file can be used to start VGs.)
.SS 7. create and activate LVs
Standard lvcreate and lvchange commands are used to create and activate
LVs in a lockd VG.
An LV activated exclusively on one host cannot be activated on another.
When multiple hosts need to use the same LV concurrently, the LV can be
activated with a shared lock (see lvchange options -aey vs -asy.)
(Shared locks are disallowed for certain LV types that cannot be used from
multiple hosts.)
.SS Normal start up and shut down
After initial set up, start up and shut down include the following general
steps. They can be performed manually or using the system init/service
manager.
.IP \[bu] 2
start lvmetad
.IP \[bu] 2
start lvmlockd
.IP \[bu] 2
start lock manager
.IP \[bu] 2
vgchange --lock-start
.IP \[bu] 2
activate LVs in shared VGs
.P
The shut down sequence is the reverse:
.IP \[bu] 2
deactivate LVs in shared VGs
.IP \[bu] 2
vgchange --lock-stop
.IP \[bu] 2
stop lock manager
.IP \[bu] 2
stop lvmlockd
.IP \[bu] 2
stop lvmetad
.P
.SH TOPICS
.SS locking terms
The following terms are used to distinguish VGs that require locking from
those that do not.
.I "lockd VG"
A "lockd VG" is a shared VG that has a "lock type" of dlm or sanlock.
Using it requires lvmlockd. These VGs exist on shared storage that is
visible to multiple hosts. LVM commands use lvmlockd to perform locking
for these VGs when they are used.
If the lock manager for a lock type is not available (e.g. not started or
failed), lvmlockd is not able to acquire locks from it, and LVM commands
are unable to fully use VGs with the given lock type. Commands generally
allow reading VGs in this condition, but changes and activation are not
allowed. Maintaining a properly running lock manager can require
background knowledge not covered here.
.I "local VG"
A "local VG" is meant to be used by a single host. It has no lock type or
lock type "none". LVM commands and lvmlockd do not perform locking for
these VGs. A local VG typically exists on local (non-shared) devices and
cannot be used concurrently from different hosts.
If a local VG does exist on shared devices, it should be owned by a single
host by having its system ID set, see
.BR lvmsystemid (7).
Only the host with a matching system ID can use the local VG. A VG
with no lock type and no system ID should be excluded from all but one
host using lvm.conf filters. Without any of these protections, a local VG
on shared devices can be easily damaged or destroyed.
.I "clvm VG"
A "clvm VG" is a VG on shared storage (like a lockd VG) that requires
clvmd for clustering. See below for converting a clvm VG to a lockd VG.
.SS lockd VGs from hosts not using lvmlockd
Only hosts that will use lockd VGs should be configured to run lvmlockd.
However, devices with lockd VGs may be visible from hosts not using
lvmlockd. From a host not using lvmlockd, visible lockd VGs are ignored
in the same way as foreign VGs, i.e. those with a foreign system ID, see
.BR lvmsystemid (7).
.SS vgcreate differences
Forms of the vgcreate command:
.B vgcreate <vg_name> <devices>
.IP \[bu] 2
Creates a local VG with the local system ID when neither lvmlockd nor clvm are configured.
.IP \[bu] 2
Creates a local VG with the local system ID when lvmlockd is configured.
.IP \[bu] 2
Creates a clvm VG when clvm is configured.
.P
.B vgcreate --shared <vg_name> <devices>
.IP \[bu] 2
Requires lvmlockd to be configured (use_lvmlockd=1).
.IP \[bu] 2
Creates a lockd VG with lock type sanlock|dlm depending on which is running.
.IP \[bu] 2
LVM commands request locks from lvmlockd to use the VG.
.IP \[bu] 2
lvmlockd obtains locks from the selected lock manager.
.P
.B vgcreate -c|--clustered y <vg_name> <devices>
.IP \[bu] 2
Requires clvm to be configured (locking_type=3).
.IP \[bu] 2
Creates a clvm VG with the "clustered" flag.
.IP \[bu] 2
LVM commands request locks from clvmd to use the VG.
.P
.SS using lockd VGs
When use_lvmlockd is first enabled, and before the first lockd VG is
created, no global lock will exist, and LVM commands will try and fail to
acquire it. LVM commands will report a warning until the first lockd VG
is created which will create the global lock. Before the global lock
exists, VGs can still be read, but commands that require the global lock
exclusively will fail.
When a new lockd VG is created, its lockspace is automatically started on
the host that creates the VG. Other hosts will need to run 'vgchange
--lock-start' to start the new VG before they can use it.
From the 'vgs' reporting command, lockd VGs are indicated by "s" (for
shared) in the sixth attr field. The specific lock type and lock args
for a lockd VG can be displayed with 'vgs -o+locktype,lockargs'.
.SS starting and stopping VGs
Starting a lockd VG (vgchange --lock-start) causes the lock manager to
start or join the lockspace for the VG. This makes locks for the VG
accessible to the host. Stopping the VG leaves the lockspace and makes
locks for the VG inaccessible to the host.
Lockspaces should be started as early as possible because starting
(joining) a lockspace can take a long time (potentially minutes after a
host failure when using sanlock.) A VG can be started after all the
following are true:
.nf
- lvmlockd is running
- lock manager is running
- VG is visible to the system
.fi
All lockd VGs can be started/stopped using:
.br
vgchange --lock-start
.br
vgchange --lock-stop
Individual VGs can be started/stopped using:
.br
vgchange --lock-start <vg_name> ...
.br
vgchange --lock-stop <vg_name> ...
To make vgchange not wait for start to complete:
.br
vgchange --lock-start --lock-opt nowait
.br
vgchange --lock-start --lock-opt nowait <vg_name>
To stop all lockspaces and wait for all to complete:
.br
lvmlockctl --stop-lockspaces --wait
To start only selected lockd VGs, use the lvm.conf
activation/lock_start_list. When defined, only VG names in this list are
started by vgchange. If the list is not defined (the default), all
visible lockd VGs are started. To start only "vg1", use the following
lvm.conf configuration:
.nf
activation {
lock_start_list = [ "vg1" ]
...
}
.fi
.SS automatic starting and automatic activation
Scripts or programs on a host that automatically start VGs will use the
"auto" option to indicate that the command is being run automatically by
the system:
vgchange --lock-start --lock-opt auto [vg_name ...]
Without any additional configuration, including the "auto" option has no
effect; all VGs are started unless restricted by lock_start_list.
However, when the lvm.conf activation/auto_lock_start_list is defined, the
auto start command performs an additional filtering phase to all VGs being
started, testing each VG name against the auto_lock_start_list. The
auto_lock_start_list defines lockd VGs that will be started by the auto
start command. Visible lockd VGs not included in the list are ignored by
the auto start command. If the list is undefined, all VG names pass this
filter. (The lock_start_list is also still used to filter all VGs.)
The auto_lock_start_list allows a user to select certain lockd VGs that
should be automatically started by the system (or indirectly, those that
should not).
To use auto activation of lockd LVs (see auto_activation_volume_list),
auto starting of the corresponding lockd VGs is necessary.
.SS locking activity
To optimize the use of LVM with lvmlockd, consider the three kinds of
locks in lvmlockd and when they are used:
.I GL lock
The global lock (GL lock) is associated with global information, which is
information not isolated to a single VG. This includes:
- The global VG namespace.
.br
- The set of orphan PVs and unused devices.
.br
- The properties of orphan PVs, e.g. PV size.
The global lock is used in shared mode by commands that read this
information, or in exclusive mode by commands that change it.
The command 'vgs' acquires the global lock in shared mode because it
reports the list of all VG names.
The vgcreate command acquires the global lock in exclusive mode because it
creates a new VG name, and it takes a PV from the list of unused PVs.
When an LVM command is given a tag argument, or uses select, it must read
all VGs to match the tag or selection, which causes the global lock to be
acquired. To avoid use of the global lock, avoid using tags and select,
and specify VG name arguments.
When use_lvmlockd is enabled, LVM commands attempt to acquire the global
lock even if no lockd VGs exist. For this reason, lvmlockd should not be
enabled unless lockd VGs will be used.
.I VG lock
A VG lock is associated with each VG. The VG lock is acquired in shared
mode to read the VG and in exclusive mode to change the VG (modify the VG
metadata). This lock serializes modifications to a VG with all other LVM
commands on other hosts.
The command 'vgs' will not only acquire the GL lock to read the list of
all VG names, but will acquire the VG lock for each VG prior to reading
it.
The command 'vgs <vg_name>' does not acquire the GL lock (it does not need
the list of all VG names), but will acquire the VG lock on each VG name
argument.
.I LV lock
An LV lock is acquired before the LV is activated, and is released after
the LV is deactivated. If the LV lock cannot be acquired, the LV is not
activated. LV locks are persistent and remain in place after the
activation command is done. GL and VG locks are transient, and are held
only while an LVM command is running.
.I retries
If a request for a GL or VG lock fails due to a lock conflict with another
host, lvmlockd automatically retries for a short time before returning a
failure to the LVM command. The LVM command will then retry the entire
lock request a number of times specified by global/lock_retries before
failing. If a request for an LV lock fails due to a lock conflict, the
command fails immediately.
.SS sanlock global lock
There are some special cases related to the global lock in sanlock VGs.
The global lock exists in one of the sanlock VGs. The first sanlock VG
created will contain the global lock. Subsequent sanlock VGs will each
contain disabled global locks that can be enabled later if necessary.
The VG containing the global lock must be visible to all hosts using
sanlock VGs. This can be a reason to create a small sanlock VG, visible
to all hosts, and dedicated to just holding the global lock. While not
required, this strategy can help to avoid extra work in the future if VGs
are moved or removed.
The vgcreate command typically acquires the global lock, but in the case
of the first sanlock VG, there will be no global lock to acquire until the
initial vgcreate is complete. So, creating the first sanlock VG is a
special case that skips the global lock.
vgcreate for a sanlock VG determines it is the first one to exist if no
other sanlock VGs are visible. It is possible that other sanlock VGs do
exist but are not visible or started on the host running vgcreate. This
raises the possibility of more than one global lock existing. If this
happens, commands will warn of the condition, and it should be manually
corrected.
If the situation arises where more than one sanlock VG contains a global
lock, the global lock should be manually disabled in all but one of them
with the command:
lvmlockctl --gl-disable <vg_name>
(The one VG with the global lock enabled must be visible to all hosts.)
An opposite problem can occur if the VG holding the global lock is
removed. In this case, no global lock will exist following the vgremove,
and subsequent LVM commands will fail to acquire it. In this case, the
global lock needs to be manually enabled in one of the remaining sanlock
VGs with the command:
lvmlockctl --gl-enable <vg_name>
A small sanlock VG dedicated to holding the global lock can avoid the case
where the GL lock must be manually enabled after a vgremove.
.SS changing lock type
To change a local VG to a lockd VG:
vgchange --lock-type sanlock|dlm <vg_name>
All LVs must be inactive to change the lock type.
To change a clvm VG to a lockd VG:
vgchange --lock-type sanlock|dlm <vg_name>
Changing a lockd VG to a local VG is not yet generally allowed.
(It can be done partially in certain recovery cases.)
.SS vgremove of a sanlock VG
vgremove of a sanlock VG will fail if other hosts have the VG started.
Run vgchange --lock-stop <vg_name> on all other hosts before vgremove.
(It may take several seconds before vgremove recognizes that all hosts
have stopped.)
.SS shared LVs
When an LV is used concurrently from multiple hosts (e.g. by a
multi-host/cluster application or file system), the LV can be activated on
multiple hosts concurrently using a shared lock.
To activate the LV with a shared lock: lvchange -asy vg/lv.
With lvmlockd, an unspecified activation mode is always exclusive, i.e.
-ay defaults to -aey.
If the LV type does not allow the LV to be used concurrently from multiple
hosts, then a shared activation lock is not allowed and the lvchange
command will report an error. LV types that cannot be used concurrently
from multiple hosts include thin, cache, raid, mirror, and snapshot.
lvextend on LV with shared locks is not yet allowed. The LV must be
deactivated, or activated exclusively to run lvextend.
.SS recover from lost PV holding sanlock locks
A number of special manual steps must be performed to restore sanlock
locks if the PV holding the locks is lost. Contact the LVM group for
help with this process.
.\" This is not clean or safe enough to suggest using without help.
.\"
.\" .SS recover from lost PV holding sanlock locks
.\"
.\" In a sanlock VG, the locks are stored on a PV within the VG. If this PV
.\" is lost, the locks need to be reconstructed as follows:
.\"
.\" 1. Enable the unsafe lock modes option in lvm.conf so that default locking requirements can be overridden.
.\"
.\" .nf
.\" allow_override_lock_modes = 1
.\" .fi
.\"
.\" 2. Remove missing PVs and partial LVs from the VG.
.\"
.\" Warning: this is a dangerous operation. Read the man page
.\" for vgreduce first, and try running with the test option.
.\" Verify that the only missing PV is the PV holding the sanlock locks.
.\"
.\" .nf
.\" vgreduce --removemissing --force --lock-gl na --lock-vg na <vg>
.\" .fi
.\"
.\" 3. If step 2 does not remove the internal/hidden "lvmlock" lv, it should be removed.
.\"
.\" .nf
.\" lvremove --lock-vg na --lock-lv na <vg>/lvmlock
.\" .fi
.\"
.\" 4. Change the lock type to none.
.\"
.\" .nf
.\" vgchange --lock-type none --force --lock-gl na --lock-vg na <vg>
.\" .fi
.\"
.\" 5. VG space is needed to recreate the locks. If there is not enough space, vgextend the vg.
.\"
.\" 6. Change the lock type back to sanlock. This creates a new internal
.\" lvmlock lv, and recreates locks.
.\"
.\" .nf
.\" vgchange --lock-type sanlock <vg>
.\" .fi
.SS locking system failures
.B lvmlockd failure
If lvmlockd fails or is killed while holding locks, the locks are orphaned
in the lock manager. lvmlockd can be restarted, and it will adopt the
locks from the lock manager that had been held by the previous instance.
.B dlm/corosync failure
If dlm or corosync fail, the clustering system will fence the host using a
method configured within the dlm/corosync clustering environment.
LVM commands on other hosts will be blocked from acquiring any locks until
the dlm/corosync recovery process is complete.
.B sanlock lock storage failure
If access to the device containing the VG's locks is lost, sanlock cannot
renew its leases for locked LVs. This means that the host could soon lose
the lease to another host which could activate the LV exclusively.
sanlock is designed to never reach the point where two hosts hold the
same lease exclusively at once, so the same LV should never be active on
two hosts at once when activated exclusively.
The current method of handling this involves no action from lvmlockd,
while allowing sanlock to protect the leases itself. This produces a safe
but potentially inconvenient result. Doing nothing from lvmlockd leads to
the host's LV locks not being released, which leads to sanlock using the
local watchdog to reset the host before another host can acquire any locks
held by the local host.
LVM commands on other hosts will be blocked from acquiring locks held by
the failed/reset host until the sanlock recovery time expires (2-4
minutes). This includes activation of any LVs that were locked by the
failed host. It also includes GL/VG locks held by any LVM commands that
happened to be running on the failed host at the time of the failure.
(In the future, lvmlockd may have the option to suspend locked LVs in
response to the sanlock leases expiring. This would avoid the need for
sanlock to reset the host.)
.B sanlock daemon failure
If the sanlock daemon fails or exits while a lockspace is started, the
local watchdog will reset the host. See previous section for the impact
on other hosts.
.SS changing dlm cluster name
When a dlm VG is created, the cluster name is saved in the VG metadata for
the new VG. To use the VG, a host must be in the named cluster. If the
cluster name is changed, or the VG is moved to a different cluster, the
cluster name for the dlm VG must be changed. To do this:
1. Ensure the VG is not being used by any hosts.
2. The new cluster must be active on the node making the change.
.br
The current dlm cluster name can be seen by:
.br
cat /sys/kernel/config/dlm/cluster/cluster_name
3. Change the VG lock type to none:
.br
vgchange --lock-type none --force <vg_name>
4. Change the VG lock type back to dlm which sets the new cluster name:
.br
vgchange --lock-type dlm <vg_name>
.SS limitations of lvmlockd and lockd VGs
lvmlockd currently requires using lvmetad and lvmpolld.
If a lockd VG becomes visible after the initial system startup, it is not
automatically started through the system service/init manager, and LVs in
it are not autoactivated.
Things that do not yet work in lockd VGs:
.br
- old style mirror LVs (only raid1)
.br
- creating a new thin pool and a new thin LV in a single command
.br
- using lvcreate to create cache pools or cache LVs (use lvconvert)
.br
- splitting raid1 mirror LVs
.br
- vgsplit
.br
- vgmerge
.br
- resizing an LV that is active in the shared mode on multiple hosts
.SS clvmd to lvmlockd transition
(See above for converting an existing clvm VG to a lockd VG.)
While lvmlockd and clvmd are entirely different systems, LVM usage remains
largely the same. Differences are more notable when using lvmlockd's
sanlock option.
Visible usage differences between lockd VGs with lvmlockd and clvm VGs
with clvmd:
.IP \[bu] 2
lvm.conf must be configured to use either lvmlockd (use_lvmlockd=1) or
clvmd (locking_type=3), but not both.
.IP \[bu] 2
vgcreate --shared creates a lockd VG, and vgcreate --clustered y creates a
clvm VG.
.IP \[bu] 2
lvmlockd adds the option of using sanlock for locking, avoiding the
need for network clustering.
.IP \[bu] 2
lvmlockd does not require all hosts to see all the same shared devices.
.IP \[bu] 2
lvmlockd defaults to the exclusive activation mode whenever the activation
mode is unspecified, i.e. -ay means -aey, not -asy.
.IP \[bu] 2
lvmlockd commands always apply to the local host, and never have an effect
on a remote host. (The activation option 'l' is not used.)
.IP \[bu] 2
lvmlockd works with thin and cache pools and LVs.
.IP \[bu] 2
lvmlockd saves the cluster name for a lockd VG using dlm. Only hosts in
the matching cluster can use the VG.
.IP \[bu] 2
lvmlockd requires starting/stopping lockd VGs with vgchange --lock-start
and --lock-stop.
.IP \[bu] 2
vgremove of a sanlock VG may fail indicating that all hosts have not
stopped the lockspace for the VG. Stop the VG lockspace on all hosts using
vgchange --lock-stop.
.IP \[bu] 2
Long lasting lock contention among hosts may result in a command giving up
and failing. The number of lock retries can be adjusted with
global/lock_retries.
.IP \[bu] 2
The reporting options locktype and lockargs can be used to view lockd VG
and LV lock_type and lock_args fields, e.g. vgs -o+locktype,lockargs.
In the sixth VG attr field, "s" for "shared" is displayed for lockd VGs.
.IP \[bu] 2
If lvmlockd fails or is killed while in use, locks it held remain but are
orphaned in the lock manager. lvmlockd can be restarted with an option to
adopt the orphan locks from the previous instance of lvmlockd.
.P

View File

@ -83,9 +83,10 @@ version without the system_id feature.
.P
.SS Types of VG access
A "local VG" is meant to be used by a single host.
A local VG is meant to be used by a single host.
.br
A "shared VG" is meant to be used by multiple hosts.
A shared or clustered VG is meant to be used by multiple hosts.
.br
These can be further distinguished as:
@ -107,9 +108,15 @@ A local VG that has been exported with vgexport and has no system_id.
This VG type can only be accessed by vgimport which will change it to
owned.
.B Shared:
A shared or "lockd" VG has lock_type set and no system_id.
A shared VG is meant to be used on shared storage from multiple hosts,
and is only accessible to hosts using lvmlockd.
.B Clustered:
A shared VG with the clustered flag set, and no system_id. This VG type
is only accessible to hosts using clvmd.
A clustered or "clvm" VG has the clustered flag set and no system_id.
A clustered VG is meant to be used on shared storage from multiple hosts,
and is only accessible to hosts using clvmd.
.SS system_id_source
@ -297,10 +304,16 @@ system_id to its allow_system_id list, change the system_id of the foreign
VG to its own, and remove the foreign system_id from its allow_system_id
list.
.SS shared VGs
A shared/lockd VG has no system_id set, allowing multiple hosts to
use it via lvmlockd. Changing a VG to a lockd type will clear the
existing system_id.
.SS clustered VGs
A "clustered" VG should have no system_id set, allowing multiple hosts to
use it via clvm. Changing a VG to clustered will clear the existing
A clustered/clvm VG has no system_id set, allowing multiple hosts to
use it via clvmd. Changing a VG to clustered will clear the existing
system_id. Changing a VG to not clustered will set the system_id to the
host running the vgchange command.

View File

@ -367,6 +367,7 @@ let
centos66 = centos65;
centos70 = [ "dlm-devel" "dlm" "corosynclib-devel" "perl-Digest-MD5" "systemd-devel"
"socat" # used by test suite lvmpolld
"sanlock" # used by test suite lvmlockd
"procps-ng" ];
fedora17_18 = [ "dlm-devel" "corosynclib-devel" "libblkid" "libblkid-devel"

View File

@ -121,6 +121,10 @@ ifeq ("@BUILD_LVMPOLLD@", "yes")
$(INSTALL_DATA) lvm2_lvmpolld_systemd_red_hat.socket $(systemd_unit_dir)/lvm2-lvmpolld.socket
$(INSTALL_DATA) lvm2_lvmpolld_systemd_red_hat.service $(systemd_unit_dir)/lvm2-lvmpolld.service
endif
ifeq ("@BUILD_LVMLOCKD@", "yes")
$(INSTALL_DATA) lvm2_lvmlockd_systemd_red_hat.service $(systemd_unit_dir)/lvm2-lvmlockd.service
$(INSTALL_DATA) lvm2_lvmlocking_systemd_red_hat.service $(systemd_unit_dir)/lvm2-lvmlocking.service
endif
ifneq ("@CLVMD@", "none")
$(INSTALL_DATA) lvm2_clvmd_systemd_red_hat.service $(systemd_unit_dir)/lvm2-clvmd.service
$(INSTALL_DATA) lvm2_cluster_activation_systemd_red_hat.service $(systemd_unit_dir)/lvm2-cluster-activation.service
@ -151,6 +155,8 @@ DISTCLEAN_TARGETS += \
lvm2_lvmetad_systemd_red_hat.socket \
lvm2_lvmpolld_systemd_red_hat.service \
lvm2_lvmpolld_systemd_red_hat.socket \
lvm2_lvmlockd_systemd_red_hat.service \
lvm2_lvmlocking_systemd_red_hat.service \
lvm2_monitoring_init_red_hat \
lvm2_monitoring_systemd_red_hat.service \
lvm2_pvscan_systemd_red_hat@.service \

View File

@ -0,0 +1,16 @@
[Unit]
Description=LVM2 lock daemon
Documentation=man:lvmlockd(8)
After=lvm2-lvmetad.service
[Service]
Type=simple
NonBlocking=true
ExecStart=@sbindir@/lvmlockd -f
Environment=SD_ACTIVATION=1
PIDFile=@LVMLOCKD_PIDFILE@
SendSIGKILL=no
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,24 @@
[Unit]
Description=Availability of lockspaces in lvmlockd
Documentation=man:lvmlockd(8)
After=lvm2-lvmlockd.service sanlock.service dlm.service
[Service]
Type=oneshot
RemainAfterExit=yes
# start lockspaces and wait for them to finish starting
ExecStart=@sbindir@/vgchange --lock-start --lock-opt autowait
# auto activate LVs in the newly started lockd VGs
ExecStart=@sbindir@/vgchange -aay -S 'locktype=sanlock || locktype=dlm'
# deactivate LVs in lockd VGs
ExecStop=@sbindir@/vgchange -an -S 'locktype=sanlock || locktype=dlm'
# stop lockspaces and wait for them to finish stopping
ExecStop=@sbindir@/lvmlockctl --stop-lockspaces --wait 1
[Install]
WantedBy=multi-user.target

View File

@ -6,6 +6,8 @@
%enableif %{enable_lvmetad} lvmetad
%global enable_lvmpolld %(if echo %{services} | grep -q lvmpolld; then echo 1; else echo 0; fi)
%enableif %{enable_lvmpolld} lvmpolld
%global enable_lvmlockd %(if echo %{services} | grep -q lvmlockd; then echo 1; else echo 0; fi)
%enableif %{enable_lvmlockd} lvmlockd
%build
%configure \

View File

@ -86,6 +86,10 @@ fi
%if %{have_service lvmpolld}
%{_sbindir}/lvmpolld
%endif
%if %{have_service lvmlockd}
%{_sbindir}/lvmlockd
%{_sbindir}/lvmlockctl
%endif
%if %{have_with cache}
%{_mandir}/man7/lvmcache.7.gz
%endif
@ -156,6 +160,9 @@ fi
%{_mandir}/man8/lvmpolld.8.gz
%{_mandir}/man8/lvm-lvpoll.8.gz
%endif
%if %{have_service lvmlockd}
%{_mandir}/man8/lvmlockd.8.gz
%endif
%dir %{_sysconfdir}/lvm
%ghost %{_sysconfdir}/lvm/cache/.cache
%config(noreplace) %verify(not md5 mtime size) %{_sysconfdir}/lvm/lvm.conf
@ -182,6 +189,11 @@ fi
%{_unitdir}/lvm2-lvmpolld.service
%{_unitdir}/lvm2-lvmpolld.socket
%endif
%if %{have_service lvmlockd}
%{_unitdir}/lvm2-lvmlockd.service
%{_unitdir}/lvm2-lvmlocking.service
#%{_unitdir}/lvm2-lvmlockd.socket
%endif
%else
%{_sysconfdir}/rc.d/init.d/lvm2-monitor
%{_sysconfdir}/rc.d/init.d/blk-availability
@ -191,6 +203,9 @@ fi
%if %{have_service lvmpolld}
%{_sysconfdir}/rc.d/init.d/lvm2-lvmpolld
%endif
#%if %{have_service lvmlockd}
# %{_sysconfdir}/rc.d/init.d/lvm2-lvmlockd
#%endif
%endif
##############################################################################

View File

@ -27,6 +27,8 @@
%service lvmpolld 1
%service lvmlockd 1
##############################################################
%if %{fedora} == 16 || %{rhel} == 6

View File

@ -70,6 +70,8 @@ help:
@echo " check_cluster Run tests with cluster daemon."
@echo " check_lvmetad Run tests with lvmetad daemon."
@echo " check_lvmpolld Run tests with lvmpolld daemon."
@echo " check_lvmlockd_sanlock Run tests with lvmlockd and sanlock."
@echo " check_lvmlockd_dlm Run tests with lvmlockd and dlm."
@echo " clean Clean dir."
@echo " help Display callable targets."
@echo -e "\nSupported variables:"
@ -138,6 +140,32 @@ check_lvmpolld: .tests-stamp
--flavours ndev-lvmpolld,ndev-cluster-lvmpolld,ndev-lvmetad-lvmpolld --only $(T) --skip $(S)
endif
# Both lockd test targets are gated by the same configure result,
# so use a single conditional instead of two identical ones.
ifeq ("@BUILD_LVMLOCKD@", "yes")
# Run the suite under lvmlockd+sanlock: set up the sanlock lockspace,
# run the selected tests, then tear the lockspace down again.
check_lvmlockd_sanlock: .tests-stamp
	VERBOSE=$(VERBOSE) ./lib/runner \
		--testdir . --outdir results \
		--flavours udev-lvmlockd-sanlock --only shell/sanlock-prepare.sh
	VERBOSE=$(VERBOSE) ./lib/runner \
		--testdir . --outdir results \
		--flavours udev-lvmlockd-sanlock --only $(T) --skip $(S)
	VERBOSE=$(VERBOSE) ./lib/runner \
		--testdir . --outdir results \
		--flavours udev-lvmlockd-sanlock --only shell/sanlock-remove.sh

# Same pattern for lvmlockd+dlm (corosync/dlm_controld based).
check_lvmlockd_dlm: .tests-stamp
	VERBOSE=$(VERBOSE) ./lib/runner \
		--testdir . --outdir results \
		--flavours udev-lvmlockd-dlm --only shell/dlm-prepare.sh
	VERBOSE=$(VERBOSE) ./lib/runner \
		--testdir . --outdir results \
		--flavours udev-lvmlockd-dlm --only $(T) --skip $(S)
	VERBOSE=$(VERBOSE) ./lib/runner \
		--testdir . --outdir results \
		--flavours udev-lvmlockd-dlm --only shell/dlm-remove.sh
endif
DATADIR = $(datadir)/lvm2-testsuite
EXECDIR = $(libexecdir)/lvm2-testsuite
@ -153,6 +181,8 @@ LIB_FLAVOURS = \
lib/flavour-udev-lvmetad-lvmpolld\
lib/flavour-udev-lvmetad\
lib/flavour-udev-lvmpolld\
lib/flavour-udev-lvmlockd-sanlock\
lib/flavour-udev-lvmlockd-dlm\
lib/flavour-udev-vanilla
LIB_LOCAL = lib/paths lib/runner

View File

@ -542,6 +542,12 @@ prepare_devs() {
local pvname=${3:-pv}
local shift=0
# sanlock requires more space for the internal sanlock lv
# This could probably be lower, but what are the units?
if test -n "$LVM_TEST_LOCK_TYPE_SANLOCK" ; then
devsize=1024
fi
touch DEVICES
prepare_backing_dev $(($n*$devsize))
# shift start of PV devices on /dev/loopXX by 1M
@ -817,6 +823,9 @@ generate_config() {
LVM_TEST_LOCKING=${LVM_TEST_LOCKING:-1}
LVM_TEST_LVMETAD=${LVM_TEST_LVMETAD:-0}
LVM_TEST_LVMPOLLD=${LVM_TEST_LVMPOLLD:-0}
LVM_TEST_LVMLOCKD=${LVM_TEST_LVMLOCKD:-0}
LVM_TEST_LOCK_TYPE_SANLOCK=${LVM_TEST_LOCK_TYPE_SANLOCK:-0}
LVM_TEST_LOCK_TYPE_DLM=${LVM_TEST_LOCK_TYPE_DLM:-0}
if test "$DM_DEV_DIR" = "/dev"; then
LVM_VERIFY_UDEV=${LVM_VERIFY_UDEV:-0}
else
@ -859,6 +868,7 @@ global/thin_dump_executable = "$LVM_TEST_THIN_DUMP_CMD"
global/thin_repair_executable = "$LVM_TEST_THIN_REPAIR_CMD"
global/use_lvmetad = $LVM_TEST_LVMETAD
global/use_lvmpolld = $LVM_TEST_LVMPOLLD
global/use_lvmlockd = $LVM_TEST_LVMLOCKD
log/activation = 1
log/file = "$TESTDIR/debug.log"
log/indent = 1

View File

@ -0,0 +1,6 @@
# Test-suite flavour: run tests under lvmlockd using the dlm lock manager
# (requires corosync + dlm_controld; see shell/dlm-prepare.sh).
export LVM_TEST_LOCKING=1
export LVM_TEST_LVMETAD=1
export LVM_TEST_LVMPOLLD=1
export LVM_TEST_LVMLOCKD=1
export LVM_TEST_LOCK_TYPE_DLM=1
# lockd tests run against the real /dev, not a private device dir.
export LVM_TEST_DEVDIR=/dev

View File

@ -0,0 +1,6 @@
# Test-suite flavour: run tests under lvmlockd using the sanlock lock
# manager (requires the sanlock daemon; see shell/sanlock-prepare.sh).
export LVM_TEST_LOCKING=1
export LVM_TEST_LVMETAD=1
export LVM_TEST_LVMPOLLD=1
export LVM_TEST_LVMLOCKD=1
export LVM_TEST_LOCK_TYPE_SANLOCK=1
# lockd tests run against the real /dev, not a private device dir.
export LVM_TEST_DEVDIR=/dev

View File

@ -106,6 +106,13 @@ test -n "$LVM_TEST_LVMPOLLD" && {
aux prepare_lvmpolld
}
if test -n "$LVM_TEST_LVMLOCKD" ; then
if test -n "$LVM_TEST_LOCK_TYPE_SANLOCK" ; then
aux lvmconf 'local/host_id = 1'
fi
export SHARED="--shared"
fi
echo "<======== Processing test: \"$TESTNAME\" ========>"
set -vx

View File

@ -0,0 +1,19 @@
# created by lvm test suite
totem {
version: 2
secauth: off
cluster_name: test
}
nodelist {
node {
ring0_addr: @LOCAL_NODE@
nodeid: 1
}
}
quorum {
provider: corosync_votequorum
}
logging {
to_syslog: yes
}

4
test/lib/test-dlm-conf Normal file
View File

@ -0,0 +1,4 @@
# created by lvm test suite
log_debug=1
enable_fencing=0

View File

@ -0,0 +1,2 @@
# created by lvm test suite
SANLOCKOPTS="-U sanlock -G sanlock -w 0"

View File

@ -57,6 +57,8 @@ mkdtemp() {
destdir=$1
template=$2
test -d "$destdir" || die "DIR ('$destdir') does not exist."
case "$template" in
*XXXX) ;;
*) die "Invalid template: $template (must have a suffix of at least 4 X's)";;

View File

@ -0,0 +1,27 @@
#!/bin/sh
# Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
test_description='Hello world for vgcreate with lvmlockd and dlm'
. lib/inittest
# Only meaningful when the run was configured for dlm locking.
[ -z "$LVM_TEST_LOCK_TYPE_DLM" ] && skip;
aux prepare_devs 1
# $SHARED is set to "--shared" by inittest when LVM_TEST_LVMLOCKD=1,
# so this creates a lockd VG rather than a local one.
vgcreate $SHARED $vg "$dev1"
# Report and verify the lock type recorded in the VG metadata.
vgs -o+locktype,lockargs $vg
check vg_field $vg vg_locktype dlm
vgremove $vg

90
test/shell/dlm-prepare.sh Normal file
View File

@ -0,0 +1,90 @@
#!/bin/sh
# Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
test_description='Set up things to run tests with dlm'
. lib/utils
. lib/inittest
[ -z "$LVM_TEST_LOCK_TYPE_DLM" ] && skip;
COROSYNC_CONF="/etc/corosync/corosync.conf"
COROSYNC_NODE="$(hostname)"
# Install the test suite's corosync.conf (from lib/test-corosync-conf,
# with @LOCAL_NODE@ substituted by this host's name).
#
# Fix: the original branches were inverted — a pre-existing config that
# was NOT created by the test suite is a real user configuration and
# must be preserved as *.prelvmtest, while a leftover from a previous
# test run (marked "created by lvm test suite") can simply be removed.
create_corosync_conf() {
	if test -e "$COROSYNC_CONF"; then
		if ! grep -q "created by lvm test suite" "$COROSYNC_CONF"; then
			# user's own config: back it up instead of deleting it
			mv "$COROSYNC_CONF" "$COROSYNC_CONF.prelvmtest"
		else
			# stale copy from an earlier test run
			rm "$COROSYNC_CONF"
		fi
	fi

	sed -e "s/@LOCAL_NODE@/$COROSYNC_NODE/" lib/test-corosync-conf > "$COROSYNC_CONF"
	echo "created new $COROSYNC_CONF"
}
DLM_CONF="/etc/dlm/dlm.conf"
# Install the test suite's dlm.conf (copied from lib/test-dlm-conf).
#
# Fix: the original branches were inverted — a pre-existing config that
# was NOT created by the test suite is a real user configuration and
# must be preserved as *.prelvmtest, while a leftover from a previous
# test run (marked "created by lvm test suite") can simply be removed.
create_dlm_conf() {
	if test -e "$DLM_CONF"; then
		if ! grep -q "created by lvm test suite" "$DLM_CONF"; then
			# user's own config: back it up instead of deleting it
			mv "$DLM_CONF" "$DLM_CONF.prelvmtest"
		else
			# stale copy from an earlier test run
			rm "$DLM_CONF"
		fi
	fi

	cp lib/test-dlm-conf "$DLM_CONF"
	echo "created new $DLM_CONF"
}
# Start the daemon stack needed for dlm-based lvmlockd tests:
# corosync (membership/quorum), then dlm_controld, then lvmlockd.
# Refuses to run if any of the three is already running, so a live
# cluster configuration is never clobbered by the test setup.
prepare_lvmlockd_dlm() {
if pgrep lvmlockd ; then
echo "Cannot run while existing lvmlockd process exists"
exit 1
fi
if pgrep dlm_controld ; then
echo "Cannot run while existing dlm_controld process exists"
exit 1
fi
if pgrep corosync; then
echo "Cannot run while existing corosync process exists"
exit 1
fi
# Install test-suite config files before starting the daemons.
create_corosync_conf
create_dlm_conf
systemctl start corosync
# NOTE(review): 'sleep 1' + pgrep is a race-prone readiness heuristic;
# a slow machine may need longer — TODO confirm.
sleep 1
if ! pgrep corosync; then
echo "Failed to start corosync"
exit 1
fi
systemctl start dlm
sleep 1
if ! pgrep dlm_controld; then
echo "Failed to start dlm"
exit 1
fi
# lvmlockd is started directly (not via systemd) and daemonizes itself.
lvmlockd
sleep 1
if ! pgrep lvmlockd ; then
echo "Failed to start lvmlockd"
exit 1
fi
}
prepare_lvmlockd_dlm

20
test/shell/dlm-remove.sh Normal file
View File

@ -0,0 +1,20 @@
#!/bin/sh
# Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
test_description='Remove the dlm test setup'
. lib/inittest
# Only meaningful when the run was configured for dlm locking.
[ -z "$LVM_TEST_LOCK_TYPE_DLM" ] && skip;
# Tear down the daemons started by shell/dlm-prepare.sh, in reverse
# dependency order: dlm_controld first, then corosync, then lvmlockd
# (which was started directly, so it is killed rather than stopped
# via systemd).
systemctl stop dlm
systemctl stop corosync
killall lvmlockd

View File

@ -16,8 +16,6 @@ export LVM_TEST_LVMETAD_DEBUG_OPTS=${LVM_TEST_LVMETAD_DEBUG_OPTS-}
. lib/inittest
test -e LOCAL_LVMPOLLD && skip
aux prepare_devs 10
#
@ -43,11 +41,11 @@ aux prepare_devs 10
prepare_vgs_() {
# set up vgs/lvs that we will remove
vgcreate $vg1 "$dev1" "$dev2"
vgcreate $vg2 "$dev3" "$dev4"
vgcreate $vg3 "$dev5" "$dev6"
vgcreate $vg4 "$dev7" "$dev8"
vgcreate $vg5 "$dev9" "$dev10"
vgcreate $SHARED $vg1 "$dev1" "$dev2"
vgcreate $SHARED $vg2 "$dev3" "$dev4"
vgcreate $SHARED $vg3 "$dev5" "$dev6"
vgcreate $SHARED $vg4 "$dev7" "$dev8"
vgcreate $SHARED $vg5 "$dev9" "$dev10"
lvcreate -Zn -an -l 2 -n $lv1 $vg1
lvcreate -Zn -an -l 2 -n $lv1 $vg2
lvcreate -Zn -an -l 2 -n $lv2 $vg2
@ -656,3 +654,5 @@ not grep $vg5-$lv2 err
not grep $vg5-$lv3 err
not grep $vg5-$lv4 err
not grep $vg5-$lv5 err
vgremove -f $vg1 $vg2 $vg3 $vg4 $vg5

View File

@ -13,8 +13,6 @@ test_description='Exercise toollib process_each_pv'
. lib/inittest
test -e LOCAL_LVMPOLLD && skip
aux prepare_devs 14
#
@ -22,10 +20,9 @@ aux prepare_devs 14
# pvdisplay
# pvresize
# pvs
# vgreduce
#
# process-each-pvresize.sh covers pvresize,
# the others are covered here.
# process-each-pvresize.sh covers pvresize.
# process-each-vgreduce.sh covers vgreduce.
#
@ -36,9 +33,9 @@ aux prepare_devs 14
# dev1 matches dev10,dev11,etc
#
vgcreate $vg1 "$dev10"
vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5"
vgcreate $vg3 "$dev6" "$dev7" "$dev8" "$dev9"
vgcreate $SHARED $vg1 "$dev10"
vgcreate $SHARED $vg2 "$dev2" "$dev3" "$dev4" "$dev5"
vgcreate $SHARED $vg3 "$dev6" "$dev7" "$dev8" "$dev9"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
@ -713,173 +710,6 @@ not grep "$dev13" err
not grep "$dev14" err
#
# test vgreduce
#
# fail without dev
not vgreduce $vg2
# fail with dev and -a
not vgreduce $vg2 "$dev2" -a
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# remove one pv
vgreduce $vg2 "$dev2"
not check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2"
# remove two pvs
vgreduce $vg2 "$dev2" "$dev3"
not check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2" "$dev3"
pvchange --addtag V2D3 "$dev3"
# remove one pv with tag
vgreduce $vg2 @V2D3
check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev3"
pvchange --addtag V2D3 "$dev3"
# remove two pvs, each with different tag
vgreduce $vg2 @V2D3 @V2D4
check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
not check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev3" "$dev4"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
# remove two pvs, both with same tag
vgreduce $vg2 @V2D45
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
not check pv_field "$dev4" vg_name $vg2
not check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev4" "$dev5"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
# remove two pvs, one by name, one by tag
vgreduce $vg2 "$dev2" @V2D3
not check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2" "$dev3"
pvchange --addtag V2D3 "$dev3"
# remove one pv by tag, where another vg has a pv with same tag
pvchange --addtag V2D5V3D9 "$dev5"
pvchange --addtag V2D5V3D9 "$dev9"
vgreduce $vg2 @V2D5V3D9
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
not check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev5"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
# fail to remove last pv (don't know which will be last)
not vgreduce -a $vg2
# reset
vgremove $vg2
vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
# lvcreate on one pv to make it used
# remove all unused pvs
lvcreate -n $lv1 -l 2 $vg2 "$dev2"
not vgreduce -a $vg2
check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
not check pv_field "$dev4" vg_name $vg2
not check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev3" "$dev4" "$dev5"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
lvchange -an $vg2/$lv1
lvremove $vg2/$lv1
#
# tests including pvs without mdas
#
@ -917,9 +747,9 @@ pvcreate "$dev14" --metadatacopies 0
# dev12
# dev13
vgcreate $vg1 "$dev10"
vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5"
vgcreate $vg3 "$dev6" "$dev7" "$dev8" "$dev9"
vgcreate $SHARED $vg1 "$dev10"
vgcreate $SHARED $vg2 "$dev2" "$dev3" "$dev4" "$dev5"
vgcreate $SHARED $vg3 "$dev6" "$dev7" "$dev8" "$dev9"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
@ -1228,58 +1058,4 @@ grep "$dev12" err
grep "$dev13" err
grep "$dev14" err
#
# vgreduce including pvs without mdas
#
# remove pv without mda
vgreduce $vg2 "$dev2"
not check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2"
# remove pv with mda and pv without mda
vgreduce $vg2 "$dev2" "$dev3"
not check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2"
vgextend $vg2 "$dev3"
# fail to remove only pv with mda
not vgreduce $vg3 "$dev9"
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
# remove by tag a pv without mda
vgreduce $vg3 @V3D8
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
not check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
# reset
vgextend $vg3 "$dev8"
vgremove $vg1 $vg2 $vg3

View File

@ -13,8 +13,6 @@ test_description='Exercise toollib process_each_vg'
. lib/inittest
test -e LOCAL_LVMPOLLD && skip
aux prepare_devs 6
#
@ -28,16 +26,16 @@ aux prepare_devs 6
#
# set up four vgs that we will remove
#
vgcreate $vg1 "$dev1"
vgcreate $vg2 "$dev2"
vgcreate $vg3 "$dev3"
vgcreate $vg4 "$dev4"
vgcreate $SHARED $vg1 "$dev1"
vgcreate $SHARED $vg2 "$dev2"
vgcreate $SHARED $vg3 "$dev3"
vgcreate $SHARED $vg4 "$dev4"
# these two vgs will not be removed
vgcreate $vg5 "$dev5"
vgcreate $SHARED $vg5 "$dev5"
vgchange --addtag tagvg5 $vg5
lvcreate -l 4 -n $lv1 $vg5
vgcreate $vg6 "$dev6"
vgcreate $SHARED $vg6 "$dev6"
lvcreate -l 4 -n $lv2 $vg6
# should fail without any arg
@ -67,10 +65,10 @@ not vgs $vg4
#
# set up four vgs that we will remove
#
vgcreate --addtag tagfoo $vg1 "$dev1"
vgcreate --addtag tagfoo $vg2 "$dev2"
vgcreate --addtag tagfoo2 $vg3 "$dev3"
vgcreate --addtag tagbar $vg4 "$dev4"
vgcreate $SHARED --addtag tagfoo $vg1 "$dev1"
vgcreate $SHARED --addtag tagfoo $vg2 "$dev2"
vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3"
vgcreate $SHARED --addtag tagbar $vg4 "$dev4"
vgchange --addtag foo $vg4
# should do nothing and fail
@ -95,10 +93,10 @@ not vgs $vg4
#
# set up four vgs that we will remove
#
vgcreate --addtag tagfoo $vg1 "$dev1"
vgcreate --addtag tagfoo $vg2 "$dev2"
vgcreate --addtag tagfoo2 $vg3 "$dev3"
vgcreate --addtag tagbar $vg4 "$dev4"
vgcreate $SHARED --addtag tagfoo $vg1 "$dev1"
vgcreate $SHARED --addtag tagfoo $vg2 "$dev2"
vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3"
vgcreate $SHARED --addtag tagbar $vg4 "$dev4"
vgchange --addtag foo $vg4
vgremove @tagfoo
@ -113,10 +111,10 @@ not vgs $vg4
#
# set up four vgs that we will remove
#
vgcreate --addtag tagfoo $vg1 "$dev1"
vgcreate --addtag tagfoo $vg2 "$dev2"
vgcreate --addtag tagfoo2 $vg3 "$dev3"
vgcreate --addtag tagbar $vg4 "$dev4"
vgcreate $SHARED --addtag tagfoo $vg1 "$dev1"
vgcreate $SHARED --addtag tagfoo $vg2 "$dev2"
vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3"
vgcreate $SHARED --addtag tagbar $vg4 "$dev4"
vgchange --addtag foo $vg4
vgremove $vg1 @tagfoo2
@ -131,10 +129,10 @@ not vgs $vg4
#
# set up four vgs that we will remove
#
vgcreate --addtag tagfoo $vg1 "$dev1"
vgcreate --addtag tagfoo $vg2 "$dev2"
vgcreate --addtag tagfoo2 $vg3 "$dev3"
vgcreate --addtag tagbar $vg4 "$dev4"
vgcreate $SHARED --addtag tagfoo $vg1 "$dev1"
vgcreate $SHARED --addtag tagfoo $vg2 "$dev2"
vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3"
vgcreate $SHARED --addtag tagbar $vg4 "$dev4"
vgchange --addtag foo $vg4
vgremove @foo @tagfoo2 $vg1 $vg2
@ -147,10 +145,10 @@ not vgs $vg4
#
# set up four vgs that we will remove
#
vgcreate --addtag tagfoo $vg1 "$dev1"
vgcreate --addtag tagfoo $vg2 "$dev2"
vgcreate --addtag tagfoo2 $vg3 "$dev3"
vgcreate --addtag tagbar $vg4 "$dev4"
vgcreate $SHARED --addtag tagfoo $vg1 "$dev1"
vgcreate $SHARED --addtag tagfoo $vg2 "$dev2"
vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3"
vgcreate $SHARED --addtag tagbar $vg4 "$dev4"
vgchange --addtag foo $vg4
vgremove @tagfoo $vg1 @tagfoo @tagfoo2 $vg3 @tagbar
@ -163,10 +161,10 @@ not vgs $vg4
#
# set up four vgs that we will remove
#
vgcreate --addtag tagfoo $vg1 "$dev1"
vgcreate --addtag tagfoo $vg2 "$dev2"
vgcreate --addtag tagfoo2 $vg3 "$dev3"
vgcreate --addtag tagbar $vg4 "$dev4"
vgcreate $SHARED --addtag tagfoo $vg1 "$dev1"
vgcreate $SHARED --addtag tagfoo $vg2 "$dev2"
vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3"
vgcreate $SHARED --addtag tagbar $vg4 "$dev4"
vgchange --addtag foo $vg4
not vgremove garbage $vg1
@ -198,10 +196,10 @@ not vgs $vg6
#
# set up four vgs that we will report
#
vgcreate --addtag tagfoo $vg1 "$dev1"
vgcreate --addtag tagfoo $vg2 "$dev2"
vgcreate --addtag tagfoo2 $vg3 "$dev3"
vgcreate --addtag tagbar $vg4 "$dev4"
vgcreate $SHARED --addtag tagfoo $vg1 "$dev1"
vgcreate $SHARED --addtag tagfoo $vg2 "$dev2"
vgcreate $SHARED --addtag tagfoo2 $vg3 "$dev3"
vgcreate $SHARED --addtag tagbar $vg4 "$dev4"
vgchange --addtag foo $vg4
vgs >err
@ -264,3 +262,5 @@ not grep $vg1 err
not grep $vg2 err
not grep $vg3 err
vgremove -f $vg1 $vg2 $vg3 $vg4

View File

@ -0,0 +1,327 @@
#!/bin/sh
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
test_description='Exercise toollib process_each_pv with vgreduce'
. lib/inittest
aux prepare_devs 14
#
# set up
#
# FIXME: some of the setup may not be used by the tests
# since this was split out from process-each-pv, where
# some of the setup was used by other tests that only
# remain in process-each-pv.
#
# use dev10 instead of dev1 because a simple grep for
# dev1 matches dev10,dev11,etc
#
vgcreate $vg1 "$dev10"
vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5"
vgcreate $vg3 "$dev6" "$dev7" "$dev8" "$dev9"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
pvchange --addtag V3 "$dev6" "$dev7" "$dev8" "$dev9"
pvchange --addtag V3D9 "$dev9"
# orphan
pvcreate "$dev11"
# dev (a non-pv device)
pvcreate "$dev12"
pvremove "$dev12"
# dev13 is intentionally untouched so we can
# test that it is handled appropriately as a non-pv
# orphan
pvcreate "$dev14"
# fail without dev
not vgreduce $vg2
# fail with dev and -a
not vgreduce $vg2 "$dev2" -a
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# remove one pv
vgreduce $vg2 "$dev2"
not check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2"
# remove two pvs
vgreduce $vg2 "$dev2" "$dev3"
not check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2" "$dev3"
pvchange --addtag V2D3 "$dev3"
# remove one pv with tag
vgreduce $vg2 @V2D3
check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev3"
pvchange --addtag V2D3 "$dev3"
# remove two pvs, each with different tag
vgreduce $vg2 @V2D3 @V2D4
check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
not check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev3" "$dev4"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
# remove two pvs, both with same tag
vgreduce $vg2 @V2D45
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
not check pv_field "$dev4" vg_name $vg2
not check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev4" "$dev5"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
# remove two pvs, one by name, one by tag
vgreduce $vg2 "$dev2" @V2D3
not check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2" "$dev3"
pvchange --addtag V2D3 "$dev3"
# remove one pv by tag, where another vg has a pv with same tag
pvchange --addtag V2D5V3D9 "$dev5"
pvchange --addtag V2D5V3D9 "$dev9"
vgreduce $vg2 @V2D5V3D9
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
not check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev5"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
# fail to remove last pv (don't know which will be last)
not vgreduce -a $vg2
# reset
vgremove $vg2
vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
# lvcreate on one pv to make it used
# remove all unused pvs
lvcreate -n $lv1 -l 2 $vg2 "$dev2"
not vgreduce -a $vg2
check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
not check pv_field "$dev4" vg_name $vg2
not check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev3" "$dev4" "$dev5"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
lvchange -an $vg2/$lv1
lvremove $vg2/$lv1
#
# tests including pvs without mdas
#
# remove old config
vgremove $vg1
vgremove $vg2
vgremove $vg3
pvremove "$dev11"
pvremove "$dev14"
# new config with some pvs that have zero mdas
# for vg1
pvcreate "$dev10"
# for vg2
pvcreate "$dev2" --metadatacopies 0
pvcreate "$dev3"
pvcreate "$dev4"
pvcreate "$dev5"
# for vg3
pvcreate "$dev6" --metadatacopies 0
pvcreate "$dev7" --metadatacopies 0
pvcreate "$dev8" --metadatacopies 0
pvcreate "$dev9"
# orphan with mda
pvcreate "$dev11"
# orphan without mda
pvcreate "$dev14" --metadatacopies 0
# non-pv devs
# dev12
# dev13
vgcreate $vg1 "$dev10"
vgcreate $vg2 "$dev2" "$dev3" "$dev4" "$dev5"
vgcreate $vg3 "$dev6" "$dev7" "$dev8" "$dev9"
pvchange --addtag V2D3 "$dev3"
pvchange --addtag V2D4 "$dev4"
pvchange --addtag V2D45 "$dev4"
pvchange --addtag V2D5 "$dev5"
pvchange --addtag V2D45 "$dev5"
pvchange --addtag V3 "$dev6" "$dev7" "$dev8" "$dev9"
pvchange --addtag V3D8 "$dev8"
pvchange --addtag V3D9 "$dev9"
#
# vgreduce including pvs without mdas
#
# remove pv without mda
vgreduce $vg2 "$dev2"
not check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2"
# remove pv with mda and pv without mda
vgreduce $vg2 "$dev2" "$dev3"
not check pv_field "$dev2" vg_name $vg2
not check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
# reset
vgextend $vg2 "$dev2"
vgextend $vg2 "$dev3"
# fail to remove only pv with mda
not vgreduce $vg3 "$dev9"
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
# remove by tag a pv without mda
vgreduce $vg3 @V3D8
check pv_field "$dev6" vg_name $vg3
check pv_field "$dev7" vg_name $vg3
not check pv_field "$dev8" vg_name $vg3
check pv_field "$dev9" vg_name $vg3
check pv_field "$dev2" vg_name $vg2
check pv_field "$dev3" vg_name $vg2
check pv_field "$dev4" vg_name $vg2
check pv_field "$dev5" vg_name $vg2
# reset
vgextend $vg3 "$dev8"
vgremove $vg1 $vg2 $vg3

View File

@ -0,0 +1,27 @@
#!/bin/sh
# Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

test_description='Hello world for vgcreate with lvmlockd and sanlock'

. lib/inittest

# This test is only meaningful when the run is configured for sanlock.
if [ -z "$LVM_TEST_LOCK_TYPE_SANLOCK" ]; then
	skip
fi

# A single PV is enough for a minimal shared VG.
aux prepare_pvs 1

# Create a shared VG and verify its lock type is sanlock.
vgcreate $SHARED $vg "$dev1"

vgs -o+locktype,lockargs $vg

check vg_field $vg vg_locktype sanlock

vgremove $vg

View File

@ -0,0 +1,86 @@
#!/bin/sh
# Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

test_description='Set up things to run tests with sanlock'

. lib/utils
. lib/inittest

# Only meaningful when the test run is configured for sanlock locking.
[ -z "$LVM_TEST_LOCK_TYPE_SANLOCK" ] && skip;

# sanlock daemon config file that is replaced for the duration of the tests.
SANLOCK_CONF="/etc/sysconfig/sanlock"
# Install the test suite's sanlock configuration.
# If a real (user) config exists, save it as $SANLOCK_CONF.prelvmtest so it
# can be restored later; a leftover config created by a previous test run
# is simply discarded.
create_sanlock_conf() {
	# Use portable -e instead of obsolescent one-argument -a,
	# and quote the path.
	if test -e "$SANLOCK_CONF"; then
		if grep "created by lvm test suite" "$SANLOCK_CONF"; then
			# Stale file from an earlier test run; safe to remove.
			rm "$SANLOCK_CONF"
		else
			# Real user configuration; preserve it.
			mv "$SANLOCK_CONF" "$SANLOCK_CONF.prelvmtest"
		fi
	fi

	cp lib/test-sanlock-conf "$SANLOCK_CONF"
	echo "created new $SANLOCK_CONF"
}
# Start the sanlock and lvmlockd daemons needed by the sanlock lock_type
# tests.  Refuses to run if either daemon is already running, since the
# test needs to control the daemons' options and lifetime.
prepare_lvmlockd_sanlock() {
	if pgrep lvmlockd ; then
		echo "Cannot run while existing lvmlockd process exists"
		exit 1
	fi

	if pgrep sanlock ; then
		echo "Cannot run while existing sanlock process exists"
		exit 1
	fi

	# Put the test configuration in place before starting sanlock.
	create_sanlock_conf

	# FIXME: use 'systemctl start sanlock' once we can pass options
	# NOTE(review): -w 0 presumably disables the watchdog and -e sets the
	# host name used for leases -- confirm against sanlock(8).
	sanlock daemon -U sanlock -G sanlock -w 0 -e testhostname
	sleep 1
	if ! pgrep sanlock; then
		echo "Failed to start sanlock"
		exit 1
	fi

	# FIXME: use 'systemctl start lvm2-lvmlockd' once we can pass -o 2
	lvmlockd -o 2
	sleep 1
	if ! pgrep lvmlockd; then
		echo "Failed to start lvmlockd"
		exit 1
	fi
}
# Create a device and a VG that are both outside the scope of
# the standard lvm test suite so that they will not be removed
# and will remain in place while all the tests are run.
#
# Use this VG to hold the sanlock global lock which will be used
# by lvmlockd during other tests.
#
# This script will be run before any standard tests are run.
# After all the tests are run, another script will be run
# to remove this VG and device.

GL_DEV="/dev/mapper/GL_DEV"
GL_FILE="$PWD/gl_file.img"

rm -f "$GL_FILE"

# 1 GiB backing file for the loop device.
dd if=/dev/zero of="$GL_FILE" bs=$((1024*1024)) count=1024 2> /dev/null

GL_LOOP=$(losetup -f "$GL_FILE" --show)

# Wrap the loop device in a linear dm device so it has the stable
# /dev/mapper/GL_DEV name used by the filters below.
echo "0 `blockdev --getsize $GL_LOOP` linear $GL_LOOP 0" | dmsetup create GL_DEV

prepare_lvmlockd_sanlock

# The filters restrict lvm to GL_DEV only, so the test suite's other
# devices are untouched.  --lock-gl enable places the sanlock global
# lock in this VG.
vgcreate --config 'devices { global_filter=["a|GL_DEV|", "r|.*|"] filter=["a|GL_DEV|", "r|.*|"]}' --lock-type sanlock --lock-gl enable --lock-opt wait glvg $GL_DEV

vgs --config 'devices { global_filter=["a|GL_DEV|", "r|.*|"] filter=["a|GL_DEV|", "r|.*|"]}' -o+locktype,lockargs glvg

View File

@ -0,0 +1,28 @@
#!/bin/sh
# Copyright (C) 2008-2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

test_description='Remove the sanlock test setup'

. lib/inittest

# Only meaningful when the test run is configured for sanlock locking.
[ -z "$LVM_TEST_LOCK_TYPE_SANLOCK" ] && skip;

# Removes the VG with the global lock that was created by
# the corresponding create script.
# The same restrictive device filter used at creation time is needed
# so lvm can see GL_DEV.
vgremove --config 'devices { global_filter=["a|GL_DEV|", "r|.*|"] filter=["a|GL_DEV|", "r|.*|"]}' glvg

# Stop the daemons started by the create script.
killall lvmlockd
killall sanlock

# Remove the dm wrapper device.
# NOTE(review): the loop device backing GL_DEV is left attached (no
# losetup -d here) -- confirm that is intentional.
dmsetup remove GL_DEV

# dmsetup remove glvg-lvmlock

View File

@ -50,6 +50,10 @@ arg(ignoremonitoring_ARG, '\0', "ignoremonitoring", NULL, 0)
arg(ignoreskippedcluster_ARG, '\0', "ignoreskippedcluster", NULL, 0)
arg(ignoreunsupported_ARG, '\0', "ignoreunsupported", NULL, 0)
arg(labelsector_ARG, '\0', "labelsector", int_arg, 0)
arg(lockopt_ARG, '\0', "lockopt", string_arg, 0)
arg(lockstart_ARG, '\0', "lockstart", NULL, 0)
arg(lockstop_ARG, '\0', "lockstop", NULL, 0)
arg(locktype_ARG, '\0', "locktype", locktype_arg, 0)
arg(maxrecoveryrate_ARG, '\0', "maxrecoveryrate", size_kb_arg, 0)
arg(merge_ARG, '\0', "merge", NULL, 0)
arg(mergedconfig_ARG, '\0', "mergedconfig", NULL, 0)
@ -96,6 +100,7 @@ arg(resync_ARG, '\0', "resync", NULL, 0)
arg(rows_ARG, '\0', "rows", NULL, 0)
arg(segments_ARG, '\0', "segments", NULL, 0)
arg(separator_ARG, '\0', "separator", string_arg, 0)
arg(shared_ARG, '\0', "shared", NULL, 0)
arg(split_ARG, '\0', "split", NULL, 0)
arg(splitcache_ARG, '\0', "splitcache", NULL, 0)
arg(splitmirrors_ARG, '\0', "splitmirrors", int_arg, 0)

View File

@ -394,7 +394,7 @@ xx(lvcreate,
xx(lvdisplay,
"Display information about a logical volume",
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | ENABLE_FOREIGN_VGS,
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"lvdisplay\n"
"\t[-a|--all]\n"
"\t[-c|--colon]\n"
@ -442,7 +442,7 @@ xx(lvdisplay,
aligned_ARG, all_ARG, binary_ARG, colon_ARG, columns_ARG, foreign_ARG,
ignorelockingfailure_ARG, ignoreskippedcluster_ARG, maps_ARG,
noheadings_ARG, nosuffix_ARG, options_ARG, sort_ARG, partial_ARG,
readonly_ARG, segments_ARG, select_ARG, separator_ARG,
readonly_ARG, segments_ARG, select_ARG, separator_ARG, shared_ARG,
unbuffered_ARG, units_ARG)
xx(lvextend,
@ -646,7 +646,7 @@ xx(lvresize,
xx(lvs,
"Display information about logical volumes",
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | ENABLE_FOREIGN_VGS,
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"lvs\n"
"\t[-a|--all]\n"
"\t[--aligned]\n"
@ -679,12 +679,12 @@ xx(lvs,
aligned_ARG, all_ARG, binary_ARG, foreign_ARG, ignorelockingfailure_ARG,
ignoreskippedcluster_ARG, nameprefixes_ARG, noheadings_ARG,
nolocking_ARG, nosuffix_ARG, options_ARG, partial_ARG,
readonly_ARG, rows_ARG, segments_ARG, select_ARG, separator_ARG,
readonly_ARG, rows_ARG, segments_ARG, select_ARG, separator_ARG, shared_ARG,
sort_ARG, trustcache_ARG, unbuffered_ARG, units_ARG, unquoted_ARG)
xx(lvscan,
"List all logical volumes in all volume groups",
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT,
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"lvscan\n"
"\t[-a|--all]\n"
"\t[-b|--blockdevice]\n"
@ -744,7 +744,7 @@ xx(pvresize,
xx(pvck,
"Check the consistency of physical volume(s)",
0,
LOCKD_VG_SH,
"pvck "
"\t[--commandprofile ProfileName]\n"
"\t[-d|--debug]\n"
@ -810,7 +810,7 @@ xx(pvdata,
xx(pvdisplay,
"Display various attributes of physical volume(s)",
CACHE_VGMETADATA | PERMITTED_READ_ONLY | ENABLE_ALL_DEVS | ENABLE_FOREIGN_VGS,
CACHE_VGMETADATA | PERMITTED_READ_ONLY | ENABLE_ALL_DEVS | LOCKD_VG_SH,
"pvdisplay\n"
"\t[-c|--colon]\n"
"\t[--commandprofile ProfileName]\n"
@ -855,7 +855,7 @@ xx(pvdisplay,
aligned_ARG, all_ARG, binary_ARG, colon_ARG, columns_ARG, foreign_ARG,
ignorelockingfailure_ARG, ignoreskippedcluster_ARG, maps_ARG,
noheadings_ARG, nosuffix_ARG, options_ARG, readonly_ARG,
select_ARG, separator_ARG, short_ARG, sort_ARG, unbuffered_ARG,
select_ARG, separator_ARG, shared_ARG, short_ARG, sort_ARG, unbuffered_ARG,
units_ARG)
xx(pvmove,
@ -919,7 +919,7 @@ xx(pvremove,
xx(pvs,
"Display information about physical volumes",
CACHE_VGMETADATA | PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | ENABLE_ALL_DEVS | ENABLE_FOREIGN_VGS,
CACHE_VGMETADATA | PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | ENABLE_ALL_DEVS | LOCKD_VG_SH,
"pvs\n"
"\t[-a|--all]\n"
"\t[--aligned]\n"
@ -952,12 +952,12 @@ xx(pvs,
aligned_ARG, all_ARG, binary_ARG, foreign_ARG, ignorelockingfailure_ARG,
ignoreskippedcluster_ARG, nameprefixes_ARG, noheadings_ARG, nolocking_ARG,
nosuffix_ARG, options_ARG, partial_ARG, readonly_ARG, rows_ARG,
segments_ARG, select_ARG, separator_ARG, sort_ARG, trustcache_ARG,
segments_ARG, select_ARG, separator_ARG, shared_ARG, sort_ARG, trustcache_ARG,
unbuffered_ARG, units_ARG, unquoted_ARG)
xx(pvscan,
"List all physical volumes",
PERMITTED_READ_ONLY | ENABLE_FOREIGN_VGS,
PERMITTED_READ_ONLY | LOCKD_VG_SH,
"pvscan\n"
"\t[-b|--background]\n"
"\t[--cache [-a|--activate ay] [ DevicePath | -j|--major major --minor minor]...]\n"
@ -994,7 +994,7 @@ xx(tags,
xx(vgcfgbackup,
"Backup volume group configuration(s)",
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | ENABLE_FOREIGN_VGS,
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"vgcfgbackup\n"
"\t[--commandprofile ProfileName]\n"
"\t[-d|--debug]\n"
@ -1074,11 +1074,12 @@ xx(vgchange,
metadataprofile_ARG, monitor_ARG, noudevsync_ARG, metadatacopies_ARG,
vgmetadatacopies_ARG, partial_ARG, physicalextentsize_ARG, poll_ARG,
refresh_ARG, resizeable_ARG, resizable_ARG, select_ARG, sysinit_ARG,
systemid_ARG, test_ARG, uuid_ARG)
systemid_ARG, test_ARG, uuid_ARG, lockstart_ARG, lockstop_ARG, locktype_ARG, lockopt_ARG,
force_ARG)
xx(vgck,
"Check the consistency of volume group(s)",
ALL_VGS_IS_DEFAULT,
ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"vgck "
"\t[--commandprofile ProfileName]\n"
"\t[-d|--debug]\n"
@ -1138,11 +1139,11 @@ xx(vgcreate,
physicalextentsize_ARG, test_ARG, force_ARG, zero_ARG, labelsector_ARG,
metadatasize_ARG, pvmetadatacopies_ARG, metadatacopies_ARG,
vgmetadatacopies_ARG, dataalignment_ARG, dataalignmentoffset_ARG,
systemid_ARG)
shared_ARG, systemid_ARG, locktype_ARG, lockopt_ARG)
xx(vgdisplay,
"Display volume group information",
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | ENABLE_FOREIGN_VGS,
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"vgdisplay\n"
"\t[-A|--activevolumegroups]\n"
"\t[-c|--colon | -s|--short | -v|--verbose]\n"
@ -1186,11 +1187,11 @@ xx(vgdisplay,
activevolumegroups_ARG, aligned_ARG, binary_ARG, colon_ARG, columns_ARG,
foreign_ARG, ignorelockingfailure_ARG, ignoreskippedcluster_ARG,
noheadings_ARG, nosuffix_ARG, options_ARG, partial_ARG, readonly_ARG,
select_ARG, short_ARG, separator_ARG, sort_ARG, unbuffered_ARG, units_ARG)
select_ARG, shared_ARG, short_ARG, separator_ARG, sort_ARG, unbuffered_ARG, units_ARG)
xx(vgexport,
"Unregister volume group(s) from the system",
ALL_VGS_IS_DEFAULT,
ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"vgexport\n"
"\t[-a|--all]\n"
"\t[--commandprofile ProfileName]\n"
@ -1330,7 +1331,7 @@ xx(vgrename,
xx(vgs,
"Display information about volume groups",
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | ENABLE_FOREIGN_VGS,
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"vgs\n"
"\t[--aligned]\n"
"\t[--binary]\n"
@ -1362,12 +1363,12 @@ xx(vgs,
aligned_ARG, all_ARG, binary_ARG, foreign_ARG, ignorelockingfailure_ARG,
ignoreskippedcluster_ARG, nameprefixes_ARG, noheadings_ARG,
nolocking_ARG, nosuffix_ARG, options_ARG, partial_ARG,
readonly_ARG, rows_ARG, select_ARG, separator_ARG, sort_ARG,
readonly_ARG, rows_ARG, select_ARG, separator_ARG, shared_ARG, sort_ARG,
trustcache_ARG, unbuffered_ARG, units_ARG, unquoted_ARG)
xx(vgscan,
"Search for all volume groups",
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | ENABLE_FOREIGN_VGS,
PERMITTED_READ_ONLY | ALL_VGS_IS_DEFAULT | LOCKD_VG_SH,
"vgscan "
"\t[--cache]\n"
"\t[--commandprofile ProfileName]\n"

View File

@ -606,6 +606,9 @@ static int _lvchange_persistent(struct cmd_context *cmd,
{
enum activation_change activate = CHANGE_AN;
/* The LV lock in lvmlockd should remain as it is. */
cmd->lockd_lv_disable = 1;
if (!get_and_validate_major_minor(cmd, lv->vg->fid->fmt,
&lv->major, &lv->minor))
return_0;
@ -989,6 +992,22 @@ static int _lvchange_single(struct cmd_context *cmd, struct logical_volume *lv,
return ECMD_FAILED;
}
if (!arg_count(cmd, activate_ARG) && !arg_count(cmd, refresh_ARG)) {
/*
* If a persistent lv lock already exists from activation
* (with the needed mode or higher), this will be a no-op.
* Otherwise, the lv lock will be taken as non-persistent
* and released when this command exits.
*
* FIXME: use "sh" if the options imply that the lvchange
* operation does not modify the LV.
*/
if (!lockd_lv(cmd, lv, "ex", 0)) {
stack;
return ECMD_FAILED;
}
}
/*
* FIXME: DEFAULT_BACKGROUND_POLLING should be "unspecified".
* If --poll is explicitly provided use it; otherwise polling
@ -1259,9 +1278,22 @@ int lvchange(struct cmd_context *cmd, int argc, char **argv)
}
}
/*
* Include foreign VGs that contain active LVs.
* That shouldn't happen in general, but if it does by some
* mistake, then we want to allow those LVs to be deactivated.
*/
if (arg_is_set(cmd, activate_ARG))
cmd->include_active_foreign_vgs = 1;
/*
* The default vg lock mode for lvchange is ex, but these options
* are cases where lvchange does not modify the vg, so they can use
* the sh lock mode.
*/
if (arg_count(cmd, activate_ARG) || arg_count(cmd, refresh_ARG))
cmd->lockd_vg_default_sh = 1;
return process_each_lv(cmd, argc, argv,
update ? READ_FOR_UPDATE : 0, NULL,
&_lvchange_single);

View File

@ -16,6 +16,7 @@
#include "polldaemon.h"
#include "lv_alloc.h"
#include "lvconvert_poll.h"
#include "lvmpolld-client.h"
struct lvconvert_params {
int cache;
@ -2524,6 +2525,12 @@ static int _lvconvert_thin(struct cmd_context *cmd,
return 0;
}
if (is_lockd_type(lv->vg->lock_type)) {
log_error("Can't use lock_type %s LV as external origin.",
lv->vg->lock_type);
return 0;
}
dm_list_init(&lvc.tags);
if (!pool_supports_external_origin(first_seg(pool_lv), lv))
@ -2641,6 +2648,12 @@ static int _lvconvert_pool(struct cmd_context *cmd,
struct logical_volume *data_lv;
struct logical_volume *metadata_lv = NULL;
struct logical_volume *pool_metadata_lv;
char *lockd_data_args = NULL;
char *lockd_meta_args = NULL;
char *lockd_data_name = NULL;
char *lockd_meta_name = NULL;
struct id lockd_data_id;
struct id lockd_meta_id;
char metadata_name[NAME_LEN], data_name[NAME_LEN];
int activate_pool;
@ -2657,6 +2670,13 @@ static int _lvconvert_pool(struct cmd_context *cmd,
}
}
/* An existing LV needs to have its lock freed once it becomes a data LV. */
if (is_lockd_type(vg->lock_type) && !lv_is_pool(pool_lv) && pool_lv->lock_args) {
lockd_data_args = dm_pool_strdup(cmd->mem, pool_lv->lock_args);
lockd_data_name = dm_pool_strdup(cmd->mem, pool_lv->name);
memcpy(&lockd_data_id, &pool_lv->lvid.id[1], sizeof(struct id));
}
if (!lv_is_visible(pool_lv)) {
log_error("Can't convert internal LV %s.", display_lvname(pool_lv));
return 0;
@ -2712,6 +2732,13 @@ static int _lvconvert_pool(struct cmd_context *cmd,
lp->pool_metadata_extents = lp->pool_metadata_lv->le_count;
metadata_lv = lp->pool_metadata_lv;
/* An existing LV needs to have its lock freed once it becomes a meta LV. */
if (is_lockd_type(vg->lock_type) && metadata_lv->lock_args) {
lockd_meta_args = dm_pool_strdup(cmd->mem, metadata_lv->lock_args);
lockd_meta_name = dm_pool_strdup(cmd->mem, metadata_lv->name);
memcpy(&lockd_meta_id, &metadata_lv->lvid.id[1], sizeof(struct id));
}
if (metadata_lv == pool_lv) {
log_error("Can't use same LV for pool data and metadata LV %s.",
display_lvname(metadata_lv));
@ -2974,6 +3001,27 @@ static int _lvconvert_pool(struct cmd_context *cmd,
if (!attach_pool_data_lv(seg, data_lv))
return_0;
/*
* Create a new lock for a thin pool LV. A cache pool LV has no lock.
* Locks are removed from existing LVs that are being converted to
* data and meta LVs (they are unlocked and deleted below.)
*/
if (is_lockd_type(vg->lock_type)) {
if (segtype_is_cache_pool(lp->segtype)) {
data_lv->lock_args = NULL;
metadata_lv->lock_args = NULL;
} else {
data_lv->lock_args = NULL;
metadata_lv->lock_args = NULL;
if (!strcmp(vg->lock_type, "sanlock"))
pool_lv->lock_args = "pending";
else if (!strcmp(vg->lock_type, "dlm"))
pool_lv->lock_args = "dlm";
/* The lock_args will be set in vg_write(). */
}
}
/* FIXME: revert renamed LVs in fail path? */
/* FIXME: any common code with metadata/thin_manip.c extend_pool() ? */
@ -3007,6 +3055,11 @@ mda_write:
log_warn("WARNING: Pool zeroing and large %s chunk size slows down "
"provisioning.", display_size(cmd, seg->chunk_size));
if (activate_pool && !lockd_lv(cmd, pool_lv, "ex", LDLV_PERSISTENT)) {
log_error("Failed to lock pool LV %s/%s", vg->name, pool_lv->name);
goto out;
}
if (activate_pool &&
!activate_lv_excl(cmd, pool_lv)) {
log_error("Failed to activate pool logical volume %s.",
@ -3031,6 +3084,22 @@ out:
(segtype_is_cache_pool(lp->segtype)) ?
"cache" : "thin");
/*
* Unlock and free the locks from existing LVs that became pool data
* and meta LVs.
*/
if (lockd_data_name) {
if (!lockd_lv_name(cmd, vg, lockd_data_name, &lockd_data_id, lockd_data_args, "un", LDLV_PERSISTENT))
log_error("Failed to unlock pool data LV %s/%s", vg->name, lockd_data_name);
lockd_free_lv(cmd, vg, lockd_data_name, &lockd_data_id, lockd_data_args);
}
if (lockd_meta_name) {
if (!lockd_lv_name(cmd, vg, lockd_meta_name, &lockd_meta_id, lockd_meta_args, "un", LDLV_PERSISTENT))
log_error("Failed to unlock pool metadata LV %s/%s", vg->name, lockd_meta_name);
lockd_free_lv(cmd, vg, lockd_meta_name, &lockd_meta_id, lockd_meta_args);
}
return r;
#if 0
revert_new_lv:
@ -3250,13 +3319,21 @@ static int lvconvert_single(struct cmd_context *cmd, struct lvconvert_params *lp
struct volume_group *vg;
int ret = ECMD_FAILED;
int saved_ignore_suspended_devices = ignore_suspended_devices();
uint32_t lockd_state;
if (arg_count(cmd, repair_ARG)) {
init_ignore_suspended_devices(1);
cmd->handles_missing_pvs = 1;
}
vg = vg_read(cmd, lp->vg_name, NULL, READ_FOR_UPDATE);
/*
* The VG lock will be released when the command exits.
* Commands that poll the LV will reacquire the VG lock.
*/
if (!lockd_vg(cmd, lp->vg_name, "ex", 0, &lockd_state))
goto_out;
vg = vg_read(cmd, lp->vg_name, NULL, READ_FOR_UPDATE, lockd_state);
if (vg_read_error(vg)) {
release_vg(vg);
goto_out;
@ -3268,6 +3345,17 @@ static int lvconvert_single(struct cmd_context *cmd, struct lvconvert_params *lp
goto_out;
}
/*
* If the lv is inactive before and after the command, the
* use of PERSISTENT here means the lv will remain locked as
* an effect of running the lvconvert.
* To unlock it, it would need to be activated+deactivated.
* Or, we could identify the commands for which the lv remains
* inactive, and not use PERSISTENT here for those cases.
*/
if (!lockd_lv(cmd, lv, "ex", LDLV_PERSISTENT))
goto_bad;
/*
* lp->pvh holds the list of PVs available for allocation or removal
*/
@ -3288,6 +3376,12 @@ static int lvconvert_single(struct cmd_context *cmd, struct lvconvert_params *lp
bad:
unlock_vg(cmd, lp->vg_name);
/*
* The command may sit and monitor progress for some time,
* and we do not need or want the VG lock held during that.
*/
lockd_vg(cmd, lp->vg_name, "un", 0, &lockd_state);
if (ret == ECMD_PROCESSED && lp->need_polling)
ret = _poll_logical_volume(cmd, lp->lv_to_poll,
lp->wait_completion);
@ -3306,6 +3400,7 @@ static int _lvconvert_merge_single(struct cmd_context *cmd, struct logical_volum
struct volume_group *vg_fresh;
struct logical_volume *lv_fresh;
int ret = ECMD_FAILED;
uint32_t lockd_state = 0; /* dummy placeholder, lvmlockd doesn't use this path */
/*
* FIXME can't trust lv's VG to be current given that caller
@ -3317,7 +3412,7 @@ static int _lvconvert_merge_single(struct cmd_context *cmd, struct logical_volum
vg_name = lv->vg->name;
unlock_vg(cmd, vg_name);
vg_fresh = vg_read(cmd, vg_name, NULL, READ_FOR_UPDATE);
vg_fresh = vg_read(cmd, vg_name, NULL, READ_FOR_UPDATE, lockd_state);
if (vg_read_error(vg_fresh)) {
log_error("ABORTING: Can't reread VG %s", vg_name);
goto out;
@ -3356,6 +3451,26 @@ out:
return ret;
}
/*
 * Per-LV helper used when lvmpolld handles the polling: process_each_lv
 * locks and reads the VG, this function starts the merge for one LV, and
 * process_each_lv unlocks the VG again.  The lvpoll command later locks
 * the VG, reads it, checks the progress, unlocks, sleeps, and repeats
 * until the merge is done.
 */
static int _lvconvert_lvmpolld_merge_single(struct cmd_context *cmd, struct logical_volume *lv,
					    struct processing_handle *handle)
{
	struct lvconvert_params *lp = (struct lvconvert_params *) handle->custom_handle;
	int ret;

	lp->lv_to_poll = lv;

	ret = _lvconvert_single(cmd, lv, lp);
	if (ret != ECMD_PROCESSED)
		stack;

	return ret;
}
int lvconvert(struct cmd_context * cmd, int argc, char **argv)
{
int ret;
@ -3377,10 +3492,16 @@ int lvconvert(struct cmd_context * cmd, int argc, char **argv)
goto_out;
}
if (lp.merge)
if (lp.merge) {
ret = process_each_lv(cmd, argc, argv, READ_FOR_UPDATE, handle,
lvmpolld_use() ? &_lvconvert_lvmpolld_merge_single :
&_lvconvert_merge_single);
else
if (ret == ECMD_PROCESSED && lvmpolld_use() && lp.need_polling) {
if ((ret = _poll_logical_volume(cmd, lp.lv_to_poll, lp.wait_completion)) != ECMD_PROCESSED)
stack;
}
} else
ret = lvconvert_single(cmd, &lp);
out:
destroy_processing_handle(cmd, handle);

View File

@ -1453,6 +1453,7 @@ int lvcreate(struct cmd_context *cmd, int argc, char **argv)
};
struct lvcreate_cmdline_params lcp = { 0 };
struct volume_group *vg;
uint32_t lockd_state;
if (!_lvcreate_params(cmd, argc, argv, &lp, &lcp)) {
stack;
@ -1464,8 +1465,11 @@ int lvcreate(struct cmd_context *cmd, int argc, char **argv)
return EINVALID_CMD_LINE;
}
if (!lockd_vg(cmd, lp.vg_name, "ex", 0, &lockd_state))
return_ECMD_FAILED;
log_verbose("Finding volume group \"%s\"", lp.vg_name);
vg = vg_read_for_update(cmd, lp.vg_name, NULL, 0);
vg = vg_read_for_update(cmd, lp.vg_name, NULL, 0, lockd_state);
if (vg_read_error(vg)) {
release_vg(vg);
return_ECMD_FAILED;
@ -1510,6 +1514,13 @@ int lvcreate(struct cmd_context *cmd, int argc, char **argv)
lp.pool_name ? : "with generated name", lp.vg_name, lp.segtype->name);
}
if (vg->lock_type && !strcmp(vg->lock_type, "sanlock")) {
if (!handle_sanlock_lv(cmd, vg)) {
log_error("No space for sanlock lock, extend the internal lvmlock LV.");
goto_out;
}
}
if (seg_is_thin_volume(&lp))
log_verbose("Making thin LV %s in pool %s in VG %s%s%s using segtype %s",
lp.lv_name ? : "with generated name",
@ -1517,6 +1528,9 @@ int lvcreate(struct cmd_context *cmd, int argc, char **argv)
lp.snapshot ? " as snapshot of " : "",
lp.snapshot ? lp.origin_name : "", lp.segtype->name);
if (is_lockd_type(vg->lock_type))
lp.needs_lockd_init = 1;
if (!lv_create_single(vg, &lp))
goto_out;

View File

@ -17,6 +17,7 @@
#include "lvm2cmdline.h"
#include "label.h"
#include "lvm-version.h"
#include "lvmlockd.h"
#include "stub.h"
#include "last-path-component.h"
@ -625,6 +626,19 @@ int alloc_arg(struct cmd_context *cmd __attribute__((unused)), struct arg_values
return 1;
}
/*
 * Command-line validation for the --locktype value: accept the string
 * only if it maps to a known lock type.  Returns 1 on success, 0 for
 * an unrecognized lock type.
 */
int locktype_arg(struct cmd_context *cmd __attribute__((unused)), struct arg_values *av)
{
	av->sign = SIGN_NONE;

	if (get_lock_type_from_string(av->value) == LOCK_TYPE_INVALID)
		return 0;

	return 1;
}
int segtype_arg(struct cmd_context *cmd, struct arg_values *av)
{
struct segment_type *segtype;
@ -1045,6 +1059,9 @@ static int _get_settings(struct cmd_context *cmd)
cmd->current_settings.backup = 0;
}
if (cmd->command->flags & LOCKD_VG_SH)
cmd->lockd_vg_default_sh = 1;
cmd->partial_activation = 0;
cmd->degraded_activation = 0;
activation_mode = find_config_tree_str(cmd, activation_mode_CFG, NULL);
@ -1081,9 +1098,14 @@ static int _get_settings(struct cmd_context *cmd)
init_ignorelockingfailure(0);
cmd->ignore_clustered_vgs = arg_is_set(cmd, ignoreskippedcluster_ARG);
cmd->error_foreign_vgs = cmd->command->flags & ENABLE_FOREIGN_VGS ? 0 : 1;
cmd->include_foreign_vgs = arg_is_set(cmd, foreign_ARG) ? 1 : 0;
cmd->include_active_foreign_vgs = cmd->command->flags & ENABLE_FOREIGN_VGS ? 1 : 0;
cmd->include_shared_vgs = arg_is_set(cmd, shared_ARG) ? 1 : 0;
/*
* This is set to zero by process_each which wants to print errors
* itself rather than having them printed in vg_read.
*/
cmd->vg_read_print_access_error = 1;
if (!arg_count(cmd, sysinit_ARG))
lvmetad_connect_or_warn();
@ -1407,6 +1429,31 @@ static int _prepare_profiles(struct cmd_context *cmd)
return 1;
}
/*
 * Configure this command's connection to lvmlockd from the
 * global/use_lvmlockd config setting.
 *
 * Returns 0 (error) only when use_lvmlockd is combined with clustered
 * locking (locking_type 3), which is rejected; returns 1 otherwise.
 * NOTE(review): a failing lvmlockd_connect() is not treated as an
 * error here -- confirm that commands are expected to detect a missing
 * connection later.
 */
static int _init_lvmlockd(struct cmd_context *cmd)
{
	const char *lvmlockd_socket;
	int use_lvmlockd = find_config_tree_bool(cmd, global_use_lvmlockd_CFG, NULL);

	/* lvmlockd and clustered locking_type 3 are mutually exclusive. */
	if (use_lvmlockd && locking_is_clustered()) {
		log_error("ERROR: configuration setting use_lvmlockd cannot be used with clustered locking_type 3.");
		return 0;
	}

	lvmlockd_disconnect(); /* start over when tool context is refreshed */

	/* Environment variable overrides the default socket path. */
	lvmlockd_socket = getenv("LVM_LVMLOCKD_SOCKET");
	if (!lvmlockd_socket)
		lvmlockd_socket = DEFAULT_RUN_DIR "/lvmlockd.socket";

	lvmlockd_set_socket(lvmlockd_socket);
	lvmlockd_set_use(use_lvmlockd);
	if (use_lvmlockd) {
		lvmlockd_init(cmd);
		lvmlockd_connect();
	}

	return 1;
}
int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
{
struct dm_config_tree *config_string_cft;
@ -1534,6 +1581,11 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
goto_out;
}
if (!_init_lvmlockd(cmd)) {
ret = ECMD_FAILED;
goto_out;
}
/*
* Other hosts might have changed foreign VGs so enforce a rescan
* before processing any command using them.
@ -1549,6 +1601,7 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
*/
ret = cmd->command->fn(cmd, argc, argv);
lvmlockd_disconnect();
fin_locking();
out:

View File

@ -27,6 +27,7 @@ int lvrename(struct cmd_context *cmd, int argc, char **argv)
char *st;
struct volume_group *vg;
struct lv_list *lvl;
uint32_t lockd_state;
int r = ECMD_FAILED;
if (argc == 3) {
@ -98,8 +99,11 @@ int lvrename(struct cmd_context *cmd, int argc, char **argv)
return EINVALID_CMD_LINE;
}
if (!lockd_vg(cmd, vg_name, "ex", 0, &lockd_state))
return_ECMD_FAILED;
log_verbose("Checking for existing volume group \"%s\"", vg_name);
vg = vg_read_for_update(cmd, vg_name, NULL, 0);
vg = vg_read_for_update(cmd, vg_name, NULL, 0, lockd_state);
if (vg_read_error(vg)) {
release_vg(vg);
return_ECMD_FAILED;

View File

@ -169,13 +169,17 @@ int lvresize(struct cmd_context *cmd, int argc, char **argv)
struct volume_group *vg;
struct dm_list *pvh = NULL;
struct logical_volume *lv;
uint32_t lockd_state;
int r = ECMD_FAILED;
if (!_lvresize_params(cmd, argc, argv, &lp))
return EINVALID_CMD_LINE;
if (!lockd_vg(cmd, lp.vg_name, "ex", 0, &lockd_state))
return_ECMD_FAILED;
log_verbose("Finding volume group %s", lp.vg_name);
vg = vg_read_for_update(cmd, lp.vg_name, NULL, 0);
vg = vg_read_for_update(cmd, lp.vg_name, NULL, 0, lockd_state);
if (vg_read_error(vg)) {
release_vg(vg);
return_ECMD_FAILED;

View File

@ -138,14 +138,20 @@ int wait_for_single_lv(struct cmd_context *cmd, struct poll_operation_id *id,
struct volume_group *vg;
struct logical_volume *lv;
int finished = 0;
uint32_t lockd_state;
/* Poll for completion */
while (!finished) {
if (parms->wait_before_testing)
_sleep_and_rescan_devices(parms);
if (!lockd_vg(cmd, id->vg_name, "sh", 0, &lockd_state)) {
log_error("ABORTING: Can't lock VG for %s.", id->display_name);
return 0;
}
/* Locks the (possibly renamed) VG again */
vg = vg_read(cmd, id->vg_name, NULL, READ_FOR_UPDATE);
vg = vg_read(cmd, id->vg_name, NULL, READ_FOR_UPDATE, lockd_state);
if (vg_read_error(vg)) {
release_vg(vg);
log_error("ABORTING: Can't reread VG for %s.", id->display_name);
@ -189,6 +195,8 @@ int wait_for_single_lv(struct cmd_context *cmd, struct poll_operation_id *id,
unlock_and_release_vg(cmd, vg, vg->name);
lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state);
/*
* FIXME Sleeping after testing, while preferred, also works around
* unreliable "finished" state checking in _percent_run. If the
@ -360,12 +368,32 @@ static int report_progress(struct cmd_context *cmd, struct poll_operation_id *id
{
struct volume_group *vg;
struct logical_volume *lv;
uint32_t lockd_state;
int ret;
vg = vg_read(cmd, id->vg_name, NULL, 0);
/*
* FIXME: we don't really need to take the vg lock here,
* because we only report the progress on the same host
* where the pvmove/lvconvert is happening. This means
* that the local pvmove/lvconvert/lvpoll commands are
* updating the local lvmetad with the latest info they
* have, and we just need to read the latest info that
* they have put into lvmetad about their progress.
* No VG lock is needed to protect anything here
* (we're just reading the VG), and no VG lock is
* needed to force a VG read from disk to get changes
* from other hosts, because the only change to the VG
* we're interested in is the change done locally.
*/
if (!lockd_vg(cmd, id->vg_name, "sh", 0, &lockd_state))
return 0;
vg = vg_read(cmd, id->vg_name, NULL, 0, lockd_state);
if (vg_read_error(vg)) {
release_vg(vg);
log_error("Can't reread VG for %s", id->display_name);
return 0;
ret = 0;
goto out_ret;
}
lv = find_lv(vg, id->lv_name);
@ -382,23 +410,28 @@ static int report_progress(struct cmd_context *cmd, struct poll_operation_id *id
else
log_verbose("Can't find LV in %s for %s. Already finished or removed.",
vg->name, id->display_name);
ret = 1;
goto out;
}
if (!lv_is_active_locally(lv)) {
log_verbose("%s: Interrupted: No longer active.", id->display_name);
ret = 1;
goto out;
}
if (parms->poll_fns->poll_progress(cmd, lv, id->display_name, parms) == PROGRESS_CHECK_FAILED) {
unlock_and_release_vg(cmd, vg, vg->name);
return_0;
ret = 0;
goto out;
}
ret = 1;
out:
unlock_and_release_vg(cmd, vg, vg->name);
return 1;
out_ret:
lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state);
return ret;
}
static int _lvmpolld_init_poll_vg(struct cmd_context *cmd, const char *vgname,

View File

@ -82,6 +82,14 @@ static int _pvchange_single(struct cmd_context *cmd, struct volume_group *vg,
}
}
/*
* Needed to change a property on an orphan PV.
* i.e. the global lock is only needed for orphans.
* Convert sh to ex.
*/
if (is_orphan(pv) && !lockd_gl(cmd, "ex", 0))
return_ECMD_FAILED;
if (tagargs) {
/* tag or deltag */
if (arg_count(cmd, addtag_ARG) && !change_tag(cmd, NULL, NULL, pv, addtag_ARG))

View File

@ -96,6 +96,10 @@ int pvcreate(struct cmd_context *cmd, int argc, char **argv)
int ret = ECMD_PROCESSED;
struct pvcreate_params pp;
/* Needed to change the set of orphan PVs. */
if (!lockd_gl(cmd, "ex", 0))
return_ECMD_FAILED;
pvcreate_params_set_defaults(&pp);
if (!pvcreate_restore_params_validate(cmd, argc, argv, &pp)) {

View File

@ -17,6 +17,7 @@
#include "polldaemon.h"
#include "display.h"
#include "pvmove_poll.h"
#include "lvmpolld-client.h"
#define PVMOVE_FIRST_TIME 0x00000001 /* Called for first time */
@ -598,6 +599,7 @@ static int _set_up_pvmove(struct cmd_context *cmd, const char *pv_name,
struct dm_list *lvs_changed;
struct physical_volume *pv;
struct logical_volume *lv_mirr;
uint32_t lockd_state;
unsigned flags = PVMOVE_FIRST_TIME;
unsigned exclusive;
int r = ECMD_FAILED;
@ -631,10 +633,13 @@ static int _set_up_pvmove(struct cmd_context *cmd, const char *pv_name,
/* Read VG */
log_verbose("Finding volume group \"%s\"", vg_name);
vg = vg_read(cmd, vg_name, NULL, READ_FOR_UPDATE);
if (!lockd_vg(cmd, vg_name, "ex", 0, &lockd_state))
return_ECMD_FAILED;
vg = vg_read(cmd, vg_name, NULL, READ_FOR_UPDATE, lockd_state);
if (vg_read_error(vg)) {
release_vg(vg);
return_ECMD_FAILED;
goto out_ret;
}
exclusive = _pvmove_is_exclusive(cmd, vg);
@ -700,6 +705,14 @@ static int _set_up_pvmove(struct cmd_context *cmd, const char *pv_name,
out:
free_pv_fid(pv);
unlock_and_release_vg(cmd, vg, vg_name);
out_ret:
/*
* Release explicitly because the command may continue running
* for some time monitoring the progress, and we don not want
* or need the lockd lock held over that.
*/
lockd_vg(cmd, vg_name, "un", 0, &lockd_state);
return r;
}
@ -712,6 +725,7 @@ static int _read_poll_id_from_pvname(struct cmd_context *cmd, const char *pv_nam
struct logical_volume *lv;
struct physical_volume *pv;
struct volume_group *vg;
uint32_t lockd_state;
if (!pv_name) {
log_error(INTERNAL_ERROR "Invalid PV name parameter.");
@ -723,13 +737,16 @@ static int _read_poll_id_from_pvname(struct cmd_context *cmd, const char *pv_nam
vg_name = pv_vg_name(pv);
if (!lockd_vg(cmd, vg_name, "sh", 0, &lockd_state))
return_0;
/* need read-only access */
vg = vg_read(cmd, vg_name, NULL, 0);
vg = vg_read(cmd, vg_name, NULL, 0, lockd_state);
if (vg_read_error(vg)) {
log_error("ABORTING: Can't read VG for %s.", pv_name);
release_vg(vg);
free_pv_fid(pv);
return 0;
ret = 0;
goto out;
}
if (!(lv = find_pvmove_lv(vg, pv_dev(pv), PVMOVE))) {
@ -743,6 +760,8 @@ static int _read_poll_id_from_pvname(struct cmd_context *cmd, const char *pv_nam
}
unlock_and_release_vg(cmd, vg, vg_name);
out:
lockd_vg(cmd, vg_name, "un", 0, &lockd_state);
free_pv_fid(pv);
return ret;
}
@ -828,6 +847,24 @@ int pvmove(struct cmd_context *cmd, int argc, char **argv)
return ECMD_FAILED;
}
if (lvmlockd_use() && !lvmpolld_use()) {
/*
* Don't want to spend the time making lvmlockd
* work without lvmpolld.
*/
log_error("Enable lvmpolld when using lvmlockd.");
return ECMD_FAILED;
}
if (lvmlockd_use() && !argc) {
/*
* FIXME: move process_each_vg from polldaemon up to here,
* then we can remove this limitation.
*/
log_error("Specify pvmove args when using lvmlockd.");
return ECMD_FAILED;
}
if (argc) {
if (!(lvid = dm_pool_alloc(cmd->mem, sizeof(*lvid)))) {
log_error("Failed to allocate lvid.");
@ -845,6 +882,15 @@ int pvmove(struct cmd_context *cmd, int argc, char **argv)
if (colon)
*colon = '\0';
/*
* To do a reverse mapping from PV name to VG name, we need the
* correct global mapping of PVs to VGs.
*/
if (!lockd_gl(cmd, "sh", 0)) {
stack;
return ECMD_FAILED;
}
if (!arg_count(cmd, abort_ARG)) {
if ((ret = _set_up_pvmove(cmd, pv_name, argc, argv, lvid, &vg_name, &lv_name)) != ECMD_PROCESSED) {
stack;
@ -857,6 +903,13 @@ int pvmove(struct cmd_context *cmd, int argc, char **argv)
if (!in_progress)
return ECMD_PROCESSED;
}
/*
* The command may sit and report progress for some time,
* and we do not want or need the lockd locks held during
* that time.
*/
lockd_gl(cmd, "un", 0);
}
return pvmove_poll(cmd, pv_name, lvid ? lvid->s : NULL, vg_name, lv_name,

View File

@ -32,6 +32,10 @@ int pvremove(struct cmd_context *cmd, int argc, char **argv)
dm_list_init(&pv_names);
/* Needed to change the set of orphan PVs. */
if (!lockd_gl(cmd, "ex", 0))
return_ECMD_FAILED;
for (i = 0; i < argc; i++) {
dm_unescape_colons_and_at_signs(argv[i], NULL, NULL);
if (!str_list_add(cmd->mem, &pv_names, argv[i]))

View File

@ -36,6 +36,14 @@ static int _pvresize_single(struct cmd_context *cmd,
}
params->total++;
/*
* Needed to change a property on an orphan PV.
* i.e. the global lock is only needed for orphans.
* Convert sh to ex.
*/
if (is_orphan(pv) && !lockd_gl(cmd, "ex", 0))
return_ECMD_FAILED;
if (!pv_resize_single(cmd, vg, pv, params->new_size))
return_ECMD_FAILED;

View File

@ -106,7 +106,7 @@ static int _auto_activation_handler(struct cmd_context *cmd,
return_0;
/* NB. This is safe because we know lvmetad is running and we won't hit disk. */
vg = vg_read(cmd, vgname, (const char *)&vgid_raw, 0);
vg = vg_read(cmd, vgname, (const char *)&vgid_raw, 0, 0);
if (vg_read_error(vg)) {
log_error("Failed to read Volume Group \"%s\" (%s) during autoactivation.", vgname, vgid);
release_vg(vg);
@ -322,7 +322,6 @@ out:
if (!sync_local_dev_names(cmd))
stack;
unlock_vg(cmd, VG_GLOBAL);
return ret;
}
@ -372,6 +371,10 @@ int pvscan(struct cmd_context *cmd, int argc, char **argv)
return ECMD_FAILED;
}
/* Needed for a current listing of the global VG namespace. */
if (!lockd_gl(cmd, "sh", 0))
return_ECMD_FAILED;
if (cmd->full_filter->wipe)
cmd->full_filter->wipe(cmd->full_filter);
lvmcache_destroy(cmd, 1, 0);

View File

@ -626,6 +626,14 @@ static int _report(struct cmd_context *cmd, int argc, char **argv,
quoted = find_config_tree_bool(cmd, report_quoted_CFG, NULL);
columns_as_rows = find_config_tree_bool(cmd, report_colums_as_rows_CFG, NULL);
/*
* Include foreign VGs that contain active LVs.
* That shouldn't happen in general, but if it does by some
* mistake, then we want to display those VGs and allow the
* LVs to be deactivated.
*/
cmd->include_active_foreign_vgs = 1;
/* Check PV specifics and do extra changes/actions if needed. */
_check_pv_list(cmd, argc, argv, &report_type, &args_are_pvs);

View File

@ -217,6 +217,22 @@ static int _ignore_vg(struct volume_group *vg, const char *vg_name,
}
}
/*
* Accessing a lockd VG when lvmlockd is not used is similar
* to accessing a foreign VG.
*/
if (read_error & FAILED_LOCK_TYPE) {
if (arg_vgnames && str_list_match_item(arg_vgnames, vg->name)) {
log_error("Cannot access VG %s with lock_type %s that requires lvmlockd.",
vg->name, vg->lock_type);
return 1;
} else {
read_error &= ~FAILED_LOCK_TYPE; /* Check for other errors */
log_verbose("Skipping volume group %s", vg_name);
*skip = 1;
}
}
if (read_error == FAILED_CLUSTERED) {
*skip = 1;
stack; /* Error already logged */
@ -721,6 +737,11 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
struct vgcreate_params *vp_def)
{
const char *system_id_arg_str;
const char *lock_type = NULL;
int locking_type;
int use_lvmlockd;
int use_clvmd;
lock_type_t lock_type_num;
vp_new->vg_name = skip_dev_dir(cmd, vp_def->vg_name, NULL);
vp_new->max_lv = arg_uint_value(cmd, maxlogicalvolumes_ARG,
@ -733,12 +754,6 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
vp_new->extent_size =
arg_uint_value(cmd, physicalextentsize_ARG, vp_def->extent_size);
if (arg_count(cmd, clustered_ARG))
vp_new->clustered = arg_int_value(cmd, clustered_ARG, vp_def->clustered);
else
/* Default depends on current locking type */
vp_new->clustered = locking_is_clustered();
if (arg_sign_value(cmd, physicalextentsize_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error(_pe_size_may_not_be_negative_msg);
return 0;
@ -769,16 +784,9 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
else
vp_new->vgmetadatacopies = find_config_tree_int(cmd, metadata_vgmetadatacopies_CFG, NULL);
/* A clustered VG has no system ID. */
if (vp_new->clustered) {
if (arg_is_set(cmd, systemid_ARG)) {
log_error("system ID cannot be set on clustered Volume Groups.");
return 0;
}
vp_new->system_id = NULL;
} else if (!(system_id_arg_str = arg_str_value(cmd, systemid_ARG, NULL)))
if (!(system_id_arg_str = arg_str_value(cmd, systemid_ARG, NULL))) {
vp_new->system_id = vp_def->system_id;
else {
} else {
if (!(vp_new->system_id = system_id_from_string(cmd, system_id_arg_str)))
return_0;
@ -793,6 +801,186 @@ int vgcreate_params_set_from_args(struct cmd_context *cmd,
}
}
if ((system_id_arg_str = arg_str_value(cmd, systemid_ARG, NULL))) {
vp_new->system_id = system_id_from_string(cmd, system_id_arg_str);
} else {
vp_new->system_id = vp_def->system_id;
}
if (system_id_arg_str) {
if (!vp_new->system_id || !vp_new->system_id[0])
log_warn("WARNING: A VG without a system ID allows unsafe access from other hosts.");
if (vp_new->system_id && cmd->system_id &&
strcmp(vp_new->system_id, cmd->system_id)) {
log_warn("VG with system ID %s might become inaccessible as local system ID is %s",
vp_new->system_id, cmd->system_id);
}
}
/*
* Locking: what kind of locking should be used for the
* new VG, and is it compatible with current lvm.conf settings.
*
* The end result is to set vp_new->lock_type to:
* none | clvm | dlm | sanlock.
*
* If 'vgcreate --lock-type <arg>' is set, the answer is given
* directly by <arg> which is one of none|clvm|dlm|sanlock.
*
* 'vgcreate --clustered y' is the way to create clvm VGs.
*
* 'vgcreate --shared' is the way to create lockd VGs.
* lock_type of sanlock or dlm is selected based on
* which lock manager is running.
*
*
* 1. Using neither clvmd nor lvmlockd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 0
* global/locking_type = 1
*
* - no locking is enabled
* - clvmd is not used
* - lvmlockd is not used
* - VGs with CLUSTERED set are ignored (requires clvmd)
* - VGs with lockd type are ignored (requires lvmlockd)
* - vgcreate can create new VGs with lock_type none
* - 'vgcreate --clustered y' fails
* - 'vgcreate --shared' fails
* - 'vgcreate' (neither option) creates a local VG
*
* 2. Using clvmd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 0
* global/locking_type = 3
*
* - locking through clvmd is enabled (traditional clvm config)
* - clvmd is used
* - lvmlockd is not used
* - VGs with CLUSTERED set can be used
* - VGs with lockd type are ignored (requires lvmlockd)
* - vgcreate can create new VGs with CLUSTERED status flag
* - 'vgcreate --clustered y' works
* - 'vgcreate --shared' fails
* - 'vgcreate' (neither option) creates a clvm VG
*
* 3. Using lvmlockd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 1
* global/locking_type = 1
*
* - locking through lvmlockd is enabled
* - clvmd is not used
* - lvmlockd is used
* - VGs with CLUSTERED set are ignored (requires clvmd)
* - VGs with lockd type can be used
* - vgcreate can create new VGs with lock_type sanlock or dlm
* - 'vgcreate --clustered y' fails
* - 'vgcreate --shared' works
* - 'vgcreate' (neither option) creates a local VG
*/
locking_type = find_config_tree_int(cmd, global_locking_type_CFG, NULL);
use_lvmlockd = find_config_tree_bool(cmd, global_use_lvmlockd_CFG, NULL);
use_clvmd = (locking_type == 3);
if (arg_is_set(cmd, locktype_ARG)) {
if (arg_is_set(cmd, clustered_ARG) || arg_is_set(cmd, shared_ARG)) {
log_error("A lock type cannot be specified with --shared or --clustered.");
return 0;
}
lock_type = arg_str_value(cmd, locktype_ARG, "");
} else if (arg_is_set(cmd, clustered_ARG)) {
const char *arg_str = arg_str_value(cmd, clustered_ARG, "");
int clustery = strcmp(arg_str, "y") ? 0 : 1;
if (use_clvmd) {
lock_type = clustery ? "clvm" : "none";
} else if (use_lvmlockd) {
log_error("lvmlockd is configured, use --shared with lvmlockd, and --clustered with clvmd.");
return 0;
} else {
if (clustery) {
log_error("The --clustered option requires clvmd (locking_type=3).");
return 0;
} else {
lock_type = "none";
}
}
} else if (arg_is_set(cmd, shared_ARG)) {
if (use_lvmlockd) {
if (!(lock_type = lockd_running_lock_type(cmd))) {
log_error("Failed to detect a running lock manager to select lock_type.");
return 0;
}
} else if (use_clvmd) {
log_error("Use --shared with lvmlockd, and --clustered with clvmd.");
return 0;
} else {
log_error("The --shared option requires lvmlockd (use_lvmlockd=1).");
return 0;
}
} else {
if (use_clvmd)
lock_type = locking_is_clustered() ? "clvm" : "none";
else
lock_type = "none";
}
/*
* Check that the lock_type is recognized, and is being
* used with the correct lvm.conf settings.
*/
lock_type_num = get_lock_type_from_string(lock_type);
switch (lock_type_num) {
case LOCK_TYPE_INVALID:
log_error("lock_type %s is invalid", lock_type);
return 0;
case LOCK_TYPE_SANLOCK:
case LOCK_TYPE_DLM:
if (!use_lvmlockd) {
log_error("lock_type %s requires use_lvmlockd configuration setting", lock_type);
return 0;
}
break;
case LOCK_TYPE_CLVM:
if (!use_clvmd) {
log_error("lock_type clvm requires locking_type 3 configuration setting");
return 0;
}
break;
case LOCK_TYPE_NONE:
break;
};
/*
* The vg is not owned by one host/system_id.
* Locking coordinates access from multiple hosts.
*/
if (lock_type_num == LOCK_TYPE_DLM || lock_type_num == LOCK_TYPE_SANLOCK || lock_type_num == LOCK_TYPE_CLVM)
vp_new->system_id = NULL;
vp_new->lock_type = lock_type;
if (lock_type_num == LOCK_TYPE_CLVM)
vp_new->clustered = 1;
else
vp_new->clustered = 0;
log_debug("Setting lock_type to %s", vp_new->lock_type);
return 1;
}
@ -1700,6 +1888,7 @@ static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t flags,
struct vgnameid_list *vgnl;
const char *vg_name;
const char *vg_uuid;
uint32_t lockd_state;
int selected;
int whole_selected = 0;
int ret_max = ECMD_PROCESSED;
@ -1724,17 +1913,19 @@ static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t flags,
vg_uuid = vgnl->vgid;
skip = 0;
vg = vg_read(cmd, vg_name, vg_uuid, flags);
if (!lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
ret_max = ECMD_FAILED;
continue;
}
vg = vg_read(cmd, vg_name, vg_uuid, flags, lockd_state);
if (_ignore_vg(vg, vg_name, arg_vgnames, flags & READ_ALLOW_INCONSISTENT, &skip)) {
stack;
ret_max = ECMD_FAILED;
release_vg(vg);
continue;
}
if (skip) {
release_vg(vg);
continue;
goto endvg;
}
if (skip)
goto endvg;
/* Process this VG? */
if ((process_all ||
@ -1749,10 +1940,11 @@ static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t flags,
ret_max = ret;
}
if (vg_read_error(vg))
if (!vg_read_error(vg))
unlock_vg(cmd, vg_name);
endvg:
release_vg(vg);
else
unlock_and_release_vg(cmd, vg, vg_name);
lockd_vg(cmd, vg_name, "un", 0, &lockd_state);
}
/* the VG is selected if at least one LV is selected */
@ -1806,7 +1998,8 @@ int process_each_vg(struct cmd_context *cmd, int argc, char **argv,
unsigned one_vgname_arg = (flags & ONE_VGNAME_ARG);
int ret;
cmd->error_foreign_vgs = 0;
/* Disable error in vg_read so we can print it from ignore_vg. */
cmd->vg_read_print_access_error = 0;
dm_list_init(&arg_tags);
dm_list_init(&arg_vgnames);
@ -1824,9 +2017,16 @@ int process_each_vg(struct cmd_context *cmd, int argc, char **argv,
* any tags were supplied and need resolving; or
* no VG names were given and the command defaults to processing all VGs.
*/
if (((dm_list_empty(&arg_vgnames) && enable_all_vgs) || !dm_list_empty(&arg_tags)) &&
!get_vgnameids(cmd, &vgnameids_on_system, NULL, 0))
if ((dm_list_empty(&arg_vgnames) && enable_all_vgs) || !dm_list_empty(&arg_tags)) {
/* Needed for a current listing of the global VG namespace. */
if (!lockd_gl(cmd, "sh", 0)) {
ret = ECMD_FAILED;
goto_out;
}
if (!get_vgnameids(cmd, &vgnameids_on_system, NULL, 0))
goto_out;
}
if (dm_list_empty(&arg_vgnames) && dm_list_empty(&vgnameids_on_system)) {
/* FIXME Should be log_print, but suppressed for reporting cmds */
@ -2140,6 +2340,7 @@ static int _process_lv_vgnameid_list(struct cmd_context *cmd, uint32_t flags,
struct dm_str_list *sl;
struct dm_list *tags_arg;
struct dm_list lvnames;
uint32_t lockd_state;
const char *vg_name;
const char *vg_uuid;
const char *vgn;
@ -2186,18 +2387,20 @@ static int _process_lv_vgnameid_list(struct cmd_context *cmd, uint32_t flags,
}
}
vg = vg_read(cmd, vg_name, vg_uuid, flags);
if (!lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
ret_max = ECMD_FAILED;
continue;
}
vg = vg_read(cmd, vg_name, vg_uuid, flags, lockd_state);
if (_ignore_vg(vg, vg_name, arg_vgnames, flags & READ_ALLOW_INCONSISTENT, &skip)) {
stack;
ret_max = ECMD_FAILED;
release_vg(vg);
continue;
goto endvg;
}
if (skip) {
release_vg(vg);
continue;
}
if (skip)
goto endvg;
ret = process_each_lv_in_vg(cmd, vg, &lvnames, tags_arg, 0,
handle, process_single_lv);
@ -2206,7 +2409,10 @@ static int _process_lv_vgnameid_list(struct cmd_context *cmd, uint32_t flags,
if (ret > ret_max)
ret_max = ret;
unlock_and_release_vg(cmd, vg, vg_name);
unlock_vg(cmd, vg_name);
endvg:
release_vg(vg);
lockd_vg(cmd, vg_name, "un", 0, &lockd_state);
}
return ret_max;
@ -2229,7 +2435,8 @@ int process_each_lv(struct cmd_context *cmd, int argc, char **argv, uint32_t fla
int need_vgnameids = 0;
int ret;
cmd->error_foreign_vgs = 0;
/* Disable error in vg_read so we can print it from ignore_vg. */
cmd->vg_read_print_access_error = 0;
dm_list_init(&arg_tags);
dm_list_init(&arg_vgnames);
@ -2263,8 +2470,16 @@ int process_each_lv(struct cmd_context *cmd, int argc, char **argv, uint32_t fla
else if (dm_list_empty(&arg_vgnames) && handle->internal_report_for_select)
need_vgnameids = 1;
if (need_vgnameids && !get_vgnameids(cmd, &vgnameids_on_system, NULL, 0))
if (need_vgnameids) {
/* Needed for a current listing of the global VG namespace. */
if (!lockd_gl(cmd, "sh", 0)) {
ret = ECMD_FAILED;
goto_out;
}
if (!get_vgnameids(cmd, &vgnameids_on_system, NULL, 0))
goto_out;
}
if (dm_list_empty(&arg_vgnames) && dm_list_empty(&vgnameids_on_system)) {
/* FIXME Should be log_print, but suppressed for reporting cmds */
@ -2657,6 +2872,7 @@ static int _process_pvs_in_vgs(struct cmd_context *cmd, uint32_t flags,
struct vgnameid_list *vgnl;
const char *vg_name;
const char *vg_uuid;
uint32_t lockd_state;
int ret_max = ECMD_PROCESSED;
int ret;
int skip;
@ -2669,14 +2885,17 @@ static int _process_pvs_in_vgs(struct cmd_context *cmd, uint32_t flags,
vg_uuid = vgnl->vgid;
skip = 0;
vg = vg_read(cmd, vg_name, vg_uuid, flags | READ_WARN_INCONSISTENT);
if (!lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
ret_max = ECMD_FAILED;
continue;
}
vg = vg_read(cmd, vg_name, vg_uuid, flags | READ_WARN_INCONSISTENT, lockd_state);
if (_ignore_vg(vg, vg_name, NULL, flags & READ_ALLOW_INCONSISTENT, &skip)) {
stack;
ret_max = ECMD_FAILED;
if (!skip) {
release_vg(vg);
continue;
}
if (!skip)
goto endvg;
/* Drop through to eliminate a clustered VG's PVs from the devices list */
}
@ -2693,10 +2912,11 @@ static int _process_pvs_in_vgs(struct cmd_context *cmd, uint32_t flags,
if (ret > ret_max)
ret_max = ret;
if (skip)
if (!skip)
unlock_vg(cmd, vg->name);
endvg:
release_vg(vg);
else
unlock_and_release_vg(cmd, vg, vg->name);
lockd_vg(cmd, vg_name, "un", 0, &lockd_state);
/* Quit early when possible. */
if (!process_all_pvs && dm_list_empty(arg_tags) && dm_list_empty(arg_devices))
@ -2724,7 +2944,8 @@ int process_each_pv(struct cmd_context *cmd,
int ret_max = ECMD_PROCESSED;
int ret;
cmd->error_foreign_vgs = 0;
/* Disable error in vg_read so we can print it from ignore_vg. */
cmd->vg_read_print_access_error = 0;
dm_list_init(&arg_tags);
dm_list_init(&arg_pvnames);
@ -2750,6 +2971,10 @@ int process_each_pv(struct cmd_context *cmd,
process_all_devices = process_all_pvs && (cmd->command->flags & ENABLE_ALL_DEVS) &&
arg_count(cmd, all_ARG);
/* Needed for a current listing of the global VG namespace. */
if (!only_this_vgname && !lockd_gl(cmd, "sh", 0))
return_ECMD_FAILED;
/*
* Need pvid's set on all PVs before processing so that pvid's
* can be compared to find duplicates while processing.

View File

@ -28,6 +28,7 @@
#include "archiver.h"
#include "lvmcache.h"
#include "lvmetad.h"
#include "lvmlockd.h"
#include "lvm-version.h"
#include "config.h"
#include "defaults.h"
@ -108,8 +109,8 @@ struct arg_value_group_list {
#define ENABLE_ALL_DEVS 0x00000008
/* Exactly one VG name argument required. */
#define ONE_VGNAME_ARG 0x00000010
/* Command is allowed to read foreign VGs. */
#define ENABLE_FOREIGN_VGS 0x00000020
/* Command needs a shared lock on a VG; it only reads the VG. */
#define LOCKD_VG_SH 0x00000020
/* a register of the lvm commands */
struct command {
@ -146,6 +147,7 @@ int metadatatype_arg(struct cmd_context *cmd, struct arg_values *av);
int units_arg(struct cmd_context *cmd, struct arg_values *av);
int segtype_arg(struct cmd_context *cmd, struct arg_values *av);
int alloc_arg(struct cmd_context *cmd, struct arg_values *av);
int locktype_arg(struct cmd_context *cmd, struct arg_values *av);
int readahead_arg(struct cmd_context *cmd, struct arg_values *av);
int metadatacopies_arg(struct cmd_context *cmd __attribute__((unused)), struct arg_values *av);

View File

@ -313,9 +313,18 @@ static int _vgchange_clustered(struct cmd_context *cmd,
struct volume_group *vg)
{
int clustered = arg_int_value(cmd, clustered_ARG, 0);
const char *lock_type = arg_str_value(cmd, locktype_ARG, NULL);
struct lv_list *lvl;
struct lv_segment *mirror_seg;
if (find_config_tree_bool(cmd, global_use_lvmlockd_CFG, NULL)) {
log_error("lvmlockd requires using the vgchange --lock-type option.");
return 0;
}
if (lock_type && !strcmp(lock_type, "clvm"))
clustered = 1;
if (clustered && vg_is_clustered(vg)) {
if (vg->system_id && *vg->system_id)
log_warn("WARNING: Clearing invalid system ID %s from volume group %s.",
@ -511,6 +520,216 @@ static int _vgchange_profile(struct cmd_context *cmd,
return 1;
}
/*
 * Change the lock_type of a VG (to one of: none | clvm | dlm | sanlock).
 *
 * Only reached through the vgchange option dispatch table when --lock-type
 * was given, so lock_type from locktype_ARG is assumed non-NULL here
 * (NOTE(review): confirm the dispatch table is the only caller).
 *
 * Returns 1 on success (VG metadata is updated in memory; the caller is
 * responsible for writing it), 0 on failure.
 */
static int _vgchange_locktype(struct cmd_context *cmd,
			      struct volume_group *vg)
{
	const char *lock_type = arg_str_value(cmd, locktype_ARG, NULL);
	struct lv_list *lvl;
	struct logical_volume *lv;

	/*
	 * This is a special/forced exception to change the lock type to none.
	 * It's needed for recovery cases and skips the normal steps of undoing
	 * the current lock type.  It's a way to forcibly get access to a VG
	 * when the normal locking mechanisms are not working.
	 *
	 * It ignores: the current lvm locking config, lvmlockd, the state of
	 * the vg on other hosts, etc.  It is meant to just remove any locking
	 * related metadata from the VG (cluster/lock_type flags, lock_type,
	 * lock_args).
	 *
	 * This can be necessary when manually recovering from certain failures.
	 * e.g. when a pv is lost containing the lvmlock lv (holding sanlock
	 * leases), the vg lock_type needs to be changed to none, and then
	 * back to sanlock, which recreates the lvmlock lv and leases.
	 */
	if (!strcmp(lock_type, "none") && arg_is_set(cmd, force_ARG)) {
		if (yes_no_prompt("Forcibly change VG %s lock type to none? [y/n]: ", vg->name) == 'n') {
			log_error("VG lock type not changed.");
			return 0;
		}

		/* Strip all locking metadata: flag, type, and per-LV args. */
		vg->status &= ~CLUSTERED;
		vg->lock_type = "none";
		vg->lock_args = NULL;

		dm_list_iterate_items(lvl, &vg->lvs)
			lvl->lv->lock_args = NULL;

		return 1;
	}

	/* Normalize an unset lock_type to the value it implies. */
	if (!vg->lock_type) {
		if (vg_is_clustered(vg))
			vg->lock_type = "clvm";
		else
			vg->lock_type = "none";
	}

	/* No-op change: warn but report success. */
	if (!strcmp(vg->lock_type, lock_type)) {
		log_warn("New lock_type %s matches the current lock_type %s.",
			 lock_type, vg->lock_type);
		return 1;
	}

	/*
	 * When lvm is currently using clvm, this function is just an alternative
	 * to vgchange -c{y,n}, and can:
	 * - change none to clvm
	 * - change clvm to none
	 * - it CANNOT change to or from a lockd type
	 */
	if (locking_is_clustered()) {
		if (is_lockd_type(lock_type)) {
			log_error("Changing to lock type %s requires lvmlockd.", lock_type);
			return 0;
		}

		return _vgchange_clustered(cmd, vg);
	}

	/*
	 * When lvm is currently using lvmlockd, this function can:
	 * - change none to lockd type
	 * - change none to clvm (with warning about not being able to use it)
	 * - change lockd type to none
	 * - change lockd type to clvm (with warning about not being able to use it)
	 * - change clvm to none
	 * - change clvm to lockd type
	 */

	/* Converting locking while LVs are active is not supported. */
	if (lvs_in_vg_activated(vg)) {
		log_error("Changing VG %s lock type not allowed with active LVs",
			  vg->name);
		return 0;
	}

	/*
	 * Check if there are any LV types in the VG that cannot be handled
	 * with the new lock type.  Remove this once all LV types can be
	 * handled.
	 */
	if (is_lockd_type(lock_type)) {
		dm_list_iterate_items(lvl, &vg->lvs) {
			lv = lvl->lv;
			if ((lv->status & SNAPSHOT) || lv_is_cow(lv)) {
				log_error("Changing to lock type %s is not allowed with cow snapshot LV %s/%s",
					  lock_type, vg->name, lv->name);
				return 0;
			}
		}
	}

	/* none to clvm */
	if (!strcmp(vg->lock_type, "none") && !strcmp(lock_type, "clvm")) {
		log_warn("New clvm lock type will not be usable with lvmlockd.");
		vg->status |= CLUSTERED;
		vg->lock_type = "clvm"; /* this is optional */
		return 1;
	}

	/* clvm to none */
	if (!strcmp(vg->lock_type, "clvm") && !strcmp(lock_type, "none")) {
		vg->status &= ~CLUSTERED;
		vg->lock_type = "none";
		return 1;
	}

	/* clvm to ..., first undo clvm */
	if (!strcmp(vg->lock_type, "clvm")) {
		vg->status &= ~CLUSTERED;
	}

	/*
	 * lockd type to ..., first undo lockd type
	 *
	 * To allow this, we need to do:
	 * lockd_stop_vg();
	 * lockd_free_vg_before();
	 * lockd_free_vg_after();
	 */
	if (is_lockd_type(vg->lock_type)) {
		/* FIXME: implement full undoing of the lock_type */
		log_error("Changing VG %s from lock type %s not yet allowed.",
			  vg->name, vg->lock_type);
		return 0;
	}

	/* ... to clvm */
	if (!strcmp(lock_type, "clvm")) {
		log_warn("New clvm lock type will not be usable with lvmlockd.");
		vg->status |= CLUSTERED;
		vg->lock_type = "clvm"; /* this is optional */
		vg->system_id = NULL;
		return 1;
	}

	/* ... to lockd type */
	if (is_lockd_type(lock_type)) {
		/*
		 * For lock_type dlm, lockd_init_vg() will do a single
		 * vg_write() that sets lock_type, sets lock_args, clears
		 * system_id, and sets all LV lock_args to dlm.
		 */
		if (!strcmp(lock_type, "dlm")) {
			dm_list_iterate_items(lvl, &vg->lvs) {
				lv = lvl->lv;
				if (lockd_lv_uses_lock(lv))
					lv->lock_args = "dlm";
			}
		}

		/*
		 * See below.  We cannot set valid LV lock_args until stage 1
		 * of the change is done, so we need to skip the validation of
		 * the lock_args during stage 1.
		 */
		if (!strcmp(lock_type, "sanlock"))
			vg->skip_validate_lock_args = 1;

		/* A shared VG is not owned by a single host. */
		vg->system_id = NULL;

		if (!lockd_init_vg(cmd, vg, lock_type)) {
			log_error("Failed to initialize lock args for lock type %s", lock_type);
			return 0;
		}

		/*
		 * For lock_type sanlock, there must be multiple steps
		 * because the VG needs an active lvmlock LV before
		 * LV lock areas can be allocated, which must be done
		 * before LV lock_args are written.  So, the LV lock_args
		 * remain unset during the first stage of the conversion.
		 *
		 * Stage 1:
		 * lockd_init_vg() creates and activates the lvmlock LV,
		 * then sets lock_type, sets lock_args, and clears system_id.
		 *
		 * Stage 2:
		 * We get here, and can now set LV lock_args.  This uses
		 * the standard code path for allocating LV locks in
		 * vg_write() by setting LV lock_args to "pending",
		 * which tells vg_write() to call lockd_init_lv()
		 * and sets the lv->lock_args value before writing the VG.
		 */
		if (!strcmp(lock_type, "sanlock")) {
			dm_list_iterate_items(lvl, &vg->lvs) {
				lv = lvl->lv;
				if (lockd_lv_uses_lock(lv))
					lv->lock_args = "pending";
			}

			vg->skip_validate_lock_args = 0;
		}

		return 1;
	}

	log_error("Unknown lock type");
	return 0;
}
/*
* This function will not be called unless the local host is allowed to use the
* VG. Either the VG has no system_id, or the VG and host have matching
@ -582,9 +801,83 @@ static int _vgchange_system_id(struct cmd_context *cmd, struct volume_group *vg)
if (vg->lvm1_system_id)
*vg->lvm1_system_id = '\0';
/* update system_id in lvmlockd's record for this vg */
if (!lockd_start_vg(cmd, vg))
log_debug("Failed to update lvmlockd.");
return 1;
}
/*
 * Decide whether VG vg passes the lock-start name list configured at
 * cfg_id (e.g. activation/lock_start_list).
 *
 * Returns 1 when the list is undefined (no restriction) or contains the
 * VG name; 0 when a list is defined and the name is absent.
 */
static int _passes_lock_start_filter(struct cmd_context *cmd,
				     struct volume_group *vg,
				     const int cfg_id)
{
	const struct dm_config_node *node;
	const struct dm_config_value *val;

	/* undefined list means no restrictions, all vg names pass */
	node = find_config_tree_node(cmd, cfg_id, NULL);
	if (!node)
		return 1;

	/* with a defined list, the vg name must be included to pass */
	for (val = node->v; val; val = val->next) {
		if (val->type == DM_CFG_EMPTY_ARRAY)
			break;

		if (val->type != DM_CFG_STRING) {
			log_error("Ignoring invalid string in lock_start list");
			continue;
		}

		if (!val->v.str[0]) {
			log_error("Ignoring empty string in config file");
			continue;
		}

		/* ignoring tags for now */
		if (!strcmp(val->v.str, vg->name))
			return 1;
	}

	return 0;
}
/*
 * Start the lockspace of a lockd VG (vgchange --lock-start), subject to
 * the configured lock_start_list / auto_lock_start_list filters.
 *
 * --force, or the absence of a --lockopt value, bypasses both filters.
 * A VG filtered out is treated as success (returns 1) without starting.
 */
static int _vgchange_lock_start(struct cmd_context *cmd, struct volume_group *vg)
{
	const char *start_opt = arg_str_value(cmd, lockopt_ARG, NULL);

	if (start_opt && !arg_is_set(cmd, force_ARG)) {
		/* "auto"/"autowait" additionally apply the auto list below. */
		int auto_opt = (!strcmp(start_opt, "auto") ||
				!strcmp(start_opt, "autowait"));

		if (!_passes_lock_start_filter(cmd, vg, activation_lock_start_list_CFG)) {
			log_verbose("Not starting %s since it does not pass lock_start_list", vg->name);
			return 1;
		}

		if (auto_opt &&
		    !_passes_lock_start_filter(cmd, vg, activation_auto_lock_start_list_CFG)) {
			log_verbose("Not starting %s since it does not pass auto_lock_start_list", vg->name);
			return 1;
		}
	}

	return lockd_start_vg(cmd, vg);
}
/*
 * Stop the lockspace of a lockd VG (vgchange --lock-stop).
 * Returns the result of lockd_stop_vg().
 */
static int _vgchange_lock_stop(struct cmd_context *cmd, struct volume_group *vg)
{
	/* Disable the unlock in toollib because it's pointless after the stop. */
	cmd->lockd_vg_disable = 1;

	return lockd_stop_vg(cmd, vg);
}
static int vgchange_single(struct cmd_context *cmd, const char *vg_name,
struct volume_group *vg,
struct processing_handle *handle __attribute__((unused)))
@ -610,6 +903,7 @@ static int vgchange_single(struct cmd_context *cmd, const char *vg_name,
{ metadataprofile_ARG, &_vgchange_profile },
{ profile_ARG, &_vgchange_profile },
{ detachprofile_ARG, &_vgchange_profile },
{ locktype_ARG, &_vgchange_locktype },
{ systemid_ARG, &_vgchange_system_id },
};
@ -699,13 +993,90 @@ static int vgchange_single(struct cmd_context *cmd, const char *vg_name,
if (!_vgchange_background_polling(cmd, vg))
return_ECMD_FAILED;
if (arg_is_set(cmd, lockstart_ARG)) {
if (!_vgchange_lock_start(cmd, vg))
return_ECMD_FAILED;
} else if (arg_is_set(cmd, lockstop_ARG)) {
if (!_vgchange_lock_stop(cmd, vg))
return_ECMD_FAILED;
}
return ret;
}
/*
* vgchange can do different things that require different
* locking, so look at each of those things here.
*
* Set up overrides for the default VG locking for various special cases.
* The VG lock will be acquired in process_each_vg.
*
* Acquire the gl lock according to which kind of vgchange command this is.
*/
static int _lockd_vgchange(struct cmd_context *cmd, int argc, char **argv)
{
/* The default vg lock mode is ex, but these options only need sh. */
if (arg_is_set(cmd, activate_ARG) || arg_is_set(cmd, refresh_ARG))
cmd->lockd_vg_default_sh = 1;
/* Starting a vg lockspace means there are no locks available yet. */
if (arg_is_set(cmd, lockstart_ARG))
cmd->lockd_vg_disable = 1;
/*
* In most cases, lockd_vg does not apply when changing lock type.
* (We don't generally allow changing *from* lockd type yet.)
* lockd_vg could be called within _vgchange_locktype as needed.
*/
if (arg_is_set(cmd, locktype_ARG))
cmd->lockd_vg_disable = 1;
/*
* Changing system_id or lock_type must only be done on explicitly
* named vgs.
*/
if (arg_is_set(cmd, systemid_ARG) || arg_is_set(cmd, locktype_ARG))
cmd->command->flags &= ~ALL_VGS_IS_DEFAULT;
if (arg_is_set(cmd, lockstart_ARG)) {
/*
* The lockstart condition takes the global lock to serialize
* with any other host that tries to remove the VG while this
* tries to start it. (Zero argc means all VGs, in wich case
* process_each_vg will acquire the global lock.)
*/
if (argc && !lockd_gl(cmd, "sh", 0))
return_ECMD_FAILED;
} else if (arg_is_set(cmd, systemid_ARG) || arg_is_set(cmd, locktype_ARG)) {
/*
* This is a special case where taking the global lock is
* not needed to protect global state, because the change is
* only to an existing VG. But, taking the global lock ex is
* helpful in this case to trigger a global cache validation
* on other hosts, to cause them to see the new system_id or
* lock_type.
*/
if (!lockd_gl(cmd, "ex", LDGL_UPDATE_NAMES))
return_ECMD_FAILED;
}
return 1;
}
int vgchange(struct cmd_context *cmd, int argc, char **argv)
{
int ret;
int noupdate =
arg_count(cmd, activate_ARG) ||
arg_count(cmd, lockstart_ARG) ||
arg_count(cmd, lockstop_ARG) ||
arg_count(cmd, monitor_ARG) ||
arg_count(cmd, poll_ARG) ||
arg_count(cmd, refresh_ARG);
@ -726,6 +1097,7 @@ int vgchange(struct cmd_context *cmd, int argc, char **argv)
arg_count(cmd, clustered_ARG) ||
arg_count(cmd, alloc_ARG) ||
arg_count(cmd, vgmetadatacopies_ARG) ||
arg_count(cmd, locktype_ARG) ||
arg_count(cmd, systemid_ARG);
int update = update_partial_safe || update_partial_unsafe;
@ -821,9 +1193,35 @@ int vgchange(struct cmd_context *cmd, int argc, char **argv)
if (!update || !update_partial_unsafe)
cmd->handles_missing_pvs = 1;
/*
* Include foreign VGs that contain active LVs.
* That shouldn't happen in general, but if it does by some
* mistake, then we want to allow those LVs to be deactivated.
*/
if (arg_is_set(cmd, activate_ARG))
cmd->include_active_foreign_vgs = 1;
return process_each_vg(cmd, argc, argv, update ? READ_FOR_UPDATE : 0,
if (!_lockd_vgchange(cmd, argc, argv))
return_ECMD_FAILED;
ret = process_each_vg(cmd, argc, argv, update ? READ_FOR_UPDATE : 0,
NULL, &vgchange_single);
/* Wait for lock-start ops that were initiated in vgchange_lockstart. */
if (arg_is_set(cmd, lockstart_ARG)) {
const char *start_opt = arg_str_value(cmd, lockopt_ARG, NULL);
lockd_gl(cmd, "un", 0);
if (!start_opt || !strcmp(start_opt, "wait") || !strcmp(start_opt, "autowait")) {
log_print_unless_silent("Starting locking. Waiting until locks are ready...");
lockd_start_wait(cmd);
} else if (!strcmp(start_opt, "nowait")) {
log_print_unless_silent("Starting locking. VG is read-only until locks are ready.");
}
}
return ret;
}

View File

@ -50,6 +50,13 @@ int vgcreate(struct cmd_context *cmd, int argc, char **argv)
if (!vgcreate_params_validate(cmd, &vp_new))
return EINVALID_CMD_LINE;
/*
* Needed to change the global VG namespace,
* and to change the set of orphan PVs.
*/
if (!lockd_gl_create(cmd, "ex", vp_new.lock_type))
return ECMD_FAILED;
lvmcache_seed_infos_from_lvmetad(cmd);
/* Create the new VG */
@ -119,6 +126,19 @@ int vgcreate(struct cmd_context *cmd, int argc, char **argv)
if (!vg_write(vg) || !vg_commit(vg))
goto_bad;
/*
* The VG is initially written without lock_type set, i.e. it starts as
* a local VG. lockd_init_vg() then writes the VG a second time with
* both lock_type and lock_args set.
*/
if (!lockd_init_vg(cmd, vg, vp_new.lock_type)) {
log_error("Failed to initialize lock args for lock type %s",
vp_new.lock_type);
vg_remove_pvs(vg);
vg_remove_direct(vg);
goto_bad;
}
unlock_vg(cmd, VG_ORPHANS);
unlock_vg(cmd, vp_new.vg_name);
@ -128,6 +148,33 @@ int vgcreate(struct cmd_context *cmd, int argc, char **argv)
clustered_message, *clustered_message ? 'v' : 'V', vg->name,
vg->system_id ? " with system ID " : "", vg->system_id ? : "");
/*
* Start the VG lockspace because it will likely be used right away.
* Optionally wait for the start to complete so the VG can be fully
* used after this command completes (otherwise, the VG can only be
* read without locks until the lockspace is done starting.)
*/
if (is_lockd_type(vg->lock_type)) {
const char *start_opt = arg_str_value(cmd, lockopt_ARG, NULL);
if (!lockd_start_vg(cmd, vg)) {
log_error("Failed to start locking");
goto out;
}
lockd_gl(cmd, "un", 0);
if (!start_opt || !strcmp(start_opt, "wait")) {
/* It is OK if the user does Ctrl-C to cancel the wait. */
log_print_unless_silent("Starting locking. Waiting until locks are ready...");
lockd_start_wait(cmd);
} else if (!strcmp(start_opt, "nowait")) {
log_print_unless_silent("Starting locking. VG is read-only until locks are ready.");
}
}
out:
release_vg(vg);
return ECMD_PROCESSED;

View File

@ -165,6 +165,10 @@ int vgextend(struct cmd_context *cmd, int argc, char **argv)
*/
cmd->handles_missing_pvs = 1;
/* Needed to change the set of orphan PVs. */
if (!lockd_gl(cmd, "ex", 0))
return_ECMD_FAILED;
ret = process_each_vg(cmd, argc, argv,
READ_FOR_UPDATE | ONE_VGNAME_ARG, handle,
restoremissing ? &_vgextend_restoremissing : &_vgextend_single);

View File

@ -20,11 +20,18 @@ static struct volume_group *_vgmerge_vg_read(struct cmd_context *cmd,
{
struct volume_group *vg;
log_verbose("Checking for volume group \"%s\"", vg_name);
vg = vg_read_for_update(cmd, vg_name, NULL, 0);
vg = vg_read_for_update(cmd, vg_name, NULL, 0, 0);
if (vg_read_error(vg)) {
release_vg(vg);
return NULL;
}
if (is_lockd_type(vg->lock_type)) {
log_error("vgmerge not allowed for lock_type %s", vg->lock_type);
unlock_and_release_vg(cmd, vg, vg_name);
return NULL;
}
return vg;
}
@ -194,6 +201,10 @@ int vgmerge(struct cmd_context *cmd, int argc, char **argv)
return EINVALID_CMD_LINE;
}
/* Needed change the global VG namespace. */
if (!lockd_gl(cmd, "ex", LDGL_UPDATE_NAMES))
return ECMD_FAILED;
vg_name_to = skip_dev_dir(cmd, argv[0], NULL);
argc--;
argv++;

View File

@ -141,6 +141,7 @@ int vgreduce(struct cmd_context *cmd, int argc, char **argv)
{
struct volume_group *vg;
const char *vg_name;
uint32_t lockd_state;
int ret = ECMD_FAILED;
int fixed = 1;
int repairing = arg_count(cmd, removemissing_ARG);
@ -195,7 +196,14 @@ int vgreduce(struct cmd_context *cmd, int argc, char **argv)
init_ignore_suspended_devices(1);
cmd->handles_missing_pvs = 1;
vg = vg_read_for_update(cmd, vg_name, NULL, READ_ALLOW_EXPORTED);
/* Needed to change the set of orphan PVs. */
if (!lockd_gl(cmd, "ex", 0))
return_ECMD_FAILED;
if (!lockd_vg(cmd, vg_name, "ex", 0, &lockd_state))
return_ECMD_FAILED;
vg = vg_read_for_update(cmd, vg_name, NULL, READ_ALLOW_EXPORTED, lockd_state);
if (vg_read_error(vg) == FAILED_ALLOCATION ||
vg_read_error(vg) == FAILED_NOTFOUND)
goto_out;
@ -218,7 +226,7 @@ int vgreduce(struct cmd_context *cmd, int argc, char **argv)
log_verbose("Trying to open VG %s for recovery...", vg_name);
vg = vg_read_for_update(cmd, vg_name, NULL,
READ_ALLOW_INCONSISTENT | READ_ALLOW_EXPORTED);
READ_ALLOW_INCONSISTENT | READ_ALLOW_EXPORTED, lockd_state);
locked |= !vg_read_error(vg);

View File

@ -68,6 +68,9 @@ static int vgremove_single(struct cmd_context *cmd, const char *vg_name,
}
}
if (!lockd_free_vg_before(cmd, vg))
return_ECMD_FAILED;
if (!force && !vg_remove_check(vg))
return_ECMD_FAILED;
@ -76,6 +79,8 @@ static int vgremove_single(struct cmd_context *cmd, const char *vg_name,
if (!vg_remove(vg))
return_ECMD_FAILED;
lockd_free_vg_final(cmd, vg);
return ECMD_PROCESSED;
}
@ -89,6 +94,20 @@ int vgremove(struct cmd_context *cmd, int argc, char **argv)
return EINVALID_CMD_LINE;
}
/*
* Needed to change the global VG namespace,
* and to change the set of orphan PVs.
*/
if (!lockd_gl(cmd, "ex", LDGL_UPDATE_NAMES))
return ECMD_FAILED;
/*
* This is a special case: if vgremove is given a tag, it causes
* process_each_vg to do lockd_gl(sh) when getting a list of all
* VG names. We don't want the gl converted to sh, so disable it.
*/
cmd->lockd_gl_disable = 1;
cmd->handles_missing_pvs = 1;
ret = process_each_vg(cmd, argc, argv,
READ_FOR_UPDATE,

View File

@ -17,13 +17,14 @@
static struct volume_group *_get_old_vg_for_rename(struct cmd_context *cmd,
const char *vg_name_old,
const char *vgid)
const char *vgid,
uint32_t lockd_state)
{
struct volume_group *vg;
/* FIXME we used to print an error about EXPORTED, but proceeded
nevertheless. */
vg = vg_read_for_update(cmd, vg_name_old, vgid, READ_ALLOW_EXPORTED);
vg = vg_read_for_update(cmd, vg_name_old, vgid, READ_ALLOW_EXPORTED, lockd_state);
if (vg_read_error(vg)) {
release_vg(vg);
return_NULL;
@ -67,6 +68,7 @@ static int vg_rename_path(struct cmd_context *cmd, const char *old_vg_path,
const char *vgid = NULL, *vg_name, *vg_name_old;
char old_path[NAME_LEN], new_path[NAME_LEN];
struct volume_group *vg = NULL;
uint32_t lockd_state;
int lock_vg_old_first = 1;
vg_name_old = skip_dev_dir(cmd, old_vg_path, NULL);
@ -114,11 +116,14 @@ static int vg_rename_path(struct cmd_context *cmd, const char *old_vg_path,
} else
vgid = NULL;
if (!lockd_vg(cmd, vg_name_old, "ex", 0, &lockd_state))
return_0;
if (strcmp(vg_name_new, vg_name_old) < 0)
lock_vg_old_first = 0;
if (lock_vg_old_first) {
vg = _get_old_vg_for_rename(cmd, vg_name_old, vgid);
vg = _get_old_vg_for_rename(cmd, vg_name_old, vgid, lockd_state);
if (!vg)
return_0;
@ -130,7 +135,7 @@ static int vg_rename_path(struct cmd_context *cmd, const char *old_vg_path,
if (!_lock_new_vg_for_rename(cmd, vg_name_new))
return_0;
vg = _get_old_vg_for_rename(cmd, vg_name_old, vgid);
vg = _get_old_vg_for_rename(cmd, vg_name_old, vgid, lockd_state);
if (!vg) {
unlock_vg(cmd, vg_name_new);
return_0;
@ -144,6 +149,9 @@ static int vg_rename_path(struct cmd_context *cmd, const char *old_vg_path,
if (!drop_cached_metadata(vg))
stack;
if (!lockd_rename_vg_before(cmd, vg))
return_0;
/* Change the volume group name */
vg_rename(cmd, vg, vg_name_new);
@ -171,6 +179,8 @@ static int vg_rename_path(struct cmd_context *cmd, const char *old_vg_path,
}
}
lockd_rename_vg_final(cmd, vg, 1);
if (!backup(vg))
stack;
if (!backup_remove(cmd, vg_name_old))
@ -190,6 +200,8 @@ static int vg_rename_path(struct cmd_context *cmd, const char *old_vg_path,
return 1;
error:
lockd_rename_vg_final(cmd, vg, 0);
if (lock_vg_old_first) {
unlock_vg(cmd, vg_name_new);
unlock_and_release_vg(cmd, vg, vg_name_old);
@ -207,6 +219,10 @@ int vgrename(struct cmd_context *cmd, int argc, char **argv)
return EINVALID_CMD_LINE;
}
/* Needed change the global VG namespace. */
if (!lockd_gl(cmd, "ex", LDGL_UPDATE_NAMES))
return_ECMD_FAILED;
if (!vg_rename_path(cmd, argv[0], argv[1]))
return_ECMD_FAILED;

View File

@ -422,7 +422,7 @@ static struct volume_group *_vgsplit_to(struct cmd_context *cmd,
if (vg_read_error(vg_to) == FAILED_EXIST) {
*existing_vg = 1;
release_vg(vg_to);
vg_to = vg_read_for_update(cmd, vg_name_to, NULL, 0);
vg_to = vg_read_for_update(cmd, vg_name_to, NULL, 0, 0);
if (vg_read_error(vg_to)) {
release_vg(vg_to);
@ -448,11 +448,18 @@ static struct volume_group *_vgsplit_from(struct cmd_context *cmd,
log_verbose("Checking for volume group \"%s\"", vg_name_from);
vg_from = vg_read_for_update(cmd, vg_name_from, NULL, 0);
vg_from = vg_read_for_update(cmd, vg_name_from, NULL, 0, 0);
if (vg_read_error(vg_from)) {
release_vg(vg_from);
return NULL;
}
if (is_lockd_type(vg_from->lock_type)) {
log_error("vgsplit not allowed for lock_type %s", vg_from->lock_type);
unlock_and_release_vg(cmd, vg_from, vg_name_from);
return NULL;
}
return vg_from;
}
@ -492,6 +499,10 @@ int vgsplit(struct cmd_context *cmd, int argc, char **argv)
return ECMD_FAILED;
}
/* Needed change the global VG namespace. */
if (!lockd_gl(cmd, "ex", LDGL_UPDATE_NAMES))
return_ECMD_FAILED;
if (arg_count(cmd, name_ARG))
lv_name = arg_value(cmd, name_ARG);
else
@ -662,7 +673,7 @@ int vgsplit(struct cmd_context *cmd, int argc, char **argv)
if (!test_mode()) {
release_vg(vg_to);
vg_to = vg_read_for_update(cmd, vg_name_to, NULL,
READ_ALLOW_EXPORTED);
READ_ALLOW_EXPORTED, 0);
if (vg_read_error(vg_to)) {
log_error("Volume group \"%s\" became inconsistent: "
"please fix manually", vg_name_to);