mirror of git://sourceware.org/git/lvm2.git
synced 2025-09-15 13:44:18 +03:00
Compare commits
4 Commits
wip-2.03.1 ... dev-dct-pv
Author | SHA1 | Date
---|---|---
 | 5e48b04561 |
 | 3e893b9b09 |
 | c06d5fe28e |
 | 4f1957ee50 |
@@ -1 +1 @@
1.02.181 (2021-10-20)
1.02.181-git (2021-08-11)
@@ -1,6 +1,5 @@
Version 2.03.14 - 20th October 2021
===================================
Device scanning is skipping directories on different filesystems.
Version 2.03.14 -
==================================
Print info message with too many or too large archived files.
Reduce metadata readings during scanning phase.
Optimize computation of crc32 check sum with multiple PVs.
@@ -8,7 +7,7 @@ Version 2.03.14 - 20th October 2021
Filter out unsupported MQ/SMQ cache policy setting.
Fix memleak in mpath filter.
Support newer location for VDO statistics.
Add support for VDO async-unsafe write policy.
Add support for VDO async-unsage write policy.
Improve lvm_import_vdo script.
Support VDO LV with lvcreate -ky.
Fix lvconvert for VDO LV bigger then 2T.
@@ -1,5 +1,5 @@
Version 1.02.181 - 20th October 2021
====================================
Version 1.02.181 -
===================================
Add IMA support with 'dmsetup measure' command.
Add defines DM_NAME_LIST_FLAG_HAS_UUID, DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID.
Enhance tracking of activated devices when preloading dm tree.
117
configure
vendored
@@ -773,10 +773,10 @@ PYTHON
LVM2CMD_LIB
UDEV_LIBS
UDEV_CFLAGS
BLKID_LIBS
BLKID_CFLAGS
SYSTEMD_LIBS
SYSTEMD_CFLAGS
BLKID_LIBS
BLKID_CFLAGS
LOCKD_IDM_LIBS
LOCKD_IDM_CFLAGS
LOCKD_DLM_CONTROL_LIBS
@@ -964,6 +964,7 @@ enable_udev_systemd_background_jobs
enable_udev_sync
enable_udev_rules
enable_udev_rule_exec_detection
enable_compat
enable_units_compat
enable_ioctl
enable_o_direct
@@ -1028,10 +1029,10 @@ LOCKD_DLM_CONTROL_CFLAGS
LOCKD_DLM_CONTROL_LIBS
LOCKD_IDM_CFLAGS
LOCKD_IDM_LIBS
SYSTEMD_CFLAGS
SYSTEMD_LIBS
BLKID_CFLAGS
BLKID_LIBS
SYSTEMD_CFLAGS
SYSTEMD_LIBS
UDEV_CFLAGS
UDEV_LIBS
PYTHON
@@ -1704,6 +1705,7 @@ Optional Features:
--enable-udev_rules install rule files needed for udev synchronisation
--enable-udev-rule-exec-detection
enable executable path detection in udev rules
--enable-compat enable support for old device-mapper versions
--enable-units-compat enable output compatibility with old versions that
that do not use KiB-style unit suffixes
--disable-ioctl disable ioctl calls to device-mapper in the kernel
@@ -1851,13 +1853,13 @@ Some influential environment variables:
C compiler flags for LOCKD_IDM, overriding pkg-config
LOCKD_IDM_LIBS
linker flags for LOCKD_IDM, overriding pkg-config
BLKID_CFLAGS
C compiler flags for BLKID, overriding pkg-config
BLKID_LIBS linker flags for BLKID, overriding pkg-config
SYSTEMD_CFLAGS
C compiler flags for SYSTEMD, overriding pkg-config
SYSTEMD_LIBS
linker flags for SYSTEMD, overriding pkg-config
BLKID_CFLAGS
C compiler flags for BLKID, overriding pkg-config
BLKID_LIBS linker flags for BLKID, overriding pkg-config
UDEV_CFLAGS C compiler flags for UDEV, overriding pkg-config
UDEV_LIBS linker flags for UDEV, overriding pkg-config
PYTHON the Python interpreter
@@ -3140,6 +3142,10 @@ case "$host_os" in
LIB_SUFFIX=so
DEVMAPPER=yes
BUILD_LVMPOLLD=no
LOCKDSANLOCK=no
LOCKDDLM=no
LOCKDDLM_CONTROL=no
LOCKDIDM=no
ODIRECT=yes
DM_IOCTLS=yes
SELINUX=yes
@@ -8461,7 +8467,7 @@ $as_echo_n "checking default for use_devicesfile... " >&6; }

# Check whether --with-default-use-devices-file was given.
if test "${with_default_use_devices_file+set}" = set; then :
withval=$with_default_use_devices_file; DEFAULT_USE_DEVICES_FILE=$withval
withval=$with_default_use_devices_file; DEFAULT_USE_DEVICES_FILE=$enableval
else
DEFAULT_USE_DEVICES_FILE=0
fi
@@ -10995,8 +11001,6 @@ $as_echo_n "checking whether to build lvmpolld... " >&6; }
# Check whether --enable-lvmpolld was given.
if test "${enable_lvmpolld+set}" = set; then :
enableval=$enable_lvmpolld; LVMPOLLD=$enableval
else
LVMPOLLD=no
fi

test -n "$LVMPOLLD" && BUILD_LVMPOLLD=$LVMPOLLD
@@ -11011,8 +11015,6 @@ $as_echo_n "checking whether to build lvmlockdsanlock... " >&6; }
# Check whether --enable-lvmlockd-sanlock was given.
if test "${enable_lvmlockd_sanlock+set}" = set; then :
enableval=$enable_lvmlockd_sanlock; LOCKDSANLOCK=$enableval
else
LOCKDSANLOCK=no
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LOCKDSANLOCK" >&5
@@ -11104,8 +11106,6 @@ $as_echo_n "checking whether to build lvmlockddlm... " >&6; }
# Check whether --enable-lvmlockd-dlm was given.
if test "${enable_lvmlockd_dlm+set}" = set; then :
enableval=$enable_lvmlockd_dlm; LOCKDDLM=$enableval
else
LOCKDDLM=no
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LOCKDDLM" >&5
@@ -11197,8 +11197,6 @@ $as_echo_n "checking whether to build lvmlockddlmcontrol... " >&6; }
# Check whether --enable-lvmlockd-dlmcontrol was given.
if test "${enable_lvmlockd_dlmcontrol+set}" = set; then :
enableval=$enable_lvmlockd_dlmcontrol; LOCKDDLM_CONTROL=$enableval
else
LOCKDDLM_CONTROL=no
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LOCKDDLM_CONTROL" >&5
@@ -11290,8 +11288,6 @@ $as_echo_n "checking whether to build lvmlockdidm... " >&6; }
# Check whether --enable-lvmlockd-idm was given.
if test "${enable_lvmlockd_idm+set}" = set; then :
enableval=$enable_lvmlockd_idm; LOCKDIDM=$enableval
else
LOCKDIDM=no
fi

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LOCKDIDM" >&5
@@ -11371,15 +11367,76 @@ else
$as_echo "yes" >&6; }
HAVE_LOCKD_IDM=yes
fi
if test -n "$PKG_CONFIG" && \

pkg_failed=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BLKID" >&5
$as_echo_n "checking for BLKID... " >&6; }

if test -n "$BLKID_CFLAGS"; then
pkg_cv_BLKID_CFLAGS="$BLKID_CFLAGS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"blkid >= 2.24\""; } >&5
($PKG_CONFIG --exists --print-errors "blkid >= 2.24") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
HAVE_LOCKD_IDM=yes
pkg_cv_BLKID_CFLAGS=`$PKG_CONFIG --cflags "blkid >= 2.24" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
$bailout
pkg_failed=yes
fi
else
pkg_failed=untried
fi
if test -n "$BLKID_LIBS"; then
pkg_cv_BLKID_LIBS="$BLKID_LIBS"
elif test -n "$PKG_CONFIG"; then
if test -n "$PKG_CONFIG" && \
{ { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"blkid >= 2.24\""; } >&5
($PKG_CONFIG --exists --print-errors "blkid >= 2.24") 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; then
pkg_cv_BLKID_LIBS=`$PKG_CONFIG --libs "blkid >= 2.24" 2>/dev/null`
test "x$?" != "x0" && pkg_failed=yes
else
pkg_failed=yes
fi
else
pkg_failed=untried
fi


if test $pkg_failed = yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }

if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then
_pkg_short_errors_supported=yes
else
_pkg_short_errors_supported=no
fi
if test $_pkg_short_errors_supported = yes; then
BLKID_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "blkid >= 2.24" 2>&1`
else
BLKID_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "blkid >= 2.24" 2>&1`
fi
# Put the nasty error message in config.log where it belongs
echo "$BLKID_PKG_ERRORS" >&5

$bailout
elif test $pkg_failed = untried; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
$bailout
else
BLKID_CFLAGS=$pkg_cv_BLKID_CFLAGS
BLKID_LIBS=$pkg_cv_BLKID_LIBS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
HAVE_LOCKD_IDM=yes
fi

$as_echo "#define LOCKDIDM_SUPPORT 1" >>confdefs.h
@@ -12029,6 +12086,24 @@ $as_echo_n "checking whether udev supports built-in blkid... " >&6; }
$as_echo "$UDEV_HAS_BUILTIN_BLKID" >&6; }
fi

################################################################################
# Check whether --enable-compat was given.
if test "${enable_compat+set}" = set; then :
enableval=$enable_compat; DM_COMPAT=$enableval
else
DM_COMPAT=no
fi


if test "$DM_COMPAT" = yes; then :

$as_echo "#define DM_COMPAT 1" >>confdefs.h

as_fn_error $? "--enable-compat is not currently supported.
Since device-mapper version 1.02.66, only one version (4) of the device-mapper
ioctl protocol is supported." "$LINENO" 5
fi

################################################################################
# Check whether --enable-units-compat was given.
if test "${enable_units_compat+set}" = set; then :
75
configure.ac
@@ -38,6 +38,10 @@ case "$host_os" in
LIB_SUFFIX=so
DEVMAPPER=yes
BUILD_LVMPOLLD=no
LOCKDSANLOCK=no
LOCKDDLM=no
LOCKDDLM_CONTROL=no
LOCKDIDM=no
ODIRECT=yes
DM_IOCTLS=yes
SELINUX=yes
@@ -286,7 +290,7 @@ dnl -- Default settings for lvm.conf { devices/use_devicesfile }
AC_MSG_CHECKING(default for use_devicesfile)
AC_ARG_WITH(default-use-devices-file,
AS_HELP_STRING([--with-default-use-devices-file], [default for lvm.conf devices/use_devicesfile = [0]]),
DEFAULT_USE_DEVICES_FILE=$withval, DEFAULT_USE_DEVICES_FILE=0)
DEFAULT_USE_DEVICES_FILE=$enableval, DEFAULT_USE_DEVICES_FILE=0)
case "$DEFAULT_USE_DEVICES_FILE" in
0|1);;
*) AC_MSG_ERROR([--with-default-use-devices-file parameter invalid]);;
@@ -781,39 +785,6 @@ AC_ARG_WITH(default-run-dir,
AC_DEFINE_UNQUOTED(DEFAULT_RUN_DIR, ["$DEFAULT_RUN_DIR"],
[Default LVM run directory.])

################################################################################
dnl -- Build cluster mirror log daemon
AC_MSG_CHECKING(whether to build cluster mirror log daemon)
AC_ARG_ENABLE(cmirrord,
AS_HELP_STRING([--enable-cmirrord],
[enable the cluster mirror log daemon]),
CMIRRORD=$enableval, CMIRRORD=no)
AC_MSG_RESULT($CMIRRORD)

BUILD_CMIRRORD=$CMIRRORD

################################################################################
dnl -- cmirrord pidfile
if test "$BUILD_CMIRRORD" = yes; then
AC_ARG_WITH(cmirrord-pidfile,
AS_HELP_STRING([--with-cmirrord-pidfile=PATH],
[cmirrord pidfile [PID_DIR/cmirrord.pid]]),
CMIRRORD_PIDFILE=$withval,
CMIRRORD_PIDFILE="$DEFAULT_PID_DIR/cmirrord.pid")
AC_DEFINE_UNQUOTED(CMIRRORD_PIDFILE, ["$CMIRRORD_PIDFILE"],
[Path to cmirrord pidfile.])
fi

################################################################################
dnl -- Look for corosync libraries if required.
if [[ "$BUILD_CMIRRORD" = yes ]]; then
pkg_config_init

if test "$HAVE_CPG" != yes; then
PKG_CHECK_MODULES(CPG, libcpg)
fi
fi

################################################################################
dnl -- Enable debugging
AC_MSG_CHECKING(whether to enable debugging)
@@ -942,7 +913,7 @@ AC_MSG_CHECKING(whether to build lvmpolld)
AC_ARG_ENABLE(lvmpolld,
AS_HELP_STRING([--enable-lvmpolld],
[enable the LVM Polling Daemon]),
LVMPOLLD=$enableval, LVMPOLLD=no)
LVMPOLLD=$enableval)
test -n "$LVMPOLLD" && BUILD_LVMPOLLD=$LVMPOLLD
AC_MSG_RESULT($BUILD_LVMPOLLD)

@@ -954,7 +925,7 @@ AC_MSG_CHECKING(whether to build lvmlockdsanlock)
AC_ARG_ENABLE(lvmlockd-sanlock,
AS_HELP_STRING([--enable-lvmlockd-sanlock],
[enable the LVM lock daemon using sanlock]),
LOCKDSANLOCK=$enableval, LOCKDSANLOCK=no)
LOCKDSANLOCK=$enableval)
AC_MSG_RESULT($LOCKDSANLOCK)

BUILD_LOCKDSANLOCK=$LOCKDSANLOCK
@@ -972,7 +943,7 @@ AC_MSG_CHECKING(whether to build lvmlockddlm)
AC_ARG_ENABLE(lvmlockd-dlm,
AS_HELP_STRING([--enable-lvmlockd-dlm],
[enable the LVM lock daemon using dlm]),
LOCKDDLM=$enableval, LOCKDDLM=no)
LOCKDDLM=$enableval)
AC_MSG_RESULT($LOCKDDLM)

BUILD_LOCKDDLM=$LOCKDDLM
@@ -990,7 +961,7 @@ AC_MSG_CHECKING(whether to build lvmlockddlmcontrol)
AC_ARG_ENABLE(lvmlockd-dlmcontrol,
AS_HELP_STRING([--enable-lvmlockd-dlmcontrol],
[enable lvmlockd remote refresh using libdlmcontrol]),
LOCKDDLM_CONTROL=$enableval, LOCKDDLM_CONTROL=no)
LOCKDDLM_CONTROL=$enableval)
AC_MSG_RESULT($LOCKDDLM_CONTROL)

BUILD_LOCKDDLM_CONTROL=$LOCKDDLM_CONTROL
@@ -1008,7 +979,7 @@ AC_MSG_CHECKING(whether to build lvmlockdidm)
AC_ARG_ENABLE(lvmlockd-idm,
AS_HELP_STRING([--enable-lvmlockd-idm],
[enable the LVM lock daemon using idm]),
LOCKDIDM=$enableval, LOCKDIDM=no)
LOCKDIDM=$enableval)
AC_MSG_RESULT($LOCKDIDM)

BUILD_LOCKDIDM=$LOCKDIDM
@@ -1016,7 +987,7 @@ BUILD_LOCKDIDM=$LOCKDIDM
dnl -- Look for Seagate IDM libraries
if test "$BUILD_LOCKDIDM" = yes; then
PKG_CHECK_MODULES(LOCKD_IDM, libseagate_ilm >= 0.1.0, [HAVE_LOCKD_IDM=yes], $bailout)
PKG_CHECK_EXISTS(blkid >= 2.24, [HAVE_LOCKD_IDM=yes], $bailout)
PKG_CHECK_MODULES(BLKID, blkid >= 2.24, [HAVE_LOCKD_IDM=yes], $bailout)
AC_DEFINE([LOCKDIDM_SUPPORT], 1, [Define to 1 to include code that uses lvmlockd IDM option.])
BUILD_LVMLOCKD=yes
fi
@@ -1247,6 +1218,19 @@ if test "$UDEV_RULE" != no ; then
AC_MSG_RESULT($UDEV_HAS_BUILTIN_BLKID)
fi

################################################################################
dnl -- Compatibility mode
AC_ARG_ENABLE(compat,
AS_HELP_STRING([--enable-compat],
[enable support for old device-mapper versions]),
DM_COMPAT=$enableval, DM_COMPAT=no)

AS_IF([test "$DM_COMPAT" = yes],
[AC_DEFINE([DM_COMPAT], 1, [Define to enable compat protocol])
AC_MSG_ERROR([--enable-compat is not currently supported.
Since device-mapper version 1.02.66, only one version (4) of the device-mapper
ioctl protocol is supported.])])

################################################################################
dnl -- Compatible units suffix mode
AC_ARG_ENABLE(units-compat,
@@ -1655,10 +1639,6 @@ fi
AC_MSG_CHECKING(whether to enable editline)
AC_MSG_RESULT($EDITLINE)

if test "$BUILD_CMIRRORD" = yes; then
AC_CHECK_FUNCS(atexit,,hard_bailout)
fi

if test "$BUILD_LVMLOCKD" = yes; then
AS_IF([test "$HAVE_REALTIME" != yes], [AC_MSG_ERROR([Realtime clock support is mandatory for lvmlockd.])])
AC_CHECK_FUNCS(strtoull,,hard_bailout)
@@ -1834,7 +1814,6 @@ AC_ARG_VAR([UDEV_LIBS], [linker flags for udev])
################################################################################
AC_SUBST(AWK)
AC_SUBST(BLKID_PC)
AC_SUBST(BUILD_CMIRRORD)
AC_SUBST(BUILD_DMEVENTD)
AC_SUBST(BUILD_LVMDBUSD)
AC_SUBST(BUILD_LVMPOLLD)
@@ -1967,7 +1946,6 @@ AC_SUBST(WRITE_INSTALL)
AC_SUBST(DMEVENTD_PIDFILE)
AC_SUBST(LVMPOLLD_PIDFILE)
AC_SUBST(LVMLOCKD_PIDFILE)
AC_SUBST(CMIRRORD_PIDFILE)
AC_SUBST(interface)
AC_SUBST(kerneldir)
AC_SUBST(missingkernel)
@@ -1989,7 +1967,6 @@ Makefile
make.tmpl
libdm/make.tmpl
daemons/Makefile
daemons/cmirrord/Makefile
daemons/dmeventd/Makefile
daemons/dmeventd/libdevmapper-event.pc
daemons/dmeventd/plugins/Makefile
@@ -2023,14 +2000,14 @@ libdm/libdevmapper.pc
man/Makefile
po/Makefile
scripts/lvm2-pvscan.service
scripts/lvm-activate-vgs-main.service
scripts/lvm-activate-vgs-last.service
scripts/blkdeactivate.sh
scripts/blk_availability_init_red_hat
scripts/blk_availability_systemd_red_hat.service
scripts/cmirrord_init_red_hat
scripts/com.redhat.lvmdbus1.service
scripts/dm_event_systemd_red_hat.service
scripts/dm_event_systemd_red_hat.socket
scripts/lvm2_cmirrord_systemd_red_hat.service
scripts/lvm2_lvmdbusd_systemd_red_hat.service
scripts/lvm2_lvmpolld_init_red_hat
scripts/lvm2_lvmpolld_systemd_red_hat.service
@@ -3016,7 +3016,9 @@ static int add_lockspace_thread(const char *ls_name,
!alloc_and_copy_pvs_path(&ls2->pvs, &ls->pvs)) {
log_debug("add_lockspace_thread %s fails to allocate pvs", ls->name);
rv = -ENOMEM;
} else if (ls2->thread_stop) {
}

if (ls2->thread_stop) {
log_debug("add_lockspace_thread %s exists and stopping", ls->name);
rv = -EAGAIN;
} else if (!ls2->create_fail && !ls2->create_done) {
@@ -114,6 +114,9 @@
/* Define to 1 to enable the device-mapper filemap daemon. */
#undef DMFILEMAPD

/* Define to enable compat protocol */
#undef DM_COMPAT

/* Define default group for device node */
#undef DM_DEVICE_GID
@@ -159,11 +159,9 @@ static const char *_system_id_from_source(struct cmd_context *cmd, const char *s

#ifdef APP_MACHINEID_SUPPORT
if (!strcasecmp(source, "appmachineid")) {
sd_id128_t id = { 0 };
sd_id128_t id;

if (sd_id128_get_machine_app_specific(LVM_APPLICATION_ID, &id) != 0)
log_warn("WARNING: sd_id128_get_machine_app_specific() failed %s (%d).",
strerror(errno), errno);
sd_id128_get_machine_app_specific(LVM_APPLICATION_ID, &id);

if (dm_snprintf(buf, PATH_MAX, SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(id)) < 0)
stack;
@@ -1143,6 +1141,19 @@ static struct dev_filter *_init_filter_chain(struct cmd_context *cmd)
* Update MAX_FILTERS definition above when adding new filters.
*/

/*
* sysfs filter. Only available on 2.6 kernels. Non-critical.
* Listed first because it's very efficient at eliminating
* unavailable devices.
*
* TODO: I suspect that using the lvm_type and device_id
* filters before this one may be more efficient.
*/
if (find_config_tree_bool(cmd, devices_sysfs_scan_CFG, NULL)) {
if ((filters[nr_filt] = sysfs_filter_create()))
nr_filt++;
}

/* internal filter used by command processing. */
if (!(filters[nr_filt] = internal_filter_create())) {
log_error("Failed to create internal device filter");
@@ -1182,17 +1193,6 @@ static struct dev_filter *_init_filter_chain(struct cmd_context *cmd)
}
nr_filt++;

/*
* sysfs filter. Only available on 2.6 kernels. Non-critical.
* Eliminates unavailable devices.
* TODO: this may be unnecessary now with device ids
* (currently not used for devs match to device id using syfs)
*/
if (find_config_tree_bool(cmd, devices_sysfs_scan_CFG, NULL)) {
if ((filters[nr_filt] = sysfs_filter_create()))
nr_filt++;
}

/* usable device filter. Required. */
if (!(filters[nr_filt] = usable_filter_create(cmd, cmd->dev_types, FILTER_MODE_NO_LVMETAD))) {
log_error("Failed to create usabled device filter");
@@ -1603,6 +1603,7 @@ struct cmd_context *create_config_context(void)

dm_list_init(&cmd->config_files);
dm_list_init(&cmd->tags);
dm_list_init(&cmd->hints);

if (!_init_lvm_conf(cmd))
goto_out;
@@ -1667,6 +1668,7 @@ struct cmd_context *create_toolcontext(unsigned is_clvmd,
dm_list_init(&cmd->formats);
dm_list_init(&cmd->segtypes);
dm_list_init(&cmd->tags);
dm_list_init(&cmd->hints);
dm_list_init(&cmd->config_files);
label_init();
@@ -183,6 +183,7 @@ struct cmd_context {
unsigned enable_hints:1; /* hints are enabled for cmds in general */
unsigned use_hints:1; /* if hints are enabled this cmd can use them */
unsigned pvscan_recreate_hints:1; /* enable special case hint handling for pvscan --cache */
unsigned hints_pvs_online:1; /* hints="pvs_online" */
unsigned scan_lvs:1;
unsigned wipe_outdated_pvs:1;
unsigned enable_devices_list:1; /* command is using --devices option */
@@ -206,6 +207,7 @@ struct cmd_context {
* Devices and filtering.
*/
struct dev_filter *filter;
struct dm_list hints;
struct dm_list use_devices; /* struct dev_use for each entry in devices file */
const char *md_component_checks;
const char *search_for_devnames; /* config file setting */
@@ -1130,6 +1130,18 @@ cfg(global_event_activation_CFG, "event_activation", global_CFG_SECTION, CFG_DEF
"See the --setautoactivation option or the auto_activation_volume_list\n"
"setting to configure autoactivation for specific VGs or LVs.\n")

cfg_array(global_event_activation_options_CFG, "event_activation_options", global_CFG_SECTION, CFG_ALLOW_EMPTY | CFG_DEFAULT_COMMENTED, CFG_TYPE_STRING, DEFAULT_EVENT_ACTIVATION_OPTIONS, vsn(2, 3, 14), NULL, 0, NULL,
"Set event activation options.\n"
"service_to_event: begin with fixed activation services,\n"
"then switch to event based activation.\n"
"event_only: only use event based activation.\n"
"service_only: only use fixed activation services.\n"
"(This is equivalent to event_activation=0.)\n"
"Autoactivation commands should set --eventactivation service|event\n"
"to indicate if they are performing service or event activation.\n"
"An autoactivation command may then be skipped according to the\n"
"value of this setting.\n")

cfg(global_use_lvmetad_CFG, "use_lvmetad", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, 0, vsn(2, 2, 93), 0, vsn(2, 3, 0), NULL,
NULL)
@@ -328,4 +328,11 @@

#define DEFAULT_WWIDS_FILE "/etc/multipath/wwids"

#define DEFAULT_EVENT_ACTIVATION_OPTION1 "service_to_event"
#define DEFAULT_EVENT_ACTIVATION_OPTIONS "#S" DEFAULT_EVENT_ACTIVATION_OPTION1

#define PVS_ONLINE_DIR DEFAULT_RUN_DIR "/pvs_online"
#define VGS_ONLINE_DIR DEFAULT_RUN_DIR "/vgs_online"
#define PVS_LOOKUP_DIR DEFAULT_RUN_DIR "/pvs_lookup"

#endif /* _LVM_DEFAULTS_H */
@@ -53,7 +53,6 @@ static struct {
const char *dev_dir;

int has_scanned;
long st_dev;
struct dm_list dirs;
struct dm_list files;

@@ -1065,18 +1064,11 @@ static void _insert_dirs(struct dm_list *dirs)
struct dir_list *dl;
struct udev *udev = NULL;
int with_udev;
struct stat tinfo;

with_udev = obtain_device_list_from_udev() &&
(udev = udev_get_library_context());

dm_list_iterate_items(dl, &_cache.dirs) {
if (stat(dl->dir, &tinfo) < 0) {
log_warn("WARNING: Cannot use dir %s, %s.",
dl->dir, strerror(errno));
continue;
}
_cache.st_dev = tinfo.st_dev;
if (with_udev) {
if (!_insert_udev_dir(udev, dl->dir))
log_debug_devs("%s: Failed to insert devices from "
@@ -1099,17 +1091,9 @@ static int _device_in_udev_db(const dev_t d)
static void _insert_dirs(struct dm_list *dirs)
{
struct dir_list *dl;
struct stat tinfo;

dm_list_iterate_items(dl, &_cache.dirs) {
if (stat(dl->dir, &tinfo) < 0) {
log_warn("WARNING: Cannot use dir %s, %s.",
dl->dir, strerror(errno));
continue;
}
_cache.st_dev = tinfo.st_dev;
dm_list_iterate_items(dl, &_cache.dirs)
_insert_dir(dl->dir);
}
}

#endif /* UDEV_SYNC_SUPPORT */
@@ -1144,11 +1128,6 @@ static int _insert(const char *path, const struct stat *info,
return 1;
}

if (info->st_dev != _cache.st_dev) {
log_debug_devs("%s: Different filesystem in directory", path);
return 1;
}

if (rec && !_insert_dir(path))
return 0;
} else { /* add a device */
@@ -1852,7 +1831,7 @@ int setup_devices_file(struct cmd_context *cmd)
* Add all system devices to dev-cache, and attempt to
* match all devices_file entries to dev-cache entries.
*/
static int _setup_devices(struct cmd_context *cmd, int no_file_match)
int setup_devices(struct cmd_context *cmd)
{
int file_exists;
int lock_mode = 0;
@@ -1979,13 +1958,6 @@ static int _setup_devices(struct cmd_context *cmd, int no_file_match)
*/
dev_cache_scan(cmd);

/*
* The caller uses "no_file_match" if it wants to match specific devs
* itself, instead of matching everything in device_ids_match.
*/
if (no_file_match && cmd->enable_devices_file)
return 1;

/*
* Match entries from cmd->use_devices with device structs in dev-cache.
*/
@@ -1994,16 +1966,6 @@ static int _setup_devices(struct cmd_context *cmd, int no_file_match)
return 1;
}

int setup_devices(struct cmd_context *cmd)
{
return _setup_devices(cmd, 0);
}

int setup_devices_no_file_match(struct cmd_context *cmd)
{
return _setup_devices(cmd, 1);
}

/*
* The alternative to setup_devices() when the command is interested
* in using only one PV.
@@ -2072,3 +2034,188 @@ int setup_device(struct cmd_context *cmd, const char *devname)
return 1;
}

/*
* pvscan --cache is specialized/optimized to look only at command args,
* so this just sets up the devices file, then individual devices are
* added to dev-cache and matched with device_ids later in pvscan.
*/

int setup_devices_for_pvscan_cache(struct cmd_context *cmd)
{
if (cmd->enable_devices_list) {
if (!_setup_devices_list(cmd))
return_0;
return 1;
}

if (!setup_devices_file(cmd))
return_0;

if (!cmd->enable_devices_file)
return 1;

if (!devices_file_exists(cmd)) {
log_debug("Devices file not found, ignoring.");
cmd->enable_devices_file = 0;
return 1;
}

if (!lock_devices_file(cmd, LOCK_SH)) {
log_error("Failed to lock the devices file to read.");
return 0;
}

if (!device_ids_read(cmd)) {
log_error("Failed to read the devices file.");
unlock_devices_file(cmd);
return 0;
}

unlock_devices_file(cmd);
return 1;
}


/* Get a device name from a devno. */

static char *_get_devname_from_devno(struct cmd_context *cmd, dev_t devno)
{
char path[PATH_MAX];
char devname[PATH_MAX];
char namebuf[NAME_LEN];
char line[1024];
int major = MAJOR(devno);
int minor = MINOR(devno);
int line_major;
int line_minor;
uint64_t line_blocks;
DIR *dir;
struct dirent *dirent;
FILE *fp;

/*
* $ ls /sys/dev/block/8:0/device/block/
* sda
*/
if (major_is_scsi_device(cmd->dev_types, major)) {
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d/device/block",
dm_sysfs_dir(), major, minor) < 0) {
return NULL;
}

if (!(dir = opendir(path)))
return NULL;

while ((dirent = readdir(dir))) {
if (dirent->d_name[0] == '.')
continue;
if (dm_snprintf(devname, sizeof(devname), "/dev/%s", dirent->d_name) < 0) {
devname[0] = '\0';
stack;
}
break;
}
closedir(dir);

if (devname[0]) {
log_debug("Found %s for %d:%d from sys", devname, major, minor);
return _strdup(devname);
}
return NULL;
}

/*
* $ cat /sys/dev/block/253:3/dm/name
* mpatha
*/
if (major == cmd->dev_types->device_mapper_major) {
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d/dm/name",
dm_sysfs_dir(), major, minor) < 0) {
return NULL;
}

if (!get_sysfs_value(path, namebuf, sizeof(namebuf), 0))
return NULL;

if (dm_snprintf(devname, sizeof(devname), "/dev/mapper/%s", namebuf) < 0) {
devname[0] = '\0';
stack;
}

if (devname[0]) {
log_debug("Found %s for %d:%d from sys", devname, major, minor);
return _strdup(devname);
}
return NULL;
}

/*
* /proc/partitions lists
* major minor #blocks name
*/

if (!(fp = fopen("/proc/partitions", "r")))
return NULL;

while (fgets(line, sizeof(line), fp)) {
if (sscanf(line, "%u %u %llu %s", &line_major, &line_minor, (unsigned long long *)&line_blocks, namebuf) != 4)
continue;
if (line_major != major)
continue;
if (line_minor != minor)
continue;

if (dm_snprintf(devname, sizeof(devname), "/dev/%s", namebuf) < 0) {
devname[0] = '\0';
stack;
}
break;
}
fclose(fp);

if (devname[0]) {
log_debug("Found %s for %d:%d from proc", devname, major, minor);
return _strdup(devname);
}

/*
* If necessary, this could continue searching by stat'ing /dev entries.
*/

return NULL;
}
int setup_devname_in_dev_cache(struct cmd_context *cmd, const char *devname)
{
struct stat buf;
struct device *dev;

if (stat(devname, &buf) < 0) {
log_error("Cannot access device %s.", devname);
return 0;
}

if (!S_ISBLK(buf.st_mode)) {
log_error("Invaild device type %s.", devname);
return 0;
}

if (!_insert_dev(devname, buf.st_rdev))
return_0;

if (!(dev = (struct device *) dm_hash_lookup(_cache.names, devname)))
return_0;

return 1;
}

int setup_devno_in_dev_cache(struct cmd_context *cmd, dev_t devno)
{
const char *devname;

if (!(devname = _get_devname_from_devno(cmd, devno)))
return_0;

return setup_devname_in_dev_cache(cmd, devname);
}
@@ -77,7 +77,11 @@ int get_dm_uuid_from_sysfs(char *buf, size_t buf_size, int major, int minor);

int setup_devices_file(struct cmd_context *cmd);
int setup_devices(struct cmd_context *cmd);
int setup_devices_no_file_match(struct cmd_context *cmd);
int setup_device(struct cmd_context *cmd, const char *devname);

/* Normal device setup functions are split up for pvscan optimization. */
int setup_devices_for_pvscan_cache(struct cmd_context *cmd);
int setup_devname_in_dev_cache(struct cmd_context *cmd, const char *devname);
int setup_devno_in_dev_cache(struct cmd_context *cmd, dev_t devno);

#endif
@@ -74,8 +74,6 @@ void unlink_searched_devnames(struct cmd_context *cmd)

if (unlink(_searched_file))
log_debug("unlink %s errno %d", _searched_file, errno);
else
log_debug("unlink %s", _searched_file);
}

static int _searched_devnames_exists(struct cmd_context *cmd)
@@ -304,7 +302,6 @@ const char *device_id_system_read(struct cmd_context *cmd, struct device *dev, u
{
char sysbuf[PATH_MAX] = { 0 };
const char *idname = NULL;
int i;

if (idtype == DEV_ID_TYPE_SYS_WWID) {
read_sys_block(cmd, dev, "device/wwid", sysbuf, sizeof(sysbuf));
@@ -312,6 +309,10 @@ const char *device_id_system_read(struct cmd_context *cmd, struct device *dev, u
if (!sysbuf[0])
read_sys_block(cmd, dev, "wwid", sysbuf, sizeof(sysbuf));

/* scsi_debug wwid begins "t10.Linux scsi_debug ..." */
if (strstr(sysbuf, "scsi_debug"))
sysbuf[0] = '\0';

/* qemu wwid begins "t10.ATA QEMU HARDDISK ..." */
if (strstr(sysbuf, "QEMU HARDDISK"))
sysbuf[0] = '\0';
@@ -352,11 +353,6 @@ const char *device_id_system_read(struct cmd_context *cmd, struct device *dev, u
return idname;
}

for (i = 0; i < strlen(sysbuf); i++) {
if (isblank(sysbuf[i]) || isspace(sysbuf[i]) || iscntrl(sysbuf[i]))
sysbuf[i] = '_';
}

if (!sysbuf[0])
goto_bad;

@@ -675,9 +671,6 @@ int device_ids_write(struct cmd_context *cmd)
cmd->enable_devices_file = 1;
}

if (test_mode())
return 1;

if (_devices_file_version[0]) {
if (sscanf(_devices_file_version, "%u.%u.%u", &df_major, &df_minor, &df_counter) != 3) {
/* don't update a file we can't parse */
@@ -787,7 +780,7 @@ static void _device_ids_update_try(struct cmd_context *cmd)

/* Defer updates to non-pvscan-cache commands. */
if (cmd->pvscan_cache_single) {
log_print("Devices file update skipped.");
log_print("pvscan[%d] skip updating devices file.", getpid());
return;
}

@@ -1358,19 +1351,14 @@ void device_id_update_vg_uuid(struct cmd_context *cmd, struct volume_group *vg,
}
}

if (update &&
!device_ids_write(cmd))
stack;
if (update)
device_ids_write(cmd);
out:
unlock_devices_file(cmd);
}

static int _idtype_compatible_with_major_number(struct cmd_context *cmd, int idtype, int major)
{
/* devname can be used with any kind of device */
if (idtype == DEV_ID_TYPE_DEVNAME)
return 1;

if (idtype == DEV_ID_TYPE_MPATH_UUID ||
idtype == DEV_ID_TYPE_CRYPT_UUID ||
idtype == DEV_ID_TYPE_LVMLV_UUID)
@@ -1399,43 +1387,6 @@ static int _idtype_compatible_with_major_number(struct cmd_context *cmd, int idt
return 1;
}

static int _match_dm_devnames(struct cmd_context *cmd, struct device *dev,
struct dev_id *id, struct dev_use *du)
{
struct stat buf;

if (MAJOR(dev->dev) != cmd->dev_types->device_mapper_major)
return 0;

if (id->idname && du->idname && !strcmp(id->idname, du->idname))
return 1;

if (du->idname && !strcmp(du->idname, dev_name(dev))) {
log_debug("Match device_id %s %s to %s: ignoring idname %s",
idtype_to_str(du->idtype), du->idname, dev_name(dev), id->idname ?: ".");
return 1;
}

if (!du->idname)
return 0;

/* detect that a du entry is for a dm device */

if (!strncmp(du->idname, "/dev/dm-", 8) || !strncmp(du->idname, "/dev/mapper/", 12)) {
if (stat(du->idname, &buf))
return 0;

if ((MAJOR(buf.st_rdev) == cmd->dev_types->device_mapper_major) &&
(MINOR(buf.st_rdev) == MINOR(dev->dev))) {
log_debug("Match device_id %s %s to %s: using other dm name, ignoring %s",
idtype_to_str(du->idtype), du->idname, dev_name(dev), id->idname ?: ".");
return 1;
}
}

return 0;
}

/*
* check for dev->ids entry with du->idtype, if found compare it,
* if not, system_read of this type and add entry to dev->ids, compare it.
@@ -1448,74 +1399,43 @@ static int _match_du_to_dev(struct cmd_context *cmd, struct dev_use *du, struct
const char *idname;
int part;

/*
* The idname will be removed from an entry with devname type when the
* devname is read and found to hold a different PVID than the PVID in
* the entry. At that point we only have the PVID and no known
* location for it.
*/
if (!du->idname || !du->idtype) {
/*
log_debug("Mismatch device_id %s %s %s to %s",
du->idtype ? idtype_to_str(du->idtype) : "idtype_missing",
du->idname ? du->idname : "idname_missing",
du->devname ? du->devname : "devname_missing",
dev_name(dev));
*/
if (!du->idname || !du->idtype)
return 0;
}

/*
* Some idtypes can only match devices with a specific major number,
* so we can skip trying to match certain du entries based simply on
* the major number of dev.
*/
if (!_idtype_compatible_with_major_number(cmd, du->idtype, (int)MAJOR(dev->dev))) {
/*
log_debug("Mismatch device_id %s %s to %s: wrong major",
idtype_to_str(du->idtype), du->idname ?: ".", dev_name(dev));
*/
if (!_idtype_compatible_with_major_number(cmd, du->idtype, (int)MAJOR(dev->dev)))
return 0;
}

if (!dev_get_partition_number(dev, &part)) {
/*
log_debug("Mismatch device_id %s %s to %s: no partition",
idtype_to_str(du->idtype), du->idname ?: ".", dev_name(dev));
*/
log_debug("compare %s failed to get dev partition", dev_name(dev));
return 0;
}
if (part != du->part) {
/*
log_debug("Mismatch device_id %s %s to %s: wrong partition %d vs %d",
idtype_to_str(du->idtype), du->idname ?: ".", dev_name(dev), du->part, part);
log_debug("compare mis %s %s part %d to %s part %d",
idtype_to_str(du->idtype), du->idname ?: ".", du->part, dev_name(dev), part);
*/
return 0;
}

dm_list_iterate_items(id, &dev->ids) {
if (id->idtype == du->idtype) {
if ((id->idtype == DEV_ID_TYPE_DEVNAME) && _match_dm_devnames(cmd, dev, id, du)) {
/* dm devs can have differing names that we know still match */
du->dev = dev;
dev->id = id;
dev->flags |= DEV_MATCHED_USE_ID;
log_debug("Match device_id %s %s to %s: dm names",
idtype_to_str(du->idtype), du->idname, dev_name(dev));
return 1;

} else if (id->idname && !strcmp(id->idname, du->idname)) {
if (id->idname && !strcmp(id->idname, du->idname)) {
du->dev = dev;
dev->id = id;
dev->flags |= DEV_MATCHED_USE_ID;
log_debug("Match device_id %s %s to %s",
idtype_to_str(du->idtype), du->idname, dev_name(dev));
return 1;

} else {
/*
log_debug("Mismatch device_id %s %s to %s: idname %s",
idtype_to_str(du->idtype), du->idname ?: ".", dev_name(dev), id->idname ?: ":");
log_debug("compare mis %s %s to %s %s",
idtype_to_str(du->idtype), du->idname ?: ".", dev_name(dev),
((id->idtype != DEV_ID_TYPE_DEVNAME) && id->idname) ? id->idname : "");
*/
return 0;
}
@@ -1535,7 +1455,7 @@ static int _match_du_to_dev(struct cmd_context *cmd, struct dev_use *du, struct
id->dev = dev;
dm_list_add(&dev->ids, &id->list);
/*
log_debug("Mismatch device_id %s %s to %s: no idtype",
log_debug("compare mis %s %s to %s no idtype",
idtype_to_str(du->idtype), du->idname ?: ".", dev_name(dev));
*/
return 0;
@@ -1560,8 +1480,9 @@ static int _match_du_to_dev(struct cmd_context *cmd, struct dev_use *du, struct
}

/*
log_debug("Mismatch device_id %s %s to %s: idname %s",
idtype_to_str(du->idtype), du->idname ?: ".", dev_name(dev), idname);
log_debug("compare mis %s %s to %s %s",
idtype_to_str(du->idtype), du->idname ?: ".", dev_name(dev),
((id->idtype != DEV_ID_TYPE_DEVNAME) && id->idname) ? id->idname : "");
*/
return 0;
}
@@ -1612,6 +1533,22 @@ int device_ids_match_dev(struct cmd_context *cmd, struct device *dev)
* passes the filter.
*/

void device_ids_match_device_list(struct cmd_context *cmd)
{
struct dev_use *du;

dm_list_iterate_items(du, &cmd->use_devices) {
if (du->dev)
continue;
if (!(du->dev = dev_cache_get(cmd, du->devname, NULL))) {
log_warn("Device not found for %s.", du->devname);
} else {
/* Should we set dev->id? Which idtype? Use --deviceidtype? */
du->dev->flags |= DEV_MATCHED_USE_ID;
}
}
}

void device_ids_match(struct cmd_context *cmd)
{
struct dev_iter *iter;
@@ -1619,16 +1556,7 @@ void device_ids_match(struct cmd_context *cmd)
struct device *dev;

if (cmd->enable_devices_list) {
dm_list_iterate_items(du, &cmd->use_devices) {
if (du->dev)
continue;
if (!(du->dev = dev_cache_get(cmd, du->devname, NULL))) {
log_warn("Device not found for %s.", du->devname);
} else {
/* Should we set dev->id? Which idtype? Use --deviceidtype? */
du->dev->flags |= DEV_MATCHED_USE_ID;
}
}
device_ids_match_device_list(cmd);
return;
}
@@ -1978,14 +1906,6 @@ void device_ids_validate(struct cmd_context *cmd, struct dm_list *scanned_devs,
*device_ids_invalid = 1;
}

/*
* When a new devname/pvid mismatch is discovered, a new search for the
* pvid should be permitted (searched_devnames may exist to suppress
* searching for other pvids.)
*/
if (update_file)
unlink_searched_devnames(cmd);

/* FIXME: for wrong devname cases, wait to write new until device_ids_find_renamed_devs? */

/*
@@ -2054,19 +1974,12 @@ void device_ids_find_renamed_devs(struct cmd_context *cmd, struct dm_list *dev_l
search_auto = !strcmp(cmd->search_for_devnames, "auto");

dm_list_iterate_items(du, &cmd->use_devices) {
if (du->dev)
continue;
if (!du->pvid)
continue;
if (du->idtype != DEV_ID_TYPE_DEVNAME)
continue;

/*
* if the old incorrect devname is now a device that's
* filtered and not scanned, e.g. an mpath component,
* then we want to look for the pvid on a new device.
*/
if (du->dev && !du->dev->filtered_flags)
continue;

if (!(dil = dm_pool_zalloc(cmd->mem, sizeof(*dil))))
continue;

@@ -2091,11 +2004,6 @@ void device_ids_find_renamed_devs(struct cmd_context *cmd, struct dm_list *dev_l
* the searched file, so a subsequent lvm command will do the search
* again. In future perhaps we could add a policy to automatically
* remove a devices file entry that's not been found for some time.
*
* TODO: like the hint file, add a hash of all devnames to the searched
* file so it can be ignored and removed if the devs/hash change.
* If hints are enabled, the hints invalidation could also remove the
* searched file.
*/
if (_searched_devnames_exists(cmd)) {
log_debug("Search for PVIDs skipped for %s", _searched_file);
@@ -2271,8 +2179,7 @@ void device_ids_find_renamed_devs(struct cmd_context *cmd, struct dm_list *dev_l
continue;
}

if (!noupdate)
log_warn("Devices file PVID %s updating IDNAME to %s.", dev->pvid, devname);
log_warn("Devices file PVID %s updating IDNAME to %s.", dev->pvid, devname);

free(du->idname);
free(du->devname);
@@ -32,6 +32,7 @@ int device_id_add(struct cmd_context *cmd, struct device *dev, const char *pvid,
void device_id_pvremove(struct cmd_context *cmd, struct device *dev);
void device_ids_match(struct cmd_context *cmd);
int device_ids_match_dev(struct cmd_context *cmd, struct device *dev);
void device_ids_match_device_list(struct cmd_context *cmd);
void device_ids_validate(struct cmd_context *cmd, struct dm_list *scanned_devs, int *device_ids_invalid, int noupdate);
int device_ids_version_unchanged(struct cmd_context *cmd);
void device_ids_find_renamed_devs(struct cmd_context *cmd, struct dm_list *dev_list, int *search_count, int noupdate);
@@ -15,41 +15,268 @@
#include "lib/misc/lib.h"
#include "lib/filters/filter.h"

static int _sys_dev_block_found;

#ifdef __linux__

static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
#include <sys/sysmacros.h>
#include <dirent.h>

static int _locate_sysfs_blocks(const char *sysfs_dir, char *path, size_t len,
unsigned *sysfs_depth)
{
char path[PATH_MAX];
const char *sysfs_dir;
struct stat info;
unsigned i;
static const struct dir_class {
const char path[32];
int depth;
} classes[] = {
/*
* unified classification directory for all kernel subsystems
*
* /sys/subsystem/block/devices
* |-- sda -> ../../../devices/pci0000:00/0000:00:1f.2/host0/target0:0:0/0:0:0:0/block/sda
* |-- sda1 -> ../../../devices/pci0000:00/0000:00:1f.2/host0/target0:0:0/0:0:0:0/block/sda/sda1
* `-- sr0 -> ../../../devices/pci0000:00/0000:00:1f.2/host1/target1:0:0/1:0:0:0/block/sr0
*
*/
{ "subsystem/block/devices", 0 },

if (!_sys_dev_block_found)
return 1;
/*
* block subsystem as a class
*
* /sys/class/block
* |-- sda -> ../../devices/pci0000:00/0000:00:1f.2/host0/target0:0:0/0:0:0:0/block/sda
* |-- sda1 -> ../../devices/pci0000:00/0000:00:1f.2/host0/target0:0:0/0:0:0:0/block/sda/sda1
* `-- sr0 -> ../../devices/pci0000:00/0000:00:1f.2/host1/target1:0:0/1:0:0:0/block/sr0
*
*/
{ "class/block", 0 },

dev->filtered_flags &= ~DEV_FILTERED_SYSFS;
/*
* old block subsystem layout with nested directories
*
* /sys/block/
* |-- sda
* |   |-- capability
* |   |-- dev
* ...
* |   |-- sda1
* |   |   |-- dev
* ...
* |
* `-- sr0
*     |-- capability
*     |-- dev
* ...
*
*/

/*
* Any kind of device id other than devname has been set
* using sysfs so we know that sysfs info exists for dev.
*/
if (dev->id && dev->id->idtype && (dev->id->idtype != DEV_ID_TYPE_DEVNAME))
return 1;
{ "block", 1 }
};

sysfs_dir = dm_sysfs_dir();
if (sysfs_dir && *sysfs_dir) {
if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d",
sysfs_dir, (int)MAJOR(dev->dev), (int)MINOR(dev->dev)) < 0) {
log_debug("failed to create sysfs path");
for (i = 0; i < DM_ARRAY_SIZE(classes); ++i)
if ((dm_snprintf(path, len, "%s%s", sysfs_dir, classes[i].path) >= 0) &&
(stat(path, &info) == 0)) {
*sysfs_depth = classes[i].depth;
return 1;
}

if (lstat(path, &info)) {
log_debug_devs("%s: Skipping (sysfs)", dev_name(dev));
dev->filtered_flags |= DEV_FILTERED_SYSFS;
return 0;
return 0;
}
/*----------------------------------------------------------------
 * We need to store a set of dev_t.
 *--------------------------------------------------------------*/
struct entry {
struct entry *next;
dev_t dev;
};

#define SET_BUCKETS 64
struct dev_set {
struct dm_pool *mem;
const char *sys_block;
unsigned sysfs_depth;
int initialised;
struct entry *slots[SET_BUCKETS];
};

static struct dev_set *_dev_set_create(struct dm_pool *mem,
const char *sys_block,
unsigned sysfs_depth)
{
struct dev_set *ds;

if (!(ds = dm_pool_zalloc(mem, sizeof(*ds))))
return NULL;

ds->mem = mem;
if (!(ds->sys_block = dm_pool_strdup(mem, sys_block)))
return NULL;

ds->sysfs_depth = sysfs_depth;
ds->initialised = 0;

return ds;
}

static unsigned _hash_dev(dev_t dev)
{
return (major(dev) ^ minor(dev)) & (SET_BUCKETS - 1);
}

/*
 * Doesn't check that the set already contains dev.
 */
static int _set_insert(struct dev_set *ds, dev_t dev)
{
struct entry *e;
unsigned h = _hash_dev(dev);

if (!(e = dm_pool_alloc(ds->mem, sizeof(*e))))
return 0;

e->next = ds->slots[h];
e->dev = dev;
ds->slots[h] = e;

return 1;
}

static int _set_lookup(struct dev_set *ds, dev_t dev)
{
unsigned h = _hash_dev(dev);
struct entry *e;

for (e = ds->slots[h]; e; e = e->next)
if (e->dev == dev)
return 1;

return 0;
}

/*----------------------------------------------------------------
 * filter methods
 *--------------------------------------------------------------*/
static int _parse_dev(const char *file, FILE *fp, dev_t *result)
{
unsigned major, minor;
char buffer[64];

if (!fgets(buffer, sizeof(buffer), fp)) {
log_error("Empty sysfs device file: %s", file);
return 0;
}

if (sscanf(buffer, "%u:%u", &major, &minor) != 2) {
log_error("Incorrect format for sysfs device file: %s.", file);
return 0;
}

*result = makedev(major, minor);
return 1;
}

static int _read_dev(const char *file, dev_t *result)
{
int r;
FILE *fp;

if (!(fp = fopen(file, "r"))) {
log_sys_error("fopen", file);
return 0;
}

r = _parse_dev(file, fp, result);

if (fclose(fp))
log_sys_error("fclose", file);

return r;
}
/*
 * Recurse through sysfs directories, inserting any devs found.
 */
static int _read_devs(struct dev_set *ds, const char *dir, unsigned sysfs_depth)
{
struct dirent *d;
DIR *dr;
struct stat info;
char path[PATH_MAX];
char file[PATH_MAX];
dev_t dev = { 0 };
int r = 1;

if (!(dr = opendir(dir))) {
log_sys_error("opendir", dir);
return 0;
}

while ((d = readdir(dr))) {
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;

if (dm_snprintf(path, sizeof(path), "%s/%s", dir,
d->d_name) < 0) {
log_warn("WARNING: sysfs path name too long: %s in %s.",
d->d_name, dir);
continue;
}

/* devices have a "dev" file */
if (dm_snprintf(file, sizeof(file), "%s/dev", path) < 0) {
log_warn("WARNING: sysfs path name too long: %s in %s.",
d->d_name, dir);
continue;
}

if (!stat(file, &info)) {
/* recurse if we found a device and expect subdirs */
if (sysfs_depth)
_read_devs(ds, path, sysfs_depth - 1);

/* add the device we have found */
if (_read_dev(file, &dev))
_set_insert(ds, dev);
}
}

if (closedir(dr))
log_sys_debug("closedir", dir);

return r;
}

static int _init_devs(struct dev_set *ds)
{
if (!_read_devs(ds, ds->sys_block, ds->sysfs_depth)) {
ds->initialised = -1;
return 0;
}

ds->initialised = 1;

return 1;
}


static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct device *dev, const char *use_filter_name)
{
struct dev_set *ds = (struct dev_set *) f->private;

dev->filtered_flags &= ~DEV_FILTERED_SYSFS;

if (!ds->initialised)
_init_devs(ds);

/* Pass through if initialisation failed */
if (ds->initialised != 1)
return 1;

if (!_set_lookup(ds, dev->dev)) {
log_debug_devs("%s: Skipping (sysfs)", dev_name(dev));
dev->filtered_flags |= DEV_FILTERED_SYSFS;
return 0;
}

return 1;
@@ -57,34 +284,21 @@ static int _accept_p(struct cmd_context *cmd, struct dev_filter *f, struct devic

static void _destroy(struct dev_filter *f)
{
struct dev_set *ds = (struct dev_set *) f->private;

if (f->use_count)
log_error(INTERNAL_ERROR "Destroying sysfs filter while in use %u times.", f->use_count);
free(f);
}

static void _check_sys_dev_block(void)
{
char path[PATH_MAX];
const char *sysfs_dir;
struct stat info;

sysfs_dir = dm_sysfs_dir();
if (sysfs_dir && *sysfs_dir) {
if (dm_snprintf(path, sizeof(path), "%sdev/block", sysfs_dir) < 0)
return;

if (lstat(path, &info)) {
log_debug("filter-sysfs disabled: /sys/dev/block not found");
_sys_dev_block_found = 0;
} else {
_sys_dev_block_found = 1;
}
}
dm_pool_destroy(ds->mem);
}

struct dev_filter *sysfs_filter_create(void)
{
const char *sysfs_dir = dm_sysfs_dir();
char sys_block[PATH_MAX];
unsigned sysfs_depth;
struct dm_pool *mem;
struct dev_set *ds;
struct dev_filter *f;

if (!*sysfs_dir) {
@@ -92,15 +306,26 @@ struct dev_filter *sysfs_filter_create(void)
return NULL;
}

/* support old kernels that don't have this */
_check_sys_dev_block();
if (!_locate_sysfs_blocks(sysfs_dir, sys_block, sizeof(sys_block), &sysfs_depth))
return NULL;

if (!(f = zalloc(sizeof(*f))))
if (!(mem = dm_pool_create("sysfs", 256))) {
log_error("sysfs pool creation failed");
return NULL;
}

if (!(ds = _dev_set_create(mem, sys_block, sysfs_depth))) {
log_error("sysfs dev_set creation failed");
goto bad;
}

if (!(f = dm_pool_zalloc(mem, sizeof(*f))))
goto_bad;

f->passes_filter = _accept_p;
f->destroy = _destroy;
f->use_count = 0;
f->private = ds;
f->name = "sysfs";

log_debug_devs("Sysfs filter initialised.");
@@ -108,6 +333,7 @@ struct dev_filter *sysfs_filter_create(void)
return f;

bad:
dm_pool_destroy(mem);
return NULL;
}
@@ -219,7 +219,7 @@ static void _remove_expired(const char *dir, const char *vgname,

sum /= 1024 * 1024;
if (sum > 128 || archives_size > 8192)
log_print_unless_silent("Consider pruning %s VG archive with more then %u MiB in %u files (check archiving is needed in lvm.conf).",
log_print_unless_silent("Consider prunning %s VG archive with more then %u MiB in %u files (check archiving is needed in lvm.conf).",
vgname, (unsigned)sum, archives_size);
}
@@ -465,7 +465,7 @@ static struct volume_group *_vg_read_raw(struct cmd_context *cmd,
|
||||
|
||||
vg = _vg_read_raw_area(cmd, fid, vgname, &mdac->area, vg_fmtdata, use_previous_vg, 0, mda_is_primary(mda));
|
||||
|
||||
if (!vg && use_previous_vg && !*use_previous_vg) {
|
||||
if (!vg && !*use_previous_vg) {
|
||||
/*
|
||||
* This condition (corrupt metadata text) is often seen in the
|
||||
* label_scan()/_text_read() phase, where this code corresponds to
|
||||
@@ -477,12 +477,8 @@ static struct volume_group *_vg_read_raw(struct cmd_context *cmd,
|
||||
struct lvmcache_info *info = lvmcache_info_from_pvid(dev->pvid, dev, 0);
|
||||
log_warn("WARNING: reading %s mda%d failed to read metadata.", dev_name(dev), mda_is_primary(mda)?1:2);
|
||||
log_warn("WARNING: repair VG metadata on %s with vgck --updatemetadata.", dev_name(dev));
|
||||
if (info)
|
||||
/* remove mda from lvmcache, saving it in info->bad_mdas for possible repair with updatemetadata */
|
||||
lvmcache_del_save_bad_mda(info, mda->mda_num, BAD_MDA_TEXT);
|
||||
else
|
||||
log_warn("WARNING: No cache info for %s", dev_name(dev));
|
||||
|
||||
/* remove mda from lvmcache, saving it in info->bad_mdas for possible repair with updatemetadata */
|
||||
lvmcache_del_save_bad_mda(info, mda->mda_num, BAD_MDA_TEXT);
|
||||
/* remove mda from fid */
|
||||
fid_remove_mda(fid, mda, NULL, 0, 0);
|
||||
}
|
||||
|
@@ -150,11 +150,14 @@
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <dirent.h>
|
||||
#include <time.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/file.h>
|
||||
#include <sys/sysmacros.h>
|
||||
|
||||
int online_pvid_file_read(char *path, int *major, int *minor, char *vgname);
|
||||
|
||||
static const char *_hints_file = DEFAULT_RUN_DIR "/hints";
|
||||
static const char *_nohints_file = DEFAULT_RUN_DIR "/nohints";
|
||||
static const char *_newhints_file = DEFAULT_RUN_DIR "/newhints";
|
||||
@@ -365,6 +368,7 @@ static void _unlock_hints(struct cmd_context *cmd)
|
||||
|
||||
void hints_exit(struct cmd_context *cmd)
|
||||
{
|
||||
free_hints(&cmd->hints);
|
||||
if (_hints_fd == -1)
|
||||
return;
|
||||
_unlock_hints(cmd);
|
||||
@@ -1278,6 +1282,109 @@ check:
|
||||
free(name);
|
||||
}
|
||||
|
||||
static int _get_hints_from_pvs_online(struct cmd_context *cmd, struct dm_list *hints_out,
|
||||
struct dm_list *devs_in, struct dm_list *devs_out)
|
||||
{
|
||||
char path[PATH_MAX];
|
||||
char file_vgname[NAME_LEN];
|
||||
struct dm_list hints_list;
|
||||
struct hint file_hint;
|
||||
struct hint *alloc_hint;
|
||||
struct hint *hint, *hint2;
|
||||
struct device_list *devl, *devl2;
|
||||
int file_major, file_minor;
|
||||
int found = 0;
|
||||
DIR *dir;
|
||||
struct dirent *de;
|
||||
char *vgname = NULL;
|
||||
char *pvid;
|
||||
|
||||
dm_list_init(&hints_list);
|
||||
|
||||
if (!(dir = opendir(PVS_ONLINE_DIR)))
|
||||
return 0;
|
||||
|
||||
while ((de = readdir(dir))) {
|
||||
if (de->d_name[0] == '.')
|
||||
continue;
|
||||
|
||||
pvid = de->d_name;
|
||||
|
||||
if (strlen(pvid) != ID_LEN) /* 32 */
|
||||
continue;
|
||||
|
||||
memset(path, 0, sizeof(path));
|
||||
snprintf(path, sizeof(path), "%s/%s", PVS_ONLINE_DIR, pvid);
|
||||
|
||||
memset(&file_hint, 0, sizeof(file_hint));
|
||||
memset(file_vgname, 0, sizeof(file_vgname));
|
||||
file_major = 0;
|
||||
file_minor = 0;
|
||||
|
||||
if (!online_pvid_file_read(path, &file_major, &file_minor, file_vgname))
|
||||
continue;
|
||||
|
||||
if (!dm_strncpy(file_hint.pvid, pvid, sizeof(file_hint.pvid)))
|
||||
continue;
|
||||
|
||||
file_hint.devt = makedev(file_major, file_minor);
|
||||
|
||||
if (file_vgname[0] && validate_name(file_vgname)) {
|
||||
if (!dm_strncpy(file_hint.vgname, file_vgname, sizeof(file_hint.vgname)))
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!(alloc_hint = malloc(sizeof(struct hint))))
|
||||
continue;
|
||||
|
||||
memcpy(alloc_hint, &file_hint, sizeof(struct hint));
|
||||
|
||||
log_debug("add hint %s %d:%d %s from pvs_online", file_hint.pvid, file_major, file_minor, file_vgname);
|
||||
dm_list_add(&hints_list, &alloc_hint->list);
|
||||
found++;
|
||||
}
|
||||
|
||||
if (closedir(dir))
|
||||
stack;
|
||||
|
||||
log_debug("accept hints found %d from pvs_online", found);
|
||||
|
||||
_get_single_vgname_cmd_arg(cmd, &hints_list, &vgname);
|
||||
|
||||
/*
|
||||
* apply_hints equivalent, move devs from devs_in to devs_out if
|
||||
* their devno matches the devno of a hint (and if the hint matches
|
||||
* the vgname when a vgname is present.)
|
||||
*/
|
||||
dm_list_iterate_items_safe(devl, devl2, devs_in) {
|
||||
dm_list_iterate_items_safe(hint, hint2, &hints_list) {
|
||||
if ((MAJOR(devl->dev->dev) == MAJOR(hint->devt)) &&
|
||||
(MINOR(devl->dev->dev) == MINOR(hint->devt))) {
|
||||
|
||||
if (vgname && hint->vgname[0] && strcmp(vgname, hint->vgname))
|
||||
goto next_dev;
|
||||
|
||||
snprintf(hint->name, sizeof(hint->name), "%s", dev_name(devl->dev));
|
||||
hint->chosen = 1;
|
||||
|
||||
dm_list_del(&devl->list);
|
||||
dm_list_add(devs_out, &devl->list);
|
||||
}
|
||||
}
|
||||
next_dev:
|
||||
;
|
||||
}
|
||||
|
||||
log_debug("applied hints using %d other %d vgname %s from pvs_online",
|
||||
dm_list_size(devs_out), dm_list_size(devs_in), vgname ?: "");
|
||||
|
||||
dm_list_splice(hints_out, &hints_list);
|
||||
|
||||
free(vgname);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns 0: no hints are used.
|
||||
* . newhints is set if this command should create new hints after scan
|
||||
@@ -1319,6 +1426,15 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
|
||||
if (!cmd->use_hints)
|
||||
return 0;
|
||||
|
||||
/* hints = "pvs_online" */
|
||||
if (cmd->hints_pvs_online) {
|
||||
if (!_get_hints_from_pvs_online(cmd, &hints_list, devs_in, devs_out)) {
|
||||
log_debug("get_hints: pvs_online failed");
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if another command created the nohints file to prevent us from
|
||||
* using hints.
|
||||
@@ -1390,16 +1506,6 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
|
||||
log_debug("get_hints: needs refresh");
|
||||
free_hints(&hints_list);
|
||||
|
||||
/*
|
||||
* This is not related to hints, and is probably unnecessary,
|
||||
* but it could possibly help. When hints become invalid it's
|
||||
* usually because devs on the system have changed, and that
|
||||
* also means that a missing devices file entry might be found
|
||||
* by searching devices again. (the searched_devnames
|
||||
* mechanism should eventually be replaced)
|
||||
*/
|
||||
unlink_searched_devnames(cmd);
|
||||
|
||||
if (!_lock_hints(cmd, LOCK_EX, NONBLOCK))
|
||||
return 0;
|
||||
|
||||
|
@@ -891,7 +891,7 @@ static int _setup_bcache(void)
|
||||
|
||||
#define BASE_FD_COUNT 32 /* Number of open files we want apart from devs */
|
||||
|
||||
void prepare_open_file_limit(struct cmd_context *cmd, unsigned int num_devs)
|
||||
static void _prepare_open_file_limit(struct cmd_context *cmd, unsigned int num_devs)
|
||||
{
|
||||
#ifdef HAVE_PRLIMIT
|
||||
struct rlimit old = { 0 }, new;
|
||||
@@ -1130,6 +1130,9 @@ int label_scan(struct cmd_context *cmd)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log_debug_devs("Filtering devices to scan done (nodata)");
|
||||
|
||||
cmd->filter_nodata_only = 0;
|
||||
|
||||
dm_list_iterate_items(devl, &all_devs)
|
||||
@@ -1165,7 +1168,7 @@ int label_scan(struct cmd_context *cmd)
|
||||
* which we want to keep open) is higher than the current
|
||||
* soft limit.
|
||||
*/
|
||||
prepare_open_file_limit(cmd, dm_list_size(&scan_devs));
|
||||
_prepare_open_file_limit(cmd, dm_list_size(&scan_devs));
|
||||
|
||||
/*
|
||||
* Do the main scan.
|
||||
@@ -1207,6 +1210,8 @@ int label_scan(struct cmd_context *cmd)
|
||||
(unsigned long long)want_size_kb);
|
||||
}
|
||||
|
||||
dm_list_init(&cmd->hints);
|
||||
|
||||
/*
|
||||
* If we're using hints to limit which devs we scanned, verify
|
||||
* that those hints were valid, and if not we need to scan the
|
||||
@@ -1218,16 +1223,18 @@ int label_scan(struct cmd_context *cmd)
|
||||
_scan_list(cmd, cmd->filter, &all_devs, 0, NULL);
|
||||
/* scan_devs are the devs that have been scanned */
|
||||
dm_list_splice(&scan_devs, &all_devs);
|
||||
free_hints(&hints_list);
|
||||
using_hints = 0;
|
||||
create_hints = 0;
|
||||
/* invalid hints means a new dev probably appeared and
|
||||
we should search for any missing pvids again. */
|
||||
unlink_searched_devnames(cmd);
|
||||
} else {
|
||||
/* The hints may be used by another device iteration. */
|
||||
dm_list_splice(&cmd->hints, &hints_list);
|
||||
}
|
||||
}
|
||||
|
||||
free_hints(&hints_list);
|
||||
|
||||
/*
|
||||
* Check if the devices_file content is up to date and
|
||||
* if not update it.
|
||||
|
@@ -134,6 +134,4 @@ void dev_invalidate(struct device *dev);
|
||||
void dev_set_last_byte(struct device *dev, uint64_t offset);
|
||||
void dev_unset_last_byte(struct device *dev);
|
||||
|
||||
void prepare_open_file_limit(struct cmd_context *cmd, unsigned int num_devs);
|
||||
|
||||
#endif
|
||||
|
@@ -503,7 +503,7 @@ static int _create_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg,
|
||||
.read_ahead = DM_READ_AHEAD_NONE,
|
||||
.stripes = 1,
|
||||
.vg_name = vg->name,
|
||||
.lv_name = lock_lv_name,
|
||||
.lv_name = dm_pool_strdup(cmd->mem, lock_lv_name),
|
||||
.zero = 1,
|
||||
};
|
||||
|
||||
|
@@ -892,7 +892,6 @@ uint32_t log_journal_str_to_val(const char *str)
|
||||
return LOG_JOURNAL_OUTPUT;
|
||||
if (!strcasecmp(str, "debug"))
|
||||
return LOG_JOURNAL_DEBUG;
|
||||
log_warn("Ignoring unrecognized journal value.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -2354,7 +2354,7 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
|
||||
const struct dm_config_value *cv;
|
||||
const char *str;
|
||||
const char *tag_matched;
|
||||
struct dm_list *tags_to_match = mem ? NULL : pv_tags ? : ((pv2) ? &pv2->tags : NULL);
|
||||
struct dm_list *tags_to_match = mem ? NULL : pv_tags ? : &pv2->tags;
|
||||
struct dm_str_list *sl;
|
||||
unsigned first_tag = 1;
|
||||
|
||||
@@ -2409,7 +2409,7 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
|
||||
continue;
|
||||
}
|
||||
|
||||
if (tags_to_match && !str_list_match_list(&pv1->tags, tags_to_match, &tag_matched))
|
||||
if (!str_list_match_list(&pv1->tags, tags_to_match, &tag_matched))
|
||||
continue;
|
||||
|
||||
if (!pv_tags) {
|
||||
|
@@ -5846,12 +5846,12 @@ static int _stats_report(CMD_ARGS)
|
||||
if (_switches[ALL_PROGRAMS_ARG])
|
||||
_program_id = "";
|
||||
|
||||
if (_switches[VERBOSE_ARG] && subcommand && !strcmp(subcommand, "list"))
|
||||
if (_switches[VERBOSE_ARG] && !strcmp(subcommand, "list"))
|
||||
_statstype |= (DM_STATS_WALK_ALL
|
||||
| DM_STATS_WALK_SKIP_SINGLE_AREA);
|
||||
|
||||
/* suppress duplicates unless the user has requested all regions */
|
||||
if (subcommand && !objtype_args && !strcmp(subcommand, "report"))
|
||||
if (!strcmp(subcommand, "report") && !objtype_args)
|
||||
/* suppress duplicate rows of output */
|
||||
_statstype |= (DM_STATS_WALK_ALL
|
||||
| DM_STATS_WALK_SKIP_SINGLE_AREA);
|
||||
|
@@ -33,7 +33,7 @@ CMIRRORDMAN = cmirrord.8
|
||||
LVMDBUSDMAN = lvmdbusd.8
|
||||
|
||||
MAN5=lvm.conf.5
|
||||
MAN7=lvmsystemid.7 lvmreport.7 lvmraid.7 lvmautoactivation.7
|
||||
MAN7=lvmsystemid.7 lvmreport.7 lvmraid.7
|
||||
|
||||
MAN8=lvm.8 lvmdump.8 lvm-fullreport.8 lvm-lvpoll.8 \
|
||||
lvcreate.8 lvchange.8 lvmconfig.8 lvconvert.8 lvdisplay.8 \
|
||||
|
@@ -61,6 +61,8 @@ and more, using a more compact and configurable output format.
|
||||
.br
|
||||
[ \fB--readonly\fP ]
|
||||
.br
|
||||
[ \fB--reportformat\fP \fBbasic\fP|\fBjson\fP ]
|
||||
.br
|
||||
[ \fB--segments\fP ]
|
||||
.br
|
||||
[ \fB--separator\fP \fIString\fP ]
|
||||
@@ -330,6 +332,16 @@ device-mapper kernel driver, so this option is unable to report whether
|
||||
or not LVs are actually in use.
|
||||
.
|
||||
.HP
|
||||
\fB--reportformat\fP \fBbasic\fP|\fBjson\fP
|
||||
.br
|
||||
Overrides current output format for reports which is defined globally by
|
||||
the report/output_format setting in \fBlvm.conf\fP(5).
|
||||
\fBbasic\fP is the original format with columns and rows.
|
||||
If there is more than one report per command, each report is prefixed
|
||||
with the report name for identification. \fBjson\fP produces report
|
||||
output in JSON format. See \fBlvmreport\fP(7) for more information.
|
||||
.
|
||||
.HP
|
||||
\fB--segments\fP
|
||||
.br
|
||||
.
|
||||
|
@@ -579,7 +579,6 @@ Prepends source file name and code line number with libdm debugging.
|
||||
.BR lvmraid (7),
|
||||
.BR lvmthin (7),
|
||||
.BR lvmcache (7),
|
||||
.BR lvmautoactivation (7),
|
||||
.P
|
||||
.BR dmsetup (8),
|
||||
.BR dmstats (8),
|
||||
|
@@ -1,314 +0,0 @@
|
||||
.TH "LVMAUTOACTIVATION" "7" "LVM TOOLS #VERSION#" "Red Hat, Inc" "\""
|
||||
.
|
||||
.SH NAME
|
||||
.
|
||||
lvmautoactivation \(em LVM autoactivation
|
||||
.
|
||||
.SH DESCRIPTION
|
||||
.
|
||||
Autoactivation is the activation of LVs performed automatically by the
|
||||
system in response to LVM devices being attached to the machine. When all
|
||||
PVs in a VG have been attached, the VG is complete, and LVs in the VG are
|
||||
activated.
|
||||
.P
|
||||
Autoactivation of VGs, or specific LVs, can be prevented using vgchange or
|
||||
lvchange --setautoactivation n. The lvm.conf auto_activation_volume_list
|
||||
is another way to limit autoactivation.
|
||||
.
|
||||
.SS event autoactivation
|
||||
.P
|
||||
The most common form of autoactivation is "event based", in which complete
|
||||
VGs are activated in response to uevents which occur during system startup
|
||||
or at any time after the system has started. Another form of
|
||||
autoactivation is "static" in which complete VGs are activated at a fixed
|
||||
point during system startup by a systemd service, and not in response to
|
||||
events. This can be controlled with the lvm.conf setting
|
||||
event_activation.
|
||||
.P
|
||||
Event based autoactivation is driven by udev, udev rules, and systemd.
|
||||
When a device is attached to a machine, a uevent is generated by the
|
||||
kernel to notify userspace of the new device. systemd-udev runs udev
|
||||
rules to process the new device. Udev rules use blkid to identify the
|
||||
device as an LVM PV and then execute the lvm-specific udev rule for the
|
||||
device, which triggers autoactivation.
|
||||
.P
|
||||
There are two variations of event based autoactivation that may be used on
|
||||
a system, depending on the LVM udev rule that is installed (found in
|
||||
/lib/udev/rules.d/.) The following summarizes the steps in each rule
|
||||
which lead to autoactivation:
|
||||
.P
|
||||
.B 69-dm-lvm-metad.rules
|
||||
.
|
||||
.IP \[bu] 2
|
||||
device /dev/name with major:minor X:Y is attached to the machine
|
||||
.
|
||||
.IP \[bu] 2
|
||||
systemd/udev runs blkid to identify /dev/name as an LVM PV
|
||||
.
|
||||
.IP \[bu] 2
|
||||
udev rule 69-dm-lvm-metad.rules is run for /dev/name
|
||||
.
|
||||
.IP \[bu] 2
|
||||
the lvm udev rule runs the systemd service lvm2-pvscan@X:Y.service
|
||||
.
|
||||
.IP \[bu] 2
|
||||
the lvm2-pvscan service runs:
|
||||
.br
|
||||
pvscan --cache -aay --major X --minor Y
|
||||
.
|
||||
.IP \[bu] 2
|
||||
pvscan reads the device, records that the PV is online
|
||||
(see pvs_online), and checks if the VG is complete.
|
||||
.
|
||||
.IP \[bu] 2
|
||||
if the VG is complete, pvscan creates the vgs_online temp file,
|
||||
and activates the VG.
|
||||
.
|
||||
.IP \[bu] 2
|
||||
the activation command output can be seen from
|
||||
systemctl status lvm2-pvscan*
|
||||
.P
|
||||
.B 69-dm-lvm.rules
|
||||
.
|
||||
.IP \[bu] 2
|
||||
device /dev/name with major:minor X:Y is attached to the machine
|
||||
.
|
||||
.IP \[bu] 2
|
||||
systemd/udev runs blkid to identify /dev/name as an LVM PV
|
||||
.
|
||||
.IP \[bu] 2
|
||||
udev rule 69-dm-lvm.rules is run for /dev/name
|
||||
.
|
||||
.IP \[bu] 2
|
||||
the lvm udev rule runs:
|
||||
.br
|
||||
pvscan --cache --listvg --checkcomplete --vgonline
|
||||
.br
|
||||
--autoactivation event --udevoutput --journal=output /dev/name
|
||||
.
|
||||
.IP \[bu] 2
|
||||
pvscan reads the device, records that the PV is online
|
||||
(see pvs_online), and checks if the VG is complete.
|
||||
.
|
||||
.IP \[bu] 2
|
||||
if the VG is complete, pvscan creates the vgs_online temp file,
|
||||
and prints the name of the VG for the udev rule to import:
|
||||
LVM_VG_NAME_COMPLETE='vgname'
|
||||
.
|
||||
.IP \[bu] 2
|
||||
if the lvm udev rule sees LVM_VG_NAME_COMPLETE from pvscan,
|
||||
it activates the VG using a transient systemd service named
|
||||
lvm-activate-<vgname>.
|
||||
.
|
||||
.IP \[bu] 2
|
||||
the lvm-activate-<vgname> service runs
|
||||
.br
|
||||
vgchange -aay --autoactivation event <vgname>
|
||||
.
|
||||
.IP \[bu] 2
|
||||
the activation command output can be seen from
|
||||
journalctl -u lvm-activate-<vgname>
|
||||
.P
|
||||
.
|
||||
.SS pvscan options
|
||||
.P
|
||||
.B --cache
|
||||
.br
|
||||
Read the <device> arg (and only that device), and record that
|
||||
the PV is online by creating the /run/lvm/pvs_online/<pvid>
|
||||
file containing the name of the VG and the device for the PV.
|
||||
.P
|
||||
.B -aay
|
||||
.br
|
||||
Activate the VG from the pvscan command
|
||||
(includes implicit --checkcomplete and --vgonline.)
|
||||
.P
|
||||
.B --checkcomplete
|
||||
.br
|
||||
Check if the VG is complete, i.e. all PVs are present on
|
||||
the system, by checking /run/lvm/pvs_online/<pvid> files.
|
||||
.P
|
||||
.B --vgonline
|
||||
.br
|
||||
Create /run/lvm/vgs_online/<vgname> if the VG is complete
|
||||
(used to ensure only one command performs activation.)
|
||||
.P
|
||||
.B --autoactivation event
|
||||
.br
|
||||
Inform the command it is used for event based autoactivation.
|
||||
.P
|
||||
.B --listvg
|
||||
.br
|
||||
Print the name of the VG using the device.
|
||||
.P
|
||||
.B --udevoutput
|
||||
.br
|
||||
Only print output that can be imported to the udev rule,
|
||||
using the udev environment key format, i.e. NAME='value'.
|
||||
.P
|
||||
.B --journal=output
|
||||
.br
|
||||
Send standard command output to the journal (when stdout
|
||||
is reserved for udev output.)
|
||||
.P
|
||||
.SS run files
|
||||
.P
|
||||
Autoactivation commands use a number of temp files in /run/lvm (with the
|
||||
expectation that /run is cleared between boots.)
|
||||
.P
|
||||
.B pvs_online
|
||||
.br
|
||||
pvscan --cache creates a file here for each PV that is attached. The file
|
||||
is named with the PVID and contains the VG name and device information.
|
||||
The existence of the file is used to determine when all PVs for a given VG
|
||||
are present. The device information in these files is also used to
|
||||
optimize locating devices for a VG when the VG is activated.
|
||||
.P
|
||||
.B pvs_lookup
|
||||
.br
|
||||
pvscan --cache creates a file here named for a VG (if one doesn't already
|
||||
exist.) The file contains a list of PVIDs in the VG. This is needed when
|
||||
a PV is processed which has no VG metadata, in which case the list of
|
||||
PVIDs from the lookup file is used to check if the VG is complete.
|
||||
.P
|
||||
.B vgs_online
|
||||
.br
|
||||
The first activation command (pvscan or vgchange) to create a file here,
|
||||
named for the VG, will activate the VG. This resolves a race when
|
||||
concurrent commands attempt to activate a VG at once.
|
||||
.
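To make the relationship between these run files concrete, here is a small illustrative shell sketch; it is not part of lvm2, and it assumes the pvs_lookup file lists one PVID per line and the pvs_online layout shown in the EXAMPLES section below (first line major:minor, then vg:<name> and dev:<path> lines). It roughly approximates the completeness test that pvscan --checkcomplete performs.

#!/bin/sh
# check_vg_complete.sh <vgname> - hypothetical helper, not shipped with lvm2
vg="$1"
lookup="/run/lvm/pvs_lookup/$vg"

if [ -e "$lookup" ]; then
	# Count PVIDs from the lookup file that have no pvs_online entry yet.
	missing=0
	while read -r pvid; do
		[ -e "/run/lvm/pvs_online/$pvid" ] || missing=$((missing + 1))
	done < "$lookup"
	if [ "$missing" -eq 0 ]; then
		echo "VG $vg complete"
	else
		echo "VG $vg incomplete ($missing PV(s) not online yet)"
	fi
else
	# No lookup file: just list online PVs that record this VG name.
	echo "online PVs claiming VG $vg:"
	grep -l "^vg:$vg\$" /run/lvm/pvs_online/* 2>/dev/null || true
fi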
|
||||
.SS static autoactivation
|
||||
.P
|
||||
When event autoactivation is disabled by setting lvm.conf
|
||||
event_activation=0, autoactivation is performed at one or more static
|
||||
points during system startup. At these points, a vgchange -aay command is
|
||||
run to activate complete VGs from devices that are present on the system
|
||||
at that time. pvscan commands (and lvm2-pvscan services) do not perform
|
||||
autoactivation in this mode. pvscan commands may still be run from
|
||||
uevents but will do nothing when they read the event_activation=0 setting.
|
||||
.P
|
||||
The static vgchange -aay commands are run by three systemd services at
|
||||
three points during startup: lvm2-activation-early, lvm2-activation, and
|
||||
lvm2-activation-net. These static activation services are "generated
|
||||
services", so the service files are created at run time by the
|
||||
lvm2-activation-generator command (run by systemd).
|
||||
lvm2-activation-generator creates the services if lvm.conf
|
||||
event_activation=0.
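As a rough sketch of switching to static autoactivation (the lvm.conf section name and the systemctl queries below are assumptions based on the description above, not taken from this patch):

# in lvm.conf:
#   global {
#           event_activation = 0
#   }
lvmconfig global/event_activation      # confirm the value lvm actually sees
systemctl daemon-reload                # re-runs generators, including lvm2-activation-generator
systemctl list-unit-files 'lvm2-activation*'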
|
||||
.P
|
||||
The limitation of this method is that devices may not be attached to the
|
||||
system (or set up) at a reliable point in time during startup, and they
|
||||
may not be present when the services run vgchange. In this case, the VGs
|
||||
will not be autoactivated. So, the timing of device attachment/setup
|
||||
determines whether static autoactivation will produce the same results as
|
||||
event autoactivation. For this reason, static autoactivation is not
|
||||
recommended.
|
||||
.P
|
||||
Sometimes, static autoactivation is mistakenly expected to disable all
|
||||
autoactivation of particular VGs. This may appear to be effective if those
|
||||
VGs are slow to be attached or set up. But, the only correct and reliable
|
||||
way to disable autoactivation is using vgchange/lvchange
|
||||
--setautoactivation n, or lvm.conf auto_activation_volume_list.
|
||||
.
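For reference, a brief sketch of the reliable approaches just described (the VG and LV names are placeholders):

# stored in the VG/LV metadata:
vgchange --setautoactivation n vg0
lvchange --setautoactivation n vg0/lvol0

# or globally in lvm.conf, e.g. an empty list disables all autoactivation:
#   activation {
#           auto_activation_volume_list = [ ]
#   }
lvmconfig --withcomments activation/auto_activation_volume_list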
|
||||
.SH EXAMPLES
|
||||
.P
|
||||
VG "vg" contains two PVs:
|
||||
.nf
|
||||
$ pvs -o name,vgname,uuid /dev/sdb /dev/sdc
|
||||
PV VG PV UUID
|
||||
/dev/sdb vg 1uKpaT-lFOZ-NLHX-j4jI-OBi1-QpdE-HZ5hZY
|
||||
/dev/sdc vg 5J3tM8-aIPe-2vbd-DBe7-bvRq-TGj0-DaKV2G
|
||||
.fi
|
||||
.P
|
||||
use of --cache:
|
||||
.nf
|
||||
$ pvscan --cache /dev/sdb
|
||||
pvscan[12922] PV /dev/sdb online.
|
||||
$ pvscan --cache /dev/sdc
|
||||
pvscan[12923] PV /dev/sdc online.
|
||||
|
||||
$ cat /run/lvm/pvs_online/1uKpaTlFOZNLHXj4jIOBi1QpdEHZ5hZY
|
||||
8:16
|
||||
vg:vg
|
||||
dev:/dev/sdb
|
||||
$ cat /run/lvm/pvs_online/5J3tM8aIPe2vbdDBe7bvRqTGj0DaKV2G
|
||||
8:32
|
||||
vg:vg
|
||||
dev:/dev/sdc
|
||||
.fi
|
||||
.P
|
||||
use of -aay:
|
||||
.nf
|
||||
$ pvscan --cache -aay /dev/sdb
|
||||
pvscan[12935] PV /dev/sdb online, VG vg incomplete (need 1).
|
||||
$ pvscan --cache -aay /dev/sdc
|
||||
pvscan[12936] PV /dev/sdc online, VG vg is complete.
|
||||
pvscan[12936] VG vg run autoactivation.
|
||||
1 logical volume(s) in volume group "vg" now active
|
||||
|
||||
$ cat /run/lvm/pvs_online/1uKpaTlFOZNLHXj4jIOBi1QpdEHZ5hZY
|
||||
8:16
|
||||
vg:vg
|
||||
dev:/dev/sdb
|
||||
$ cat /run/lvm/pvs_online/5J3tM8aIPe2vbdDBe7bvRqTGj0DaKV2G
|
||||
8:32
|
||||
vg:vg
|
||||
dev:/dev/sdc
|
||||
$ ls /run/lvm/vgs_online/vg
|
||||
/run/lvm/vgs_online/vg
|
||||
.fi
|
||||
.P
|
||||
use of --listvg:
|
||||
.nf
|
||||
$ pvscan --cache --listvg /dev/sdb
|
||||
VG vg
|
||||
$ pvscan --cache --listvg /dev/sdc
|
||||
VG vg
|
||||
|
||||
$ cat /run/lvm/pvs_online/1uKpaTlFOZNLHXj4jIOBi1QpdEHZ5hZY
|
||||
8:16
|
||||
vg:vg
|
||||
dev:/dev/sdb
|
||||
$ cat /run/lvm/pvs_online/5J3tM8aIPe2vbdDBe7bvRqTGj0DaKV2G
|
||||
8:32
|
||||
vg:vg
|
||||
dev:/dev/sdc
|
||||
.fi
|
||||
.P
|
||||
use of --checkcomplete:
|
||||
.nf
|
||||
$ pvscan --cache --listvg --checkcomplete --vgonline /dev/sdb
|
||||
pvscan[12996] PV /dev/sdb online, VG vg incomplete (need 1).
|
||||
VG vg incomplete
|
||||
$ pvscan --cache --listvg --checkcomplete --vgonline /dev/sdc
|
||||
pvscan[12997] PV /dev/sdc online, VG vg is complete.
|
||||
VG vg complete
|
||||
.fi
|
||||
.P
|
||||
use of --udevoutput:
|
||||
.nf
|
||||
$ pvscan --cache --listvg --checkcomplete --vgonline --udevoutput /dev/sdb
|
||||
LVM_VG_NAME_INCOMPLETE='vg'
|
||||
$ pvscan --cache --listvg --checkcomplete --vgonline --udevoutput /dev/sdc
|
||||
LVM_VG_NAME_COMPLETE='vg'
|
||||
.fi
|
||||
.P
|
||||
use of --listlvs:
|
||||
.nf
|
||||
$ lvs -o name,devices vg
|
||||
LV Devices
|
||||
lvol0 /dev/sdb(0)
|
||||
lvol1 /dev/sdc(0)
|
||||
lvol2 /dev/sdb(1),/dev/sdc(1)
|
||||
|
||||
$ pvscan --cache --listlvs --checkcomplete /dev/sdb
|
||||
pvscan[13288] PV /dev/sdb online, VG vg incomplete (need 1).
|
||||
VG vg incomplete
|
||||
LV vg/lvol0 complete
|
||||
LV vg/lvol2 incomplete
|
||||
$ pvscan --cache --listlvs --checkcomplete /dev/sdc
|
||||
pvscan[13289] PV /dev/sdc online, VG vg is complete.
|
||||
VG vg complete
|
||||
LV vg/lvol1 complete
|
||||
LV vg/lvol2 complete
|
||||
.fi
|
||||
|
@@ -61,6 +61,8 @@ and more, using a more compact and configurable output format.
|
||||
.br
|
||||
[ \fB--readonly\fP ]
|
||||
.br
|
||||
[ \fB--reportformat\fP \fBbasic\fP|\fBjson\fP ]
|
||||
.br
|
||||
[ \fB--separator\fP \fIString\fP ]
|
||||
.br
|
||||
[ \fB--shared\fP ]
|
||||
@@ -318,6 +320,16 @@ device-mapper kernel driver, so this option is unable to report whether
|
||||
or not LVs are actually in use.
|
||||
.
|
||||
.HP
|
||||
\fB--reportformat\fP \fBbasic\fP|\fBjson\fP
|
||||
.br
|
||||
Overrides current output format for reports which is defined globally by
|
||||
the report/output_format setting in \fBlvm.conf\fP(5).
|
||||
\fBbasic\fP is the original format with columns and rows.
|
||||
If there is more than one report per command, each report is prefixed
|
||||
with the report name for identification. \fBjson\fP produces report
|
||||
output in JSON format. See \fBlvmreport\fP(7) for more information.
|
||||
.
|
||||
.HP
|
||||
\fB-S\fP|\fB--select\fP \fIString\fP
|
||||
.br
|
||||
Select objects for processing and reporting based on specified criteria.
|
||||
|
@@ -4,47 +4,56 @@ like
|
||||
or
|
||||
.BR pvdisplay (8).
|
||||
.P
|
||||
When --cache is used, pvscan updates runtime lvm state on the system, or
|
||||
with -aay performs autoactivation.
|
||||
When the --cache and -aay options are used, pvscan records which PVs are
|
||||
available on the system, and activates LVs in completed VGs. A VG is
|
||||
complete when pvscan sees that the final PV in the VG has appeared. This
|
||||
is used by event-based system startup (systemd, udev) to activate LVs.
|
||||
.P
|
||||
The four main variations of this are:
|
||||
.P
|
||||
.B pvscan --cache
|
||||
.I device
|
||||
.P
|
||||
If device is present, lvm records that the PV on device is online.
|
||||
If device is present, lvm adds a record that the PV on device is online.
|
||||
If device is not present, lvm removes the online record for the PV.
|
||||
pvscan only reads the named device.
|
||||
In most cases, the pvscan will only read the named devices.
|
||||
.P
|
||||
.B pvscan --cache -aay
|
||||
.IR device ...
|
||||
.P
|
||||
This begins by performing the same steps as above. Afterward, if the VG
|
||||
for the specified PV is complete, then pvscan will activate LVs in the VG
|
||||
(the same as vgchange -aay vgname would do.)
|
||||
.P
|
||||
.B pvscan --cache
|
||||
.P
|
||||
Updates the runtime state for all lvm devices.
|
||||
.P
|
||||
.B pvscan --cache -aay
|
||||
.I device
|
||||
.P
|
||||
Performs the --cache steps for the device, then checks if the VG using the
|
||||
device is complete. If so, LVs in the VG are autoactivated, the same as
|
||||
vgchange -aay vgname would do. (A device name may be replaced with major
|
||||
and minor numbers.)
|
||||
This first clears all existing PV online records, then scans all devices
|
||||
on the system, adding PV online records for any PVs that are found.
|
||||
.P
|
||||
.B pvscan --cache -aay
|
||||
.P
|
||||
Performs the --cache steps for all devices, then autoactivates any complete VGs.
|
||||
This begins by performing the same steps as pvscan --cache. Afterward, it
|
||||
activates LVs in any complete VGs.
|
||||
.P
|
||||
.B pvscan --cache --listvg|--listlvs
|
||||
.I device
|
||||
To prevent devices from being scanned by pvscan --cache, add them
|
||||
to
|
||||
.BR lvm.conf (5)
|
||||
.B devices/global_filter.
|
||||
For more information, see:
|
||||
.br
|
||||
.B lvmconfig --withcomments devices/global_filter
|
||||
.P
|
||||
Performs the --cache steps for the device, then prints the name of the VG
|
||||
using the device, or the names of LVs using the device. --checkcomplete
|
||||
is usually included to check if all PVs for the VG or LVs are online.
|
||||
When this command is called by a udev rule, the output must conform to
|
||||
udev rule specifications (see --udevoutput.) The udev rule will use the
|
||||
results to perform autoactivation.
|
||||
.P
|
||||
Autoactivation of VGs or LVs can be enabled/disabled using vgchange or
|
||||
lvchange with --setautoactivation y|n, or by adding names to
|
||||
Auto-activation of VGs or LVs can be enabled/disabled using:
|
||||
.br
|
||||
.BR lvm.conf (5)
|
||||
.B activation/auto_activation_volume_list
|
||||
.P
|
||||
See
|
||||
.BR lvmautoactivation (7)
|
||||
for more information about how pvscan is used for autoactivation.
|
||||
For more information, see:
|
||||
.br
|
||||
.B lvmconfig --withcomments activation/auto_activation_volume_list
|
||||
.P
|
||||
To disable auto-activation, explicitly set this list to an empty list,
|
||||
i.e. auto_activation_volume_list = [ ].
|
||||
.P
|
||||
When this setting is undefined (e.g. commented), then all LVs are
|
||||
auto-activated.
|
||||
|
@@ -91,50 +91,59 @@ like
|
||||
or
|
||||
.BR pvdisplay (8).
|
||||
.P
|
||||
When --cache is used, pvscan updates runtime lvm state on the system, or
|
||||
with -aay performs autoactivation.
|
||||
When the --cache and -aay options are used, pvscan records which PVs are
|
||||
available on the system, and activates LVs in completed VGs. A VG is
|
||||
complete when pvscan sees that the final PV in the VG has appeared. This
|
||||
is used by event-based system startup (systemd, udev) to activate LVs.
|
||||
.P
|
||||
The four main variations of this are:
|
||||
.P
|
||||
.B pvscan --cache
|
||||
.I device
|
||||
.P
|
||||
If device is present, lvm records that the PV on device is online.
|
||||
If device is present, lvm adds a record that the PV on device is online.
|
||||
If device is not present, lvm removes the online record for the PV.
|
||||
pvscan only reads the named device.
|
||||
In most cases, the pvscan will only read the named devices.
|
||||
.P
|
||||
.B pvscan --cache -aay
|
||||
.IR device ...
|
||||
.P
|
||||
This begins by performing the same steps as above. Afterward, if the VG
|
||||
for the specified PV is complete, then pvscan will activate LVs in the VG
|
||||
(the same as vgchange -aay vgname would do.)
|
||||
.P
|
||||
.B pvscan --cache
|
||||
.P
|
||||
Updates the runtime state for all lvm devices.
|
||||
.P
|
||||
.B pvscan --cache -aay
|
||||
.I device
|
||||
.P
|
||||
Performs the --cache steps for the device, then checks if the VG using the
|
||||
device is complete. If so, LVs in the VG are autoactivated, the same as
|
||||
vgchange -aay vgname would do. (A device name may be replaced with major
|
||||
and minor numbers.)
|
||||
This first clears all existing PV online records, then scans all devices
|
||||
on the system, adding PV online records for any PVs that are found.
|
||||
.P
|
||||
.B pvscan --cache -aay
|
||||
.P
|
||||
Performs the --cache steps for all devices, then autoactivates any complete VGs.
|
||||
This begins by performing the same steps as pvscan --cache. Afterward, it
|
||||
activates LVs in any complete VGs.
|
||||
.P
|
||||
.B pvscan --cache --listvg|--listlvs
|
||||
.I device
|
||||
To prevent devices from being scanned by pvscan --cache, add them
|
||||
to
|
||||
.BR lvm.conf (5)
|
||||
.B devices/global_filter.
|
||||
For more information, see:
|
||||
.br
|
||||
.B lvmconfig --withcomments devices/global_filter
|
||||
.P
|
||||
Performs the --cache steps for the device, then prints the name of the VG
|
||||
using the device, or the names of LVs using the device. --checkcomplete
|
||||
is usually included to check if all PVs for the VG or LVs are online.
|
||||
When this command is called by a udev rule, the output must conform to
|
||||
udev rule specifications (see --udevoutput.) The udev rule will use the
|
||||
results to perform autoactivation.
|
||||
.P
|
||||
Autoactivation of VGs or LVs can be enabled/disabled using vgchange or
|
||||
lvchange with --setautoactivation y|n, or by adding names to
|
||||
Auto-activation of VGs or LVs can be enabled/disabled using:
|
||||
.br
|
||||
.BR lvm.conf (5)
|
||||
.B activation/auto_activation_volume_list
|
||||
.P
|
||||
See
|
||||
.BR lvmautoactivation (7)
|
||||
for more information about how pvscan is used for autoactivation.
|
||||
For more information, see:
|
||||
.br
|
||||
.B lvmconfig --withcomments activation/auto_activation_volume_list
|
||||
.P
|
||||
To disable auto-activation, explicitly set this list to an empty list,
|
||||
i.e. auto_activation_volume_list = [ ].
|
||||
.P
|
||||
When this setting is undefined (e.g. commented), then all LVs are
|
||||
auto-activated.
|
||||
.
|
||||
.SH USAGE
|
||||
.
|
||||
|
@@ -58,6 +58,8 @@ and more, using a more compact and configurable output format.
|
||||
.br
|
||||
[ \fB--readonly\fP ]
|
||||
.br
|
||||
[ \fB--reportformat\fP \fBbasic\fP|\fBjson\fP ]
|
||||
.br
|
||||
[ \fB--shared\fP ]
|
||||
.br
|
||||
[ \fB--separator\fP \fIString\fP ]
|
||||
@@ -310,6 +312,16 @@ device-mapper kernel driver, so this option is unable to report whether
|
||||
or not LVs are actually in use.
|
||||
.
|
||||
.HP
|
||||
\fB--reportformat\fP \fBbasic\fP|\fBjson\fP
|
||||
.br
|
||||
Overrides current output format for reports which is defined globally by
|
||||
the report/output_format setting in \fBlvm.conf\fP(5).
|
||||
\fBbasic\fP is the original format with columns and rows.
|
||||
If there is more than one report per command, each report is prefixed
|
||||
with the report name for identification. \fBjson\fP produces report
|
||||
output in JSON format. See \fBlvmreport\fP(7) for more information.
|
||||
.
|
||||
.HP
|
||||
\fB-S\fP|\fB--select\fP \fIString\fP
|
||||
.br
|
||||
Select objects for processing and reporting based on specified criteria.
|
||||
|
@@ -15,9 +15,6 @@ srcdir = @srcdir@
|
||||
top_srcdir = @top_srcdir@
|
||||
top_builddir = @top_builddir@
|
||||
|
||||
SOURCES = lvm2_activation_generator_systemd_red_hat.c
|
||||
TARGETS = lvm2_activation_generator_systemd_red_hat
|
||||
|
||||
include $(top_builddir)/make.tmpl
|
||||
|
||||
ifeq ("@BUILD_DMEVENTD@", "yes")
|
||||
@@ -66,7 +63,7 @@ install_initscripts:
|
||||
@echo " [INSTALL] initscripts"
|
||||
$(Q) $(INSTALL_DIR) $(initdir)
|
||||
ifeq ("@BUILD_DMEVENTD@", "yes")
|
||||
$(Q) $(INSTALL_SCRIPT) lvm2_monitoring_init_red_hat $(initdir)/lvm2-monitor
|
||||
$(Q) $(INSTALL_SCRIPT) lvm2_monitoring_init_red_hat $(initdir)/lvm-monitor
|
||||
endif
|
||||
ifeq ("@BUILD_LVMPOLLD@", "yes")
|
||||
$(Q) $(INSTALL_SCRIPT) lvm2_lvmpolld_init_red_hat $(initdir)/lvm2-lvmpolld
|
||||
@@ -78,25 +75,15 @@ ifeq ("@BLKDEACTIVATE@", "yes")
|
||||
$(Q) $(INSTALL_SCRIPT) blk_availability_init_red_hat $(initdir)/blk-availability
|
||||
endif
|
||||
|
||||
CFLAGS_lvm2_activation_generator_systemd_red_hat.o += $(EXTRA_EXEC_CFLAGS)
|
||||
|
||||
lvm2_activation_generator_systemd_red_hat: $(OBJECTS) $(LVMINTERNAL_LIBS)
|
||||
@echo " [CC] $@"
|
||||
$(Q) $(CC) -o $@ $(OBJECTS) $(CFLAGS) $(LDFLAGS) $(EXTRA_EXEC_LDFLAGS) $(ELDFLAGS) $(LVMINTERNAL_LIBS) $(LIBS)
|
||||
|
||||
install_systemd_generators:
|
||||
@echo " [INSTALL] systemd_generators"
|
||||
$(Q) $(INSTALL_DIR) $(systemd_generator_dir)
|
||||
$(Q) $(INSTALL_PROGRAM) lvm2_activation_generator_systemd_red_hat $(systemd_generator_dir)/lvm2-activation-generator
|
||||
|
||||
install_systemd_units: install_dbus_service
|
||||
@echo " [INSTALL] systemd_units"
|
||||
$(Q) $(INSTALL_DIR) $(systemd_unit_dir)
|
||||
$(Q) $(INSTALL_DATA) lvm2-pvscan.service $(systemd_unit_dir)/lvm2-pvscan@.service
|
||||
$(Q) $(INSTALL_DATA) lvm-activate-vgs-main.service $(systemd_unit_dir)/lvm-activate-vgs-main.service
|
||||
$(Q) $(INSTALL_DATA) lvm-activate-vgs-last.service $(systemd_unit_dir)/lvm-activate-vgs-last.service
|
||||
ifeq ("@BUILD_DMEVENTD@", "yes")
|
||||
$(Q) $(INSTALL_DATA) dm_event_systemd_red_hat.socket $(systemd_unit_dir)/dm-event.socket
|
||||
$(Q) $(INSTALL_DATA) dm_event_systemd_red_hat.service $(systemd_unit_dir)/dm-event.service
|
||||
$(Q) $(INSTALL_DATA) lvm2_monitoring_systemd_red_hat.service $(systemd_unit_dir)/lvm2-monitor.service
|
||||
$(Q) $(INSTALL_DATA) lvm2_monitoring_systemd_red_hat.service $(systemd_unit_dir)/lvm-monitor.service
|
||||
endif
|
||||
ifeq ("@BLKDEACTIVATE@", "yes")
|
||||
$(Q) $(INSTALL_DATA) blk_availability_systemd_red_hat.service $(systemd_unit_dir)/blk-availability.service
|
||||
@@ -156,7 +143,9 @@ DISTCLEAN_TARGETS += \
|
||||
lvm2_monitoring_init_red_hat \
|
||||
lvm2_monitoring_systemd_red_hat.service \
|
||||
lvm2_pvscan_systemd_red_hat@.service \
|
||||
lvm2_tmpfiles_red_hat.conf
|
||||
lvm2_tmpfiles_red_hat.conf \
|
||||
lvm-activate-vgs-main.service \
|
||||
lvm-activate-vgs-last.service
|
||||
|
||||
# Remove ancient files
|
||||
DISTCLEAN_TARGETS += \
|
||||
|
@@ -1,7 +1,7 @@
|
||||
[Unit]
|
||||
Description=Availability of block devices
|
||||
Before=shutdown.target
|
||||
After=lvm2-activation.service iscsi-shutdown.service iscsi.service iscsid.service fcoe.service rbdmap.service
|
||||
After=lvm-activate-vgs-main.service lvm-activate-vgs-last.service iscsi-shutdown.service iscsi.service iscsid.service fcoe.service rbdmap.service
|
||||
DefaultDependencies=no
|
||||
Conflicts=shutdown.target
|
||||
|
||||
|
22
scripts/lvm-activate-vgs-last.service.in
Normal file
@@ -0,0 +1,22 @@
|
||||
[Unit]
|
||||
Description=Activate LVM Volume Groups (last)
|
||||
Documentation=man:vgchange(8)
|
||||
Wants=systemd-udev-settle.service
|
||||
After=lvm-activate-vgs-main.service systemd-udev-settle.service multipathd.service cryptsetup.target
|
||||
Before=local-fs-pre.target shutdown.target
|
||||
DefaultDependencies=no
|
||||
Conflicts=shutdown.target
|
||||
|
||||
# "--eventactivation service" tells vgchange it is being called
|
||||
# from an activation service, so it will do nothing if
|
||||
# lvm.conf event_activation_options = "event_only".
|
||||
# "--eventactivation on" tells vgchange to enable event-based
|
||||
# pvscan activations by creating /run/lvm/event-activation-on.
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=@SBINDIR@/lvm vgchange -aay --nohints --vgonline --eventactivation service,on
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=sysinit.target
|
19
scripts/lvm-activate-vgs-main.service.in
Normal file
@@ -0,0 +1,19 @@
|
||||
[Unit]
|
||||
Description=Activate LVM Volume Groups
|
||||
Documentation=man:vgchange(8)
|
||||
After=dm-event.socket dm-event.service
|
||||
Before=local-fs-pre.target shutdown.target
|
||||
DefaultDependencies=no
|
||||
Conflicts=shutdown.target
|
||||
|
||||
# "--eventactivation service" tells vgchange it is being called
|
||||
# from an activation service, so it will do nothing if
|
||||
# lvm.conf event_activation_options = "event_only".
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=@SBINDIR@/lvm vgchange -aay --nohints --vgonline --eventactivation service
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=sysinit.target
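Once these units are installed (the Makefile change above copies them into the systemd unit directory), they can be inspected with ordinary systemd tooling; a brief sketch, assuming a host running this branch:

systemctl cat lvm-activate-vgs-main.service lvm-activate-vgs-last.service
systemctl list-dependencies --before lvm-activate-vgs-last.service
journalctl -b -u lvm-activate-vgs-main.service -u lvm-activate-vgs-last.service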
|
@@ -1,8 +1,8 @@
|
||||
[Unit]
|
||||
Description=Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
|
||||
Description=Monitor LVM Logical Volumes
|
||||
Documentation=man:dmeventd(8) man:lvcreate(8) man:lvchange(8) man:vgchange(8)
|
||||
Requires=dm-event.socket
|
||||
After=dm-event.socket dm-event.service lvm2-activation.service
|
||||
After=dm-event.socket dm-event.service lvm-activate-vgs-last.service
|
||||
Before=local-fs-pre.target shutdown.target
|
||||
DefaultDependencies=no
|
||||
Conflicts=shutdown.target
|
||||
|
@@ -23,10 +23,10 @@ if [ $1 = 0 ]; then
|
||||
fi
|
||||
|
||||
%triggerun -- %{name} < 2.02.86-2
|
||||
%{_bindir}/systemd-sysv-convert --save lvm2-monitor >/dev/null 2>&1 || :
|
||||
/bin/systemctl --no-reload enable lvm2-monitor.service > /dev/null 2>&1 || :
|
||||
/sbin/chkconfig --del lvm2-monitor > /dev/null 2>&1 || :
|
||||
/bin/systemctl try-restart lvm2-monitor.service > /dev/null 2>&1 || :
|
||||
%{_bindir}/systemd-sysv-convert --save lvm-monitor >/dev/null 2>&1 || :
|
||||
/bin/systemctl --no-reload enable lvm-monitor.service > /dev/null 2>&1 || :
|
||||
/sbin/chkconfig --del lvm-monitor > /dev/null 2>&1 || :
|
||||
/bin/systemctl try-restart lvm-monitor.service > /dev/null 2>&1 || :
|
||||
# files in the main package
|
||||
|
||||
%files
|
||||
@@ -88,7 +88,6 @@ fi
|
||||
%{_sbindir}/lvmpolld
|
||||
%endif
|
||||
%{_mandir}/man5/lvm.conf.5.gz
|
||||
%{_mandir}/man7/lvmautoactivation.7.gz
|
||||
%{_mandir}/man7/lvmsystemid.7.gz
|
||||
%{_mandir}/man7/lvmreport.7.gz
|
||||
%{_mandir}/man7/lvmraid.7.gz
|
||||
@@ -101,9 +100,6 @@ fi
|
||||
%{_mandir}/man8/lvm-config.8.gz
|
||||
%{_mandir}/man8/lvm-dumpconfig.8.gz
|
||||
%{_mandir}/man8/lvm.8.gz
|
||||
%if %{enable_systemd}
|
||||
%{_mandir}/man8/lvm2-activation-generator.8.gz
|
||||
%endif
|
||||
%{_mandir}/man8/lvmconfig.8.gz
|
||||
%{_mandir}/man8/lvmdevices.8.gz
|
||||
%{_mandir}/man8/lvmdiskscan.8.gz
|
||||
@@ -164,7 +160,7 @@ fi
|
||||
%endif
|
||||
%if %{enable_udev}
|
||||
%{_udevdir}/11-dm-lvm.rules
|
||||
%{_udevdir}/69-dm-lvm-metad.rules
|
||||
%{_udevdir}/69-dm-lvm.rules
|
||||
%endif
|
||||
%dir %{_sysconfdir}/lvm
|
||||
%ghost %{_sysconfdir}/lvm/cache/.cache
|
||||
@@ -188,17 +184,17 @@ fi
|
||||
%dir %{_default_run_dir}
|
||||
%if %{enable_systemd}
|
||||
%{_tmpfilesdir}/%{name}.conf
|
||||
%{_unitdir}/lvm-activate-vgs-main.service
|
||||
%{_unitdir}/lvm-activate-vgs-last.service
|
||||
%{_unitdir}/lvm-monitor.service
|
||||
%{_unitdir}/blk-availability.service
|
||||
%{_unitdir}/lvm2-monitor.service
|
||||
%{_unitdir}/lvm2-pvscan@.service
|
||||
%attr(555, -, -) %{_prefix}/lib/systemd/system-generators/lvm2-activation-generator
|
||||
%if %{have_service lvmpolld}
|
||||
%{_unitdir}/lvm2-lvmpolld.service
|
||||
%{_unitdir}/lvm2-lvmpolld.socket
|
||||
%endif
|
||||
%else
|
||||
%{_sysconfdir}/rc.d/init.d/blk-availability
|
||||
%{_sysconfdir}/rc.d/init.d/lvm2-monitor
|
||||
%{_sysconfdir}/rc.d/init.d/lvm-monitor
|
||||
%if %{have_service lvmpolld}
|
||||
%{_sysconfdir}/rc.d/init.d/lvm2-lvmpolld
|
||||
%endif
|
||||
|
@@ -545,73 +545,4 @@ grep "$PVID2" "$DF" |tee out
|
||||
grep "$dev2" out
|
||||
not grep "$dev1" out
|
||||
|
||||
vgchange -an $vg1
|
||||
vgchange -an $vg2
|
||||
vgremove -ff $vg1
|
||||
vgremove -ff $vg2
|
||||
|
||||
# devnames change so the new devname now refers to a filtered device,
|
||||
# e.g. an mpath or md component, which is not scanned
|
||||
|
||||
wait_md_create() {
|
||||
local md=$1
|
||||
|
||||
while :; do
|
||||
if ! grep "$(basename $md)" /proc/mdstat; then
|
||||
echo "$md not ready"
|
||||
cat /proc/mdstat
|
||||
sleep 2
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
echo "$md" > WAIT_MD_DEV
|
||||
}
|
||||
|
||||
aux wipefs_a "$dev1"
|
||||
aux wipefs_a "$dev2"
|
||||
aux wipefs_a "$dev3"
|
||||
aux wipefs_a "$dev4"
|
||||
|
||||
mddev="/dev/md33"
|
||||
not grep $mddev /proc/mdstat || skip
|
||||
|
||||
rm "$DF"
|
||||
touch "$DF"
|
||||
vgcreate $vg1 "$dev1" "$dev2"
|
||||
cat "$DF"
|
||||
cp "$DF" "$ORIG"
|
||||
|
||||
# PVID with dashes for matching pvs -o+uuid output
|
||||
OPVID1=`pvs "$dev1" --noheading -o uuid | awk '{print $1}'`
|
||||
OPVID2=`pvs "$dev2" --noheading -o uuid | awk '{print $1}'`
|
||||
|
||||
# PVID without dashes for matching devices file fields
|
||||
PVID1=`pvs "$dev1" --noheading -o uuid | tr -d - | awk '{print $1}'`
|
||||
PVID2=`pvs "$dev2" --noheading -o uuid | tr -d - | awk '{print $1}'`
|
||||
|
||||
mdadm --create --metadata=1.0 "$mddev" --level 1 --raid-devices=2 "$dev3" "$dev4"
|
||||
wait_md_create "$mddev"
|
||||
|
||||
sed -e "s|DEVNAME=$dev1|DEVNAME=$dev3|" "$ORIG" > tmp1.devices
|
||||
sed -e "s|IDNAME=$dev1|IDNAME=$dev3|" tmp1.devices > "$DF"
|
||||
cat "$DF"
|
||||
pvs -o+uuid |tee out
|
||||
grep "$dev1" out
|
||||
grep "$dev2" out
|
||||
grep "$OPVID1" out
|
||||
grep "$OPVID2" out
|
||||
not grep "$dev3" out
|
||||
not grep "$dev4" out
|
||||
|
||||
grep "$dev1" "$DF"
|
||||
grep "$dev2" "$DF"
|
||||
grep "$PVID1" "$DF"
|
||||
grep "$PVID2" "$DF"
|
||||
not grep "$dev3" "$DF"
|
||||
not grep "$dev4" "$DF"
|
||||
|
||||
mdadm --stop "$mddev"
|
||||
aux udev_wait
|
||||
|
||||
vgremove -ff $vg1
|
||||
|
@@ -423,7 +423,7 @@ sed "s/$pvid1/badpvid/" "$DF.orig" |tee $DF
|
||||
not grep $pvid1 $DF
|
||||
grep $did1 $DF
|
||||
|
||||
not lvmdevices --check 2>&1|tee out
|
||||
lvmdevices --check 2>&1|tee out
|
||||
grep $dev1 out
|
||||
grep badpvid out
|
||||
grep $pvid1 out
|
||||
@@ -493,7 +493,7 @@ rm $DF
|
||||
d1=$(basename $dev1)
|
||||
d3=$(basename $dev3)
|
||||
sed "s/$d1/$d3/" "$DF.orig" |tee $DF
|
||||
not lvmdevices --check 2>&1 |tee out
|
||||
lvmdevices --check 2>&1 |tee out
|
||||
grep $dev1 out
|
||||
|
||||
lvmdevices --update
|
||||
@@ -515,7 +515,7 @@ sed "s/$d1/tmp/" "$DF.orig" |tee ${DF}_1
|
||||
sed "s/$d2/$d1/" "${DF}_1" |tee ${DF}_2
|
||||
sed "s/tmp/$d2/" "${DF}_2" |tee $DF
|
||||
rm ${DF}_1 ${DF}_2
|
||||
not lvmdevices --check 2>&1 |tee out
|
||||
lvmdevices --check 2>&1 |tee out
|
||||
grep $dev1 out
|
||||
grep $dev2 out
|
||||
|
||||
@@ -536,7 +536,7 @@ rm $DF
|
||||
d1=$(basename $dev1)
|
||||
d3=$(basename $dev3)
|
||||
sed "s/$d1/$d3/" "$DF.orig" |tee $DF
|
||||
not lvmdevices --check 2>&1 |tee out
|
||||
lvmdevices --check 2>&1 |tee out
|
||||
grep $dev1 out
|
||||
|
||||
pvs -o+uuid,deviceid | grep $vg |tee out
|
||||
|
@@ -57,11 +57,9 @@ dd if="$dev1" of=dev1_backup bs=1M
|
||||
|
||||
# pvcreate and pvremove can be forced even if the PV is marked as used
|
||||
pvremove -ff -y "$dev1"
|
||||
lvmdevices --deldev "$dev1" || true
|
||||
dd if=dev1_backup of="$dev1" bs=1M
|
||||
pvcreate -ff -y "$dev1"
|
||||
dd if=dev1_backup of="$dev1" bs=1M
|
||||
lvmdevices --adddev "$dev1" || true
|
||||
|
||||
# prepare a VG with $dev1 and $dev both having 1 MDA
|
||||
aux enable_dev "$dev2"
|
||||
@@ -118,11 +116,9 @@ dd if="$dev1" of=dev1_backup bs=1M
|
||||
|
||||
# pvcreate and pvremove can be forced even if the PV is marked as used
|
||||
pvremove -ff -y "$dev1"
|
||||
lvmdevices --deldev "$dev1" || true
|
||||
dd if=dev1_backup of="$dev1" bs=1M
|
||||
pvcreate -ff -y "$dev1"
|
||||
dd if=dev1_backup of="$dev1" bs=1M
|
||||
lvmdevices --adddev "$dev1" || true
|
||||
|
||||
# prepare a VG with $dev1 and $dev both having 1 MDA
|
||||
aux enable_dev "$dev2"
|
||||
|
403
test/shell/udev-pvscan-vgchange.sh
Normal file
@@ -0,0 +1,403 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (C) 2021 Red Hat, Inc. All rights reserved.
|
||||
#
|
||||
# This copyrighted material is made available to anyone wishing to use,
|
||||
# modify, copy, or redistribute it subject to the terms and conditions
|
||||
# of the GNU General Public License v.2.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
test_description='udev rule and systemd unit run vgchange'
|
||||
|
||||
SKIP_WITH_LVMPOLLD=1
|
||||
SKIP_WITH_LVMLOCKD=1
|
||||
|
||||
. lib/inittest
|
||||
|
||||
#
|
||||
# $ cat /tmp/devs
|
||||
# /dev/sdb
|
||||
# /dev/sdc
|
||||
# /dev/sdd
|
||||
#
|
||||
# Specify this file as LVM_TEST_DEVICE_LIST=/tmp/devs
|
||||
# when running the test.
|
||||
#
|
||||
# This test will wipe these devices.
|
||||
#
|
||||
|
||||
if [ -z ${LVM_TEST_DEVICE_LIST+x} ]; then echo "LVM_TEST_DEVICE_LIST is unset" && skip; else echo "LVM_TEST_DEVICE_LIST is set to '$LVM_TEST_DEVICE_LIST'"; fi
|
||||
|
||||
test -e "$LVM_TEST_DEVICE_LIST" || skip
|
||||
|
||||
num_devs=$(cat $LVM_TEST_DEVICE_LIST | wc -l)
|
||||
|
||||
RUNDIR="/run"
|
||||
test -d "$RUNDIR" || RUNDIR="/var/run"
|
||||
PVS_ONLINE_DIR="$RUNDIR/lvm/pvs_online"
|
||||
VGS_ONLINE_DIR="$RUNDIR/lvm/vgs_online"
|
||||
PVS_LOOKUP_DIR="$RUNDIR/lvm/pvs_lookup"
|
||||
|
||||
_clear_online_files() {
|
||||
# wait till udev is finished
|
||||
aux udev_wait
|
||||
rm -f "$PVS_ONLINE_DIR"/*
|
||||
rm -f "$VGS_ONLINE_DIR"/*
|
||||
rm -f "$PVS_LOOKUP_DIR"/*
|
||||
}
|
||||
|
||||
test -d "$PVS_ONLINE_DIR" || mkdir -p "$PVS_ONLINE_DIR"
|
||||
test -d "$VGS_ONLINE_DIR" || mkdir -p "$VGS_ONLINE_DIR"
|
||||
test -d "$PVS_LOOKUP_DIR" || mkdir -p "$PVS_LOOKUP_DIR"
|
||||
_clear_online_files
|
||||
|
||||
aux prepare_real_devs
|
||||
|
||||
aux lvmconf 'devices/dir = "/dev"'
|
||||
aux lvmconf 'devices/use_devicesfile = 1'
|
||||
DFDIR="$LVM_SYSTEM_DIR/devices"
|
||||
DF="$DFDIR/system.devices"
|
||||
mkdir $DFDIR || true
|
||||
not ls $DF
|
||||
|
||||
get_real_devs
|
||||
|
||||
wipe_all() {
|
||||
for dev in "${REAL_DEVICES[@]}"; do
|
||||
wipefs -a $dev
|
||||
done
|
||||
}
|
||||
|
||||
# udevadm trigger runs udev rule which runs systemd-run --no-wait vgchange -aay
|
||||
# Because of --no-wait, we need to wait for the transient systemd
|
||||
# service to be gone before checking the effects of the vgchange.
|
||||
|
||||
wait_lvm_activate() {
|
||||
local vgw=$1
|
||||
local wait=0
|
||||
|
||||
while systemctl status lvm-activate-$vgw > /dev/null && test "$wait" -le 30; do
|
||||
sleep .2
|
||||
wait=$(( wait + 1 ))
|
||||
done
|
||||
}
|
||||
|
||||
# Test requires 3 devs
|
||||
test $num_devs -gt 2 || skip
|
||||
BDEV1=$(basename "$dev1")
|
||||
BDEV2=$(basename "$dev2")
|
||||
BDEV3=$(basename "$dev3")
|
||||
|
||||
wipe_all
|
||||
touch $DF
|
||||
for dev in "${REAL_DEVICES[@]}"; do
|
||||
pvcreate $dev
|
||||
done
|
||||
|
||||
# 1 dev, 1 vg, 1 lv
|
||||
|
||||
vgcreate $vg1 "$dev1"
|
||||
lvcreate -l1 -an -n $lv1 $vg1 "$dev1"
|
||||
|
||||
PVID1=$(pvs "$dev1" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
|
||||
_clear_online_files
|
||||
udevadm trigger --settle -c add /sys/block/$BDEV1
|
||||
|
||||
wait_lvm_activate $vg1
|
||||
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID1"
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg1"
|
||||
journalctl -u lvm-activate-$vg1 | tee out || true
|
||||
grep "now active" out
|
||||
check lv_field $vg1/$lv1 lv_active "active"
|
||||
|
||||
vgchange -an $vg1
|
||||
vgremove -y $vg1
|
||||
|
||||
|
||||
# 2 devs, 1 vg, 2 lvs
|
||||
|
||||
vgcreate $vg2 "$dev1" "$dev2"
|
||||
lvcreate -l1 -an -n $lv1 $vg2 "$dev1"
|
||||
lvcreate -l1 -an -n $lv2 $vg2 "$dev2"
|
||||
|
||||
PVID1=$(pvs "$dev1" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
PVID2=$(pvs "$dev2" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
|
||||
_clear_online_files
|
||||
|
||||
udevadm trigger --settle -c add /sys/block/$BDEV1
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID1"
|
||||
not ls "$RUNDIR/lvm/vgs_online/$vg2"
|
||||
journalctl -u lvm-activate-$vg2 | tee out || true
|
||||
not grep "now active" out
|
||||
check lv_field $vg2/$lv1 lv_active ""
|
||||
check lv_field $vg2/$lv2 lv_active ""
|
||||
|
||||
udevadm trigger --settle -c add /sys/block/$BDEV2
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID2"
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg2"
|
||||
|
||||
wait_lvm_activate $vg2
|
||||
|
||||
journalctl -u lvm-activate-$vg2 | tee out || true
|
||||
grep "now active" out
|
||||
check lv_field $vg2/$lv1 lv_active "active"
|
||||
check lv_field $vg2/$lv2 lv_active "active"
|
||||
|
||||
vgchange -an $vg2
|
||||
vgremove -y $vg2
|
||||
|
||||
|
||||
# 3 devs, 1 vg, 4 lvs, concurrent pvscans
|
||||
# (attempting to have the pvscans run concurrently and race
|
||||
# to activate the VG)
|
||||
|
||||
vgcreate $vg3 "$dev1" "$dev2" "$dev3"
|
||||
lvcreate -l1 -an -n $lv1 $vg3 "$dev1"
|
||||
lvcreate -l1 -an -n $lv2 $vg3 "$dev2"
|
||||
lvcreate -l1 -an -n $lv3 $vg3 "$dev3"
|
||||
lvcreate -l8 -an -n $lv4 -i 2 $vg3 "$dev1" "$dev2"
|
||||
|
||||
PVID1=$(pvs "$dev1" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
PVID2=$(pvs "$dev2" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
PVID3=$(pvs "$dev3" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
|
||||
_clear_online_files
|
||||
|
||||
udevadm trigger -c add /sys/block/$BDEV1 &
|
||||
udevadm trigger -c add /sys/block/$BDEV2 &
|
||||
udevadm trigger -c add /sys/block/$BDEV3
|
||||
|
||||
aux udev_wait
|
||||
wait_lvm_activate $vg3
|
||||
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID1"
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID2"
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID3"
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg3"
|
||||
journalctl -u lvm-activate-$vg3 | tee out || true
|
||||
grep "now active" out
|
||||
check lv_field $vg3/$lv1 lv_active "active"
|
||||
check lv_field $vg3/$lv2 lv_active "active"
|
||||
check lv_field $vg3/$lv3 lv_active "active"
|
||||
check lv_field $vg3/$lv4 lv_active "active"
|
||||
|
||||
vgchange -an $vg3
|
||||
vgremove -y $vg3
|
||||
|
||||
|
||||
# 3 devs, 1 vg, 4 lvs, concurrent pvscans, metadata on only 1 PV
|
||||
|
||||
wipe_all
|
||||
rm $DF
|
||||
touch $DF
|
||||
pvcreate --metadatacopies 0 "$dev1"
|
||||
pvcreate --metadatacopies 0 "$dev2"
|
||||
pvcreate "$dev3"
|
||||
|
||||
vgcreate $vg4 "$dev1" "$dev2" "$dev3"
|
||||
lvcreate -l1 -an -n $lv1 $vg4 "$dev1"
|
||||
lvcreate -l1 -an -n $lv2 $vg4 "$dev2"
|
||||
lvcreate -l1 -an -n $lv3 $vg4 "$dev3"
|
||||
lvcreate -l8 -an -n $lv4 -i 2 $vg4 "$dev1" "$dev2"
|
||||
|
||||
PVID1=$(pvs "$dev1" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
PVID2=$(pvs "$dev2" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
PVID3=$(pvs "$dev3" --noheading -o uuid | tr -d - | awk '{print $1}')
|
||||
|
||||
_clear_online_files
|
||||
|
||||
udevadm trigger -c add /sys/block/$BDEV1 &
|
||||
udevadm trigger -c add /sys/block/$BDEV2 &
|
||||
udevadm trigger -c add /sys/block/$BDEV3
|
||||
|
||||
aux udev_wait
|
||||
wait_lvm_activate $vg4
|
||||
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID1"
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID2"
|
||||
ls "$RUNDIR/lvm/pvs_online/$PVID3"
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg4"
|
||||
journalctl -u lvm-activate-$vg4 | tee out || true
|
||||
grep "now active" out
|
||||
check lv_field $vg4/$lv1 lv_active "active"
|
||||
check lv_field $vg4/$lv2 lv_active "active"
|
||||
check lv_field $vg4/$lv3 lv_active "active"
|
||||
check lv_field $vg4/$lv4 lv_active "active"
|
||||
|
||||
vgchange -an $vg4
|
||||
vgremove -y $vg4
|
||||
|
||||
|
||||
# 3 devs, 3 vgs, 2 lvs in each vg, concurrent pvscans
|
||||
|
||||
wipe_all
|
||||
rm $DF
|
||||
touch $DF
|
||||
|
||||
vgcreate $vg5 "$dev1"
|
||||
vgcreate $vg6 "$dev2"
|
||||
vgcreate $vg7 "$dev3"
|
||||
lvcreate -l1 -an -n $lv1 $vg5
|
||||
lvcreate -l1 -an -n $lv2 $vg5
|
||||
lvcreate -l1 -an -n $lv1 $vg6
|
||||
lvcreate -l1 -an -n $lv2 $vg6
|
||||
lvcreate -l1 -an -n $lv1 $vg7
|
||||
lvcreate -l1 -an -n $lv2 $vg7
|
||||
|
||||
_clear_online_files
|
||||
|
||||
udevadm trigger -c add /sys/block/$BDEV1 &
|
||||
udevadm trigger -c add /sys/block/$BDEV2 &
|
||||
udevadm trigger -c add /sys/block/$BDEV3
|
||||
|
||||
aux udev_wait
|
||||
wait_lvm_activate $vg5
|
||||
wait_lvm_activate $vg6
|
||||
wait_lvm_activate $vg7
|
||||
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg5"
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg6"
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg7"
|
||||
journalctl -u lvm-activate-$vg5 | tee out || true
|
||||
grep "now active" out
|
||||
journalctl -u lvm-activate-$vg6 | tee out || true
|
||||
grep "now active" out
|
||||
journalctl -u lvm-activate-$vg7 | tee out || true
|
||||
grep "now active" out
|
||||
check lv_field $vg5/$lv1 lv_active "active"
|
||||
check lv_field $vg5/$lv2 lv_active "active"
|
||||
check lv_field $vg6/$lv1 lv_active "active"
|
||||
check lv_field $vg6/$lv2 lv_active "active"
|
||||
check lv_field $vg7/$lv1 lv_active "active"
|
||||
check lv_field $vg7/$lv2 lv_active "active"
|
||||
|
||||
vgchange -an $vg5
|
||||
vgremove -y $vg5
|
||||
vgchange -an $vg6
|
||||
vgremove -y $vg6
|
||||
vgchange -an $vg7
|
||||
vgremove -y $vg7
|
||||
|
||||
# 3 devs, 1 vg, 1000 LVs
|
||||
|
||||
wipe_all
|
||||
rm $DF
|
||||
touch $DF
|
||||
pvcreate --metadatacopies 0 "$dev1"
|
||||
pvcreate "$dev2"
|
||||
pvcreate "$dev3"
|
||||
vgcreate -s 128K $vg8 "$dev1" "$dev2" "$dev3"
|
||||
|
||||
# Number of LVs to create
|
||||
TEST_DEVS=1000
|
||||
# On low-memory boxes let's not stress too much
|
||||
test "$(aux total_mem)" -gt 524288 || TEST_DEVS=256
|
||||
|
||||
vgcfgbackup -f data $vg8
|
||||
|
||||
# Generate a lot of devices (size of 1 extent)
|
||||
awk -v TEST_DEVS=$TEST_DEVS '/^\t\}/ {
|
||||
printf("\t}\n\tlogical_volumes {\n");
|
||||
cnt=0;
|
||||
for (i = 0; i < TEST_DEVS; i++) {
|
||||
printf("\t\tlvol%06d {\n", i);
|
||||
printf("\t\t\tid = \"%06d-1111-2222-3333-2222-1111-%06d\"\n", i, i);
|
||||
print "\t\t\tstatus = [\"READ\", \"WRITE\", \"VISIBLE\"]";
|
||||
print "\t\t\tsegment_count = 1";
|
||||
print "\t\t\tsegment1 {";
|
||||
print "\t\t\t\tstart_extent = 0";
|
||||
print "\t\t\t\textent_count = 1";
|
||||
print "\t\t\t\ttype = \"striped\"";
|
||||
print "\t\t\t\tstripe_count = 1";
|
||||
print "\t\t\t\tstripes = [";
|
||||
print "\t\t\t\t\t\"pv0\", " cnt++;
|
||||
printf("\t\t\t\t]\n\t\t\t}\n\t\t}\n");
|
||||
}
|
||||
}
|
||||
{print}
|
||||
' data >data_new
|
||||
|
||||
vgcfgrestore -f data_new $vg8
|
||||
|
||||
_clear_online_files
|
||||
|
||||
udevadm trigger -c add /sys/block/$BDEV1 &
|
||||
udevadm trigger -c add /sys/block/$BDEV2 &
|
||||
udevadm trigger -c add /sys/block/$BDEV3
|
||||
|
||||
aux udev_wait
|
||||
wait_lvm_activate $vg8
|
||||
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg8"
|
||||
journalctl -u lvm-activate-$vg8 | tee out || true
|
||||
grep "now active" out
|
||||
|
||||
num_active=$(lvs $vg8 --noheading -o active | grep active | wc -l)
|
||||
|
||||
test $num_active -eq $TEST_DEVS
|
||||
|
||||
vgchange -an $vg8
|
||||
vgremove -y $vg8
|
||||
|
||||
# 1 pv on an md dev, 1 vg
|
||||
|
||||
wait_md_create() {
|
||||
local md=$1
|
||||
|
||||
while :; do
|
||||
if ! grep "$(basename $md)" /proc/mdstat; then
|
||||
echo "$md not ready"
|
||||
cat /proc/mdstat
|
||||
sleep 2
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
echo "$md" > WAIT_MD_DEV
|
||||
}
|
||||
|
||||
test -f /proc/mdstat && grep -q raid1 /proc/mdstat || \
|
||||
modprobe raid1 || skip
|
||||
|
||||
mddev="/dev/md33"
|
||||
not grep $mddev /proc/mdstat || skip
|
||||
|
||||
wipe_all
|
||||
rm $DF
|
||||
touch $DF
|
||||
|
||||
mdadm --create --metadata=1.0 "$mddev" --level 1 --chunk=64 --raid-devices=2 "$dev1" "$dev2"
|
||||
wait_md_create "$mddev"
|
||||
vgcreate $vg9 "$mddev"
|
||||
|
||||
PVIDMD=`pvs $mddev --noheading -o uuid | tr -d - | awk '{print $1}'`
|
||||
BDEVMD=$(basename "$mddev")
|
||||
|
||||
lvcreate -l1 -an -n $lv1 $vg9
|
||||
lvcreate -l1 -an -n $lv2 $vg9
|
||||
|
||||
_clear_online_files
|
||||
|
||||
udevadm trigger --settle -c add /sys/block/$BDEVMD
|
||||
|
||||
wait_lvm_activate $vg9
|
||||
|
||||
ls "$RUNDIR/lvm/vgs_online/$vg9"
|
||||
journalctl -u lvm-activate-$vg9 | tee out || true
|
||||
grep "now active" out
|
||||
check lv_field $vg9/$lv1 lv_active "active"
|
||||
check lv_field $vg9/$lv2 lv_active "active"
|
||||
|
||||
vgchange -an $vg9
|
||||
vgremove -y $vg9
|
||||
|
||||
mdadm --stop "$mddev"
|
||||
aux udev_wait
|
||||
wipe_all
|
||||
|
@@ -86,7 +86,7 @@ static void *_fix_init(struct io_engine *engine)
|
||||
}
|
||||
|
||||
if (!_runs_is_tmpfs) {
|
||||
(void) close(f->fd);
|
||||
close(f->fd);
|
||||
// reopen with O_DIRECT
|
||||
f->fd = open(f->fname, O_RDWR | O_DIRECT);
|
||||
T_ASSERT(f->fd >= 0);
|
||||
|
@@ -153,8 +153,7 @@ arg(cachesize_ARG, '\0', "cachesize", sizemb_VAL, 0, 0,
|
||||
"The size of cache to use.\n")
|
||||
|
||||
arg(check_ARG, '\0', "check", 0, 0, 0,
|
||||
"Checks the content of the devices file.\n"
|
||||
"Reports incorrect device names or PVIDs for entries.\n")
|
||||
"Check the content of the devices file.\n")
|
||||
|
||||
arg(commandprofile_ARG, '\0', "commandprofile", string_VAL, 0, 0,
|
||||
"The command profile to use for command configuration.\n"
|
||||
@@ -279,6 +278,11 @@ arg(errorwhenfull_ARG, '\0', "errorwhenfull", bool_VAL, 0, 0,
|
||||
"(Also see dm-thin-pool kernel module option no_space_timeout.)\n"
|
||||
"See \\fBlvmthin\\fP(7) for more information.\n")
|
||||
|
||||
arg(eventactivation_ARG, '\0', "eventactivation", string_VAL, 0, 0,
|
||||
"Specify if the command is running autoactivation from an event\n"
|
||||
"or a fixed service. The lvm.conf event_activation_options setting\n"
|
||||
"determines if event or service based activation commands are used.\n")
|
||||
|
||||
arg(force_long_ARG, '\0', "force", 0, ARG_COUNTABLE, 0,
|
||||
"Force metadata restore even with thin pool LVs.\n"
|
||||
"Use with extreme caution. Most changes to thin metadata\n"
|
||||
|
@@ -1359,10 +1359,10 @@ OO: --aligned, --all, --binary, --colon, --columns,
|
||||
--configreport ConfigReport, --foreign, --history, --ignorelockingfailure,
|
||||
--logonly, --maps, --noheadings,
|
||||
--nosuffix, --options String, --sort String, --readonly,
|
||||
--segments, --select String, --separator String,
|
||||
--reportformat ReportFmt, --segments, --select String, --separator String,
|
||||
--shared, --unbuffered, --units Units
|
||||
OP: VG|LV|Tag ...
|
||||
IO: --partial, --ignoreskippedcluster, --reportformat ReportFmt
|
||||
IO: --partial, --ignoreskippedcluster
|
||||
ID: lvdisplay_general
|
||||
|
||||
---
|
||||
@@ -1590,10 +1590,10 @@ pvdisplay
|
||||
OO: --aligned, --all, --binary, --colon, --columns, --configreport ConfigReport,
|
||||
--foreign, --ignorelockingfailure,
|
||||
--logonly, --maps, --noheadings, --nosuffix, --options String,
|
||||
--readonly, --select String, --separator String, --shared,
|
||||
--readonly, --reportformat ReportFmt, --select String, --separator String, --shared,
|
||||
--short, --sort String, --unbuffered, --units Units
|
||||
OP: PV|Tag ...
|
||||
IO: --ignoreskippedcluster, --reportformat ReportFmt
|
||||
IO: --ignoreskippedcluster
|
||||
ID: pvdisplay_general
|
||||
|
||||
---
|
||||
@@ -1642,14 +1642,15 @@ DESC: Record that a PV is online or offline.
|
||||
|
||||
pvscan --cache_long --activate ay
|
||||
OO: --ignorelockingfailure, --reportformat ReportFmt,
|
||||
--major Number, --minor Number, --noudevsync
|
||||
--major Number, --minor Number, --noudevsync, --eventactivation String
|
||||
OP: PV|String ...
|
||||
IO: --background
|
||||
ID: pvscan_cache
|
||||
DESC: Record that a PV is online and autoactivate the VG if complete.
|
||||
|
||||
pvscan --cache_long --listvg PV
|
||||
OO: --ignorelockingfailure, --checkcomplete, --vgonline, --udevoutput
|
||||
OO: --ignorelockingfailure, --checkcomplete, --vgonline, --udevoutput,
|
||||
--eventactivation String
|
||||
ID: pvscan_cache
|
||||
DESC: Record that a PV is online and list the VG using the PV.
|
||||
|
||||
@@ -1747,7 +1748,8 @@ DESC: Start or stop processing LV conversions.
|
||||
|
||||
vgchange --activate Active
|
||||
OO: --activationmode ActivationMode, --ignoreactivationskip, --partial, --sysinit,
|
||||
--readonly, --ignorelockingfailure, --monitor Bool, --poll Bool, OO_VGCHANGE
|
||||
--readonly, --ignorelockingfailure, --monitor Bool, --poll Bool,
|
||||
--vgonline, --eventactivation String, OO_VGCHANGE
|
||||
OP: VG|Tag|Select ...
|
||||
IO: --ignoreskippedcluster
|
||||
ID: vgchange_activate
|
||||
@@ -1809,10 +1811,10 @@ vgdisplay
|
||||
OO: --activevolumegroups, --aligned, --binary, --colon, --columns,
|
||||
--configreport ConfigReport, --foreign, --ignorelockingfailure,
|
||||
--logonly, --noheadings, --nosuffix,
|
||||
--options String, --readonly, --select String,
|
||||
--options String, --readonly, --reportformat ReportFmt, --select String,
|
||||
--shared, --short, --separator String, --sort String, --unbuffered, --units Units
|
||||
OP: VG|Tag ...
|
||||
IO: --partial, --ignoreskippedcluster, --reportformat ReportFmt
|
||||
IO: --partial, --ignoreskippedcluster
|
||||
ID: vgdisplay_general
|
||||
|
||||
---
|
||||
|
@@ -824,16 +824,12 @@ static int _lvcreate_params(struct cmd_context *cmd,
|
||||
autobackup_ARG,\
|
||||
available_ARG,\
|
||||
contiguous_ARG,\
|
||||
devices_ARG,\
|
||||
devicesfile_ARG,\
|
||||
ignoreactivationskip_ARG,\
|
||||
ignoremonitoring_ARG,\
|
||||
journal_ARG,\
|
||||
metadataprofile_ARG,\
|
||||
monitor_ARG,\
|
||||
mirrors_ARG,\
|
||||
name_ARG,\
|
||||
nohints_ARG,\
|
||||
noudevsync_ARG,\
|
||||
permission_ARG,\
|
||||
persistent_ARG,\
|
||||
|
@@ -2540,6 +2540,8 @@ static int _get_current_settings(struct cmd_context *cmd)
|
||||
if (!strcmp(hint_mode, "none")) {
|
||||
cmd->enable_hints = 0;
|
||||
cmd->use_hints = 0;
|
||||
} else if (!strcmp(hint_mode, "pvs_online")) {
|
||||
cmd->hints_pvs_online = 1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3058,7 +3060,6 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
|
||||
int i;
|
||||
int skip_hyphens;
|
||||
int refresh_done = 0;
|
||||
int io;
|
||||
|
||||
/* Avoid excessive access to /etc/localtime and set TZ variable for glibc
|
||||
* so it does not need to check /etc/localtime everytime that needs that info */
|
||||
@@ -3141,20 +3142,6 @@ int lvm_run_command(struct cmd_context *cmd, int argc, char **argv)
|
||||
if (!(cmd->command = _find_command(cmd, cmd->name, &argc, argv)))
|
||||
return EINVALID_CMD_LINE;
|
||||
|
||||
/*
|
||||
* If option --foo is set which is listed in IO (ignore option) in
|
||||
* command-lines.in, then unset foo. Commands won't usually use an
|
||||
* ignored option, but there can be shared code that checks for --foo,
|
||||
* and should not find it to be set.
|
||||
*/
|
||||
for (io = 0; io < cmd->command->io_count; io++) {
|
||||
int opt = cmd->command->ignore_opt_args[io].opt;
|
||||
if (arg_is_set(cmd, opt)) {
|
||||
log_debug("Ignore opt %d", opt);
|
||||
cmd->opt_arg_values[opt].count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Remaining position args after command name and --options are removed.
|
||||
*/
|
||||
|
@@ -128,6 +128,7 @@ int lvmdevices(struct cmd_context *cmd, int argc, char **argv)
|
||||
struct device *dev;
|
||||
struct dev_use *du, *du2;
|
||||
const char *deviceidtype;
|
||||
int changes = 0;
|
||||
|
||||
dm_list_init(&search_pvids);
|
||||
dm_list_init(&found_devs);
|
||||
@@ -175,19 +176,13 @@ int lvmdevices(struct cmd_context *cmd, int argc, char **argv)
|
||||
log_error("Failed to read the devices file.");
|
||||
return ECMD_FAILED;
|
||||
}
|
||||
|
||||
prepare_open_file_limit(cmd, dm_list_size(&cmd->use_devices));
|
||||
|
||||
dev_cache_scan(cmd);
|
||||
device_ids_match(cmd);
|
||||
|
||||
if (arg_is_set(cmd, check_ARG) || arg_is_set(cmd, update_ARG)) {
|
||||
int search_count = 0;
|
||||
int update_needed = 0;
|
||||
int invalid = 0;
|
||||
|
||||
unlink_searched_devnames(cmd);
|
||||
|
||||
label_scan_setup_bcache();
|
||||
|
||||
dm_list_iterate_items(du, &cmd->use_devices) {
|
||||
@@ -227,8 +222,6 @@ int lvmdevices(struct cmd_context *cmd, int argc, char **argv)
|
||||
* run just above.
|
||||
*/
|
||||
device_ids_validate(cmd, NULL, &invalid, 1);
|
||||
if (invalid)
|
||||
update_needed = 1;
|
||||
|
||||
/*
|
||||
* Find and fix any devname entries that have moved to a
|
||||
@@ -244,24 +237,33 @@ int lvmdevices(struct cmd_context *cmd, int argc, char **argv)
|
||||
label_scan_invalidate(du->dev);
|
||||
}
|
||||
|
||||
/*
|
||||
* check du->part
|
||||
*/
|
||||
dm_list_iterate_items(du, &cmd->use_devices) {
|
||||
int part = 0;
|
||||
if (!du->dev)
|
||||
continue;
|
||||
dev = du->dev;
|
||||
|
||||
dev_get_partition_number(dev, &part);
|
||||
|
||||
if (part != du->part) {
|
||||
log_warn("Device %s partition %u has incorrect PART in devices file (%u)",
|
||||
dev_name(dev), part, du->part);
|
||||
du->part = part;
|
||||
changes++;
|
||||
}
|
||||
}
|
||||
|
||||
if (arg_is_set(cmd, update_ARG)) {
|
||||
if (update_needed || !dm_list_empty(&found_devs)) {
|
||||
if (invalid || !dm_list_empty(&found_devs)) {
|
||||
if (!device_ids_write(cmd))
|
||||
goto_bad;
|
||||
log_print("Updated devices file to version %s", devices_file_version());
|
||||
} else {
|
||||
log_print("No update for devices file is needed.");
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* --check exits with an error if the devices file
|
||||
* needs updates, i.e. running --update would make
|
||||
* changes.
|
||||
*/
|
||||
if (update_needed) {
|
||||
log_error("Updates needed for devices file.");
|
||||
goto bad;
|
||||
}
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
199
tools/pvscan.c
@@ -21,6 +21,8 @@
|
||||
|
||||
#include <dirent.h>
|
||||
|
||||
int online_pvid_file_read(char *path, int *major, int *minor, char *vgname);
|
||||
|
||||
struct pvscan_params {
|
||||
int new_pvs_found;
|
||||
int pvs_found;
|
||||
@@ -46,6 +48,8 @@ static const char *_pvs_online_dir = DEFAULT_RUN_DIR "/pvs_online";
|
||||
static const char *_vgs_online_dir = DEFAULT_RUN_DIR "/vgs_online";
|
||||
static const char *_pvs_lookup_dir = DEFAULT_RUN_DIR "/pvs_lookup";
|
||||
|
||||
static const char *_event_activation_file = DEFAULT_RUN_DIR "/event-activation-on";
|
||||
|
||||
static int _pvscan_display_pv(struct cmd_context *cmd,
|
||||
struct physical_volume *pv,
|
||||
struct pvscan_params *params)
|
||||
@@ -179,6 +183,84 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Event based activation
|
||||
* lvm.conf event_activation_options = "event_only"
|
||||
* . all events are used for activation
|
||||
* . no fixed services are used for activation
|
||||
* . lvm.conf event_activation=1 required
|
||||
*
|
||||
* vgchange -aay --eventactivation service
|
||||
* . does nothing
|
||||
* vgchange -aay --eventactivation event
|
||||
* . does activation
|
||||
* pvscan --eventactivation event
|
||||
* . does activation
|
||||
*
|
||||
* ---
|
||||
*
|
||||
* Non-event based activation
|
||||
* lvm.conf event_activation_options = "service_only"
|
||||
* . fixed services are used for activation
|
||||
* . no events are used for activation
|
||||
* . lvm.conf event_activation=0 is equivalent to
|
||||
* event_activation=1 event_activation_options="service_only"
|
||||
*
|
||||
* vgchange -aay --eventactivation service
|
||||
* . does activation
|
||||
* vgchange -aay --eventactivation event
|
||||
* . does nothing
|
||||
* pvscan --eventactivation event
|
||||
* . does nothing
|
||||
*
|
||||
* ---
|
||||
*
|
||||
* Mix of event and non-event based activation
|
||||
* lvm.conf event_activation_options = "service_to_event"
|
||||
* . both services and events are used for activation
|
||||
* . fixed services are used for activation initially,
|
||||
* and last service enables event based activation
|
||||
* by creating the event-activation-on file
|
||||
*
|
||||
* vgchange -aay --eventactivation service
|
||||
* . does activation only if event-activation-on does not exist
|
||||
* vgchange -aay --eventactivation event
|
||||
* . does activation only if event-activation-on exists
|
||||
* vgchange -aay --eventactivation service,on
|
||||
* . does activation only if event-activation-on does not exist
|
||||
* . creates event-activation-on to enable event-based activation
|
||||
* vgchange --eventactivation on|off
|
||||
* . create or remove event-activation-on to enable|disable
|
||||
* event-based activation
|
||||
* pvscan --eventactivation event
|
||||
* . does activation only if event-activation-on exists
|
||||
*
|
||||
*/
|
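As a concrete illustration of the three modes described above, a minimal sketch (editor's addition, not part of the patch; the device path /dev/sdb and VG name vg0 are hypothetical):

```
# event_only: lvm.conf event_activation=1, event_activation_options=["event_only"]
pvscan --cache -aay --eventactivation event /dev/sdb    # does activation
vgchange -aay --eventactivation service vg0             # does nothing

# service_only: event_activation_options=["service_only"]
# (lvm.conf event_activation=0 behaves the same way)
vgchange -aay --eventactivation service vg0             # does activation
pvscan --cache -aay --eventactivation event /dev/sdb    # does nothing

# service_to_event: fixed services activate first; the last service enables
# event-based activation by creating /run/lvm/event-activation-on
vgchange -aay --eventactivation service,on vg0          # activates, creates event-activation-on
pvscan --cache -aay --eventactivation event /dev/sdb    # activates once event-activation-on exists
```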
||||
|
||||
int event_activation_enable(struct cmd_context *cmd)
|
||||
{
|
||||
FILE *fp;
|
||||
|
||||
if (!(fp = fopen(_event_activation_file, "w")))
|
||||
return_0;
|
||||
if (fclose(fp))
|
||||
stack;
|
||||
return 1;
|
||||
}
|
||||
|
||||
int event_activation_is_on(struct cmd_context *cmd)
|
||||
{
|
||||
struct stat buf;
|
||||
|
||||
if (!stat(_event_activation_file, &buf))
|
||||
return 1;
|
||||
|
||||
if (errno != ENOENT)
|
||||
log_debug("event_activation_is_on errno %d", errno);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Avoid a duplicate pvscan[%d] prefix when logging to the journal.
|
||||
* FIXME: this should probably replace if (udevoutput) with
|
||||
@@ -225,7 +307,7 @@ static char *_vgname_in_pvid_file_buf(char *buf)
|
||||
|
||||
#define MAX_PVID_FILE_SIZE 512
|
||||
|
||||
static int _online_pvid_file_read(char *path, int *major, int *minor, char *vgname)
|
||||
int online_pvid_file_read(char *path, int *major, int *minor, char *vgname)
|
||||
{
|
||||
char buf[MAX_PVID_FILE_SIZE] = { 0 };
|
||||
char *name;
|
||||
@@ -328,7 +410,7 @@ static void _online_pvid_file_remove_devno(int major, int minor)
|
||||
file_minor = 0;
|
||||
memset(file_vgname, 0, sizeof(file_vgname));
|
||||
|
||||
_online_pvid_file_read(path, &file_major, &file_minor, file_vgname);
|
||||
online_pvid_file_read(path, &file_major, &file_minor, file_vgname);
|
||||
|
||||
if ((file_major == major) && (file_minor == minor)) {
|
||||
log_debug("Unlink pv online %s", path);
|
||||
@@ -367,7 +449,7 @@ static void _online_files_remove(const char *dirpath)
|
||||
log_sys_debug("closedir", dirpath);
|
||||
}
|
||||
|
||||
static int _online_pvid_file_create(struct cmd_context *cmd, struct device *dev, const char *vgname)
|
||||
int online_pvid_file_create(struct cmd_context *cmd, struct device *dev, const char *vgname, int ignore_existing, int *exists)
|
||||
{
|
||||
char path[PATH_MAX];
|
||||
char buf[MAX_PVID_FILE_SIZE] = { 0 };
|
||||
@@ -407,8 +489,13 @@ static int _online_pvid_file_create(struct cmd_context *cmd, struct device *dev,
|
||||
|
||||
fd = open(path, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
|
||||
if (fd < 0) {
|
||||
if (errno == EEXIST)
|
||||
if (errno == EEXIST) {
|
||||
if (exists)
|
||||
*exists = 1;
|
||||
if (ignore_existing)
|
||||
return 1;
|
||||
goto check_duplicate;
|
||||
}
|
||||
log_error_pvscan(cmd, "Failed to create online file for %s path %s error %d", dev_name(dev), path, errno);
|
||||
return 0;
|
||||
}
|
||||
@@ -427,7 +514,6 @@ static int _online_pvid_file_create(struct cmd_context *cmd, struct device *dev,
|
||||
}
|
||||
|
||||
/* We don't care about syncing, these files are not even persistent. */
|
||||
|
||||
if (close(fd))
|
||||
log_sys_debug("close", path);
|
||||
|
||||
@@ -447,7 +533,7 @@ check_duplicate:
|
||||
|
||||
memset(file_vgname, 0, sizeof(file_vgname));
|
||||
|
||||
_online_pvid_file_read(path, &file_major, &file_minor, file_vgname);
|
||||
online_pvid_file_read(path, &file_major, &file_minor, file_vgname);
|
||||
|
||||
if ((file_major == major) && (file_minor == minor)) {
|
||||
log_debug("Existing online file for %d:%d", major, minor);
|
||||
@@ -660,7 +746,7 @@ static int _count_pvid_files_from_lookup_file(struct cmd_context *cmd, struct de
|
||||
return (vgname) ? 1 : 0;
|
||||
}
|
||||
|
||||
static void _online_dir_setup(struct cmd_context *cmd)
|
||||
void online_dir_setup(struct cmd_context *cmd)
|
||||
{
|
||||
struct stat st;
|
||||
int rv;
|
||||
@@ -748,7 +834,7 @@ static int _pvscan_aa_single(struct cmd_context *cmd, const char *vg_name,
|
||||
|
||||
log_debug("pvscan autoactivating VG %s.", vg_name);
|
||||
|
||||
if (!vgchange_activate(cmd, vg, CHANGE_AAY)) {
|
||||
if (!vgchange_activate(cmd, vg, CHANGE_AAY, 1)) {
|
||||
log_error_pvscan(cmd, "%s: autoactivation failed.", vg->name);
|
||||
pp->activate_errors++;
|
||||
}
|
||||
@@ -756,7 +842,7 @@ static int _pvscan_aa_single(struct cmd_context *cmd, const char *vg_name,
|
||||
return ECMD_PROCESSED;
|
||||
}
|
||||
|
||||
static int _online_vg_file_create(struct cmd_context *cmd, const char *vgname)
|
||||
int online_vg_file_create(struct cmd_context *cmd, const char *vgname)
|
||||
{
|
||||
char path[PATH_MAX];
|
||||
int fd;
|
||||
@@ -847,7 +933,7 @@ static int _get_devs_from_saved_vg(struct cmd_context *cmd, const char *vgname,
|
||||
file_minor = 0;
|
||||
memset(file_vgname, 0, sizeof(file_vgname));
|
||||
|
||||
_online_pvid_file_read(path, &file_major, &file_minor, file_vgname);
|
||||
online_pvid_file_read(path, &file_major, &file_minor, file_vgname);
|
||||
|
||||
if (file_vgname[0] && strcmp(vgname, file_vgname)) {
|
||||
log_error_pvscan(cmd, "Wrong VG found for %d:%d PVID %s: %s vs %s",
|
||||
@@ -857,11 +943,21 @@ static int _get_devs_from_saved_vg(struct cmd_context *cmd, const char *vgname,
|
||||
|
||||
devno = MKDEV(file_major, file_minor);
|
||||
|
||||
if (!setup_devno_in_dev_cache(cmd, devno)) {
|
||||
log_error_pvscan(cmd, "No device set up for %d:%d PVID %s", file_major, file_minor, pvid);
|
||||
goto bad;
|
||||
}
|
||||
|
||||
if (!(dev = dev_cache_get_by_devt(cmd, devno, NULL, NULL))) {
|
||||
log_error_pvscan(cmd, "No device found for %d:%d PVID %s", file_major, file_minor, pvid);
|
||||
goto bad;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do not need to match device_id here, see comment after
|
||||
* get_devs_from_saved_vg about relying on pvid online file.
|
||||
*/
|
||||
|
||||
name1 = dev_name(dev);
|
||||
name2 = pvl->pv->device_hint;
|
||||
|
||||
@@ -1026,7 +1122,7 @@ static int _pvscan_aa_quick(struct cmd_context *cmd, struct pvscan_aa_params *pp
|
||||
|
||||
log_debug("pvscan autoactivating VG %s.", vgname);
|
||||
|
||||
if (!vgchange_activate(cmd, vg, CHANGE_AAY)) {
|
||||
if (!vgchange_activate(cmd, vg, CHANGE_AAY, 1)) {
|
||||
log_error_pvscan(cmd, "%s: autoactivation failed.", vg->name);
|
||||
pp->activate_errors++;
|
||||
}
|
||||
@@ -1060,7 +1156,7 @@ static int _pvscan_aa(struct cmd_context *cmd, struct pvscan_aa_params *pp,
|
||||
* to run the activation. The first to create the file will do it.
|
||||
*/
|
||||
dm_list_iterate_items_safe(sl, sl2, vgnames) {
|
||||
if (!_online_vg_file_create(cmd, sl->str)) {
|
||||
if (!online_vg_file_create(cmd, sl->str)) {
|
||||
log_print_pvscan(cmd, "VG %s skip autoactivation.", sl->str);
|
||||
str_list_del(vgnames, sl->str);
|
||||
continue;
|
||||
@@ -1192,11 +1288,15 @@ static int _get_args_devs(struct cmd_context *cmd, struct dm_list *pvscan_args,
|
||||
/* in common usage, no dev will be found for a devno */
|
||||
|
||||
dm_list_iterate_items(arg, pvscan_args) {
|
||||
if (arg->devname)
|
||||
if (arg->devname) {
|
||||
if (!setup_devname_in_dev_cache(cmd, arg->devname))
|
||||
log_error_pvscan(cmd, "No device set up for name arg %s", arg->devname);
|
||||
arg->dev = dev_cache_get(cmd, arg->devname, NULL);
|
||||
else if (arg->devno)
|
||||
} else if (arg->devno) {
|
||||
if (!setup_devno_in_dev_cache(cmd, arg->devno))
|
||||
log_error_pvscan(cmd, "No device set up for devno arg %d", (int)arg->devno);
|
||||
arg->dev = dev_cache_get_by_devt(cmd, arg->devno, NULL, NULL);
|
||||
else
|
||||
} else
|
||||
return_0;
|
||||
}
|
||||
|
||||
@@ -1246,7 +1346,7 @@ static void _set_pv_devices_online(struct cmd_context *cmd, struct volume_group
|
||||
minor = 0;
|
||||
file_vgname[0] = '\0';
|
||||
|
||||
_online_pvid_file_read(path, &major, &minor, file_vgname);
|
||||
online_pvid_file_read(path, &major, &minor, file_vgname);
|
||||
|
||||
if (file_vgname[0] && strcmp(vg->name, file_vgname)) {
|
||||
log_warn("WARNING: VG %s PV %s wrong vgname in online file %s",
|
||||
@@ -1354,7 +1454,7 @@ static int _online_devs(struct cmd_context *cmd, int do_all, struct dm_list *pvs
|
||||
devsize = dev->size;
|
||||
if (!devsize &&
|
||||
!dev_get_size(dev, &devsize)) {
|
||||
log_print_pvscan(cmd, "PV %s missing device size.", dev_name(dev));
|
||||
log_print("pvscan[%d] PV %s can get device size.", getpid(), dev_name(dev));
|
||||
release_vg(vg);
|
||||
continue;
|
||||
}
|
||||
@@ -1412,7 +1512,7 @@ static int _online_devs(struct cmd_context *cmd, int do_all, struct dm_list *pvs
|
||||
* Create file named for pvid to record this PV is online.
|
||||
* The command creates/checks online files only when --cache is used.
|
||||
*/
|
||||
if (do_cache && !_online_pvid_file_create(cmd, dev, vg ? vg->name : NULL)) {
|
||||
if (do_cache && !online_pvid_file_create(cmd, dev, vg ? vg->name : NULL, 0, NULL)) {
|
||||
log_error_pvscan(cmd, "PV %s failed to create online file.", dev_name(dev));
|
||||
release_vg(vg);
|
||||
ret = 0;
|
||||
@@ -1497,7 +1597,7 @@ static int _online_devs(struct cmd_context *cmd, int do_all, struct dm_list *pvs
|
||||
} else if (!do_check_complete) {
|
||||
log_print("VG %s", vgname);
|
||||
} else if (vg_complete) {
|
||||
if (do_vgonline && !_online_vg_file_create(cmd, vgname)) {
|
||||
if (do_vgonline && !online_vg_file_create(cmd, vgname)) {
|
||||
log_print("VG %s finished", vgname);
|
||||
} else {
|
||||
/*
|
||||
@@ -1672,11 +1772,13 @@ static int _pvscan_cache_args(struct cmd_context *cmd, int argc, char **argv,
|
||||
cmd->pvscan_cache_single = 1;
|
||||
|
||||
/*
|
||||
* "no_file_match" means that when the devices file is used,
|
||||
* setup_devices will skip matching devs to devices file entries.
|
||||
* Specific devs must be matched later with device_ids_match_dev().
|
||||
* Special pvscan-specific setup steps to avoid looking
|
||||
* at any devices except for device args.
|
||||
* Read devices file and determine if devices file will be used.
|
||||
* Does not do dev_cache_scan (adds nothing to dev-cache), and
|
||||
* does not do any device id matching.
|
||||
*/
|
||||
if (!setup_devices_no_file_match(cmd)) {
|
||||
if (!setup_devices_for_pvscan_cache(cmd)) {
|
||||
log_error_pvscan(cmd, "Failed to set up devices.");
|
||||
return 0;
|
||||
}
|
||||
@@ -1735,17 +1837,21 @@ static int _pvscan_cache_args(struct cmd_context *cmd, int argc, char **argv,
|
||||
log_debug("pvscan_cache_args: filter devs nodata");
|
||||
|
||||
/*
|
||||
* Match dev args with the devices file because
|
||||
* setup_devices_no_file_match() was used above which skipped checking
|
||||
* the devices file. If a match fails here do not exclude it, that
|
||||
* will be done below by passes_filter() which runs filter-deviceid.
|
||||
* The relax_deviceid_filter case needs to be able to work around
|
||||
* Match dev args with the devices file because special/optimized
|
||||
* device setup was used above which does not check the devices file.
|
||||
* If a match fails here do not exclude it, that will be done below by
|
||||
* passes_filter() which runs filter-deviceid. The
|
||||
* relax_deviceid_filter case needs to be able to work around
|
||||
* unmatching devs.
|
||||
*/
|
||||
|
||||
if (cmd->enable_devices_file) {
|
||||
dm_list_iterate_items_safe(devl, devl2, &pvscan_devs)
|
||||
dm_list_iterate_items(devl, &pvscan_devs)
|
||||
device_ids_match_dev(cmd, devl->dev);
|
||||
|
||||
}
|
||||
if (cmd->enable_devices_list)
|
||||
device_ids_match_device_list(cmd);
|
||||
|
||||
if (cmd->enable_devices_file && device_ids_use_devname(cmd)) {
|
||||
relax_deviceid_filter = 1;
|
||||
@@ -1786,7 +1892,7 @@ static int _pvscan_cache_args(struct cmd_context *cmd, int argc, char **argv,
|
||||
int has_pvid;
|
||||
|
||||
if (!label_read_pvid(devl->dev, &has_pvid)) {
|
||||
log_print_pvscan(cmd, "%s cannot read label.", dev_name(devl->dev));
|
||||
log_print("pvscan[%d] %s cannot read.", getpid(), dev_name(devl->dev));
|
||||
dm_list_del(&devl->list);
|
||||
continue;
|
||||
}
|
||||
@@ -1857,6 +1963,7 @@ int pvscan_cache_cmd(struct cmd_context *cmd, int argc, char **argv)
|
||||
{
|
||||
struct pvscan_aa_params pp = { 0 };
|
||||
struct dm_list complete_vgnames;
|
||||
const char *ea;
|
||||
int do_activate = arg_is_set(cmd, activate_ARG);
|
||||
int event_activation;
|
||||
int devno_args = 0;
|
||||
@@ -1919,7 +2026,7 @@ int pvscan_cache_cmd(struct cmd_context *cmd, int argc, char **argv)
|
||||
|
||||
do_all = !argc && !devno_args;
|
||||
|
||||
_online_dir_setup(cmd);
|
||||
online_dir_setup(cmd);
|
||||
|
||||
if (do_all) {
|
||||
if (!_pvscan_cache_all(cmd, argc, argv, &complete_vgnames))
|
||||
@@ -1930,6 +2037,36 @@ int pvscan_cache_cmd(struct cmd_context *cmd, int argc, char **argv)
|
||||
log_verbose("Ignoring pvscan --cache because event_activation is disabled.");
|
||||
return ECMD_PROCESSED;
|
||||
}
|
||||
|
||||
|
||||
if ((ea = arg_str_value(cmd, eventactivation_ARG, NULL))) {
|
||||
int service_only = 0, event_only = 0, service_to_event = 0;
|
||||
int ea_service = 0, ea_event = 0, ea_on = 0;
|
||||
|
||||
if (!get_event_activation_config_settings(cmd, &service_only, &event_only, &service_to_event))
|
||||
return ECMD_FAILED;
|
||||
if (!get_event_activation_command_options(cmd, ea, &ea_service, &ea_event, &ea_on))
|
||||
return ECMD_FAILED;
|
||||
|
||||
if (ea_event) {
|
||||
if (!event_activation) {
|
||||
log_print("Skip pvscan for event and event_activation=0.");
|
||||
return ECMD_PROCESSED;
|
||||
}
|
||||
if (service_only) {
|
||||
log_print("Skip pvscan for event and event_activation_options service_only.");
|
||||
return ECMD_PROCESSED;
|
||||
}
|
||||
if (service_to_event && !event_activation_is_on(cmd)) {
|
||||
log_print("Skip pvscan for event and no event-activation-on.");
|
||||
return ECMD_PROCESSED;
|
||||
}
|
||||
} else {
|
||||
log_error("Option --eventactivation %s is not used by pvscan.", ea);
|
||||
return ECMD_FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
if (!_pvscan_cache_args(cmd, argc, argv, &complete_vgnames))
|
||||
return ECMD_FAILED;
|
||||
}
|
||||
|
@@ -5714,3 +5714,90 @@ bad:
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
int get_event_activation_config_settings(struct cmd_context *cmd,
|
||||
int *service_only, int *event_only, int *service_to_event)
|
||||
{
|
||||
const struct dm_config_node *cn;
|
||||
const struct dm_config_value *cv;
|
||||
int eo = 0, so = 0, se = 0;
|
||||
|
||||
if (!(cn = find_config_tree_array(cmd, global_event_activation_options_CFG, NULL)))
|
||||
return 1;
|
||||
|
||||
for (cv = cn->v; cv; cv = cv->next) {
|
||||
if (cv->type != DM_CFG_STRING)
|
||||
continue;
|
||||
if (!strcmp(cv->v.str, "service_only"))
|
||||
so = 1;
|
||||
else if (!strcmp(cv->v.str, "event_only"))
|
||||
eo = 1;
|
||||
else if (!strcmp(cv->v.str, "service_to_event"))
|
||||
se = 1;
|
||||
else if (strlen(cv->v.str) > 0)
|
||||
log_warn("WARNING: ignoring unrecognized event_activation_options value %s.", cv->v.str);
|
||||
}
|
||||
|
||||
if (se && (so || eo)) {
|
||||
log_warn("WARNING: ignoring incompatible event_activation_options, using service_to_event.");
|
||||
*service_to_event = 1;
|
||||
} else if (so && eo) {
|
||||
log_warn("WARNING: ignoring incompatible event_activation_options, using event_only.");
|
||||
*event_only = 1;
|
||||
} else if (se) {
|
||||
*service_to_event = 1;
|
||||
} else if (so) {
|
||||
*service_only = 1;
|
||||
} else if (eo) {
|
||||
*event_only = 1;
|
||||
} else {
|
||||
*service_to_event = 1;
|
||||
}
|
||||
return 1;
|
||||
}
|
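The settings this helper reads would appear in lvm.conf roughly as follows (editor's sketch; the global section placement is assumed from the global_event_activation_options_CFG identifier):

```
global {
    event_activation = 1
    event_activation_options = [ "service_to_event" ]
}
```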
||||
|
||||
static int _ea_option_value(const char *val, int *ea_service, int *ea_event, int *ea_on)
|
||||
{
|
||||
if (!strcmp(val, "service"))
|
||||
*ea_service = 1;
|
||||
else if (!strcmp(val, "event"))
|
||||
*ea_event = 1;
|
||||
else if (!strcmp(val, "on"))
|
||||
*ea_on = 1;
|
||||
else {
|
||||
log_error("Unknown --eventactivation value.");
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
int get_event_activation_command_options(struct cmd_context *cmd, const char *ea, int *ea_service, int *ea_event, int *ea_on)
|
||||
{
|
||||
char ea_vals[128] = {0};
|
||||
char *val1, *val2;
|
||||
|
||||
strncpy(ea_vals, ea, 127);
|
||||
|
||||
/* Currently only two values can be used together. */
|
||||
|
||||
val1 = ea_vals;
|
||||
|
||||
if ((val2 = strchr(ea_vals, ','))) {
|
||||
*val2 = '\0';
|
||||
val2++;
|
||||
}
|
||||
|
||||
if (val1 && !_ea_option_value(val1, ea_service, ea_event, ea_on))
|
||||
return 0;
|
||||
|
||||
if (val2 && !_ea_option_value(val2, ea_service, ea_event, ea_on))
|
||||
return 0;
|
||||
|
||||
if (*ea_service && *ea_event) {
|
||||
log_error("Invalid --eventactivation options, service and event are incompatible.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
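For reference, how the option string parses in practice (editor's sketch; vg0 is a hypothetical VG name): at most two comma-separated values are read, and combining service with event is rejected.

```
vgchange -aay --eventactivation service,on vg0     # ea_service=1, ea_on=1
vgchange -aay --eventactivation event vg0          # ea_event=1
vgchange -aay --eventactivation service,event vg0  # error: service and event are incompatible
```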
||||
|
||||
|
@@ -237,4 +237,9 @@ int lvremove_single(struct cmd_context *cmd, struct logical_volume *lv,
|
||||
|
||||
int get_lvt_enum(struct logical_volume *lv);
|
||||
|
||||
int get_event_activation_config_settings(struct cmd_context *cmd,
|
||||
int *service_only, int *event_only, int *service_to_event);
|
||||
int get_event_activation_command_options(struct cmd_context *cmd,
|
||||
const char *ea, int *ea_service, int *ea_event, int *ea_on);
|
||||
|
||||
#endif
|
||||
|
@@ -226,9 +226,8 @@ int lvconvert_poll(struct cmd_context *cmd, struct logical_volume *lv, unsigned
|
||||
int mirror_remove_missing(struct cmd_context *cmd,
|
||||
struct logical_volume *lv, int force);
|
||||
|
||||
|
||||
int vgchange_activate(struct cmd_context *cmd, struct volume_group *vg,
|
||||
activation_change_t activate);
|
||||
activation_change_t activate, int vg_complete_to_activate);
|
||||
|
||||
int vgchange_background_polling(struct cmd_context *cmd, struct volume_group *vg);
|
||||
|
||||
@@ -295,6 +294,11 @@ int lvconvert_cachevol_attach_single(struct cmd_context *cmd,
|
||||
struct logical_volume *lv,
|
||||
struct processing_handle *handle);
|
||||
|
||||
int online_pvid_file_create(struct cmd_context *cmd, struct device *dev, const char *vgname, int ignore_existing, int *exists);
|
||||
void online_vg_file_remove(const char *vgname);
|
||||
int online_vg_file_create(struct cmd_context *cmd, const char *vgname);
|
||||
void online_dir_setup(struct cmd_context *cmd);
|
||||
int event_activation_enable(struct cmd_context *cmd);
|
||||
int event_activation_is_on(struct cmd_context *cmd);
|
||||
|
||||
#endif
|
||||
|
184
tools/vgchange.c
@@ -19,6 +19,7 @@
|
||||
struct vgchange_params {
|
||||
int lock_start_count;
|
||||
unsigned int lock_start_sanlock : 1;
|
||||
unsigned int vg_complete_to_activate : 1;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -194,11 +195,47 @@ int vgchange_background_polling(struct cmd_context *cmd, struct volume_group *vg
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int _online_pvid_file_create_all(struct cmd_context *cmd)
|
||||
{
|
||||
struct lvmcache_info *info;
|
||||
struct dev_iter *iter;
|
||||
struct device *dev;
|
||||
const char *vgname;
|
||||
int exists;
|
||||
int exist_count = 0;
|
||||
int create_count = 0;
|
||||
|
||||
if (!(iter = dev_iter_create(NULL, 0)))
|
||||
return 0;
|
||||
while ((dev = dev_iter_get(cmd, iter))) {
|
||||
if (dev->pvid[0] &&
|
||||
(info = lvmcache_info_from_pvid(dev->pvid, dev, 0))) {
|
||||
vgname = lvmcache_vgname_from_info(info);
|
||||
if (vgname && !is_orphan_vg(vgname)) {
|
||||
/*
|
||||
* Ignore an existing pvid file because a pvscan may be creating
|
||||
* the same file at the same time we are, which is expected.
|
||||
*/
|
||||
exists = 0;
|
||||
online_pvid_file_create(cmd, dev, vgname, 1, &exists);
|
||||
if (exists)
|
||||
exist_count++;
|
||||
else
|
||||
create_count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
dev_iter_destroy(iter);
|
||||
log_debug("PV online files created %d exist %d", create_count, exist_count);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int vgchange_activate(struct cmd_context *cmd, struct volume_group *vg,
|
||||
activation_change_t activate)
|
||||
activation_change_t activate, int vg_complete_to_activate)
|
||||
{
|
||||
int lv_open, active, monitored = 0, r = 1;
|
||||
const struct lv_list *lvl;
|
||||
struct pv_list *pvl;
|
||||
int do_activate = is_change_activating(activate);
|
||||
|
||||
/*
|
||||
@@ -219,6 +256,20 @@ int vgchange_activate(struct cmd_context *cmd, struct volume_group *vg,
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (arg_is_set(cmd, vgonline_ARG) && !online_vg_file_create(cmd, vg->name)) {
|
||||
log_print("VG %s already online", vg->name);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (do_activate && vg_complete_to_activate) {
|
||||
dm_list_iterate_items(pvl, &vg->pvs) {
|
||||
if (!pvl->pv->dev) {
|
||||
log_print("VG %s is incomplete.", vg->name);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Safe, since we never write out new metadata here. Required for
|
||||
* partial activation to work.
|
||||
@@ -647,6 +698,7 @@ static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
|
||||
struct volume_group *vg,
|
||||
struct processing_handle *handle)
|
||||
{
|
||||
struct vgchange_params *vp = (struct vgchange_params *)handle->custom_handle;
|
||||
int ret = ECMD_PROCESSED;
|
||||
unsigned i;
|
||||
activation_change_t activate;
|
||||
@@ -699,9 +751,12 @@ static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
|
||||
log_print_unless_silent("Volume group \"%s\" successfully changed", vg->name);
|
||||
}
|
||||
|
||||
if (arg_is_set(cmd, vgonline_ARG))
|
||||
online_dir_setup(cmd);
|
||||
|
||||
if (arg_is_set(cmd, activate_ARG)) {
|
||||
activate = (activation_change_t) arg_uint_value(cmd, activate_ARG, 0);
|
||||
if (!vgchange_activate(cmd, vg, activate))
|
||||
if (!vgchange_activate(cmd, vg, activate, vp->vg_complete_to_activate))
|
||||
return_ECMD_FAILED;
|
||||
} else if (arg_is_set(cmd, refresh_ARG)) {
|
||||
/* refreshes the visible LVs (which starts polling) */
|
||||
@@ -722,8 +777,113 @@ static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int _check_event_activation(struct cmd_context *cmd, struct vgchange_params *vp, int *skip_command, int *enable_events)
|
||||
{
|
||||
const char *ea;
|
||||
int service_only = 0, event_only = 0, service_to_event = 0;
|
||||
int ea_service = 0, ea_event = 0, ea_on = 0;
|
||||
int on_file_exists;
|
||||
int event_activation;
|
||||
|
||||
if (!(ea = arg_str_value(cmd, eventactivation_ARG, NULL))) {
|
||||
log_error("No eventactivation value.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* lvm.conf event_activation_options */
|
||||
if (!get_event_activation_config_settings(cmd, &service_only, &event_only, &service_to_event))
|
||||
return_0;
|
||||
|
||||
/* --eventactivation values */
|
||||
if (!get_event_activation_command_options(cmd, ea, &ea_service, &ea_event, &ea_on))
|
||||
return_0;
|
||||
|
||||
event_activation = find_config_tree_bool(cmd, global_event_activation_CFG, NULL);
|
||||
|
||||
/*
|
||||
* The combination of lvm.conf event_activation/event_activation_options
|
||||
* and the --eventactivation service|event value determines if this
|
||||
* command should do anything or be skipped, along with the existence of
|
||||
* the /run/lvm/event-activation-on file in case of service_to_event.
|
||||
*/
|
||||
if (!event_activation) {
|
||||
if (ea_event) {
|
||||
log_print("Skip vgchange for event and event_activation=0.");
|
||||
*skip_command = 1;
|
||||
return 1;
|
||||
}
|
||||
} else {
|
||||
if (event_only && ea_service) {
|
||||
log_print("Skip vgchange for service and event_activation_options event_only.");
|
||||
*skip_command = 1;
|
||||
return 1;
|
||||
}
|
||||
if (service_only && ea_event) {
|
||||
log_print("Skip vgchange for event and event_activation_options service_only.");
|
||||
*skip_command = 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
on_file_exists = event_activation_is_on(cmd);
|
||||
|
||||
if (service_to_event && ea_service && on_file_exists) {
|
||||
log_print("Skip vgchange for service and event-activation-on.");
|
||||
*skip_command = 1;
|
||||
return 1;
|
||||
}
|
||||
if (service_to_event && ea_event && !on_file_exists) {
|
||||
log_print("Skip vgchange for event and no event-activation-on.");
|
||||
*skip_command = 1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Switch from service activation to event activation when:
|
||||
* lvm.conf event_activation=1,
|
||||
* event_activation_options=service_to_event,
|
||||
* and --eventactivation service,on.
|
||||
*
|
||||
* When enabling event-based activation, first create the
|
||||
* /run/lvm/event-activation-on file to tell other commands
|
||||
* to begin responding to PV events and doing activation
|
||||
* for newly completed VGs. It also needs to create online
|
||||
* files for existing PVs because some VGs may be incomplete
|
||||
* at this point, and future pvscan commands need to
|
||||
* find online files for PVs that have already appeared.
|
||||
* The label scan provides info to know which PVs are
|
||||
* present and should have pvid online files created.
|
||||
*
|
||||
* process_each_vg() usually begins with lock_global() and
|
||||
* lvmcache_label_scan(), and then processes each VG.
|
||||
* In this case, lock_global/lvmcache_label_scan are done
|
||||
* before calling process_each_vg. This allows a special
|
||||
* step to be inserted between the label scan and processing
|
||||
* vgs. That step creates the pvid online files, which
|
||||
* requires label scan info. The lock_global and
|
||||
* lvmcache_label_scan will be skipped by process_each_vg
|
||||
* since they are already done here.
|
||||
*/
|
||||
if (event_activation && service_to_event && ea_service && ea_on) {
|
||||
if (!event_activation_enable(cmd))
|
||||
log_warn("WARNING: Failed to create event-activation-on.");
|
||||
*enable_events = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* lvm.conf service_to_event, and vgchange -aay --eventactivation service,
|
||||
* then only activate LVs if the VG is complete.
|
||||
* A later event will complete the VG and activate it.
|
||||
*/
|
||||
if (event_activation && service_to_event && ea_service)
|
||||
vp->vg_complete_to_activate = 1;
|
||||
|
||||
return 1;
|
||||
}
|
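A sketch of the hand-over sequence the comment above describes (editor's illustration; the major/minor numbers are hypothetical):

```
# Last fixed activation service: activate what is complete, create
# /run/lvm/event-activation-on, and create /run/lvm/pvs_online files for
# all PVs found by the label scan.
vgchange -aay --eventactivation service,on

# Subsequent uevents: pvscan sees event-activation-on and takes over,
# completing and activating VGs as their remaining PVs appear.
pvscan --cache -aay --eventactivation event --major 8 --minor 16
```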
||||
|
||||
int vgchange(struct cmd_context *cmd, int argc, char **argv)
|
||||
{
|
||||
struct vgchange_params vp = { 0 };
|
||||
struct processing_handle *handle;
|
||||
uint32_t flags = 0;
|
||||
int ret;
|
||||
@@ -837,6 +997,24 @@ int vgchange(struct cmd_context *cmd, int argc, char **argv)
|
||||
cmd->lockd_vg_enforce_sh = 1;
|
||||
}
|
||||
|
||||
if (arg_is_set(cmd, eventactivation_ARG)) {
|
||||
int skip_command = 0, enable_events = 0;
|
||||
if (!_check_event_activation(cmd, &vp, &skip_command, &enable_events))
|
||||
return ECMD_FAILED;
|
||||
if (skip_command)
|
||||
return ECMD_PROCESSED;
|
||||
if (enable_events) {
|
||||
if (!event_activation_enable(cmd))
|
||||
log_warn("WARNING: Failed to create event-activation-on.");
|
||||
/* The process_each_vg lock_global/lvmcache_label_scan will be skipped. */
|
||||
if (!lock_global(cmd, "sh"))
|
||||
return ECMD_FAILED;
|
||||
lvmcache_label_scan(cmd);
|
||||
_online_pvid_file_create_all(cmd);
|
||||
flags |= PROCESS_SKIP_SCAN;
|
||||
}
|
||||
}
|
||||
|
||||
if (update)
|
||||
flags |= READ_FOR_UPDATE;
|
||||
else if (arg_is_set(cmd, activate_ARG))
|
||||
@@ -847,6 +1025,8 @@ int vgchange(struct cmd_context *cmd, int argc, char **argv)
|
||||
return ECMD_FAILED;
|
||||
}
|
||||
|
||||
handle->custom_handle = &vp;
|
||||
|
||||
ret = process_each_vg(cmd, argc, argv, NULL, NULL, flags, 0, handle, &_vgchange_single);
|
||||
|
||||
destroy_processing_handle(cmd, handle);
|
||||
|
@@ -172,17 +172,6 @@ int vgimportdevices(struct cmd_context *cmd, int argc, char **argv)
|
||||
cmd->filter_regex_with_devices_file = 1;
|
||||
cmd->create_edit_devices_file = 1;
|
||||
|
||||
/*
|
||||
* This helps a user bootstrap existing shared VGs into the devices
|
||||
* file. Reading the vg to import devices requires locking, but
|
||||
* lockstart won't find the vg before it's in the devices file.
|
||||
* So, allow importing devices without an lvmlockd lock (in a
|
||||
* a shared vg the vg metadata won't be updated with device ids,
|
||||
* so the lvmlockd lock isn't protecting vg modification.)
|
||||
*/
|
||||
cmd->lockd_gl_disable = 1;
|
||||
cmd->lockd_vg_disable = 1;
|
||||
|
||||
/*
|
||||
* For each VG:
|
||||
* device_id_add() each PV in the VG
|
||||
|
@@ -121,6 +121,6 @@ LABEL="direct_pvscan"
|
||||
# MD | | X | X* | |
|
||||
# loop | | X | X* | |
|
||||
# other | X | | X | | X
|
||||
RUN+="(LVM_EXEC)/lvm pvscan --background --cache --activate ay --major $major --minor $minor", ENV{LVM_SCANNED}="1"
|
||||
RUN+="(LVM_EXEC)/lvm pvscan --cache -aay --eventactivation event --major $major --minor $minor", ENV{LVM_SCANNED}="1"
|
||||
|
||||
LABEL="lvm_end"
|
||||
|
97
udev/69-dm-lvm.rules.in
Normal file
@@ -0,0 +1,97 @@
|
||||
# Copyright (C) 2012,2021 Red Hat, Inc. All rights reserved.
|
||||
#
|
||||
# This file is part of LVM.
|
||||
#
|
||||
# This rule requires blkid to be called on block devices before so only devices
|
||||
# used as LVM PVs are processed (ID_FS_TYPE="LVM2_member").
|
||||
|
||||
SUBSYSTEM!="block", GOTO="lvm_end"
|
||||
(LVM_EXEC_RULE)
|
||||
|
||||
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="lvm_end"
|
||||
|
||||
# Only process devices already marked as a PV - this requires blkid to be called before.
|
||||
ENV{ID_FS_TYPE}!="LVM2_member", GOTO="lvm_end"
|
||||
ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="lvm_end"
|
||||
ACTION=="remove", GOTO="lvm_end"
|
||||
|
||||
# Create /dev/disk/by-id/lvm-pv-uuid-<PV_UUID> symlink for each PV
|
||||
ENV{ID_FS_UUID_ENC}=="?*", SYMLINK+="disk/by-id/lvm-pv-uuid-$env{ID_FS_UUID_ENC}"
|
||||
|
||||
# If the PV is a special device listed below, scan only if the device is
|
||||
# properly activated. These devices are not usable after an ADD event,
|
||||
# but they require an extra setup and they are ready after a CHANGE event.
|
||||
# Also support coldplugging with ADD event but only if the device is already
|
||||
# properly activated.
|
||||
# This logic should be eventually moved to rules where those particular
|
||||
# devices are processed primarily (MD and loop).
|
||||
|
||||
# DM device:
|
||||
KERNEL!="dm-[0-9]*", GOTO="next"
|
||||
ENV{DM_UDEV_PRIMARY_SOURCE_FLAG}=="1", ENV{DM_ACTIVATION}=="1", GOTO="lvm_scan"
|
||||
GOTO="lvm_end"
|
||||
|
||||
# MD device:
|
||||
LABEL="next"
|
||||
KERNEL!="md[0-9]*", GOTO="next"
|
||||
IMPORT{db}="LVM_MD_PV_ACTIVATED"
|
||||
ACTION=="add", ENV{LVM_MD_PV_ACTIVATED}=="1", GOTO="lvm_scan"
|
||||
ACTION=="change", ENV{LVM_MD_PV_ACTIVATED}!="1", TEST=="md/array_state", ENV{LVM_MD_PV_ACTIVATED}="1", GOTO="lvm_scan"
|
||||
ACTION=="add", KERNEL=="md[0-9]*p[0-9]*", GOTO="lvm_scan"
|
||||
ENV{LVM_MD_PV_ACTIVATED}!="1", ENV{SYSTEMD_READY}="0"
|
||||
GOTO="lvm_end"
|
||||
|
||||
# Loop device:
|
||||
LABEL="next"
|
||||
KERNEL!="loop[0-9]*", GOTO="next"
|
||||
ACTION=="add", ENV{LVM_LOOP_PV_ACTIVATED}=="1", GOTO="lvm_scan"
|
||||
ACTION=="change", ENV{LVM_LOOP_PV_ACTIVATED}!="1", TEST=="loop/backing_file", ENV{LVM_LOOP_PV_ACTIVATED}="1", GOTO="lvm_scan"
|
||||
ENV{LVM_LOOP_PV_ACTIVATED}!="1", ENV{SYSTEMD_READY}="0"
|
||||
GOTO="lvm_end"
|
||||
|
||||
LABEL="next"
|
||||
ACTION!="add", GOTO="lvm_end"
|
||||
|
||||
LABEL="lvm_scan"
|
||||
|
||||
ENV{SYSTEMD_READY}="1"
|
||||
|
||||
# pvscan will check if this device completes a VG,
|
||||
# i.e. all PVs in the VG are now present with the
|
||||
# arrival of this PV. If so, it prints to stdout:
|
||||
# LVM_VG_NAME_COMPLETE='foo'
|
||||
#
|
||||
# When the VG is complete it can be activated, so
|
||||
# vgchange -aay <vgname> is run. It is run via
|
||||
# systemd since it can take longer to run than
|
||||
# udev wants to block when processing rules.
|
||||
# (if there are hundreds of LVs to activate,
|
||||
# the vgchange can take many seconds.)
|
||||
#
|
||||
# pvscan only reads the single device specified,
|
||||
# and uses temp files under /run/lvm to check if
|
||||
# other PVs in the VG are present.
|
||||
#
|
||||
# If event_activation=0 in lvm.conf, this pvscan
|
||||
# (using checkcomplete) will do nothing, so that
|
||||
# no event-based autoactivation will happen.
|
||||
#
|
||||
# TODO: adjust the output of vgchange -aay so that
|
||||
# it's better suited to appearing in the journal.
|
||||
#
|
||||
# "--eventactivation event" used with pvscan or vgchange
|
||||
# tells the command that it is being run from an event.
|
||||
# The command does nothing if lvm.conf event_activation=0.
|
||||
# The command does nothing if lvm.conf event_activation=1,
|
||||
# and lvm.conf event_activation_options="service_only".
|
||||
# The command goes ahead if event_activation_options="event_only",
|
||||
# or if event_activation_options="service_to_event" and the
|
||||
# event-activation-on file exists.
|
||||
#
|
||||
|
||||
IMPORT{program}="(LVM_EXEC)/lvm pvscan --cache --listvg --checkcomplete --vgonline --eventactivation event --udevoutput --journal=output $env{DEVNAME}"
|
||||
ENV{LVM_VG_NAME_COMPLETE}=="?*", RUN+="/usr/bin/systemd-run -r --no-block --property DefaultDependencies=no --unit lvm-activate-$env{LVM_VG_NAME_COMPLETE} lvm vgchange -aay --config devices/hints=pvs_online --eventactivation event $env{LVM_VG_NAME_COMPLETE}"
|
||||
GOTO="lvm_end"
|
||||
|
||||
LABEL="lvm_end"
|
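Putting the two rules above together (editor's sketch; vg0 is a hypothetical VG name): pvscan prints udev key=value output, and only a complete VG produces LVM_VG_NAME_COMPLETE, which in turn launches the transient activation unit.

```
# Expected pvscan output imported by udev when the arriving PV completes its VG:
#   LVM_VG_NAME_COMPLETE='vg0'
# ...which makes the RUN rule start roughly this transient unit:
systemd-run -r --no-block --property DefaultDependencies=no \
    --unit lvm-activate-vg0 \
    lvm vgchange -aay --config devices/hints=pvs_online --eventactivation event vg0
```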
||||
|
@@ -18,7 +18,7 @@ top_builddir = @top_builddir@
|
||||
include $(top_builddir)/make.tmpl
|
||||
|
||||
DM_RULES=10-dm.rules 13-dm-disk.rules 95-dm-notify.rules
|
||||
LVM_RULES=11-dm-lvm.rules 69-dm-lvm-metad.rules
|
||||
LVM_RULES=11-dm-lvm.rules 69-dm-lvm.rules
|
||||
|
||||
DM_DIR=$(shell $(GREP) "\#define DM_DIR" $(top_srcdir)/libdm/misc/dm-ioctl.h | $(AWK) '{print $$3}')
|
||||
|
||||
|