Mirror of git://sourceware.org/git/lvm2.git, synced 2025-11-03 08:23:48 +03:00

Compare commits: dev-dct-pv ... v2_02_181 (65 commits)
Commits in this comparison (SHA1): a3353e766e, 12dfd0ed02, ad10d42671, f7645995da, 4ed9b07380, 0174ba692c, 48594d007a, 50a603de6f, e4fe0d1b8f, 951676a59e, 4456d9aa77, b394a9f63f, 9e296c9c6f, 5b87f5fb72, bb384f8488, 82feb5f111, 66990bc7c8, 6fcb2ba440, b8a7f6ba3d, 0851ee5301, df8eef7096, c1dbb22ba4, 99cddd67a9, 814dd84e07, d5bcc56eef, f7ffba204e, 90e419c645, 49147cbaa7, 69907e0780, b90d4b38e5, befdfc245b, 0d78e4c1e9, 763c65314e, 24aee732a5, ba6ed5c90c, e0c94d883a, 39e3b5d8ac, 39fc98d731, 5503699c37, e0bfc946cb, 9546edeef9, 716199334c, 4479228d32, 4afb5971b9, dd075e93c1, d4fd39f64c, acb784e2a8, 8a0af1bec8, 8bd9a89c14, a30e622279, 76075ff55d, bfb904af1c, d88376ca78, 6283f5ea3f, 43ce357ebc, d136790bab, 214de62b5d, e9c0a64fb5, 7ac8e21f3c, fdb362b998, 06accf1395, d3dcca639c, 98eb9e5754, 347c807f86, 1e5f6887b1
COPYING.BSD (new file, +25 lines)

@@ -0,0 +1,25 @@
BSD 2-Clause License

Copyright (c) 2014, Red Hat, Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1 +1 @@
1.02.147-git (2018-05-24)
1.02.150 (2018-08-01)

WHATS_NEW (41 lines changed)

@@ -1,7 +1,44 @@
Version 2.02.178 -
====================================
Version 2.02.181 - 02 Aug 2018
==============================
  Reject conversions on raid1 LVs with split tracked SubLVs.
  Reject conversions on raid1 split tracked SubLVs.
  Fix dmstats list failing when no regions exist.
  Reject conversions of LVs under snapshot.
  Limit suggested options on incorrect option for lvconvert subcommand.

Version 2.02.180 - 19th July 2018
=================================
  Never send any discard ioctl with test mode.
  Fix thin-pool alloc which needs same PV for data and metadata.
  Extend list of non-memlocked areas with newly linked libs.
  Enhance vgcfgrestore to check for active LVs in restored VG.
  lvconvert: provide possible layouts between linear and striped/raid
  Fix unmonitoring of merging snapshots.
  Add missing -l description in fsadm man page.
  Cache can uses metadata format 2 with cleaner policy.
  Avoid showing internal error in lvs output or pvmoved LVs.
  Fix check if resized PV can also fit metadata area.
  Reopen devices RDWR only before writing to avoid udev issues.
  Change pvresize output confusing when no resize took place.
  Fix lvmetad hanging on shutdown.
  Fix mem leak in clvmd and more coverity issues.

Version 2.02.179 - 18th June 2018
=================================
  Allow forced vgchange to lock type none on clustered VG.
  Add the report field "shared".
  Enable automatic metadata consistency repair on a shared VG.
  Fix pvremove force on a PV with a shared VG.
  Fixed vgimportclone of a PV with a shared VG.
  Enable previously disallowed thin/cache commands in shared VGs.
  Enable metadata-related changes on LVs active with shared lock.
  Do not continue trying to use a device that cannot be opened.
  Fix problems opening a device that fails and returns.
  Use versionsort to fix archive file expiry beyond 100000 files.

Version 2.02.178 - 13th June 2018
=================================

Version 2.02.178-rc1 - 24th May 2018
====================================
  Add libaio dependency for build.

WHATS_NEW_DM (14 lines changed)

@@ -1,5 +1,15 @@
Version 1.02.147 -
====================================
Version 1.02.150 - 02 Aug 2018
==============================
  Add vdo plugin for monitoring VDO devices.

Version 1.02.149 - 19th July 2018
=================================

Version 1.02.148 - 18th June 2018
=================================

Version 1.02.147 - 13th June 2018
=================================

Version 1.02.147-rc1 - 24th May 2018
====================================

configure (vendored, 3 lines changed)

@@ -15559,7 +15559,7 @@ _ACEOF


################################################################################
ac_config_files="$ac_config_files Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/dmfilemapd/Makefile daemons/lvmdbusd/Makefile daemons/lvmdbusd/lvmdbusd daemons/lvmdbusd/lvmdb.py daemons/lvmdbusd/lvm_shell_proxy.py daemons/lvmdbusd/path.py daemons/lvmetad/Makefile daemons/lvmpolld/Makefile daemons/lvmlockd/Makefile device_mapper/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/locking/Makefile include/lvm-version.h libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/com.redhat.lvmdbus1.service scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmdbusd_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_lvmlockd_systemd_red_hat.service scripts/lvm2_lvmlocking_systemd_red_hat.service scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service scripts/lvm2_pvscan_systemd_red_hat@.service scripts/lvm2_tmpfiles_red_hat.conf scripts/lvmdump.sh scripts/Makefile test/Makefile test/api/Makefile test/api/python_lvm_unit.py test/unit/Makefile tools/Makefile udev/Makefile"
ac_config_files="$ac_config_files Makefile make.tmpl daemons/Makefile daemons/clvmd/Makefile daemons/cmirrord/Makefile daemons/dmeventd/Makefile daemons/dmeventd/libdevmapper-event.pc daemons/dmeventd/plugins/Makefile daemons/dmeventd/plugins/lvm2/Makefile daemons/dmeventd/plugins/raid/Makefile daemons/dmeventd/plugins/mirror/Makefile daemons/dmeventd/plugins/snapshot/Makefile daemons/dmeventd/plugins/thin/Makefile daemons/dmeventd/plugins/vdo/Makefile daemons/dmfilemapd/Makefile daemons/lvmdbusd/Makefile daemons/lvmdbusd/lvmdbusd daemons/lvmdbusd/lvmdb.py daemons/lvmdbusd/lvm_shell_proxy.py daemons/lvmdbusd/path.py daemons/lvmetad/Makefile daemons/lvmpolld/Makefile daemons/lvmlockd/Makefile device_mapper/Makefile conf/Makefile conf/example.conf conf/lvmlocal.conf conf/command_profile_template.profile conf/metadata_profile_template.profile include/.symlinks include/Makefile lib/Makefile lib/locking/Makefile include/lvm-version.h libdaemon/Makefile libdaemon/client/Makefile libdaemon/server/Makefile libdm/Makefile libdm/libdevmapper.pc liblvm/Makefile liblvm/liblvm2app.pc man/Makefile po/Makefile python/Makefile python/setup.py scripts/blkdeactivate.sh scripts/blk_availability_init_red_hat scripts/blk_availability_systemd_red_hat.service scripts/clvmd_init_red_hat scripts/cmirrord_init_red_hat scripts/com.redhat.lvmdbus1.service scripts/dm_event_systemd_red_hat.service scripts/dm_event_systemd_red_hat.socket scripts/lvm2_cluster_activation_red_hat.sh scripts/lvm2_cluster_activation_systemd_red_hat.service scripts/lvm2_clvmd_systemd_red_hat.service scripts/lvm2_cmirrord_systemd_red_hat.service scripts/lvm2_lvmdbusd_systemd_red_hat.service scripts/lvm2_lvmetad_init_red_hat scripts/lvm2_lvmetad_systemd_red_hat.service scripts/lvm2_lvmetad_systemd_red_hat.socket scripts/lvm2_lvmpolld_init_red_hat scripts/lvm2_lvmpolld_systemd_red_hat.service scripts/lvm2_lvmpolld_systemd_red_hat.socket scripts/lvm2_lvmlockd_systemd_red_hat.service scripts/lvm2_lvmlocking_systemd_red_hat.service scripts/lvm2_monitoring_init_red_hat scripts/lvm2_monitoring_systemd_red_hat.service scripts/lvm2_pvscan_systemd_red_hat@.service scripts/lvm2_tmpfiles_red_hat.conf scripts/lvmdump.sh scripts/Makefile test/Makefile test/api/Makefile test/api/python_lvm_unit.py test/unit/Makefile tools/Makefile udev/Makefile"

cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure

@@ -16267,6 +16267,7 @@ do
    "daemons/dmeventd/plugins/mirror/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/dmeventd/plugins/mirror/Makefile" ;;
    "daemons/dmeventd/plugins/snapshot/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/dmeventd/plugins/snapshot/Makefile" ;;
    "daemons/dmeventd/plugins/thin/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/dmeventd/plugins/thin/Makefile" ;;
    "daemons/dmeventd/plugins/vdo/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/dmeventd/plugins/vdo/Makefile" ;;
    "daemons/dmfilemapd/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/dmfilemapd/Makefile" ;;
    "daemons/lvmdbusd/Makefile") CONFIG_FILES="$CONFIG_FILES daemons/lvmdbusd/Makefile" ;;
    "daemons/lvmdbusd/lvmdbusd") CONFIG_FILES="$CONFIG_FILES daemons/lvmdbusd/lvmdbusd" ;;
@@ -2099,6 +2099,7 @@ daemons/dmeventd/plugins/raid/Makefile
daemons/dmeventd/plugins/mirror/Makefile
daemons/dmeventd/plugins/snapshot/Makefile
daemons/dmeventd/plugins/thin/Makefile
daemons/dmeventd/plugins/vdo/Makefile
daemons/dmfilemapd/Makefile
daemons/lvmdbusd/Makefile
daemons/lvmdbusd/lvmdbusd

@@ -832,7 +832,7 @@ void lvm_do_backup(const char *vgname)

	pthread_mutex_lock(&lvm_lock);

	vg = vg_read_internal(cmd, vgname, NULL /*vgid*/, 0, WARN_PV_READ, &consistent);
	vg = vg_read_internal(cmd, vgname, NULL /*vgid*/, 0, 0, WARN_PV_READ, &consistent);

	if (vg && consistent)
		check_current_backup(vg);
@@ -645,6 +645,7 @@ int dm_event_register_handler(const struct dm_event_handler *dmevh)
	uuid = dm_task_get_uuid(dmt);

	if (!strstr(dmevh->dso, "libdevmapper-event-lvm2thin.so") &&
	    !strstr(dmevh->dso, "libdevmapper-event-lvm2vdo.so") &&
	    !strstr(dmevh->dso, "libdevmapper-event-lvm2snapshot.so") &&
	    !strstr(dmevh->dso, "libdevmapper-event-lvm2mirror.so") &&
	    !strstr(dmevh->dso, "libdevmapper-event-lvm2raid.so"))

@@ -1,6 +1,6 @@
#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2005, 2011 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#

@@ -16,11 +16,7 @@ srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@

SUBDIRS += lvm2 snapshot raid thin mirror

ifeq ($(MAKECMDGOALS),distclean)
  SUBDIRS = lvm2 mirror snapshot raid thin
endif
SUBDIRS += lvm2 snapshot raid thin mirror vdo

include $(top_builddir)/make.tmpl

@@ -28,3 +24,4 @@ snapshot: lvm2
mirror: lvm2
raid: lvm2
thin: lvm2
vdo: lvm2
daemons/dmeventd/plugins/vdo/.exported_symbols (new file, +3 lines)

@@ -0,0 +1,3 @@
process_event
register_device
unregister_device
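The three symbols exported above are the entry points dmeventd expects from a monitoring DSO, and the new vdo plugin below implements all of them. As a quick orientation, here is a minimal sketch of that interface using the same signatures that appear in dmeventd_vdo.c; the empty bodies are illustrative only, not part of the plugin:

```c
#include "libdevmapper-event.h"

/* Invoked for each event (timeout or device error) on a monitored device. */
void process_event(struct dm_task *dmt, enum dm_event_mask event, void **user)
{
	/* inspect dm status via dmt and act on *user, the per-device plugin state */
}

/* Invoked when dmeventd starts monitoring a device; *user stores plugin state. */
int register_device(const char *device, const char *uuid,
		    int major, int minor, void **user)
{
	return 1;	/* non-zero means success, as in the vdo plugin below */
}

/* Invoked when monitoring stops; release whatever register_device() set up. */
int unregister_device(const char *device, const char *uuid,
		      int major, int minor, void **user)
{
	return 1;
}
```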
daemons/dmeventd/plugins/vdo/Makefile.in (new file, +36 lines)

@@ -0,0 +1,36 @@
#
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@

INCLUDES += -I$(top_srcdir)/daemons/dmeventd/plugins/lvm2
CLDFLAGS += -L$(top_builddir)/daemons/dmeventd/plugins/lvm2

SOURCES = dmeventd_vdo.c

LIB_NAME = libdevmapper-event-lvm2vdo
LIB_SHARED = $(LIB_NAME).$(LIB_SUFFIX)
LIB_VERSION = $(LIB_VERSION_LVM)

CFLOW_LIST = $(SOURCES)
CFLOW_LIST_TARGET = $(LIB_NAME).cflow

include $(top_builddir)/make.tmpl

LIBS += -ldevmapper-event-lvm2 $(INTERNAL_LIBS)

install_lvm2: install_dm_plugin

install: install_lvm2
daemons/dmeventd/plugins/vdo/dmeventd_vdo.c (new file, +419 lines)

@@ -0,0 +1,419 @@
/*
 * Copyright (C) 2018 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib.h"
#include "dmeventd_lvm.h"
#include "libdevmapper-event.h"

#include <sys/wait.h>
#include <stdarg.h>

/* First warning when VDO pool is 80% full. */
#define WARNING_THRESH	(DM_PERCENT_1 * 80)
/* Run a check every 5%. */
#define CHECK_STEP	(DM_PERCENT_1 *  5)
/* Do not bother checking VDO pool is less than 50% full. */
#define CHECK_MINIMUM	(DM_PERCENT_1 * 50)

#define MAX_FAILS	(256)  /* ~42 mins between cmd call retry with 10s delay */

#define VDO_DEBUG 0

struct dso_state {
	struct dm_pool *mem;
	int percent_check;
	int percent;
	uint64_t known_data_size;
	unsigned fails;
	unsigned max_fails;
	int restore_sigset;
	sigset_t old_sigset;
	pid_t pid;
	char *argv[3];
	const char *cmd_str;
	const char *name;
};

struct vdo_status {
	uint64_t used_blocks;
	uint64_t total_blocks;
};

static int _vdo_status_parse(const char *params, struct vdo_status *status)
{
	if (sscanf(params, "%*s %*s %*s %*s %*s %" PRIu64 " %" PRIu64,
		   &status->used_blocks,
		   &status->total_blocks) < 2) {
		log_error("Failed to parse vdo params: %s.", params);
		return 0;
	}

	return 1;
}

DM_EVENT_LOG_FN("vdo")

static int _run_command(struct dso_state *state)
{
	char val[16];
	int i;

	/* Mark for possible lvm2 command we are running from dmeventd
	 * lvm2 will not try to talk back to dmeventd while processing it */
	(void) setenv("LVM_RUN_BY_DMEVENTD", "1", 1);

	if (state->percent) {
		/* Prepare some known data to env vars for easy use */
		if (dm_snprintf(val, sizeof(val), "%d",
				state->percent / DM_PERCENT_1) != -1)
			(void) setenv("DMEVENTD_VDO_POOL", val, 1);
	} else {
		/* For an error event it's for a user to check status and decide */
		log_debug("Error event processing.");
	}

	log_verbose("Executing command: %s", state->cmd_str);

	/* TODO:
	 *   Support parallel run of 'task' and it's waitpid maintainence
	 *   ATM we can't handle signaling of  SIGALRM
	 *   as signalling is not allowed while 'process_event()' is running
	 */
	if (!(state->pid = fork())) {
		/* child */
		(void) close(0);
		for (i = 3; i < 255; ++i) (void) close(i);
		execvp(state->argv[0], state->argv);
		_exit(errno);
	} else if (state->pid == -1) {
		log_error("Can't fork command %s.", state->cmd_str);
		state->fails = 1;
		return 0;
	}

	return 1;
}

static int _use_policy(struct dm_task *dmt, struct dso_state *state)
{
#if VDO_DEBUG
	log_debug("dmeventd executes: %s.", state->cmd_str);
#endif
	if (state->argv[0])
		return _run_command(state);

	if (!dmeventd_lvm2_run_with_lock(state->cmd_str)) {
		log_error("Failed command for %s.", dm_task_get_name(dmt));
		state->fails = 1;
		return 0;
	}

	state->fails = 0;

	return 1;
}

/* Check if executed command has finished
 * Only 1 command may run */
static int _wait_for_pid(struct dso_state *state)
{
	int status = 0;

	if (state->pid == -1)
		return 1;

	if (!waitpid(state->pid, &status, WNOHANG))
		return 0;

	/* Wait for finish */
	if (WIFEXITED(status)) {
		log_verbose("Child %d exited with status %d.",
			    state->pid, WEXITSTATUS(status));
		state->fails = WEXITSTATUS(status) ? 1 : 0;
	} else {
		if (WIFSIGNALED(status))
			log_verbose("Child %d was terminated with status %d.",
				    state->pid, WTERMSIG(status));
		state->fails = 1;
	}

	state->pid = -1;

	return 1;
}

void process_event(struct dm_task *dmt,
		   enum dm_event_mask event __attribute__((unused)),
		   void **user)
{
	const char *device = dm_task_get_name(dmt);
	struct dso_state *state = *user;
	void *next = NULL;
	uint64_t start, length;
	char *target_type = NULL;
	char *params;
	int needs_policy = 0;
	struct dm_task *new_dmt = NULL;
	struct vdo_status status;

#if VDO_DEBUG
	log_debug("Watch for VDO %s:%.2f%%.", state->name,
		  dm_percent_to_round_float(state->percent_check, 2));
#endif
	if (!_wait_for_pid(state)) {
		log_warn("WARNING: Skipping event, child %d is still running (%s).",
			 state->pid, state->cmd_str);
		return;
	}

	if (event & DM_EVENT_DEVICE_ERROR) {
#if VDO_DEBUG
		log_debug("VDO event error.");
#endif
		/* Error -> no need to check and do instant resize */
		state->percent = 0;
		if (_use_policy(dmt, state))
			goto out;

		stack;

		if (!(new_dmt = dm_task_create(DM_DEVICE_STATUS)))
			goto_out;

		if (!dm_task_set_uuid(new_dmt, dm_task_get_uuid(dmt)))
			goto_out;

		/* Non-blocking status read */
		if (!dm_task_no_flush(new_dmt))
			log_warn("WARNING: Can't set no_flush for dm status.");

		if (!dm_task_run(new_dmt))
			goto_out;

		dmt = new_dmt;
	}

	dm_get_next_target(dmt, next, &start, &length, &target_type, &params);

	if (!target_type || (strcmp(target_type, "vdo") != 0)) {
		log_error("Invalid target type.");
		goto out;
	}

	if (!_vdo_status_parse(params, &status)) {
		log_error("Failed to parse status.");
		goto out;
	}

	state->percent = dm_make_percent(status.used_blocks,
					 status.total_blocks);

#if VDO_DEBUG
	log_debug("VDO %s status  %.2f%% " FMTu64 "/" FMTu64 ".",
		  state->name, dm_percent_to_round_float(state->percent, 2),
		  status.used_blocks, status.total_blocks);
#endif

	/* VDO pool size had changed. Clear the threshold. */
	if (state->known_data_size != status.total_blocks) {
		state->percent_check = CHECK_MINIMUM;
		state->known_data_size = status.total_blocks;
		state->fails = 0;
	}

	/*
	 * Trigger action when threshold boundary is exceeded.
	 * Report 80% threshold warning when it's used above 80%.
	 * Only 100% is exception as it cannot be surpased so policy
	 * action is called for:  >50%, >55% ... >95%, 100%
	 */
	if ((state->percent > WARNING_THRESH) &&
	    (state->percent > state->percent_check))
		log_warn("WARNING: VDO %s %s is now %.2f%% full.",
			 state->name, device,
			 dm_percent_to_round_float(state->percent, 2));
	if (state->percent > CHECK_MINIMUM) {
		/* Run action when usage raised more than CHECK_STEP since the last time */
		if (state->percent > state->percent_check)
			needs_policy = 1;
		state->percent_check = (state->percent / CHECK_STEP + 1) * CHECK_STEP;
		if (state->percent_check == DM_PERCENT_100)
			state->percent_check--; /* Can't get bigger then 100% */
	} else
		state->percent_check = CHECK_MINIMUM;

	/* Reduce number of _use_policy() calls by power-of-2 factor till frequency of MAX_FAILS is reached.
	 * Avoids too high number of error retries, yet shows some status messages in log regularly.
	 * i.e. PV could have been pvmoved and VG/LV was locked for a while...
	 */
	if (state->fails) {
		if (state->fails++ <= state->max_fails) {
			log_debug("Postponing frequently failing policy (%u <= %u).",
				  state->fails - 1, state->max_fails);
			return;
		}
		if (state->max_fails < MAX_FAILS)
			state->max_fails <<= 1;
		state->fails = needs_policy = 1; /* Retry failing command */
	} else
		state->max_fails = 1; /* Reset on success */

	/* FIXME: ATM nothing can be done, drop 0, once it becomes useful */
	if (0 && needs_policy)
		_use_policy(dmt, state);
out:
	if (new_dmt)
		dm_task_destroy(new_dmt);
}

/* Handle SIGCHLD for a thread */
static void _sig_child(int signum __attribute__((unused)))
{
	/* empty SIG_IGN */;
}

/* Setup handler for SIGCHLD when executing external command
 * to get quick 'waitpid()' reaction
 * It will interrupt syscall just like SIGALRM and
 * invoke process_event().
 */
static void _init_thread_signals(struct dso_state *state)
{
	struct sigaction act = { .sa_handler = _sig_child };
	sigset_t my_sigset;

	sigemptyset(&my_sigset);

	if (sigaction(SIGCHLD, &act, NULL))
		log_warn("WARNING: Failed to set SIGCHLD action.");
	else if (sigaddset(&my_sigset, SIGCHLD))
		log_warn("WARNING: Failed to add SIGCHLD to set.");
	else if (pthread_sigmask(SIG_UNBLOCK, &my_sigset, &state->old_sigset))
		log_warn("WARNING: Failed to unblock SIGCHLD.");
	else
		state->restore_sigset = 1;
}

static void _restore_thread_signals(struct dso_state *state)
{
	if (state->restore_sigset &&
	    pthread_sigmask(SIG_SETMASK, &state->old_sigset, NULL))
		log_warn("WARNING: Failed to block SIGCHLD.");
}

int register_device(const char *device,
		    const char *uuid,
		    int major __attribute__((unused)),
		    int minor __attribute__((unused)),
		    void **user)
{
	struct dso_state *state;
	const char *cmd;
	char *str;
	char cmd_str[PATH_MAX + 128 + 2]; /* cmd ' ' vg/lv \0 */
	const char *name = "pool";

	if (!dmeventd_lvm2_init_with_pool("vdo_pool_state", state))
		goto_bad;

	state->cmd_str = "";

	/* Search for command for LVM- prefixed devices only */
	cmd = (strncmp(uuid, "LVM-", 4) == 0) ? "_dmeventd_vdo_command" : "";

	if (!dmeventd_lvm2_command(state->mem, cmd_str, sizeof(cmd_str), cmd, device))
		goto_bad;

	if (strncmp(cmd_str, "lvm ", 4) == 0) {
		if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str + 4))) {
			log_error("Failed to copy lvm VDO command.");
				goto bad;
		}
	} else if (cmd_str[0] == '/') {
		if (!(state->cmd_str = dm_pool_strdup(state->mem, cmd_str))) {
			log_error("Failed to copy VDO command.");
			goto bad;
		}

		/* Find last space before 'vg/lv' */
		if (!(str = strrchr(state->cmd_str, ' ')))
			goto inval;

		if (!(state->argv[0] = dm_pool_strndup(state->mem, state->cmd_str,
						       str - state->cmd_str))) {
			log_error("Failed to copy command.");
			goto bad;
		}

		state->argv[1] = str + 1;  /* 1 argument - vg/lv */
		_init_thread_signals(state);
	} else if (cmd[0] == 0) {
		state->name = "volume"; /* What to use with 'others?' */
	} else/* Unuspported command format */
		goto inval;

	state->pid = -1;
	state->name = name;
	*user = state;

	log_info("Monitoring VDO %s %s.", name, device);

	return 1;
inval:
	log_error("Invalid command for monitoring: %s.", cmd_str);
bad:
	log_error("Failed to monitor VDO %s %s.", name, device);

	if (state)
		dmeventd_lvm2_exit_with_pool(state);

	return 0;
}

int unregister_device(const char *device,
		      const char *uuid __attribute__((unused)),
		      int major __attribute__((unused)),
		      int minor __attribute__((unused)),
		      void **user)
{
	struct dso_state *state = *user;
	const char *name = state->name;
	int i;

	for (i = 0; !_wait_for_pid(state) && (i < 6); ++i) {
		if (i == 0)
			/* Give it 2 seconds, then try to terminate & kill it */
			log_verbose("Child %d still not finished (%s) waiting.",
				    state->pid, state->cmd_str);
		else if (i == 3) {
			log_warn("WARNING: Terminating child %d.", state->pid);
			kill(state->pid, SIGINT);
			kill(state->pid, SIGTERM);
		} else if (i == 5) {
			log_warn("WARNING: Killing child %d.", state->pid);
			kill(state->pid, SIGKILL);
		}
		sleep(1);
	}

	if (state->pid != -1)
		log_warn("WARNING: Cannot kill child %d!", state->pid);

	_restore_thread_signals(state);

	dmeventd_lvm2_exit_with_pool(state);
	log_info("No longer monitoring VDO %s %s.", name, device);

	return 1;
}
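To make the plugin's threshold policy concrete: _vdo_status_parse() reads the used and total block counts from fields 6 and 7 of the vdo target status line, the pool fullness is derived from their ratio, warnings start above WARNING_THRESH, and the next check boundary advances in CHECK_STEP increments above CHECK_MINIMUM. A small standalone sketch of the same arithmetic, using whole-percent units instead of libdevmapper's DM_PERCENT_1 scaling and a made-up block count (both assumptions for readability):

```c
#include <stdio.h>
#include <inttypes.h>

/* Simplified mirrors of the plugin's constants, in whole percent. */
#define WARNING_THRESH 80
#define CHECK_STEP      5
#define CHECK_MINIMUM  50

int main(void)
{
	/* Hypothetical "used total" counts as parsed from the vdo status line. */
	uint64_t used = 880000, total = 1048576;
	int percent = (int)(used * 100 / total);	/* 83 for these values */
	int percent_check = CHECK_MINIMUM;

	if (percent > WARNING_THRESH && percent > percent_check)
		printf("WARNING: VDO pool is now %d%% full.\n", percent);

	if (percent > CHECK_MINIMUM) {
		/* Next boundary is the next CHECK_STEP multiple above current usage,
		 * so the policy fires for >50%, >55% ... >95%, 100%. */
		percent_check = (percent / CHECK_STEP + 1) * CHECK_STEP;
		if (percent_check == 100)
			percent_check--;	/* usage cannot exceed 100% */
	} else
		percent_check = CHECK_MINIMUM;

	printf("next check boundary: %d%%\n", percent_check);	/* 85 here */
	return 0;
}
```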
@@ -1907,7 +1907,8 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv))) {
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv) ||
			      lv_has_target_type(lv->vg->cmd->mem, lv, NULL, TARGET_NAME_SNAPSHOT))) {
		if (!(r = monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor)))
			stack;
		return r;

@@ -178,7 +178,8 @@ static int _get_segment_status_from_target_params(const char *target_name,
	}

	/* Validate target_name segtype from DM table with lvm2 metadata segtype */
	if (strcmp(segtype->name, target_name) &&
	if (!lv_is_locked(seg->lv) &&
	    strcmp(segtype->name, target_name) &&
	    /* If kernel's type isn't an exact match is it compatible? */
	    (!segtype->ops->target_status_compatible ||
	     !segtype->ops->target_status_compatible(target_name))) {
lib/cache/lvmcache.c (vendored, 19 lines changed)

@@ -295,6 +295,11 @@ static void _drop_metadata(const char *vgname, int drop_precommitted)
		_saved_vg_free(svg, 0, 1);
	else
		_saved_vg_free(svg, 1, 1);

	if (!svg->saved_vg_old && !svg->saved_vg_new) {
		dm_hash_remove(_saved_vg_hash, svg->vgid);
		dm_free(svg);
	}
}

void lvmcache_save_vg(struct volume_group *vg, int precommitted)

@@ -1010,7 +1015,8 @@ static void _filter_duplicate_devs(struct cmd_context *cmd)

	dm_list_iterate_items_safe(devl, devl2, &_unused_duplicate_devs) {

		info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0);
		if (!(info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0)))
			continue;

		if (MAJOR(info->dev->dev) == dt->md_major) {
			log_debug_devs("Ignoring md component duplicate %s", dev_name(devl->dev));

@@ -1038,7 +1044,8 @@ static void _warn_duplicate_devs(struct cmd_context *cmd)

	dm_list_iterate_items_safe(devl, devl2, &_unused_duplicate_devs) {
		/* info for the preferred device that we're actually using */
		info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0);
		if (!(info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0)))
			continue;

		if (!id_write_format((const struct id *)info->dev->pvid, uuid, sizeof(uuid)))
			stack;

@@ -1344,7 +1351,7 @@ next:
 * comes directly from files.)
 */

int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const char *vgid)
int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const char *vgid, int open_rw)
{
	struct dm_list devs;
	struct device_list *devl, *devl2;

@@ -1389,7 +1396,10 @@ int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const
	/* FIXME: should we also rescan unused_duplicate_devs for devs
	   being rescanned here and then repeat resolving the duplicates? */

	label_scan_devs(cmd, cmd->filter, &devs);
	if (open_rw)
		label_scan_devs_rw(cmd, cmd->filter, &devs);
	else
		label_scan_devs(cmd, cmd->filter, &devs);

	dm_list_iterate_items_safe(devl, devl2, &devs) {
		dm_list_del(&devl->list);

@@ -2515,6 +2525,7 @@ static void _lvmcache_destroy_lockname(struct dm_hash_node *n)
static void _destroy_saved_vg(struct saved_vg *svg)
{
	_saved_vg_free(svg, 1, 1);
	dm_free(svg);
}

void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
lib/cache/lvmcache.h (vendored, 2 lines changed)

@@ -69,7 +69,7 @@ void lvmcache_allow_reads_with_lvmetad(void);
void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset);

int lvmcache_label_scan(struct cmd_context *cmd);
int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const char *vgid);
int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const char *vgid, int open_rw);

/* Add/delete a device */
struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
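The extra open_rw parameter lets a caller that is about to write metadata ask for the rescan to open the VG's devices read-write (via label_scan_devs_rw(), added further below) instead of read-only. A hedged sketch of the intended call pattern; the wrapper function name here is hypothetical, only lvmcache_label_rescan_vg() itself comes from the diff:

```c
/* Hypothetical caller: re-read labels for one VG before rewriting its metadata. */
static int _rescan_before_write(struct cmd_context *cmd,
				const char *vgname, const char *vgid)
{
	/* open_rw = 1: devices are (re)opened RDWR so the writes that follow do
	 * not force another close/reopen cycle (see the udev note in label.c). */
	if (!lvmcache_label_rescan_vg(cmd, vgname, vgid, 1))
		return 0;

	return 1;
}
```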
@@ -1462,6 +1462,7 @@ static int _init_segtypes(struct cmd_context *cmd)
	struct segment_type *segtype;
	struct segtype_library seglib = { .cmd = cmd, .lib = NULL };
	struct segment_type *(*init_segtype_array[])(struct cmd_context *cmd) = {
		init_linear_segtype,
		init_striped_segtype,
		init_zero_segtype,
		init_error_segtype,

@@ -95,6 +95,7 @@ struct cmd_context {
	char **argv;
	struct arg_values *opt_arg_values;
	struct dm_list arg_value_groups;
	int opt_count; /* total number of options (beginning with - or --) */

	/*
	 * Position args remaining after command name

@@ -154,6 +155,7 @@ struct cmd_context {
	unsigned include_shared_vgs:1;		/* report/display cmds can reveal lockd VGs */
	unsigned include_active_foreign_vgs:1;	/* cmd should process foreign VGs with active LVs */
	unsigned vg_read_print_access_error:1;	/* print access errors from vg_read */
	unsigned force_access_clustered:1;
	unsigned lockd_gl_disable:1;
	unsigned lockd_vg_disable:1;
	unsigned lockd_lv_disable:1;
@@ -189,7 +189,6 @@ static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
	} while (r == -EAGAIN);

	if (r < 0) {
		log_sys_warn("io_submit");
		_cb_free(e->cbs, cb);
		return false;
	}

@@ -320,6 +319,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
	r = lseek(fd, where, SEEK_SET);
	if (r < 0) {
		log_warn("unable to seek to position %llu", (unsigned long long) where);
		free(io);
		return false;
	}

@@ -334,6 +334,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,

	if (r < 0) {
		log_warn("io failed %d", r);
		free(io);
		return false;
	}

@@ -342,6 +343,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,

	if (len) {
		log_warn("short io %u bytes remaining", (unsigned) len);
		free(io);
		return false;
	}

@@ -557,11 +559,13 @@ static bool _init_free_list(struct bcache *cache, unsigned count, unsigned pgsiz
	if (!data)
		return false;

	cache->raw_data = data;
	cache->raw_blocks = dm_malloc(count * sizeof(*cache->raw_blocks));
	if (!cache->raw_blocks) {
		free(data);
		return false;
	}

	if (!cache->raw_blocks)
		dm_free(cache->raw_data);
	cache->raw_data = data;

	for (i = 0; i < count; i++) {
		struct block *b = cache->raw_blocks + i;

@@ -646,7 +650,6 @@ static void _complete_io(void *context, int err)
	dm_list_del(&b->list);

	if (b->error) {
		log_warn("bcache io error %d fd %d", b->error, b->fd);
		dm_list_add(&cache->errored, &b->list);

	} else {
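The _init_free_list() hunk above tightens the failure path when the second allocation fails: the first buffer is released and the function returns before any cache fields are left pointing at freed or missing memory. The same pattern in isolation, with generic names rather than lvm2's API (a sketch, not the actual bcache code):

```c
#include <stdbool.h>
#include <stdlib.h>

struct two_buf { void *raw_data; void *raw_blocks; };

static bool init_buffers(struct two_buf *c, size_t data_len, size_t blocks_len)
{
	void *data = malloc(data_len);
	if (!data)
		return false;

	void *blocks = malloc(blocks_len);
	if (!blocks) {
		free(data);		/* release the first buffer on partial failure */
		return false;
	}

	/* Only publish the pointers once both allocations have succeeded. */
	c->raw_data = data;
	c->raw_blocks = blocks;
	return true;
}
```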
@@ -367,18 +367,24 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
static int _dev_read_ahead_dev(struct device *dev, uint32_t *read_ahead)
{
	long read_ahead_long;
	int fd = dev->bcache_fd;
	int do_close = 0;

	if (dev->read_ahead != -1) {
		*read_ahead = (uint32_t) dev->read_ahead;
		return 1;
	}

	if (!dev_open_readonly(dev))
		return_0;
	if (fd <= 0) {
		if (!dev_open_readonly(dev))
			return_0;
		fd = dev_fd(dev);
		do_close = 1;
	}

	if (ioctl(dev->fd, BLKRAGET, &read_ahead_long) < 0) {
	if (ioctl(fd, BLKRAGET, &read_ahead_long) < 0) {
		log_sys_error("ioctl BLKRAGET", dev_name(dev));
		if (!dev_close_immediate(dev))
		if (do_close && !dev_close_immediate(dev))
			stack;
		return 0;
	}

@@ -389,8 +395,8 @@ static int _dev_read_ahead_dev(struct device *dev, uint32_t *read_ahead)
	log_very_verbose("%s: read_ahead is %u sectors",
			 dev_name(dev), *read_ahead);

	if (!dev_close_immediate(dev))
		stack;
	if (do_close && !dev_close_immediate(dev))
		log_sys_error("close", dev_name(dev));

	return 1;
}

@@ -405,9 +411,11 @@ static int _dev_discard_blocks(struct device *dev, uint64_t offset_bytes, uint64
	discard_range[0] = offset_bytes;
	discard_range[1] = size_bytes;

	log_debug_devs("Discarding %" PRIu64 " bytes offset %" PRIu64 " bytes on %s.",
		       size_bytes, offset_bytes, dev_name(dev));
	if (ioctl(dev->fd, BLKDISCARD, &discard_range) < 0) {
	log_debug_devs("Discarding %" PRIu64 " bytes offset %" PRIu64 " bytes on %s. %s",
		       size_bytes, offset_bytes, dev_name(dev),
		       test_mode() ? " (test mode - suppressed)" : "");

	if (!test_mode() && ioctl(dev->fd, BLKDISCARD, &discard_range) < 0) {
		log_error("%s: BLKDISCARD ioctl at offset %" PRIu64 " size %" PRIu64 " failed: %s.",
			  dev_name(dev), offset_bytes, size_bytes, strerror(errno));
		if (!dev_close_immediate(dev))
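The discard hunk above implements the "Never send any discard ioctl with test mode" change from WHATS_NEW: the debug message now notes the suppression, and the BLKDISCARD ioctl itself is skipped when test mode is active. As the existing discard_range[] assignment shows, BLKDISCARD takes a two-element uint64_t array of byte offset and byte length. A minimal sketch of the guard pattern, where the test_mode flag stands in for lvm2's internal test_mode() helper:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* BLKDISCARD */

/* Discard a byte range on an open block device fd unless only simulating.
 * Returns 0 on success (or when suppressed), -1 on ioctl error. */
static int discard_bytes(int fd, uint64_t offset_bytes, uint64_t size_bytes,
			 int test_mode)
{
	uint64_t range[2] = { offset_bytes, size_bytes };

	if (test_mode)
		return 0;	/* never issue the ioctl in "no real changes" mode */

	return ioctl(fd, BLKDISCARD, &range);
}
```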
@@ -35,6 +35,7 @@
#define DEV_BCACHE_EXCL		0x00001000      /* bcache_fd should be open EXCL */
#define DEV_FILTER_AFTER_SCAN	0x00002000	/* apply filter after bcache has data */
#define DEV_FILTER_OUT_SCAN	0x00004000	/* filtered out during label scan */
#define DEV_BCACHE_WRITE	0x00008000      /* bcache_fd is open with RDWR */

/*
 * Support for external device info.

@@ -50,12 +50,15 @@ struct pfilter {
 * by default.  The old code for it should be removed.
 */

static char* _good_device = "good";
static char* _bad_device = "bad";

/*
 * The hash table holds one of these two states
 * against each entry.
 */
#define PF_BAD_DEVICE ((void *) 1)
#define PF_GOOD_DEVICE ((void *) 2)
#define PF_BAD_DEVICE ((void *) &_good_device)
#define PF_GOOD_DEVICE ((void *) &_bad_device)

static int _init_hash(struct pfilter *pf)
{
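The persistent-filter hunk above replaces the small-integer casts with addresses of static objects as the two sentinel values stored in the hash table (note the hunk pairs the names the other way round; for pointer-identity comparison the pairing does not matter). Both variants are compared by pointer equality; the difference is that addresses of real objects are guaranteed-valid, distinct pointers, while (void *)1 and (void *)2 are not. A brief sketch of the lookup pattern with generic names, the rationale comment being an assumption rather than a statement from the hunk:

```c
#include <stdio.h>

static char *_good_device = "good";
static char *_bad_device = "bad";

/* Two distinct, valid pointers used purely as markers in a void*-valued table. */
#define PF_GOOD_DEVICE ((void *) &_good_device)
#define PF_BAD_DEVICE  ((void *) &_bad_device)

static void classify(void *state)
{
	if (state == PF_GOOD_DEVICE)
		puts("device passed the filter");
	else if (state == PF_BAD_DEVICE)
		puts("device was filtered out");
	else
		puts("device not seen yet");
}

int main(void)
{
	classify(PF_GOOD_DEVICE);
	classify(NULL);
	return 0;
}
```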
@@ -464,12 +464,24 @@ static int _scan_dev_open(struct device *dev)
	name_sl = dm_list_item(name_list, struct dm_str_list);
	name = name_sl->str;

	flags |= O_RDWR;
	flags |= O_DIRECT;
	flags |= O_NOATIME;

	if (dev->flags & DEV_BCACHE_EXCL)
	/*
	 * FIXME: udev is a train wreck when we open RDWR and close, so we
	 * need to only use RDWR when we actually need to write, and use
	 * RDONLY otherwise.  Fix, disable or scrap udev nonsense so we can
	 * just open with RDWR by default.
	 */

	if (dev->flags & DEV_BCACHE_EXCL) {
		flags |= O_EXCL;
		flags |= O_RDWR;
	} else if (dev->flags & DEV_BCACHE_WRITE) {
		flags |= O_RDWR;
	} else {
		flags |= O_RDONLY;
	}

retry_open:

@@ -897,6 +909,28 @@ int label_scan_devs(struct cmd_context *cmd, struct dev_filter *f, struct dm_lis
	return 1;
}

int label_scan_devs_rw(struct cmd_context *cmd, struct dev_filter *f, struct dm_list *devs)
{
	struct device_list *devl;
	int failed = 0;

	dm_list_iterate_items(devl, devs) {
		if (_in_bcache(devl->dev)) {
			bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
			_scan_dev_close(devl->dev);
		}

		/* _scan_dev_open will open(RDWR) when this flag is set */
		devl->dev->flags |= DEV_BCACHE_WRITE;
	}

	_scan_list(cmd, f, devs, &failed);

	/* FIXME: this function should probably fail if any devs couldn't be scanned */

	return 1;
}

int label_scan_devs_excl(struct dm_list *devs)
{
	struct device_list *devl;

@@ -1107,7 +1141,14 @@ int label_scan_open(struct device *dev)

int label_scan_open_excl(struct device *dev)
{
	if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_EXCL)) {
		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
		log_debug("Close and reopen excl %s", dev_name(dev));
		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
		_scan_dev_close(dev);
	}
	dev->flags |= DEV_BCACHE_EXCL;
	dev->flags |= DEV_BCACHE_WRITE;
	return label_scan_open(dev);
}

@@ -1122,14 +1163,15 @@ bool dev_read_bytes(struct device *dev, uint64_t start, size_t len, void *data)
	if (dev->bcache_fd <= 0) {
		/* This is not often needed, perhaps only with lvmetad. */
		if (!label_scan_open(dev)) {
			log_error("dev_read_bytes %s cannot open dev", dev_name(dev));
			log_error("Error opening device %s for reading at %llu length %u.",
				  dev_name(dev), (unsigned long long)start, (uint32_t)len);
			return false;
		}
	}

	if (!bcache_read_bytes(scan_bcache, dev->bcache_fd, start, len, data)) {
		log_error("dev_read_bytes %s at %u failed invalidate fd %d",
			  dev_name(dev), (uint32_t)start, dev->bcache_fd);
		log_error("Error reading device %s at %llu length %u.",
			  dev_name(dev), (unsigned long long)start, (uint32_t)len);
		label_scan_invalidate(dev);
		return false;
	}

@@ -1148,24 +1190,36 @@ bool dev_write_bytes(struct device *dev, uint64_t start, size_t len, void *data)
		return false;
	}

	if (!(dev->flags & DEV_BCACHE_WRITE)) {
		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
		log_debug("Close and reopen to write %s", dev_name(dev));
		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
		_scan_dev_close(dev);

		dev->flags |= DEV_BCACHE_WRITE;
		label_scan_open(dev);
	}

	if (dev->bcache_fd <= 0) {
		/* This is not often needed, perhaps only with lvmetad. */
		dev->flags |= DEV_BCACHE_WRITE;
		if (!label_scan_open(dev)) {
			log_error("dev_write_bytes %s cannot open dev", dev_name(dev));
			log_error("Error opening device %s for writing at %llu length %u.",
				  dev_name(dev), (unsigned long long)start, (uint32_t)len);
			return false;
		}
	}

	if (!bcache_write_bytes(scan_bcache, dev->bcache_fd, start, len, data)) {
		log_error("dev_write_bytes %s at %u bcache write failed invalidate fd %d",
			  dev_name(dev), (uint32_t)start, dev->bcache_fd);
		log_error("Error writing device %s at %llu length %u.",
			  dev_name(dev), (unsigned long long)start, (uint32_t)len);
		label_scan_invalidate(dev);
		return false;
	}

	if (!bcache_flush(scan_bcache)) {
		log_error("dev_write_bytes %s at %u bcache flush failed invalidate fd %d",
			  dev_name(dev), (uint32_t)start, dev->bcache_fd);
		log_error("Error writing device %s at %llu length %u.",
			  dev_name(dev), (unsigned long long)start, (uint32_t)len);
		label_scan_invalidate(dev);
		return false;
	}
@@ -1182,24 +1236,36 @@ bool dev_write_zeros(struct device *dev, uint64_t start, size_t len)
 | 
			
		||||
		return false;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (!(dev->flags & DEV_BCACHE_WRITE)) {
 | 
			
		||||
		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
 | 
			
		||||
		log_debug("Close and reopen to write %s", dev_name(dev));
 | 
			
		||||
		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
 | 
			
		||||
		_scan_dev_close(dev);
 | 
			
		||||
 | 
			
		||||
		dev->flags |= DEV_BCACHE_WRITE;
 | 
			
		||||
		label_scan_open(dev);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (dev->bcache_fd <= 0) {
 | 
			
		||||
		/* This is not often needed, perhaps only with lvmetad. */
 | 
			
		||||
		dev->flags |= DEV_BCACHE_WRITE;
 | 
			
		||||
		if (!label_scan_open(dev)) {
 | 
			
		||||
			log_error("dev_write_zeros %s cannot open dev", dev_name(dev));
 | 
			
		||||
			log_error("Error opening device %s for writing at %llu length %u.",
 | 
			
		||||
				  dev_name(dev), (unsigned long long)start, (uint32_t)len);
 | 
			
		||||
			return false;
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (!bcache_zero_bytes(scan_bcache, dev->bcache_fd, start, len)) {
 | 
			
		||||
		log_error("dev_write_zeros %s at %u bcache write failed invalidate fd %d",
 | 
			
		||||
			  dev_name(dev), (uint32_t)start, dev->bcache_fd);
 | 
			
		||||
		log_error("Error writing device %s at %llu length %u.",
 | 
			
		||||
			  dev_name(dev), (unsigned long long)start, (uint32_t)len);
 | 
			
		||||
		label_scan_invalidate(dev);
 | 
			
		||||
		return false;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (!bcache_flush(scan_bcache)) {
 | 
			
		||||
		log_error("dev_write_zeros %s at %u bcache flush failed invalidate fd %d",
 | 
			
		||||
			  dev_name(dev), (uint32_t)start, dev->bcache_fd);
 | 
			
		||||
		log_error("Error writing device %s at %llu length %u.",
 | 
			
		||||
			  dev_name(dev), (unsigned long long)start, (uint32_t)len);
 | 
			
		||||
		label_scan_invalidate(dev);
 | 
			
		||||
		return false;
 | 
			
		||||
	}
 | 
			
		||||
@@ -1216,24 +1282,36 @@ bool dev_set_bytes(struct device *dev, uint64_t start, size_t len, uint8_t val)
 | 
			
		||||
		return false;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (!(dev->flags & DEV_BCACHE_WRITE)) {
 | 
			
		||||
		/* FIXME: avoid tossing out bcache blocks just to replace fd. */
 | 
			
		||||
		log_debug("Close and reopen to write %s", dev_name(dev));
 | 
			
		||||
		bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
 | 
			
		||||
		_scan_dev_close(dev);
 | 
			
		||||
 | 
			
		||||
		dev->flags |= DEV_BCACHE_WRITE;
 | 
			
		||||
		label_scan_open(dev);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (dev->bcache_fd <= 0) {
 | 
			
		||||
		/* This is not often needed, perhaps only with lvmetad. */
 | 
			
		||||
		dev->flags |= DEV_BCACHE_WRITE;
 | 
			
		||||
		if (!label_scan_open(dev)) {
 | 
			
		||||
			log_error("dev_set_bytes %s cannot open dev", dev_name(dev));
 | 
			
		||||
			log_error("Error opening device %s for writing at %llu length %u.",
 | 
			
		||||
				  dev_name(dev), (unsigned long long)start, (uint32_t)len);
 | 
			
		||||
			return false;
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (!bcache_set_bytes(scan_bcache, dev->bcache_fd, start, len, val)) {
 | 
			
		||||
		log_error("dev_set_bytes %s at %u bcache write failed invalidate fd %d",
 | 
			
		||||
			  dev_name(dev), (uint32_t)start, dev->bcache_fd);
 | 
			
		||||
		log_error("Error writing device %s at %llu length %u.",
 | 
			
		||||
			  dev_name(dev), (unsigned long long)start, (uint32_t)len);
 | 
			
		||||
		label_scan_invalidate(dev);
 | 
			
		||||
		return false;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (!bcache_flush(scan_bcache)) {
 | 
			
		||||
		log_error("dev_set_bytes %s at %u bcache flush failed invalidate fd %d",
 | 
			
		||||
			  dev_name(dev), (uint32_t)start, dev->bcache_fd);
 | 
			
		||||
		log_error("Error writing device %s at %llu length %u.",
 | 
			
		||||
			  dev_name(dev), (unsigned long long)start, (uint32_t)len);
 | 
			
		||||
		label_scan_invalidate(dev);
 | 
			
		||||
		return false;
 | 
			
		||||
	}
 | 
			
		||||
 
 | 
			
		||||
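The hunks above extend the dev_read_bytes/dev_write_bytes/dev_write_zeros/dev_set_bytes
helpers so that writes transparently reopen the device RDWR and flush through bcache.
A minimal caller sketch follows; it is not part of the diff, _update_first_block() is a
hypothetical name, and it assumes the lvm-internal declarations from lib/label/label.h
shown in the next hunk.

	/* Hypothetical helper, for illustration only: read a 4 KiB block from the
	 * start of the device, patch one byte, and write it back.  Error logging
	 * and fd invalidation are handled inside dev_read_bytes/dev_write_bytes. */
	static int _update_first_block(struct device *dev)
	{
		char buf[4096];

		if (!dev_read_bytes(dev, 0, sizeof(buf), buf))
			return 0;

		buf[0] = 1;	/* illustrative edit */

		/* dev_write_bytes reopens the dev RDWR if needed and flushes bcache. */
		if (!dev_write_bytes(dev, 0, sizeof(buf), buf))
			return 0;

		return 1;
	}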
@@ -104,6 +104,7 @@ extern struct bcache *scan_bcache;

int label_scan(struct cmd_context *cmd);
int label_scan_devs(struct cmd_context *cmd, struct dev_filter *f, struct dm_list *devs);
int label_scan_devs_rw(struct cmd_context *cmd, struct dev_filter *f, struct dm_list *devs);
int label_scan_devs_excl(struct dm_list *devs);
void label_scan_invalidate(struct device *dev);
void label_scan_invalidate_lv(struct cmd_context *cmd, struct logical_volume *lv);

@@ -843,12 +843,13 @@ int cache_set_metadata_format(struct lv_segment *seg, cache_metadata_format_t fo

	/*
	 * If policy is unselected, but format 2 is selected, policy smq is enforced.
	 * ATM no other then smq policy is allowed to select format 2.
	 * ATM no other then smq & cleaner policy is allowed to select format 2.
	 */
	if (!seg->policy_name) {
		if (format == CACHE_METADATA_FORMAT_2)
			seg->policy_name = "smq";
	} else if (strcmp(seg->policy_name, "smq")) {
	} else if (strcmp(seg->policy_name, "smq") &&
		   strcmp(seg->policy_name, "cleaner")) {
		seg->cache_metadata_format = CACHE_METADATA_FORMAT_1;
		return 1;
	}

@@ -301,7 +301,8 @@ char *lvseg_monitor_dup(struct dm_pool *mem, const struct lv_segment *seg)
	int pending = 0, monitored = 0;
	struct lv_segment *segm = (struct lv_segment *) seg;

	if (lv_is_cow(seg->lv) && !lv_is_merging_cow(seg->lv))
	if (lv_is_cow(seg->lv) && (!lv_is_merging_cow(seg->lv) ||
				   lv_has_target_type(seg->lv->vg->cmd->mem, seg->lv, NULL, TARGET_NAME_SNAPSHOT)))
		segm = first_seg(seg->lv->snapshot->lv);

	// log_debug("Query LV:%s mon:%s segm:%s tgtm:%p  segmon:%d statusm:%d", seg->lv->name, segm->lv->name, segm->segtype->name, segm->segtype->ops->target_monitored, seg_monitored(segm), (int)(segm->status & PVMOVE));

@@ -2959,12 +2959,16 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
		       (*(alloc_state->areas + alloc_state->num_positional_areas + ix - 1 -
			  too_small_for_log_count)).used < ah->log_len)
			too_small_for_log_count++;
		ix_log_offset = alloc_state->num_positional_areas + ix - too_small_for_log_count - ah->log_area_count;
		if (ah->mirror_logs_separate &&
		    too_small_for_log_count &&
		    (too_small_for_log_count >= devices_needed))
			return 1;
		if ((alloc_state->num_positional_areas + ix) < (too_small_for_log_count + ah->log_area_count))
			return 1;
		ix_log_offset = alloc_state->num_positional_areas + ix - (too_small_for_log_count + ah->log_area_count);
	}

	if (ix + alloc_state->num_positional_areas < devices_needed +
	    (alloc_state->log_area_count_still_needed ? alloc_state->log_area_count_still_needed +
				    too_small_for_log_count : 0))
	if (ix + alloc_state->num_positional_areas < devices_needed)
		return 1;

	/*

@@ -651,8 +651,12 @@ void pvcreate_params_set_defaults(struct pvcreate_params *pp);
int vg_write(struct volume_group *vg);
int vg_commit(struct volume_group *vg);
void vg_revert(struct volume_group *vg);
struct volume_group *vg_read_internal(struct cmd_context *cmd, const char *vg_name,
				      const char *vgid, uint32_t lockd_state, uint32_t warn_flags, int *consistent);

struct volume_group *vg_read_internal(struct cmd_context *cmd, const char *vg_name, const char *vgid,
				      int write_lock_held,
				      uint32_t lockd_state,
				      uint32_t warn_flags,
				      int *consistent);

#define get_pvs( cmd ) get_pvs_internal((cmd), NULL, NULL)
#define get_pvs_perserve_vg( cmd, pv_list, vg_list ) get_pvs_internal((cmd), (pv_list), (vg_list))

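The declaration above gains a write_lock_held argument, which later hunks thread down to
_vg_read so a rescan can reopen devices read-write.  A sketch of how a read-only caller
might look with the new signature follows; it is not part of the diff, _example_vg_read()
is a hypothetical name, and the surrounding lvm-internal types are assumed.

	/* Hypothetical read-only caller: no VG write lock is held, so 0 is passed
	 * and the rescan keeps the devices open read-only. */
	static struct volume_group *_example_vg_read(struct cmd_context *cmd,
						     const char *vg_name,
						     uint32_t lockd_state,
						     uint32_t warn_flags)
	{
		int consistent = 1;

		return vg_read_internal(cmd, vg_name, NULL, 0 /* write_lock_held */,
					lockd_state, warn_flags, &consistent);
	}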
@@ -3731,6 +3731,7 @@ out:
static struct volume_group *_vg_read(struct cmd_context *cmd,
				     const char *vgname,
				     const char *vgid,
				     int write_lock_held,
				     uint32_t lockd_state,
				     uint32_t warn_flags,
				     int *consistent, unsigned precommitted)
@@ -3863,8 +3864,15 @@ static struct volume_group *_vg_read(struct cmd_context *cmd,
		if (warn_flags & SKIP_RESCAN)
			goto find_vg;
		skipped_rescan = 0;

		/*
		 * When a write lock is held, it implies we are going to be
		 * writing to the devs in the VG, so when we rescan the VG
		 * we should reopen the devices in RDWR (since they were
		 * open RDONLY from the initial scan.
		 */
		log_debug_metadata("Rescanning devices for %s", vgname);
		lvmcache_label_rescan_vg(cmd, vgname, vgid);
		lvmcache_label_rescan_vg(cmd, vgname, vgid, write_lock_held);
	} else {
		log_debug_metadata("Skipped rescanning devices for %s", vgname);
		skipped_rescan = 1;
@@ -4498,13 +4506,15 @@ static int _check_devs_used_correspond_with_vg(struct volume_group *vg)

struct volume_group *vg_read_internal(struct cmd_context *cmd,
				      const char *vgname, const char *vgid,
				      uint32_t lockd_state, uint32_t warn_flags,
				      int write_lock_held,
				      uint32_t lockd_state,
				      uint32_t warn_flags,
				      int *consistent)
{
	struct volume_group *vg;
	struct lv_list *lvl;

	if (!(vg = _vg_read(cmd, vgname, vgid, lockd_state, warn_flags, consistent, 0)))
	if (!(vg = _vg_read(cmd, vgname, vgid, write_lock_held, lockd_state, warn_flags, consistent, 0)))
		goto_out;

	if (!check_pv_dev_sizes(vg))
@@ -4612,7 +4622,7 @@ struct volume_group *vg_read_by_vgid(struct cmd_context *cmd,

	label_scan_setup_bcache();

	if (!(vg = _vg_read(cmd, vgname, vgid, 0, warn_flags, &consistent, precommitted))) {
	if (!(vg = _vg_read(cmd, vgname, vgid, 0, 0, warn_flags, &consistent, precommitted))) {
		log_error("Rescan devices to look for missing VG.");
		goto scan;
	}
@@ -4633,7 +4643,7 @@ struct volume_group *vg_read_by_vgid(struct cmd_context *cmd,
	lvmcache_label_scan(cmd);
	warn_flags |= SKIP_RESCAN;

	if (!(vg = _vg_read(cmd, vgname, vgid, 0, warn_flags, &consistent, precommitted)))
	if (!(vg = _vg_read(cmd, vgname, vgid, 0, 0, warn_flags, &consistent, precommitted)))
		goto fail;

	label_scan_destroy(cmd); /* drop bcache to close devs, keep lvmcache */
@@ -4872,7 +4882,7 @@ static int _get_pvs(struct cmd_context *cmd, uint32_t warn_flags,

		warn_flags |= WARN_INCONSISTENT;

		if (!(vg = vg_read_internal(cmd, vgname, (!vgslist) ? vgid : NULL, 0, warn_flags, &consistent))) {
		if (!(vg = vg_read_internal(cmd, vgname, (!vgslist) ? vgid : NULL, 0, 0, warn_flags, &consistent))) {
			stack;
			continue;
		}
@@ -5126,6 +5136,15 @@ int vg_flag_write_locked(struct volume_group *vg)
static int _access_vg_clustered(struct cmd_context *cmd, const struct volume_group *vg)
{
	if (vg_is_clustered(vg) && !locking_is_clustered()) {
		/*
		 * force_access_clustered is only set when forcibly
		 * converting a clustered vg to lock type none.
		 */
		if (cmd->force_access_clustered) {
			log_debug("Allowing forced access to clustered vg %s", vg->name);
			return 1;
		}

		if (!cmd->ignore_clustered_vgs)
			log_error("Skipping clustered volume group %s", vg->name);
		else
@@ -5185,7 +5204,8 @@ int vg_check_status(const struct volume_group *vg, uint64_t status)
 * VG is left unlocked on failure
 */
static struct volume_group *_recover_vg(struct cmd_context *cmd,
			 const char *vg_name, const char *vgid, uint32_t lockd_state)
			 const char *vg_name, const char *vgid,
			 int is_shared, uint32_t lockd_state)
{
	int consistent = 1;
	struct volume_group *vg;
@@ -5199,7 +5219,7 @@ static struct volume_group *_recover_vg(struct cmd_context *cmd,
	/*
	 * Convert vg lock in lvmlockd from sh to ex.
	 */
	if (!(lockd_state & LDST_FAIL) && !(lockd_state & LDST_EX)) {
	if (is_shared && !(lockd_state & LDST_FAIL) && !(lockd_state & LDST_EX)) {
		log_debug("Upgrade lvmlockd lock to repair vg %s.", vg_name);
		if (!lockd_vg(cmd, vg_name, "ex", 0, &state)) {
			log_warn("Skip repair for shared VG without exclusive lock.");
@@ -5208,7 +5228,7 @@ static struct volume_group *_recover_vg(struct cmd_context *cmd,
		lockd_state |= LDST_EX;
	}

	if (!(vg = vg_read_internal(cmd, vg_name, vgid, lockd_state, WARN_PV_READ, &consistent))) {
	if (!(vg = vg_read_internal(cmd, vg_name, vgid, 1, lockd_state, WARN_PV_READ, &consistent))) {
		unlock_vg(cmd, NULL, vg_name);
		return_NULL;
	}
@@ -5450,7 +5470,9 @@ static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const cha
	int consistent_in;
	uint32_t failure = 0;
	uint32_t warn_flags = 0;
	int is_shared = 0;
	int already_locked;
	int write_lock_held = (lock_flags == LCK_VG_WRITE);

	if ((read_flags & READ_ALLOW_INCONSISTENT) || (lock_flags != LCK_VG_WRITE))
		consistent = 0;
@@ -5482,7 +5504,7 @@ static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const cha
		warn_flags |= WARN_INCONSISTENT;

	/* If consistent == 1, we get NULL here if correction fails. */
	if (!(vg = vg_read_internal(cmd, vg_name, vgid, lockd_state, warn_flags, &consistent))) {
	if (!(vg = vg_read_internal(cmd, vg_name, vgid, write_lock_held, lockd_state, warn_flags, &consistent))) {
		if (consistent_in && !consistent) {
			failure |= FAILED_INCONSISTENT;
			goto bad;
@@ -5498,8 +5520,9 @@ static struct volume_group *_vg_lock_and_read(struct cmd_context *cmd, const cha

	/* consistent == 0 when VG is not found, but failed == FAILED_NOTFOUND */
	if (!consistent && !failure) {
		is_shared = vg_is_shared(vg);
		release_vg(vg);
		if (!(vg = _recover_vg(cmd, vg_name, vgid, lockd_state))) {
		if (!(vg = _recover_vg(cmd, vg_name, vgid, is_shared, lockd_state))) {
			if (is_orphan_vg(vg_name))
				log_error("Recovery of standalone physical volumes failed.");
			else

@@ -710,7 +710,7 @@ static int _split_mirror_images(struct logical_volume *lv,
		return 0;
	}

	if (!strcmp(lv->vg->lock_type, "dlm"))
	if (lv->vg->lock_type && !strcmp(lv->vg->lock_type, "dlm"))
		new_lv->lock_args = lv->lock_args;

	if (!dm_list_empty(&split_images)) {

@@ -566,6 +566,7 @@ static int _pv_resize(struct physical_volume *pv, struct volume_group *vg, uint6
		log_error("Size must exceed physical extent start "
			  "of %" PRIu64 " sectors on PV %s.",
			  pv_pe_start(pv), pv_dev_name(pv));
		return 0;
	}

	old_pe_count = pv->pe_count;
@@ -645,7 +646,7 @@ int pv_resize_single(struct cmd_context *cmd,
						  pv_name, display_size(cmd, new_size),
						  display_size(cmd, size)) == 'n') {
				log_error("Physical Volume %s not resized.", pv_name);
				goto_out;
				goto out;
			}

		}  else if (new_size < size)
@@ -653,7 +654,7 @@ int pv_resize_single(struct cmd_context *cmd,
						  pv_name, display_size(cmd, new_size),
						  display_size(cmd, size)) == 'n') {
				log_error("Physical Volume %s not resized.", pv_name);
				goto_out;
				goto out;
			}

		if (new_size == size)

@@ -3395,7 +3395,7 @@ int lv_raid_split(struct logical_volume *lv, int yes, const char *split_name,

	lvl->lv->name = split_name;

	if (!strcmp(lv->vg->lock_type, "dlm"))
	if (lv->vg->lock_type && !strcmp(lv->vg->lock_type, "dlm"))
		lvl->lv->lock_args = lv->lock_args;

	if (!vg_write(lv->vg)) {
@@ -3563,7 +3563,7 @@ int lv_raid_merge(struct logical_volume *image_lv)
	struct volume_group *vg = image_lv->vg;

	if (image_lv->status & LVM_WRITE) {
		log_error("%s is not read-only - refusing to merge.",
		log_error("%s cannot be merged because --trackchanges was not used.",
			  display_lvname(image_lv));
		return 0;
	}
@@ -3572,7 +3572,7 @@ int lv_raid_merge(struct logical_volume *image_lv)
		return_0;

	if (!(p = strstr(lv_name, "_rimage_"))) {
		log_error("Unable to merge non-mirror image %s.",
		log_error("Unable to merge non-raid image %s.",
			  display_lvname(image_lv));
		return 0;
	}
@@ -4526,17 +4526,18 @@ static struct possible_takeover_reshape_type _possible_takeover_reshape_types[]
	  .current_areas = 1,
	  .options = ALLOW_REGION_SIZE },

	{ .current_types  = SEG_STRIPED_TARGET, /* linear, i.e. seg->area_count = 1 */
	  .possible_types = SEG_RAID0|SEG_RAID0_META,
	  .current_areas = 1,
	  .options = ALLOW_STRIPE_SIZE },

	/* raid0* -> raid1 */
	{ .current_types  = SEG_RAID0|SEG_RAID0_META, /* seg->area_count = 1 */
	  .possible_types = SEG_RAID1,
	  .current_areas = 1,
	  .options = ALLOW_REGION_SIZE },

	/* raid5_n -> linear through interim raid1 */
	{ .current_types  = SEG_RAID5_N,
	  .possible_types = SEG_STRIPED_TARGET,
	  .current_areas = 2,
	  .options = ALLOW_NONE },

	/* striped,raid0* <-> striped,raid0* */
	{ .current_types  = SEG_STRIPED_TARGET|SEG_RAID0|SEG_RAID0_META,
	  .possible_types = SEG_STRIPED_TARGET|SEG_RAID0|SEG_RAID0_META,
@@ -4547,13 +4548,13 @@ static struct possible_takeover_reshape_type _possible_takeover_reshape_types[]
	{ .current_types  = SEG_STRIPED_TARGET|SEG_RAID0|SEG_RAID0_META,
	  .possible_types = SEG_RAID4|SEG_RAID5_N|SEG_RAID6_N_6|SEG_RAID10_NEAR,
	  .current_areas = ~0U,
	  .options = ALLOW_REGION_SIZE },
	  .options = ALLOW_REGION_SIZE|ALLOW_STRIPES },

	/* raid4,raid5_n,raid6_n_6,raid10_near -> striped/raid0* */
	{ .current_types  = SEG_RAID4|SEG_RAID5_N|SEG_RAID6_N_6|SEG_RAID10_NEAR,
	  .possible_types = SEG_STRIPED_TARGET|SEG_RAID0|SEG_RAID0_META,
	  .current_areas = ~0U,
	  .options = ALLOW_NONE },
	  .options = ALLOW_STRIPES },

	/* raid4,raid5_n,raid6_n_6 <-> raid4,raid5_n,raid6_n_6 */
	{ .current_types  = SEG_RAID4|SEG_RAID5_N|SEG_RAID6_N_6,
@@ -4640,7 +4641,8 @@ static struct possible_takeover_reshape_type *_get_possible_takeover_reshape_typ
	for ( ; pt->current_types; pt++)
		if ((seg_from->segtype->flags & pt->current_types) &&
		    (segtype_to ? (segtype_to->flags & pt->possible_types) : 1))
			if (seg_from->area_count <= pt->current_areas)
			if ((seg_from->area_count == pt->current_areas) ||
			    (seg_from->area_count > 1 && seg_from->area_count <= pt->current_areas))
				return pt;

	return NULL;
@@ -4816,7 +4818,7 @@ typedef int (*takeover_fn_t)(TAKEOVER_FN_ARGS);
/*
 * Unsupported takeover functions.
 */
static int _takeover_noop(TAKEOVER_FN_ARGS)
static int _takeover_same_layout(const struct logical_volume *lv)
{
	log_error("Logical volume %s is already of requested type %s.",
		  display_lvname(lv), lvseg_name(first_seg(lv)));
@@ -4824,6 +4826,11 @@ static int _takeover_noop(TAKEOVER_FN_ARGS)
	return 0;
}

static int _takeover_noop(TAKEOVER_FN_ARGS)
{
	return _takeover_same_layout(lv);
}

static int _takeover_unsupported(TAKEOVER_FN_ARGS)
{
	struct lv_segment *seg = first_seg(lv);
@@ -5618,7 +5625,9 @@ static int _takeover_from_linear_to_raid0(TAKEOVER_FN_ARGS)

static int _takeover_from_linear_to_raid1(TAKEOVER_FN_ARGS)
{
	return _takeover_unsupported_yet(lv, new_stripes, new_segtype);
	first_seg(lv)->region_size = new_region_size;

	return _lv_raid_change_image_count(lv, 1, 2, allocate_pvs, NULL, 1, 0);
}

static int _takeover_from_linear_to_raid10(TAKEOVER_FN_ARGS)
@@ -6102,23 +6111,34 @@ static uint64_t _raid_segtype_flag_5_to_6(const struct segment_type *segtype)
/* FIXME: do this like _conversion_options_allowed()? */
static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_from,
						 const struct segment_type **segtype,
						 uint32_t *new_image_count,
						 uint32_t *stripes,
						 int yes)
{
	uint64_t seg_flag = 0;
	struct cmd_context *cmd = seg_from->lv->vg->cmd;
	const struct segment_type *segtype_sav = *segtype;

	/* Linear -> striped request */
	if (seg_is_striped(seg_from) &&
	    seg_from->area_count == 1 &&
	    segtype_is_striped(*segtype))
		;
	/* Bail out if same RAID level is requested. */
	if (_is_same_level(seg_from->segtype, *segtype))
	else if (_is_same_level(seg_from->segtype, *segtype))
		return 1;

	log_debug("Checking LV %s requested %s segment type for convenience",
		  display_lvname(seg_from->lv), (*segtype)->name);

	/* striped/raid0 -> raid5/6 */
	if (seg_is_striped(seg_from) || seg_is_any_raid0(seg_from)) {
		/* linear -> raid*, interim/first conversion is to raid1 */
		if (seg_from->area_count == 1)
			seg_flag = SEG_RAID1;

		/* If this is any raid5 conversion request -> enforce raid5_n, because we convert from striped */
		if (segtype_is_any_raid5(*segtype) && !segtype_is_raid5_n(*segtype))
		else if (((segtype_is_striped(*segtype) && !segtype_is_any_raid0(*segtype)) || segtype_is_any_raid5(*segtype)) &&
			 !segtype_is_raid5_n(*segtype))
			seg_flag = SEG_RAID5_N;

		/* If this is any raid6 conversion request -> enforce raid6_n_6, because we convert from striped */
@@ -6143,40 +6163,71 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr

	/* raid4/raid5 -> striped/raid0/raid1/raid6/raid10 */
	} else if (seg_is_raid4(seg_from) || seg_is_any_raid5(seg_from)) {
		if (segtype_is_raid1(*segtype) &&
		    seg_from->area_count != 2) {
		if ((segtype_is_raid1(*segtype) || segtype_is_linear(*segtype)) && seg_is_raid5_n(seg_from)) {
			if (seg_from->area_count != 2) {
				log_error("Converting %s LV %s to 2 stripes first.",
					  lvseg_name(seg_from), display_lvname(seg_from->lv));
				*new_image_count = 2;
				seg_flag = SEG_RAID5_N;
			} else
				seg_flag = SEG_RAID1;

		} else if (segtype_is_raid1(*segtype) && seg_from->area_count != 2) {
			log_error("Convert %s LV %s to 2 stripes first (i.e. --stripes 1).",
				  lvseg_name(seg_from), display_lvname(seg_from->lv));
			return 0;
		}

		if (seg_is_raid4(seg_from) &&
			   segtype_is_any_raid5(*segtype) &&
			   !segtype_is_raid5_n(*segtype))
		} else if (seg_is_raid4(seg_from) &&
		         (segtype_is_linear(*segtype) || segtype_is_any_raid5(*segtype)) &&
			 !segtype_is_raid5_n(*segtype))
			seg_flag = SEG_RAID5_N;

		else if (seg_is_any_raid5(seg_from) &&
			 segtype_is_raid4(*segtype) &&
		else if (seg_is_raid5_n(seg_from) && seg_from->area_count == 2) {
			if (*stripes >= 2) {
				log_error("Converting %s LV %s to %u stripes first.",
					  lvseg_name(seg_from), display_lvname(seg_from->lv), *stripes);
				*new_image_count = *stripes + seg_from->segtype->parity_devs;
				seg_flag = SEG_RAID5_N;
			} else {
				log_error("Convert %s LV %s to minimum 3 stripes first (i.e. --stripes 2).",
					  lvseg_name(seg_from), display_lvname(seg_from->lv));
				return 0;
			}
		} else if (seg_is_any_raid5(seg_from) &&
		         (segtype_is_linear(*segtype) || segtype_is_raid4(*segtype)) &&
			 !segtype_is_raid5_n(*segtype))
			seg_flag = SEG_RAID5_N;

		else if (segtype_is_raid10(*segtype)) {
			if (seg_from->area_count < 3) {
				log_error("Convert %s LV %s to minimum 3 stripes first (i.e. --stripes 2).",
					  lvseg_name(seg_from), display_lvname(seg_from->lv));
				return 0;
			}

			seg_flag = seg_is_raid5_n(seg_from) ? SEG_RAID0_META : SEG_RAID5_N;
				if (*stripes >= 2) {
					log_error("Converting %s LV %s to %u stripes first.",
						  lvseg_name(seg_from), display_lvname(seg_from->lv), *stripes);
					*new_image_count = *stripes + seg_from->segtype->parity_devs;
					seg_flag = SEG_RAID5_N;
				} else {
					log_error("Convert %s LV %s to minimum 3 stripes first (i.e. --stripes 2).",
						  lvseg_name(seg_from), display_lvname(seg_from->lv));
					return 0;
				}
			} else
				seg_flag = seg_is_raid5_n(seg_from) ? SEG_RAID0_META : SEG_RAID5_N;

		} else if (segtype_is_any_raid6(*segtype)) {
			if (seg_from->area_count < 4) {
				log_error("Convert %s LV %s to minimum 4 stripes first (i.e. --stripes 3).",
					  lvseg_name(seg_from), display_lvname(seg_from->lv));
				return 0;
			}
			if (seg_from->area_count < 4 &&
			    seg_is_any_raid5(seg_from)) {
				if (*stripes >= 3) {
					log_error("Converting %s LV %s to %u stripes first.",
						  lvseg_name(seg_from), display_lvname(seg_from->lv), *stripes);
					*new_image_count = *stripes + seg_from->segtype->parity_devs;
					seg_flag = SEG_RAID5_LS;
				} else {
					log_error("Convert %s LV %s to minimum 4 stripes first (i.e. --stripes 3).",
						  lvseg_name(seg_from), display_lvname(seg_from->lv));
					return 0;
				}

			if (seg_is_raid4(seg_from) && !segtype_is_raid6_n_6(*segtype))
			} else if (seg_is_raid4(seg_from) && !segtype_is_raid6_n_6(*segtype))
				seg_flag = SEG_RAID6_N_6;
			else
				seg_flag = _raid_seg_flag_5_to_6(seg_from);
@@ -6193,9 +6244,9 @@ static int _set_convenient_raid145610_segtype_to(const struct lv_segment *seg_fr
		} else if (segtype_is_any_raid10(*segtype)) {
			seg_flag = seg_is_raid6_n_6(seg_from) ? SEG_RAID0_META : SEG_RAID6_N_6;

		} else if ((segtype_is_striped(*segtype) || segtype_is_any_raid0(*segtype)) &&
			   !seg_is_raid6_n_6(seg_from)) {
			seg_flag = SEG_RAID6_N_6;
		} else if (segtype_is_striped(*segtype) || segtype_is_any_raid0(*segtype)) {
			if (!seg_is_raid6_n_6(seg_from))
				seg_flag = SEG_RAID6_N_6;

		} else if (segtype_is_raid4(*segtype) && !seg_is_raid6_n_6(seg_from)) {
			seg_flag = SEG_RAID6_N_6;
@@ -6331,41 +6382,48 @@ static int _conversion_options_allowed(const struct lv_segment *seg_from,
				       int yes,
				       uint32_t new_image_count,
				       int new_data_copies, int new_region_size,
				       int stripes, unsigned new_stripe_size_supplied)
				       uint32_t *stripes, unsigned new_stripe_size_supplied)
{
	int r = 1;
	uint32_t opts;
	uint32_t count = new_image_count, opts;

	if (!new_image_count && !_set_convenient_raid145610_segtype_to(seg_from, segtype_to, yes))
	/* Linear -> linear rejection */
	if ((seg_is_linear(seg_from) || seg_is_striped(seg_from)) &&
	    seg_from->area_count == 1 &&
	    segtype_is_striped(*segtype_to) &&
	    *stripes < 2)
		return _takeover_same_layout(seg_from->lv);

	if (!new_image_count && !_set_convenient_raid145610_segtype_to(seg_from, segtype_to, &count, stripes, yes))
		return_0;

	if (new_image_count != count)
		*stripes = count - seg_from->segtype->parity_devs;

	if (!_get_allowed_conversion_options(seg_from, *segtype_to, new_image_count, &opts)) {
		log_error("Unable to convert LV %s from %s to %s.",
			  display_lvname(seg_from->lv), lvseg_name(seg_from), (*segtype_to)->name);
		if (strcmp(lvseg_name(seg_from), (*segtype_to)->name))
			log_error("Unable to convert LV %s from %s to %s.",
				  display_lvname(seg_from->lv), lvseg_name(seg_from), (*segtype_to)->name);
		else
			_takeover_same_layout(seg_from->lv);

		return 0;
	}

	if (stripes > 1 && !(opts & ALLOW_STRIPES)) {
		if (!_log_prohibited_option(seg_from, *segtype_to, "--stripes"))
			stack;
		r = 0;
	if (*stripes > 1 && !(opts & ALLOW_STRIPES)) {
		_log_prohibited_option(seg_from, *segtype_to, "--stripes");
		*stripes = seg_from->area_count;
	}

	if (new_stripe_size_supplied && !(opts & ALLOW_STRIPE_SIZE)) {
		if (!_log_prohibited_option(seg_from, *segtype_to, "-I/--stripesize"))
			stack;
		r = 0;
	}
	if (new_stripe_size_supplied && !(opts & ALLOW_STRIPE_SIZE))
		_log_prohibited_option(seg_from, *segtype_to, "-I/--stripesize");

	if (new_region_size && !(opts & ALLOW_REGION_SIZE)) {
		if (!_log_prohibited_option(seg_from, *segtype_to, "-R/--regionsize"))
			stack;
		r = 0;
	}
	if (new_region_size && new_region_size != seg_from->region_size && !(opts & ALLOW_REGION_SIZE))
		_log_prohibited_option(seg_from, *segtype_to, "-R/--regionsize");

	/* Can't reshape stripes or stripe size when performing a takeover! */
	if (!_is_same_level(seg_from->segtype, *segtype_to)) {
		if (stripes && stripes != _data_rimages_count(seg_from, seg_from->area_count))
		if (*stripes && *stripes != _data_rimages_count(seg_from, seg_from->area_count))
			log_warn("WARNING: ignoring --stripes option on takeover of %s (reshape afterwards).",
				 display_lvname(seg_from->lv));

@@ -6501,7 +6559,7 @@ int lv_raid_convert(struct logical_volume *lv,
	 */
	if (!_conversion_options_allowed(seg, &new_segtype, yes,
					 0 /* Takeover */, 0 /*new_data_copies*/, new_region_size,
					 new_stripes, new_stripe_size_supplied))
					 &stripes, new_stripe_size_supplied))
		return _log_possible_conversion_types(lv, new_segtype);

	/* https://bugzilla.redhat.com/1439399 */

@@ -22,10 +22,6 @@ struct segment_type *get_segtype_from_string(struct cmd_context *cmd,
{
	struct segment_type *segtype;

	/* FIXME Register this properly within striped.c */
	if (!strcmp(str, SEG_TYPE_NAME_LINEAR))
		str = SEG_TYPE_NAME_STRIPED;

	dm_list_iterate_items(segtype, &cmd->segtypes)
		if (!strcmp(segtype->name, str))
			return segtype;

@@ -68,6 +68,7 @@ struct dev_manager;
#define SEG_RAID6		SEG_RAID6_ZR

#define SEG_STRIPED_TARGET	(1ULL << 39)
#define SEG_LINEAR_TARGET	(1ULL << 40)

#define SEG_UNKNOWN		(1ULL << 63)

@@ -105,7 +106,7 @@ struct dev_manager;
#define SEG_TYPE_NAME_RAID6_RS_6	"raid6_rs_6"
#define SEG_TYPE_NAME_RAID6_N_6		"raid6_n_6"

#define segtype_is_linear(segtype)	(!strcmp(segtype->name, SEG_TYPE_NAME_LINEAR))
#define segtype_is_linear(segtype)	(!strcmp((segtype)->name, SEG_TYPE_NAME_LINEAR))
#define segtype_is_striped_target(segtype)	((segtype)->flags & SEG_STRIPED_TARGET ? 1 : 0)
#define segtype_is_cache(segtype)	((segtype)->flags & SEG_CACHE ? 1 : 0)
#define segtype_is_cache_pool(segtype)	((segtype)->flags & SEG_CACHE_POOL ? 1 : 0)
@@ -274,6 +275,7 @@ struct segtype_library;
int lvm_register_segtype(struct segtype_library *seglib,
			 struct segment_type *segtype);

struct segment_type *init_linear_segtype(struct cmd_context *cmd);
struct segment_type *init_striped_segtype(struct cmd_context *cmd);
struct segment_type *init_zero_segtype(struct cmd_context *cmd);
struct segment_type *init_error_segtype(struct cmd_context *cmd);

@@ -105,23 +105,30 @@ static const char * const _blacklist_maps[] = {
	"/LC_MESSAGES/",
	"gconv/gconv-modules.cache",
	"/ld-2.",		/* not using dlopen,dlsym during mlock */
	"/libaio.so.",		/* not using aio during mlock */
	"/libattr.so.",		/* not using during mlock (udev) */
	"/libblkid.so.",	/* not using lzma during mlock (selinux) */
	"/libblkid.so.",	/* not using blkid during mlock (udev) */
	"/libbz2.so.",		/* not using during mlock (udev) */
	"/libcap.so.",		/* not using during mlock (udev) */
	"/libcap.so.",		/* not using during mlock (systemd) */
	"/libdl-",		/* not using dlopen,dlsym during mlock */
	"/libdw-",		/* not using during mlock (udev) */
	"/libelf-",		/* not using during mlock (udev) */
	"/liblzma.so.",	/* not using lzma during mlock (selinux) */
	"/libgcrypt.so.",	/* not using during mlock (systemd) */
	"/libgpg-error.so.",	/* not using gpg-error during mlock (systemd) */
	"/liblz4.so.",		/* not using lz4 during mlock (systemd) */
	"/liblzma.so.",		/* not using lzma during mlock (systemd) */
	"/libmount.so.",	/* not using mount during mlock (udev) */
	"/libncurses.so.",	/* not using ncurses during mlock */
	"/libpcre.so.",	/* not using pcre during mlock (selinux) */
	"/libpcre.so.",		/* not using pcre during mlock (selinux) */
	"/libpcre2-",		/* not using pcre during mlock (selinux) */
	"/libreadline.so.",	/* not using readline during mlock */
	"/libresolv-",	/* not using during mlock (udev) */
	"/libresolv-",		/* not using during mlock (udev) */
	"/libselinux.so.",	/* not using selinux during mlock */
	"/libsepol.so.",	/* not using sepol during mlock */
	"/libsystemd.so.",	/* not using systemd during mlock */
	"/libtinfo.so.",	/* not using tinfo during mlock */
	"/libudev.so.",		/* not using udev during mlock */
	"/libuuid.so.",		/* not using uuid during mlock (blkid) */
	"/libdl-",		/* not using dlopen,dlsym during mlock */
	"/libz.so.",		/* not using during mlock (udev) */
	"/etc/selinux",		/* not using selinux during mlock */
	/* "/libdevmapper-event.so" */

@@ -230,7 +230,7 @@ static struct segtype_handler _striped_ops = {
	.destroy = _striped_destroy,
};

struct segment_type *init_striped_segtype(struct cmd_context *cmd)
static struct segment_type *_init_segtype(struct cmd_context *cmd, const char *name, uint64_t target)
{
	struct segment_type *segtype = dm_zalloc(sizeof(*segtype));

@@ -238,11 +238,20 @@ struct segment_type *init_striped_segtype(struct cmd_context *cmd)
		return_NULL;

	segtype->ops = &_striped_ops;
	segtype->name = SEG_TYPE_NAME_STRIPED;
	segtype->flags = SEG_STRIPED_TARGET |
	    SEG_CAN_SPLIT | SEG_AREAS_STRIPED;
	segtype->name = name;
	segtype->flags = target | SEG_CAN_SPLIT | SEG_AREAS_STRIPED;

	log_very_verbose("Initialised segtype: %s", segtype->name);

	return segtype;
}

struct segment_type *init_striped_segtype(struct cmd_context *cmd)
{
	return _init_segtype(cmd, SEG_TYPE_NAME_STRIPED, SEG_STRIPED_TARGET);
}


struct segment_type *init_linear_segtype(struct cmd_context *cmd)
{
	return _init_segtype(cmd, SEG_TYPE_NAME_LINEAR, SEG_LINEAR_TARGET);
}

@@ -1763,7 +1763,7 @@ static int _mountinfo_parse_line(const char *line, unsigned *maj, unsigned *min,
			return 0;
		}
		devmapper += 12; /* skip fixed prefix */
		for (i = 0; devmapper[i] && devmapper[i] != ' ' && i < sizeof(root); ++i)
		for (i = 0; devmapper[i] && devmapper[i] != ' ' && i < sizeof(root)-1; ++i)
			root[i] = devmapper[i];
		root[i] = 0;
		_unmangle_mountinfo_string(root, buf);

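The hunk above fixes an off-by-one in the mountinfo parser: the copy loop now stops at
sizeof(root)-1 so the terminating NUL written afterwards stays inside the buffer.  A
small standalone illustration of the same pattern, using hypothetical names (copy_token,
dst, src) that are not taken from the source:

	#include <stdio.h>
	#include <stddef.h>

	/* Copy src into dst up to the first space, always leaving room for the NUL. */
	static void copy_token(char *dst, size_t dst_size, const char *src)
	{
		size_t i;

		for (i = 0; src[i] && src[i] != ' ' && i < dst_size - 1; ++i)
			dst[i] = src[i];
		dst[i] = 0;
	}

	int main(void)
	{
		char root[8];

		copy_token(root, sizeof(root), "0123456789 rest");
		printf("%s\n", root);	/* prints "0123456" */
		return 0;
	}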
@@ -40,6 +40,11 @@ filesystem.
Unmount ext2/ext3/ext4 filesystem before doing resize.
.
.HP
.BR -l | --lvresize
.br
Resize given device if it is LVM device.
.
.HP
.BR -f | --force
.br
Bypass some sanity checks.

@@ -475,7 +475,7 @@ Split images from a raid1 or mirror LV and use them to create a new LV.
.RE
-

Split images from a raid1 LV and track changes to origin.
Split images from a raid1 LV and track changes to origin for later merge.
.br
.P
\fBlvconvert\fP \fB--splitmirrors\fP \fINumber\fP \fB--trackchanges\fP \fILV\fP\fI_cache_raid1\fP
@@ -1281,6 +1281,8 @@ Before the separation, the cache is flushed. Also see --uncache.
Splits the specified number of images from a raid1 or mirror LV
and uses them to create a new LV. If --trackchanges is also specified,
changes to the raid1 LV are tracked while the split LV remains detached.
If --name is specified, then the images are permanently split from the
original LV and changes are not tracked.
.ad b
.HP
.ad l
@@ -1354,10 +1356,12 @@ The name of a thin pool LV.
.br
Can be used with --splitmirrors on a raid1 LV. This causes
changes to the original raid1 LV to be tracked while the split images
remain detached. This allows the read-only detached image(s) to be
merged efficiently back into the raid1 LV later. Only the regions with
changed data are resynchronized during merge. (This option only applies
when using the raid1 LV type.)
remain detached. This is a temporary state that allows the read-only
detached image to be merged efficiently back into the raid1 LV later.
Only the regions with changed data are resynchronized during merge.
While a raid1 LV is tracking changes, operations on it are limited to
merging the split image (see --mergemirrors) or permanently splitting
the image (see --splitmirrors with --name.
.ad b
.HP
.ad l

@@ -84,8 +84,8 @@ For default settings, see lvmlockd -h.
 | 
			
		||||
 | 
			
		||||
.SS Initial set up
 | 
			
		||||
 | 
			
		||||
Using LVM with lvmlockd for the first time includes some one-time set up
 | 
			
		||||
steps:
 | 
			
		||||
Setting up LVM to use lvmlockd and a shared VG for the first time includes
 | 
			
		||||
some one time set up steps:
 | 
			
		||||
 | 
			
		||||
.SS 1. choose a lock manager
 | 
			
		||||
 | 
			
		||||
@@ -94,7 +94,7 @@ steps:
 | 
			
		||||
If dlm (or corosync) are already being used by other cluster
 | 
			
		||||
software, then select dlm.  dlm uses corosync which requires additional
 | 
			
		||||
configuration beyond the scope of this document.  See corosync and dlm
 | 
			
		||||
documentation for instructions on configuration, setup and usage.
 | 
			
		||||
documentation for instructions on configuration, set up and usage.
 | 
			
		||||
 | 
			
		||||
.I sanlock
 | 
			
		||||
.br
 | 
			
		||||
@@ -117,7 +117,9 @@ Assign each host a unique host_id in the range 1-2000 by setting
 | 
			
		||||
 | 
			
		||||
.SS 3. start lvmlockd
 | 
			
		||||
 | 
			
		||||
Use a unit/init file, or run the lvmlockd daemon directly:
 | 
			
		||||
Start the lvmlockd daemon.
 | 
			
		||||
.br
 | 
			
		||||
Use systemctl, a cluster resource agent, or run directly, e.g.
 | 
			
		||||
.br
 | 
			
		||||
systemctl start lvm2-lvmlockd
 | 
			
		||||
 | 
			
		||||
@@ -125,14 +127,17 @@ systemctl start lvm2-lvmlockd
 | 
			
		||||
 | 
			
		||||
.I sanlock
 | 
			
		||||
.br
 | 
			
		||||
Use unit/init files, or start wdmd and sanlock daemons directly:
 | 
			
		||||
Start the sanlock and wdmd daemons.
 | 
			
		||||
.br
 | 
			
		||||
Use systemctl or run directly, e.g.
 | 
			
		||||
.br
 | 
			
		||||
systemctl start wdmd sanlock
 | 
			
		||||
 | 
			
		||||
.I dlm
 | 
			
		||||
.br
 | 
			
		||||
Follow external clustering documentation when applicable, or use
 | 
			
		||||
unit/init files:
 | 
			
		||||
Start the dlm and corosync daemons.
 | 
			
		||||
.br
 | 
			
		||||
Use systemctl, a cluster resource agent, or run directly, e.g.
 | 
			
		||||
.br
 | 
			
		||||
systemctl start corosync dlm
 | 
			
		||||
 | 
			
		||||
@@ -141,18 +146,17 @@ systemctl start corosync dlm
 | 
			
		||||
vgcreate --shared <vgname> <devices>
 | 
			
		||||
 | 
			
		||||
The shared option sets the VG lock type to sanlock or dlm depending on
 | 
			
		||||
which lock manager is running.  LVM commands will perform locking for the
 | 
			
		||||
VG using lvmlockd.  lvmlockd will use the chosen lock manager.
 | 
			
		||||
which lock manager is running.  LVM commands acquire locks from lvmlockd,
 | 
			
		||||
and lvmlockd uses the chosen lock manager.
 | 
			
		||||
 | 
			
		||||
.SS 6. start VG on all hosts
 | 
			
		||||
 | 
			
		||||
vgchange --lock-start
 | 
			
		||||
 | 
			
		||||
lvmlockd requires shared VGs to be started before they are used.  This is
 | 
			
		||||
a lock manager operation to start (join) the VG lockspace, and it may take
 | 
			
		||||
some time.  Until the start completes, locks for the VG are not available.
 | 
			
		||||
LVM commands are allowed to read the VG while start is in progress.  (A
 | 
			
		||||
unit/init file can also be used to start VGs.)
 | 
			
		||||
Shared VGs must be started before they are used.  Starting the VG performs
 | 
			
		||||
lock manager initialization that is necessary to begin using locks (i.e.
 | 
			
		||||
creating and joining a lockspace).  Starting the VG may take some time,
 | 
			
		||||
and until the start completes the VG may not be modified or activated.
 | 
			
		||||
 | 
			
		||||
.SS 7. create and activate LVs
 | 
			
		||||
 | 
			
		||||
@@ -168,13 +172,10 @@ multiple hosts.)
 | 
			
		||||
 | 
			
		||||
.SS Normal start up and shut down
 | 
			
		||||
 | 
			
		||||
After initial set up, start up and shut down include the following general
 | 
			
		||||
steps.  They can be performed manually or using the system service
 | 
			
		||||
manager.
 | 
			
		||||
After initial set up, start up and shut down include the following steps.
 | 
			
		||||
They can be performed directly or may be automated using systemd or a
 | 
			
		||||
cluster resource manager/agents.
 | 
			
		||||
 | 
			
		||||
\[bu]
 | 
			
		||||
start lvmetad
 | 
			
		||||
.br
 | 
			
		||||
\[bu]
 | 
			
		||||
start lvmlockd
 | 
			
		||||
.br
 | 
			
		||||
@@ -202,114 +203,69 @@ stop lock manager
 | 
			
		||||
\[bu]
 | 
			
		||||
stop lvmlockd
 | 
			
		||||
.br
 | 
			
		||||
\[bu]
 | 
			
		||||
stop lvmetad
 | 
			
		||||
.br
 | 
			
		||||
 | 
			
		||||
.P
 | 
			
		||||
 | 
			
		||||
.SH TOPICS
 | 
			
		||||
 | 
			
		||||
.SS VG access control
 | 
			
		||||
.SS Protecting VGs on shared devices
 | 
			
		||||
 | 
			
		||||
The following terms are used to describe different forms of VG access
 | 
			
		||||
control.
 | 
			
		||||
The following terms are used to describe the different ways of accessing
 | 
			
		||||
VGs on shared devices.
 | 
			
		||||
 | 
			
		||||
.I "lockd VG"
 | 
			
		||||
.I "shared VG"
 | 
			
		||||
 | 
			
		||||
A "lockd VG" is a shared VG that has a "lock type" of dlm or sanlock.
 | 
			
		||||
Using it requires lvmlockd.  These VGs exist on shared storage that is
 | 
			
		||||
visible to multiple hosts.  LVM commands use lvmlockd to perform locking
 | 
			
		||||
for these VGs when they are used.
 | 
			
		||||
A shared VG exists on shared storage that is visible to multiple hosts.
 | 
			
		||||
LVM acquires locks through lvmlockd to coordinate access to shared VGs.
 | 
			
		||||
A shared VG has lock_type "dlm" or "sanlock", which specifies the lock
 | 
			
		||||
manager lvmlockd will use.
 | 
			
		||||
 | 
			
		||||
If the lock manager for the lock type is not available (e.g. not started
 | 
			
		||||
or failed), lvmlockd is unable to acquire locks for LVM commands.  LVM
 | 
			
		||||
commands that only read the VG will generally be allowed to continue
 | 
			
		||||
without locks in this case (with a warning).  Commands to modify or
 | 
			
		||||
activate the VG will fail without the necessary locks.
 | 
			
		||||
When the lock manager for the lock type is not available (e.g. not started
 | 
			
		||||
or failed), lvmlockd is unable to acquire locks for LVM commands.  In this
 | 
			
		||||
situation, LVM commands are only allowed to read and display the VG;
 | 
			
		||||
changes and activation will fail.
 | 
			
		||||
 | 
			
		||||
.I "local VG"
 | 
			
		||||
 | 
			
		||||
A "local VG" is meant to be used by a single host.  It has no lock type or
 | 
			
		||||
lock type "none".  LVM commands and lvmlockd do not perform locking for
these VGs.  A local VG typically exists on local (non-shared) devices and
cannot be used concurrently from different hosts.

If a local VG does exist on shared devices, it should be owned by a single
host by having the system ID set, see
.BR lvmsystemid (7).
The host with a matching system ID can use the local VG and other hosts
will ignore it.  A VG with no lock type and no system ID should be
excluded from all but one host using lvm.conf filters.  Without any of
these protections, a local VG on shared devices can be easily damaged or
destroyed.

.I "clvm VG"

A clvm VG (or clustered VG) is a VG on shared storage (like a shared VG)
that requires clvmd for clustering and locking.  See below for converting
a clvm/clustered VG to a shared VG.


.SS shared VGs from hosts not using lvmlockd

Hosts that do not use shared VGs will not be running lvmlockd.  In this
case, shared VGs that are still visible to the host will be ignored
(like foreign VGs, see
.BR lvmsystemid (7).)

The --shared option for reporting and display commands causes shared VGs
to be displayed on a host not using lvmlockd, like the --foreign option
does for foreign VGs.


.SS vgcreate comparison

The type of VG access control is specified in the vgcreate command.
See
.BR vgcreate (8)
for all vgcreate options.

.B vgcreate <vgname> <devices>

.IP \[bu] 2
Creates a local VG with the local host's system ID when neither lvmlockd nor clvm is configured.
.IP \[bu] 2
Creates a local VG with the local host's system ID when lvmlockd is configured.
.IP \[bu] 2
Creates a clvm VG when clvm is configured.

.P

.B vgcreate --shared <vgname> <devices>
.IP \[bu] 2
Requires lvmlockd to be configured and running.
.IP \[bu] 2
Creates a lockd VG with lock type sanlock|dlm depending on which lock
manager is running.
.IP \[bu] 2
LVM commands request locks from lvmlockd to use the VG.
.IP \[bu] 2
lvmlockd obtains locks from the selected lock manager.

.P

.B vgcreate -c|--clustered y <vgname> <devices>
.IP \[bu] 2
Requires clvm to be configured and running.
.IP \[bu] 2
Creates a clvm VG with the "clustered" flag.
.IP \[bu] 2
LVM commands request locks from clvmd to use the VG.

.P

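The following is a minimal sketch of the forms compared above; the VG name
and device paths are placeholders, not values taken from this page.
.nf
# shared VG: requires lvmlockd and a lock manager (sanlock or dlm)
vgcreate --shared vg1 /dev/sdb /dev/sdc
# on each additional host that will use the VG
vgchange --lock-start vg1
.fi
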
.SS creating the first sanlock VG

Creating the first sanlock VG is not protected by locking, so it requires
special attention.  This is because sanlock locks exist on storage within
the VG, so they are not available until after the VG is created.  The
first sanlock VG that is created will automatically contain the "global
lock".  Be aware of the following special considerations:

.IP \[bu] 2
The first vgcreate command needs to be given the path to a device that has
@@ -324,54 +280,48 @@ to be accessible to all hosts that will use sanlock shared VGs.  All hosts
will need to use the global lock from the first sanlock VG.

.IP \[bu] 2
The device and VG name used by the initial vgcreate will not be protected
from concurrent use by another vgcreate on another host.

See below for more information about managing the sanlock global lock.


.SS using shared VGs

There are some special considerations when using shared VGs.

When use_lvmlockd is first enabled in lvm.conf, and before the first
shared VG is created, no global lock will exist.  In this initial state,
LVM commands try and fail to acquire the global lock, producing a warning,
and some commands are disallowed.  Once the first shared VG is created,
the global lock will be available, and LVM will be fully operational.

When a new shared VG is created, its lockspace is automatically started on
the host that creates it.  Other hosts need to run 'vgchange --lock-start'
to start the new VG before they can use it.

From the 'vgs' command, shared VGs are indicated by "s" (for shared) in
the sixth attr field, and by "shared" in the "--options shared" report
field.  The specific lock type and lock args for a shared VG can be
displayed with 'vgs -o+locktype,lockargs'.

Shared VGs need to be "started" and "stopped", unlike other types of VGs.
See the following section for a full description of starting and stopping.

Removing a shared VG will fail if other hosts have the VG started.  Run
vgchange --lock-stop <vgname> on all other hosts before vgremove.  (It may
take several seconds before vgremove recognizes that all hosts have
stopped a sanlock VG.)
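
For example (the VG name is a placeholder), the stop-then-remove sequence
described above might be:
.nf
# on each of the other hosts
vgchange --lock-stop vg1
# on the host removing the VG
vgremove vg1
.fi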

.SS starting and stopping VGs

Starting a shared VG (vgchange --lock-start) causes the lock manager to
start (join) the lockspace for the VG on the host where it is run.  This
makes locks for the VG available to LVM commands on the host.  Before a VG
is started, only LVM commands that read/display the VG are allowed to
continue without locks (and with a warning).

Stopping a shared VG (vgchange --lock-stop) causes the lock manager to
stop (leave) the lockspace for the VG on the host where it is run.  This
makes locks for the VG inaccessible to the host.  A VG cannot be stopped
while it has active LVs.
@@ -380,7 +330,7 @@ When using the lock type sanlock, starting a VG can take a long time
(potentially minutes if the host was previously shut down without cleanly
stopping the VG.)

A shared VG can be started after all the following are true:
.br
\[bu]
lvmlockd is running
@@ -392,9 +342,9 @@ the lock manager is running
the VG's devices are visible on the system
.br

A shared VG can be stopped if all LVs are deactivated.

All shared VGs can be started/stopped using:
.br
vgchange --lock-start
.br
@@ -413,12 +363,12 @@ vgchange --lock-start --lock-opt nowait ...

lvmlockd can be asked directly to stop all lockspaces:
.br
lvmlockctl -S|--stop-lockspaces

To start only selected shared VGs, use the lvm.conf
activation/lock_start_list.  When defined, only VG names in this list are
started by vgchange.  If the list is not defined (the default), all
visible shared VGs are started.  To start only "vg1", use the following
lvm.conf configuration:

.nf
@@ -441,7 +391,7 @@ The "auto" option causes the command to follow the lvm.conf
activation/auto_lock_start_list.  If auto_lock_start_list is undefined,
all VGs are started, just as if the auto option was not used.

When auto_lock_start_list is defined, it lists the shared VGs that should
be started by the auto command.  VG names that do not match an item in the
list will be ignored by the auto start command.

@@ -449,23 +399,20 @@ list will be ignored by the auto start command.
commands, i.e. with or without the auto option.  When the lock_start_list
is defined, only VGs matching a list item can be started with vgchange.)

The auto_lock_start_list allows a user to select certain shared VGs that
should be automatically started by the system (or indirectly, those that
should not).

To use auto activation of lockd LVs (see auto_activation_volume_list),
auto starting of the corresponding lockd VGs is necessary.
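
As a minimal sketch of how the lists mentioned above might be combined in
lvm.conf (the VG name "vg1" is only an example):
.nf
activation {
    lock_start_list = [ "vg1" ]
    auto_lock_start_list = [ "vg1" ]
    auto_activation_volume_list = [ "vg1" ]
}
.fi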


.SS internal command locking

To optimize the use of LVM with lvmlockd, be aware of the three kinds of
locks and when they are used:

.I Global lock

The global lock is associated with global information, which is information
not isolated to a single VG.  This includes:

\[bu]
The global VG namespace.
@@ -490,61 +437,58 @@ acquired.

.I VG lock

A VG lock is associated with each shared VG.  The VG lock is acquired in
shared mode to read the VG and in exclusive mode to change the VG or
activate LVs.  This lock serializes access to a VG with all other LVM
commands accessing the VG from all hosts.

The command 'vgs' will not only acquire the GL lock to read the list of
all VG names, but will acquire the VG lock for each VG prior to reading
it.

The command 'vgs <vgname>' does not acquire the global lock (it does not
need the list of all VG names), but will acquire the VG lock on each VG
name argument.

.I LV lock

An LV lock is acquired before the LV is activated, and is released after
the LV is deactivated.  If the LV lock cannot be acquired, the LV is not
activated.  (LV locks are persistent and remain in place when the
activation command is done.  Global and VG locks are transient, and are
held only while an LVM command is running.)

.I lock retries

If a request for a Global or VG lock fails due to a lock conflict with
another host, lvmlockd automatically retries for a short time before
returning a failure to the LVM command.  If those retries are
insufficient, the LVM command will retry the entire lock request a number
of times specified by global/lvmlockd_lock_retries before failing.  If a
request for an LV lock fails due to a lock conflict, the command fails
immediately.
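
A sketch of the lvm.conf setting named above (the value shown is only an
illustration, not necessarily the default):
.nf
global {
    lvmlockd_lock_retries = 3
}
.fi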


.SS managing the global lock in sanlock VGs

The global lock exists in one of the sanlock VGs.  The first sanlock VG
created will contain the global lock.  Subsequent sanlock VGs will each
contain a disabled global lock that can be enabled later if necessary.

The VG containing the global lock must be visible to all hosts using
sanlock VGs.  For this reason, it can be useful to create a small sanlock
VG, visible to all hosts, and dedicated to just holding the global lock.
While not required, this strategy can help to avoid difficulty in the
future if VGs are moved or removed.

The vgcreate command typically acquires the global lock, but in the case
of the first sanlock VG, there will be no global lock to acquire until the
first vgcreate is complete.  So, creating the first sanlock VG is a
special case that skips the global lock.

vgcreate determines that it's creating the first sanlock VG when no other
sanlock VGs are visible on the system.  It is possible that other sanlock
VGs do exist, but are not visible when vgcreate checks for them.  In this
case, vgcreate will create a new sanlock VG with the global lock enabled.
When another VG containing a global lock appears, lvmlockd will then
see more than one VG with a global lock enabled.  LVM commands will report
that there are duplicate global locks.

If the situation arises where more than one sanlock VG contains a global
lock, the global lock should be manually disabled in all but one of them
@@ -562,8 +506,8 @@ VGs with the command:

lvmlockctl --gl-enable <vgname>

(Using a small sanlock VG dedicated to holding the global lock can avoid
the case where the global lock must be manually enabled after a vgremove.)


.SS internal lvmlock LV
@@ -580,8 +524,8 @@ device, then use vgextend to add other devices.

.SS LV activation

In a shared VG, LV activation involves locking through lvmlockd, and the
following values are possible with lvchange/vgchange -a:

.IP \fBy\fP|\fBey\fP
The command activates the LV in exclusive mode, allowing a single host
@@ -602,10 +546,6 @@ The shared mode is intended for a multi-host/cluster application or
file system.
LV types that cannot be used concurrently
from multiple hosts include thin, cache, raid, and snapshot.

.IP \fBn\fP
The command deactivates the LV.  After deactivating the LV, the command
@@ -660,7 +600,7 @@ with the expiring lease before other hosts can acquire its locks.

When the sanlock daemon detects that the lease storage is lost, it runs
the command lvmlockctl --kill <vgname>.  This command emits a syslog
message stating that lease storage is lost for the VG, and LVs must be
immediately deactivated.

If no LVs are active in the VG, then the lockspace with an expiring lease
@@ -672,10 +612,10 @@ If the VG has active LVs when the lock storage is lost, the LVs must be
quickly deactivated before the lockspace lease expires.  After all LVs are
deactivated, run lvmlockctl --drop <vgname> to clear the expiring
lockspace from lvmlockd.  If all LVs in the VG are not deactivated within
about 40 seconds, sanlock uses wdmd and the local watchdog to reset the
host.  The machine reset is effectively a severe form of "deactivating"
LVs before they can be activated on other hosts.  The reset is considered
a better alternative than having LVs used by multiple hosts at once, which
could easily damage or destroy their content.

In the future, the lvmlockctl kill command may automatically attempt to
@@ -687,8 +627,7 @@ sanlock resets the machine.

If the sanlock daemon fails or exits while a lockspace is started, the
local watchdog will reset the host.  This is necessary to protect any
application resources that depend on sanlock leases.


.SS changing dlm cluster name
@@ -768,14 +707,14 @@ Start the VG on hosts to use it:
vgchange --lock-start <vgname>


.SS changing a local VG to a shared VG

All LVs must be inactive to change the lock type.

lvmlockd must be configured and running as described in USAGE.

.IP \[bu] 2
Change a local VG to a shared VG with the command:
.br
vgchange --lock-type sanlock|dlm <vgname>

@@ -786,7 +725,7 @@ vgchange --lock-start <vgname>

.P
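
Putting those steps together (the VG name and lock type are placeholders),
the conversion might look like:
.nf
vgchange --lock-type sanlock vg1
vgchange --lock-start vg1
.fi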

.SS changing a shared VG to a local VG

All LVs must be inactive to change the lock type.

@@ -812,16 +751,16 @@ type can be forcibly changed to none with:

vgchange --lock-type none --lock-opt force <vgname>

To change a VG from one lock type to another (i.e. between sanlock and
dlm), first change it to a local VG, then to the new type.


.SS changing a clvm/clustered VG to a shared VG

All LVs must be inactive to change the lock type.

First change the clvm/clustered VG to a local VG.  Within a running clvm
cluster, change a clustered VG to a local VG with the command:

vgchange -cn <vgname>

@@ -829,18 +768,15 @@ If the clvm cluster is no longer running on any nodes, then extra options
can be used to forcibly make the VG local.  Caution: this is only safe if
all nodes have stopped using the VG:

vgchange --lock-type none --lock-opt force <vgname>

After the VG is local, follow the steps described in "changing a local VG
to a shared VG".
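
Combining the steps above into one hedged example (the VG name and lock type
are placeholders):
.nf
# within the running clvm cluster
vgchange -cn vg1
# then, with lvmlockd configured and running
vgchange --lock-type sanlock vg1
vgchange --lock-start vg1
.fi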


.SS limitations of shared VGs

Things that do not yet work in shared VGs:
.br
\[bu]
using external origins for thin LVs
@@ -860,22 +796,22 @@ vgsplit and vgmerge (convert to a local VG to do this)

.SS lvmlockd changes from clvmd

(See above for converting an existing clvm VG to a shared VG.)

While lvmlockd and clvmd are entirely different systems, LVM command usage
remains similar.  Differences are more notable when using lvmlockd's
sanlock option.

Visible usage differences between shared VGs (using lvmlockd) and
clvm/clustered VGs (using clvmd):

.IP \[bu] 2
lvm.conf must be configured to use either lvmlockd (use_lvmlockd=1) or
clvmd (locking_type=3), but not both.

.IP \[bu] 2
vgcreate --shared creates a shared VG, and vgcreate --clustered y
creates a clvm/clustered VG.

.IP \[bu] 2
lvmlockd adds the option of using sanlock for locking, avoiding the
@@ -896,11 +832,11 @@ lvmlockd works with thin and cache pools and LVs.
lvmlockd works with lvmetad.

.IP \[bu] 2
lvmlockd saves the cluster name for a shared VG using dlm.  Only hosts in
the matching cluster can use the VG.

.IP \[bu] 2
lvmlockd requires starting/stopping shared VGs with vgchange --lock-start
and --lock-stop.

.IP \[bu] 2
@@ -923,7 +859,7 @@ reporting option lock_args to view the corresponding metadata fields.

.IP \[bu] 2
In the 'vgs' command's sixth VG attr field, "s" for "shared" is displayed
for shared VGs.

.IP \[bu] 2
If lvmlockd fails or is killed while in use, locks it held remain but are

@@ -346,9 +346,9 @@ of the foreign VG to its own.  See Overriding system ID above.

.SS shared VGs

A shared VG has no system ID set, allowing multiple hosts to use it
via lvmlockd.  Changing a VG to shared will clear the existing
system ID.  Applicable only if LVM is compiled with lvmlockd support.


.SS clustered VGs

@@ -2,6 +2,7 @@
Description=LVM2 metadata daemon socket
Documentation=man:lvmetad(8)
DefaultDependencies=no
Conflicts=shutdown.target

[Socket]
ListenStream=@DEFAULT_RUN_DIR@/lvmetad.socket

@@ -2,6 +2,7 @@
Description=LVM2 poll daemon socket
Documentation=man:lvmpolld(8)
DefaultDependencies=no
Conflicts=shutdown.target

[Socket]
ListenStream=@DEFAULT_RUN_DIR@/lvmpolld.socket

@@ -25,6 +25,8 @@ TESTNAME=${0##*/}
PS4='#${BASH_SOURCE[0]##*/}:${LINENO}+ '
export TESTNAME PS4

LVM_TEST_FLAVOUR=${LVM_TEST_FLAVOUR-}

LVM_TEST_BACKING_DEVICE=${LVM_TEST_BACKING_DEVICE-}
LVM_TEST_DEVDIR=${LVM_TEST_DEVDIR-}
LVM_TEST_NODEBUG=${LVM_TEST_NODEBUG-}
@@ -49,9 +51,9 @@ SKIP_WITH_LVMPOLLD=${SKIP_WITH_LVMPOLLD-}
SKIP_WITH_LVMLOCKD=${SKIP_WITH_LVMLOCKD-}
SKIP_ROOT_DM_CHECK=${SKIP_ROOT_DM_CHECK-}

test -n "$LVM_TEST_FLAVOUR" || { echo "NOTE: Empty flavour">&2; initskip; }
test -f "lib/flavour-$LVM_TEST_FLAVOUR" || { echo "NOTE: Flavour '$LVM_TEST_FLAVOUR' does not exist">&2; initskip; }
. "lib/flavour-$LVM_TEST_FLAVOUR"

test -n "$SKIP_WITHOUT_CLVMD" && test "$LVM_TEST_LOCKING" -ne 3 && initskip
test -n "$SKIP_WITH_CLVMD" && test "$LVM_TEST_LOCKING" = 3 && initskip

@@ -17,7 +17,7 @@ SKIP_WITH_LVMPOLLD=1

aux have_raid 1 3 2 || skip
v1_9_0=0
aux have_raid 1 9 0 && v1_9_0=1

aux prepare_vg 8
get_devs

test/shell/lvconvert-raid-reshape-linear_to_raid6-single-type.sh (new file, 102 lines)
@@ -0,0 +1,102 @@
#!/usr/bin/env bash

# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

. lib/inittest

# Ensure expected default region size
aux lvmconf 'activation/raid_region_size = 512'

which mkfs.ext4 || skip
aux have_raid 1 13 1 || skip

# Temporarily skip reshape tests on single-core CPUs until there's a fix for
# https://bugzilla.redhat.com/1443999 - AGK 2017/04/20
aux have_multi_core || skip
aux prepare_vg 5

#
# Test multi step linear -> striped conversion
#

# Create linear LV
lvcreate -aey -L 16M -n $lv $vg
check lv_field $vg/$lv segtype "linear"
check lv_field $vg/$lv stripes 1
check lv_field $vg/$lv data_stripes 1
echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert linear -> raid1 (takeover)
lvconvert -y --type raid6 --stripes 3 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_field $vg/$lv segtype "raid1"
check lv_field $vg/$lv stripes 2
check lv_field $vg/$lv data_stripes 2
check lv_field $vg/$lv regionsize "128.00k"
aux wait_for_sync $vg $lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert raid1 -> raid5_ls (takeover)
lvconvert -y --type raid6 --stripes 3 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_field $vg/$lv segtype "raid5_ls"
check lv_field $vg/$lv stripes 2
check lv_field $vg/$lv data_stripes 1
check lv_field $vg/$lv stripesize "64.00k"
check lv_field $vg/$lv regionsize "128.00k"

# Convert raid5_ls adding stripes (reshape)
lvconvert -y --type raid6 --stripes 3 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_first_seg_field $vg/$lv segtype "raid5_ls"
check lv_first_seg_field $vg/$lv stripes 4
check lv_first_seg_field $vg/$lv data_stripes 3
check lv_first_seg_field $vg/$lv stripesize "64.00k"
check lv_first_seg_field $vg/$lv regionsize "128.00k"
check lv_first_seg_field $vg/$lv reshape_len_le 8
aux wait_for_sync $vg $lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert raid5_ls -> raid6_ls_6 (takeover)
lvconvert -y --type raid6 --stripes 3 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_first_seg_field $vg/$lv segtype "raid6_ls_6"
check lv_first_seg_field $vg/$lv stripes 5
check lv_first_seg_field $vg/$lv data_stripes 3
check lv_first_seg_field $vg/$lv stripesize "64.00k"
check lv_first_seg_field $vg/$lv regionsize "128.00k"
check lv_first_seg_field $vg/$lv reshape_len_le 0

# Convert raid6_ls_6 -> raid6(_zr) (reshape)
lvconvert -y --type raid6 --stripes 3 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_first_seg_field $vg/$lv segtype "raid6"
check lv_first_seg_field $vg/$lv stripes 5
check lv_first_seg_field $vg/$lv data_stripes 3
check lv_first_seg_field $vg/$lv stripesize "64.00k"
check lv_first_seg_field $vg/$lv regionsize "128.00k"
check lv_first_seg_field $vg/$lv reshape_len_le 10

# Remove reshape space
lvconvert -y --type raid6 --stripes 3 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_first_seg_field $vg/$lv segtype "raid6"
check lv_first_seg_field $vg/$lv stripes 5
check lv_first_seg_field $vg/$lv data_stripes 3
check lv_first_seg_field $vg/$lv stripesize "64.00k"
check lv_first_seg_field $vg/$lv regionsize "128.00k"
check lv_first_seg_field $vg/$lv reshape_len_le 0

vgremove -ff $vg

@@ -0,0 +1,80 @@
#!/usr/bin/env bash

# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

. lib/inittest

aux lvmconf 'activation/raid_region_size = 512'

which mkfs.ext4 || skip
aux have_raid 1 13 1 || skip

# Temporarily skip reshape tests on single-core CPUs until there's a fix for
# https://bugzilla.redhat.com/1443999 - AGK 2017/04/20
aux have_multi_core || skip
aux prepare_vg 5

#
# Test multi step linear -> striped conversion
#

# Create linear LV
lvcreate -aey -L 16M -n $lv $vg
check lv_field $vg/$lv segtype "linear"
check lv_field $vg/$lv stripes 1
check lv_field $vg/$lv data_stripes 1
echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert linear -> raid1
lvconvert -y --type striped --stripes 4 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_field $vg/$lv segtype "raid1"
check lv_field $vg/$lv stripes 2
check lv_field $vg/$lv data_stripes 2
check lv_field $vg/$lv regionsize "128.00k"
aux wait_for_sync $vg $lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert raid1 -> raid5_n
lvconvert -y --type striped --stripes 4 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_field $vg/$lv segtype "raid5_n"
check lv_field $vg/$lv stripes 2
check lv_field $vg/$lv data_stripes 1
check lv_field $vg/$lv stripesize "64.00k"
check lv_field $vg/$lv regionsize "128.00k"

# Convert raid5_n adding stripes
lvconvert -y --type striped --stripes 4 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_first_seg_field $vg/$lv segtype "raid5_n"
check lv_first_seg_field $vg/$lv data_stripes 4
check lv_first_seg_field $vg/$lv stripes 5
check lv_first_seg_field $vg/$lv data_stripes 4
check lv_first_seg_field $vg/$lv stripesize "64.00k"
check lv_first_seg_field $vg/$lv regionsize "128.00k"
check lv_first_seg_field $vg/$lv reshape_len_le 10
aux wait_for_sync $vg $lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert raid5_n -> striped
lvconvert -y --type striped --stripes 4 --stripesize 64K --regionsize 128K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_first_seg_field $vg/$lv segtype "striped"
check lv_first_seg_field $vg/$lv stripes 4
check lv_first_seg_field $vg/$lv data_stripes 4
check lv_first_seg_field $vg/$lv stripesize "64.00k"

vgremove -ff $vg

@@ -14,6 +14,8 @@ SKIP_WITH_LVMPOLLD=1

. lib/inittest

aux lvmconf 'activation/raid_region_size = 512'

which mkfs.ext4 || skip
aux have_raid 1 12 0 || skip

@@ -0,0 +1,89 @@
#!/usr/bin/env bash

# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

. lib/inittest

aux lvmconf 'activation/raid_region_size = 512'

which mkfs.ext4 || skip
aux have_raid 1 13 1 || skip

# Temporarily skip reshape tests on single-core CPUs until there's a fix for
# https://bugzilla.redhat.com/1443999 - AGK 2017/04/20
aux have_multi_core || skip

aux prepare_vg 5

#
# Test multi step striped -> linear conversion
#

# Create 4-way striped LV
lvcreate -aey --type striped -L 16M --stripes 4 --stripesize 64K -n $lv $vg
check lv_first_seg_field $vg/$lv segtype "striped"
check lv_first_seg_field $vg/$lv stripes 4
check lv_first_seg_field $vg/$lv data_stripes 4
check lv_first_seg_field $vg/$lv stripesize "64.00k"
echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
lvextend -y -L64M $DM_DEV_DIR/$vg/$lv

# Convert striped -> raid5_n
lvconvert -y --type linear $vg/$lv
check lv_field $vg/$lv segtype "raid5_n"
check lv_field $vg/$lv data_stripes 4
check lv_field $vg/$lv stripes 5
check lv_field $vg/$lv data_stripes 4
check lv_field $vg/$lv stripesize "64.00k"
check lv_field $vg/$lv regionsize "512.00k"
check lv_field $vg/$lv reshape_len_le 0
aux wait_for_sync $vg $lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Restripe raid5_n LV to single data stripe
#
# Need --force in order to remove stripes thus shrinking LV size!
lvconvert -y --force --type linear $vg/$lv
aux wait_for_sync $vg $lv 1
fsck -fn $DM_DEV_DIR/$vg/$lv
# Remove the now freed stripes
lvconvert -y --type linear $vg/$lv
check lv_field $vg/$lv segtype "raid5_n"
check lv_field $vg/$lv stripes 2
check lv_field $vg/$lv data_stripes 1
check lv_field $vg/$lv stripesize "64.00k"
check lv_field $vg/$lv regionsize "512.00k"
check lv_field $vg/$lv reshape_len_le 4

# Convert raid5_n -> raid1
lvconvert -y --type linear $vg/$lv
check lv_field $vg/$lv segtype "raid1"
check lv_field $vg/$lv stripes 2
check lv_field $vg/$lv data_stripes 2
check lv_field $vg/$lv stripesize 0
check lv_field $vg/$lv regionsize "512.00k"
check lv_field $vg/$lv reshape_len_le ""
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert raid1 -> linear
lvconvert -y --type linear $vg/$lv
check lv_first_seg_field $vg/$lv segtype "linear"
check lv_first_seg_field $vg/$lv stripes 1
check lv_first_seg_field $vg/$lv data_stripes 1
check lv_first_seg_field $vg/$lv stripesize 0
check lv_first_seg_field $vg/$lv regionsize 0
fsck -fn $DM_DEV_DIR/$vg/$lv

vgremove -ff $vg

@@ -15,6 +15,8 @@ SKIP_WITH_LVMPOLLD=1

. lib/inittest

aux lvmconf 'activation/raid_region_size = 512'

which mkfs.ext4 || skip
aux have_raid 1 12 0 || skip

@@ -51,7 +53,9 @@ aux wait_for_sync $vg $lv1
fsck -fn $DM_DEV_DIR/$vg/$lv1

# Extend raid5_n LV by factor 4 to keep size once linear
lvresize -y -L 64M $vg/$lv1
aux wait_for_sync $vg $lv1

check lv_field $vg/$lv1 segtype "raid5_n"
check lv_field $vg/$lv1 data_stripes 4
check lv_field $vg/$lv1 stripes 5
@@ -87,6 +91,7 @@ check lv_first_seg_field $vg/$lv1 stripes 2
check lv_first_seg_field $vg/$lv1 stripesize "32.00k"
check lv_first_seg_field $vg/$lv1 regionsize "1.00m"
check lv_first_seg_field $vg/$lv1 reshape_len_le 4
fsck -fn $DM_DEV_DIR/$vg/$lv1

# Convert raid5_n to raid1
lvconvert -y --type raid1 $vg/$lv1
@@ -97,6 +102,7 @@ check lv_first_seg_field $vg/$lv1 stripes 2
check lv_first_seg_field $vg/$lv1 stripesize "0"
check lv_first_seg_field $vg/$lv1 regionsize "1.00m"
check lv_first_seg_field $vg/$lv1 reshape_len_le ""
fsck -fn $DM_DEV_DIR/$vg/$lv1

# Convert raid1 -> linear
lvconvert -y --type linear $vg/$lv1
@@ -107,5 +113,6 @@ check lv_first_seg_field $vg/$lv1 stripes 1
check lv_first_seg_field $vg/$lv1 stripesize "0"
check lv_first_seg_field $vg/$lv1 regionsize "0"
check lv_first_seg_field $vg/$lv1 reshape_len_le ""
fsck -fn $DM_DEV_DIR/$vg/$lv1

vgremove -ff $vg

@@ -46,6 +46,7 @@ check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
check lv_first_seg_field $vg/$lv1 data_stripes 10
check lv_first_seg_field $vg/$lv1 stripes 11
echo y|mkfs -t ext4 /dev/$vg/$lv1
fsck -fn /dev/$vg/$lv1

mkdir -p $mount_dir
mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
@@ -53,8 +54,8 @@ mkdir -p $mount_dir/1 $mount_dir/2


echo 3 >/proc/sys/vm/drop_caches
cp -r /usr/bin $mount_dir/1 &>/dev/null &
cp -r /usr/bin $mount_dir/2 &>/dev/null &
sync &

aux wait_for_sync $vg $lv1
@@ -69,11 +70,11 @@ check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
check lv_first_seg_field $vg/$lv1 data_stripes 15
check lv_first_seg_field $vg/$lv1 stripes 16

kill -9 %%
wait
rm -fr $mount_dir/[12]

sync
umount $mount_dir

fsck -fn "$DM_DEV_DIR/$vg/$lv1"

test/shell/lvconvert-raid-restripe-linear.sh (new file, 76 lines)
@@ -0,0 +1,76 @@
#!/usr/bin/env bash

# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA2110-1301 USA

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

. lib/inittest

which mkfs.ext4 || skip
aux have_raid 1 12 0 || skip

# Temporarily skip reshape tests on single-core CPUs until there's a fix for
# https://bugzilla.redhat.com/1443999 - AGK 2017/04/20
aux have_multi_core || skip
aux prepare_vg 5

#
# Test single step linear -> striped conversion
#

# Create linear LV
lvcreate -aey -L 16M -n $lv $vg
check lv_field $vg/$lv segtype "linear"
check lv_field $vg/$lv stripes 1
check lv_field $vg/$lv data_stripes 1
echo y|mkfs -t ext4 $DM_DEV_DIR/$vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert linear -> raid1
not lvconvert -y --stripes 4 $vg/$lv
not lvconvert -y --stripes 4 --stripesize 64K $vg/$lv
not lvconvert -y --stripes 4 --stripesize 64K --regionsize 512K $vg/$lv
lvconvert -y --type striped --stripes 4 --stripesize 64K --regionsize 512K $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv
check lv_field $vg/$lv segtype "raid1"
check lv_field $vg/$lv stripes 2
check lv_field $vg/$lv data_stripes 2
check lv_field $vg/$lv regionsize "512.00k"
aux wait_for_sync $vg $lv
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert raid1 -> raid5_n
lvconvert -y --type striped --stripes 4 --stripesize 64K --regionsize 512K $vg/$lv
check lv_field $vg/$lv segtype "raid5_n"
check lv_field $vg/$lv stripes 2
check lv_field $vg/$lv data_stripes 1
check lv_field $vg/$lv stripesize "64.00k"
check lv_field $vg/$lv regionsize "512.00k"
fsck -fn $DM_DEV_DIR/$vg/$lv

# Convert raid5_n adding stripes
lvconvert -y --type striped --stripes 4 --stripesize 64K --regionsize 512K $vg/$lv
check lv_first_seg_field $vg/$lv segtype "raid5_n"
check lv_first_seg_field $vg/$lv data_stripes 4
check lv_first_seg_field $vg/$lv stripes 5
check lv_first_seg_field $vg/$lv stripesize "64.00k"
check lv_first_seg_field $vg/$lv regionsize "512.00k"
check lv_first_seg_field $vg/$lv reshape_len_le 10
aux wait_for_sync $vg $lv
fsck -fn $DM_DEV_DIR/$vg/$lv
resize2fs $DM_DEV_DIR/$vg/$lv

# Convert raid5_n -> striped
lvconvert -y --type striped $vg/$lv
fsck -fn $DM_DEV_DIR/$vg/$lv

vgremove -ff $vg

test/shell/lvconvert-raid1-split-trackchanges.sh (new file, 43 lines)
@@ -0,0 +1,43 @@
#!/usr/bin/env bash

# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA


SKIP_WITH_LVMPOLLD=1

. lib/inittest

# rhbz1579072/rhbz1579438

aux have_raid 1 3 0 || skip

# 8 PVs needed for RAID10 testing (4-stripes/2-mirror)
aux prepare_pvs 4 2
get_devs
vgcreate $SHARED -s 512k "$vg" "${DEVICES[@]}"

lvcreate -y --ty raid1 -m 2 -n $lv1 -l 1 $vg
lvconvert -y --splitmirrors 1 --trackchanges $vg/$lv1

not lvconvert -y --ty linear $vg/$lv1
not lvconvert -y --ty striped -i 3 $vg/$lv1
not lvconvert -y --ty mirror $vg/$lv1
not lvconvert -y --ty raid4 $vg/$lv1
not lvconvert -y --ty raid5 $vg/$lv1
not lvconvert -y --ty raid6 $vg/$lv1
not lvconvert -y --ty raid10 $vg/$lv1
not lvconvert -y --ty striped -m 1 $vg/${lv1}_rimage_2
not lvconvert -y --ty raid1 -m 1 $vg/${lv1}_rimage_2
not lvconvert -y --ty mirror -m 1 $vg/${lv1}_rimage_2
not lvconvert -y --ty cache-pool $vg/${lv1}_rimage_2
not lvconvert -y --ty thin-pool $vg/${lv1}_rimage_2

vgremove -ff $vg

@@ -33,7 +33,7 @@ vgcfgbackup -f backup $vg
# use of --force is mandatory
not vgcfgrestore -f backup $vg

vgcfgrestore -y -f backup --force $vg

check lv_field $vg/pool transaction_id 1

tools/args.h (14 changed lines)
@@ -611,7 +611,9 @@ arg(splitcache_ARG, '\0', "splitcache", 0, 0, 0,
arg(splitmirrors_ARG, '\0', "splitmirrors", number_VAL, 0, 0,
    "Splits the specified number of images from a raid1 or mirror LV\n"
    "and uses them to create a new LV. If --trackchanges is also specified,\n"
    "changes to the raid1 LV are tracked while the split LV remains detached.\n"
    "If --name is specified, then the images are permanently split from the\n"
    "original LV and changes are not tracked.\n")

arg(splitsnapshot_ARG, '\0', "splitsnapshot", 0, 0, 0,
    "Separates a COW snapshot from its origin LV. The LV that is split off\n"
@@ -691,10 +693,12 @@ arg(thinpool_ARG, '\0', "thinpool", lv_VAL, 0, 0,
arg(trackchanges_ARG, '\0', "trackchanges", 0, 0, 0,
    "Can be used with --splitmirrors on a raid1 LV. This causes\n"
    "changes to the original raid1 LV to be tracked while the split images\n"
    "remain detached. This is a temporary state that allows the read-only\n"
    "detached image to be merged efficiently back into the raid1 LV later.\n"
    "Only the regions with changed data are resynchronized during merge.\n"
    "While a raid1 LV is tracking changes, operations on it are limited to\n"
    "merging the split image (see --mergemirrors) or permanently splitting\n"
    "the image (see --splitmirrors with --name).\n")

/* TODO: hide this? */
arg(trustcache_ARG, '\0', "trustcache", 0, 0, 0,

@@ -399,7 +399,7 @@ lvconvert --splitmirrors Number --trackchanges LV_raid1_cache
 | 
			
		||||
OO: OO_LVCONVERT
 | 
			
		||||
OP: PV ...
 | 
			
		||||
ID: lvconvert_split_mirror_images
 | 
			
		||||
DESC: Split images from a raid1 LV and track changes to origin.
 | 
			
		||||
DESC: Split images from a raid1 LV and track changes to origin for later merge.
 | 
			
		||||
RULE: all not lv_is_locked lv_is_pvmove
 | 
			
		||||
 | 
			
		||||
lvconvert --mergemirrors LV_linear_raid|VG|Tag ...
 | 
			
		||||
@@ -700,7 +700,7 @@ RULE: all and lv_is_converting
 | 
			
		||||
# for compat since this was how it used to be done.
 | 
			
		||||
lvconvert LV_mirror_raid
 | 
			
		||||
OO: OO_LVCONVERT
 | 
			
		||||
ID: lvconvert_start_poll
 | 
			
		||||
ID: lvconvert_plain
 | 
			
		||||
DESC: Poll LV to continue conversion (also see --startpoll)
 | 
			
		||||
DESC: or waits till conversion/mirror syncing is finished
 | 
			
		||||
FLAGS: SECONDARY_SYNTAX
 | 
			
		||||
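The renamed compat definition above keeps the old behaviour where a bare lvconvert on an LV just resumes polling of an in-progress conversion; illustrative equivalents, with the LV name hypothetical:

# wait for an in-progress mirror/raid conversion or sync to finish
lvconvert vg/lv
# the explicit spelling of the same operation
lvconvert --startpoll vg/lv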
 
@@ -939,8 +939,10 @@ static int _display_info_cols(struct dm_task *dmt, struct dm_info *info)
			goto_out;

		/* No regions to report is not an error */
		if (!dm_stats_get_nr_regions(obj.stats))
		if (!dm_stats_get_nr_regions(obj.stats)) {
			r = 1;
			goto out;
		}
	}

	/* group report with no groups? */
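With this change a stats report on a device that has no regions defined is treated as success rather than an error; a quick way to observe it, with the device name illustrative:

# previously this returned a failure exit code when no regions existed; now it exits 0
dmsetup stats report vg00-lv0 && echo "no regions is not an error"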
 
@@ -238,6 +238,14 @@ static int _read_params(struct cmd_context *cmd, struct lvconvert_params *lp)
                break;

	case CONV_OTHER:
		if (arg_is_set(cmd, regionsize_ARG)) {
			lp->region_size = arg_uint_value(cmd, regionsize_ARG, 0);
			lp->region_size_supplied = 1;
		} else {
			lp->region_size = get_default_region_size(cmd);
			lp->region_size_supplied = 0;
		}

		if (_mirror_or_raid_type_requested(cmd, lp->type_str) ||
			   lp->mirrorlog || lp->corelog) { /* Mirrors (and some RAID functions) */
			if (arg_is_set(cmd, chunksize_ARG)) {
@@ -250,14 +258,6 @@ static int _read_params(struct cmd_context *cmd, struct lvconvert_params *lp)
				return 0;
			}

			if (arg_is_set(cmd, regionsize_ARG)) {
				lp->region_size = arg_uint_value(cmd, regionsize_ARG, 0);
				lp->region_size_supplied = 1;
			} else {
				lp->region_size = get_default_region_size(cmd);
				lp->region_size_supplied = 0;
			}

			/* FIXME man page says in one place that --type and --mirrors can't be mixed */
			if (lp->mirrors_supplied && !lp->mirrors)
				/* down-converting to linear/stripe? */
@@ -265,7 +265,7 @@ static int _read_params(struct cmd_context *cmd, struct lvconvert_params *lp)

		} else if (_raid0_type_requested(lp->type_str) || _striped_type_requested(lp->type_str)) { /* striped or linear or raid0 */
			if (arg_from_list_is_set(cmd, "cannot be used with --type raid0 or --type striped or --type linear",
						 chunksize_ARG, corelog_ARG, mirrors_ARG, mirrorlog_ARG, regionsize_ARG, zero_ARG,
						 chunksize_ARG, corelog_ARG, mirrors_ARG, mirrorlog_ARG, zero_ARG,
						 -1))
				return_0;
		} /* else segtype will default to current type */
@@ -1165,6 +1165,42 @@ static int _lvconvert_validate_thin(struct logical_volume *lv,
	return 0;
}

/* Check for raid1 split trackchanges image to reject conversions on it. */
static int _raid_split_image_conversion(struct logical_volume *lv)
{
	const char *s;

	if (lv_is_raid_with_tracking(lv)) {
		log_error("Conversion of tracking raid1 LV %s is not supported.",
			  display_lvname(lv));
		return 1;
	}

	if (lv_is_raid_image(lv) &&
	    (s = strstr(lv->name, "_rimage_"))) {
		size_t len = s - lv->name;
		char raidlv_name[len + 1];
		const struct logical_volume *tmp_lv;

		strncpy(raidlv_name, lv->name, len);
		raidlv_name[len] = '\0';

		if (!(tmp_lv = find_lv(lv->vg, raidlv_name))) {
			log_error(INTERNAL_ERROR "Failed to find RaidLV of RAID subvolume %s.",
				  display_lvname(lv));
			return 1;
		}

		if (lv_is_raid_with_tracking(tmp_lv)) {
			log_error("Conversion of tracked raid1 subvolume %s is not supported.",
				  display_lvname(lv));
			return 1;
		}
	}

	return 0;
}
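This new check is what the "not lvconvert" test cases at the top of this comparison exercise; from the command line the rejected operations look roughly like this (names illustrative, error texts taken from the function above):

lvconvert --splitmirrors 1 --trackchanges vg/lv
# while tracking is active, conversions of the raid1 LV or its sub-LVs are refused:
not lvconvert --type raid10 vg/lv              # "Conversion of tracking raid1 LV ... is not supported."
not lvconvert --type thin-pool vg/lv_rimage_1  # "Conversion of tracked raid1 subvolume ... is not supported."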

/*
 * _lvconvert_mirrors
 *
@@ -1180,6 +1216,9 @@ static int _lvconvert_mirrors(struct cmd_context *cmd,
	uint32_t new_mimage_count = 0;
	uint32_t new_log_count = 0;

	if (_raid_split_image_conversion(lv))
		return 0;

	if ((lp->corelog || lp->mirrorlog) && *lp->type_str && strcmp(lp->type_str, SEG_TYPE_NAME_MIRROR)) {
		log_error("--corelog and --mirrorlog are only compatible with mirror devices.");
		return 0;
@@ -1258,11 +1297,11 @@ static int _is_valid_raid_conversion(const struct segment_type *from_segtype,
	if (!from_segtype)
		return 1;

	if (from_segtype == to_segtype)
		return 1;

	/* Support raid0 <-> striped conversions */
	/* linear/striped/raid0 <-> striped/raid0/linear (restriping via raid) */
	if (segtype_is_striped(from_segtype) && segtype_is_striped(to_segtype))
		return 0;

	if (from_segtype == to_segtype)
		return 1;

	if (!segtype_is_raid(from_segtype) && !segtype_is_raid(to_segtype))
@@ -1296,6 +1335,9 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
	struct cmd_context *cmd = lv->vg->cmd;
	struct lv_segment *seg = first_seg(lv);

	if (_raid_split_image_conversion(lv))
		return 0;

	if (_linear_type_requested(lp->type_str)) {
		if (arg_is_set(cmd, mirrors_ARG) && (arg_uint_value(cmd, mirrors_ARG, 0) != 0)) {
			log_error("Cannot specify mirrors with linear type.");
@@ -1305,45 +1347,18 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
		lp->mirrors = 0;
	}

	/* Can only change image count for raid1 and linear */
	if (lp->mirrors_supplied) {
		if (_raid0_type_requested(lp->type_str)) {
			log_error("--mirrors/-m is not compatible with conversion to %s.",
				  lp->type_str);
			return 0;
		}
		if (!seg_is_mirrored(seg) && !seg_is_linear(seg)) {
			log_error("--mirrors/-m is not compatible with %s.",
				  lvseg_name(seg));
			return 0;
		}
		if (seg_is_raid10(seg)) {
			log_error("--mirrors/-m cannot be changed with %s.",
				  lvseg_name(seg));
			return 0;
		}
	}

	if (!_lvconvert_validate_thin(lv, lp))
		return_0;

	if (!_is_valid_raid_conversion(seg->segtype, lp->segtype))
	if (!_is_valid_raid_conversion(seg->segtype, lp->segtype) &&
	    !lp->mirrors_supplied)
		goto try_new_takeover_or_reshape;

	if (seg_is_linear(seg) && !lp->mirrors_supplied) {
		if (_raid0_type_requested(lp->type_str)) {
			log_error("Linear LV %s cannot be converted to %s.",
				  display_lvname(lv), lp->type_str);
			return 0;
		}

		if (!strcmp(lp->type_str, SEG_TYPE_NAME_RAID1)) {
			log_error("Raid conversions of LV %s require -m/--mirrors.",
				  display_lvname(lv));
			return 0;
		}
	if (seg_is_striped(seg) && !lp->mirrors_supplied)
		goto try_new_takeover_or_reshape;

	if (seg_is_linear(seg) && !lp->mirrors_supplied)
		goto try_new_takeover_or_reshape;
	}

	/* Change number of RAID1 images */
	if (lp->mirrors_supplied || lp->keep_mimages) {
@@ -1381,6 +1396,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
		return lv_raid_split(lv, lp->yes, lp->lv_split_name, image_count, lp->pvh);

	if (lp->mirrors_supplied) {
		if ((seg_is_striped(seg) && seg->area_count == 1) || seg_is_raid1(seg)) { /* ??? */
		if (!*lp->type_str || !strcmp(lp->type_str, SEG_TYPE_NAME_RAID1) || !strcmp(lp->type_str, SEG_TYPE_NAME_LINEAR) ||
		    (!strcmp(lp->type_str, SEG_TYPE_NAME_STRIPED) && image_count == 1)) {
			if (image_count > DEFAULT_RAID1_MAX_IMAGES) {
@@ -1398,6 +1414,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l

			return 1;
		}
		}
		goto try_new_takeover_or_reshape;
	}

@@ -1440,7 +1457,6 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
	}

try_new_takeover_or_reshape:

	if (!_raid4_conversion_supported(lv, lp))
		return 0;

@@ -1450,24 +1466,15 @@ try_new_takeover_or_reshape:
	if (!arg_is_set(cmd, type_ARG))
	       lp->segtype = NULL;

	/* Only let raid4 through for now. */
	if (!lp->segtype ||
	    (lp->type_str && lp->type_str[0] && lp->segtype != seg->segtype &&
	     ((seg_is_raid4(seg) && seg_is_striped(lp) && lp->stripes > 1) ||
	     (seg_is_striped(seg) && seg->area_count > 1 && seg_is_raid4(lp))))) {
		if (!lv_raid_convert(lv, lp->segtype,
				     lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,
				     (lp->region_size_supplied || !seg->region_size) ?
				     lp->region_size : seg->region_size , lp->pvh))
			return_0;
	if (!lv_raid_convert(lv, lp->segtype,
			     lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,
			     (lp->region_size_supplied || !seg->region_size) ?
			     lp->region_size : seg->region_size , lp->pvh))
		return_0;

		log_print_unless_silent("Logical volume %s successfully converted.",
					display_lvname(lv));
		return 1;
	}

	log_error("Conversion operation not yet supported.");
	return 0;
	log_print_unless_silent("Logical volume %s successfully converted.",
				display_lvname(lv));
	return 1;
}

/*
@@ -1692,21 +1699,24 @@ static int _convert_striped(struct cmd_context *cmd, struct logical_volume *lv,
			    struct lvconvert_params *lp)
{
	const char *mirrors_type = find_config_tree_str(cmd, global_mirror_segtype_default_CFG, NULL);
	int raid_type = *lp->type_str && !strncmp(lp->type_str, "raid", 4);

	if (!strcmp(lp->type_str, SEG_TYPE_NAME_MIRROR))
		return _convert_striped_mirror(cmd, lv, lp);
	if (!raid_type) {
		if (!strcmp(lp->type_str, SEG_TYPE_NAME_MIRROR))
			return _convert_striped_mirror(cmd, lv, lp);

	if (segtype_is_raid(lp->segtype))
		return _convert_striped_raid(cmd, lv, lp);
		/* --mirrors can mean --type mirror or --type raid1 depending on config setting. */

	/* --mirrors can mean --type mirror or --type raid1 depending on config setting. */

	if (arg_is_set(cmd, mirrors_ARG) && mirrors_type && !strcmp(mirrors_type, SEG_TYPE_NAME_MIRROR))
		return _convert_striped_mirror(cmd, lv, lp);
		if (arg_is_set(cmd, mirrors_ARG) && mirrors_type && !strcmp(mirrors_type, SEG_TYPE_NAME_MIRROR))
			return _convert_striped_mirror(cmd, lv, lp);
	}

	if (arg_is_set(cmd, mirrors_ARG) && mirrors_type && !strcmp(mirrors_type, SEG_TYPE_NAME_RAID1))
		return _convert_striped_raid(cmd, lv, lp);

	if (segtype_is_striped(lp->segtype) || segtype_is_raid(lp->segtype))
		return _convert_striped_raid(cmd, lv, lp);

	log_error("Unknown operation on striped or linear LV %s.", display_lvname(lv));
	return 0;
}
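As I read the reworked branch above, it keeps the rule stated in the comment: what a plain -m/--mirrors converts a striped or linear LV into follows the lvm.conf default, while an explicit raid --type now bypasses the mirror-specific paths. A sketch, with the LV name hypothetical:

# lvm.conf:  global { mirror_segtype_default = "raid1" }   (or "mirror")
lvconvert -m 1 vg/lv                 # uses the configured default segment type
lvconvert --type mirror -m 1 vg/lv   # explicit --type mirror still takes the mirror path
lvconvert --type raid1 -m 1 vg/lv    # explicit raid types go straight to the raid conversion path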
@@ -2606,6 +2616,9 @@ static int _lvconvert_to_thin_with_external(struct cmd_context *cmd,
		.virtual_extents = lv->le_count,
	};

	if (_raid_split_image_conversion(lv))
		return 0;

	if (lv == thinpool_lv) {
		log_error("Can't use same LV %s for thin pool and thin volume.",
			  display_lvname(thinpool_lv));
@@ -2915,6 +2928,9 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
	struct id lockd_meta_id;
	const char *str_seg_type = to_cachepool ? SEG_TYPE_NAME_CACHE_POOL : SEG_TYPE_NAME_THIN_POOL;

	if (_raid_split_image_conversion(lv))
		return 0;

	if (lv_is_thin_pool(lv) || lv_is_cache_pool(lv)) {
		log_error(INTERNAL_ERROR "LV %s is already a pool.", display_lvname(lv));
		return 0;
@@ -3366,6 +3382,9 @@ static int _lvconvert_to_cache_vol(struct cmd_context *cmd,
	struct dm_config_tree *policy_settings = NULL;
	int r = 0;

	if (_raid_split_image_conversion(lv))
		return 0;

	/* If LV is inactive here, ensure it's not active elsewhere. */
	if (!lockd_lv(cmd, lv, "ex", 0))
		return_0;
@@ -4283,6 +4302,12 @@ static int _lvconvert_to_pool_or_swap_metadata_single(struct cmd_context *cmd,
		return 0;
	};

	if (lv_is_origin(lv)) {
		log_error("Cannot convert logical volume %s under snapshot.",
			  display_lvname(lv));
		return 0;
	};

	if (cmd->position_argc > 1) {
		/* First pos arg is required LV, remaining are optional PVs. */
		if (!(use_pvh = create_pv_list(cmd->mem, lv->vg, cmd->position_argc - 1, cmd->position_argv + 1, 0)))
 
@@ -118,6 +118,7 @@ static const struct command_function _command_functions[CMD_COUNT] = {

	/* lvconvert utility to trigger polling on an LV. */
	{ lvconvert_start_poll_CMD, lvconvert_start_poll_cmd },
	{ lvconvert_plain_CMD, lvconvert_start_poll_cmd },

	/* lvconvert utilities for creating/maintaining thin and cache objects. */
	{ lvconvert_to_thinpool_CMD,			lvconvert_to_pool_cmd },
@@ -1578,6 +1579,17 @@ static struct command *_find_command(struct cmd_context *cmd, const char *path,
		if (arg_is_set(cmd, help_ARG) || arg_is_set(cmd, help2_ARG) || arg_is_set(cmd, longhelp_ARG) || arg_is_set(cmd, version_ARG))
			return &commands[i];

		/*
		 * The 'lvconvert LV' cmd def matches any lvconvert cmd which throws off
		 * nearest-command partial-match suggestions.  Make it a special case so
		 * that it won't be used as a close match.  If the command has any option
		 * set (other than -v), don't attempt to match it to 'lvconvert LV'.
		 */
		if (commands[i].command_enum == lvconvert_plain_CMD) {
			if (cmd->opt_count - cmd->opt_arg_values[verbose_ARG].count)
				continue;
		}

		match_required = 0;	/* required parameters that match */
		match_ro = 0;		/* required opt_args that match */
		match_rp = 0;		/* required pos_args that match */
@@ -2096,6 +2108,8 @@ static int _process_command_line(struct cmd_context *cmd, int *argc, char ***arg
		if (goval == '?')
			return 0;

		cmd->opt_count++;

		/*
		 * translate the option value used by getopt into the enum
		 * value (e.g. foo_ARG) from the args array.

@@ -96,7 +96,7 @@ int pvresize(struct cmd_context *cmd, int argc, char **argv)

	ret = process_each_pv(cmd, argc, argv, NULL, 0, READ_FOR_UPDATE | READ_ALLOW_EXPORTED, handle, _pvresize_single);

	log_print_unless_silent("%d physical volume(s) resized / %d physical volume(s) "
	log_print_unless_silent("%d physical volume(s) resized or updated / %d physical volume(s) "
				"not resized", params.done, params.total - params.done);
out:
	destroy_processing_handle(cmd, handle);
 
@@ -318,21 +318,22 @@ static int _pvscan_cache(struct cmd_context *cmd, int argc, char **argv)
	dm_list_init(&found_vgnames);
	dm_list_init(&pp.changed_vgnames);

	do_activate = arg_is_set(cmd, activate_ARG);
	if ((do_activate = arg_is_set(cmd, activate_ARG))) {
		if (arg_uint_value(cmd, activate_ARG, 0) != CHANGE_AAY) {
			log_error("Only --activate ay allowed with pvscan.");
			return EINVALID_CMD_LINE;
		}

	if (!lvmetad_used() && !do_activate) {
		log_verbose("Ignoring pvscan --cache because lvmetad is not in use.");
		return ret;
	}

	if (do_activate && (arg_uint_value(cmd, activate_ARG, CHANGE_AAY) != CHANGE_AAY)) {
		log_error("Only --activate ay allowed with pvscan.");
		return 0;
	}

	if (!lvmetad_used() && do_activate && !find_config_tree_bool(cmd, global_use_lvmetad_CFG, NULL)) {
		log_verbose("Ignoring pvscan --cache -aay because lvmetad is not in use.");
		return ret;
		if (!lvmetad_used() &&
		    !find_config_tree_bool(cmd, global_use_lvmetad_CFG, NULL)) {
			log_verbose("Ignoring pvscan --cache -aay because lvmetad is not in use.");
			return ret;
		}
	} else {
		if (!lvmetad_used()) {
			log_verbose("Ignoring pvscan --cache because lvmetad is not in use.");
			return ret;
		}
	}

	if (arg_is_set(cmd, major_ARG) + arg_is_set(cmd, minor_ARG))
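The restructured checks above separate a plain metadata rescan from the autoactivation case; typical invocations, with the device path illustrative:

# rescan one device's metadata into lvmetad (no activation)
pvscan --cache /dev/sdb1
# rescan and auto-activate any VG that becomes complete; only 'ay' is accepted here
pvscan --cache -aay /dev/sdb1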
 
@@ -5730,7 +5730,7 @@ do_command:
	if (pp->preserve_existing && pp->orphan_vg_name) {
		log_debug("Using existing orphan PVs in %s.", pp->orphan_vg_name);

		if (!(orphan_vg = vg_read_internal(cmd, pp->orphan_vg_name, NULL, 0, 0, &consistent))) {
		if (!(orphan_vg = vg_read_internal(cmd, pp->orphan_vg_name, NULL, 0, 0, 0, &consistent))) {
			log_error("Cannot read orphans VG %s.", pp->orphan_vg_name);
			goto bad;
		}

@@ -1,6 +1,6 @@
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
@@ -15,11 +15,69 @@

#include "tools.h"
#include "lvmetad-client.h"
#include "dm-ioctl.h"

/*
 * Check if there are any active volumes from restored vg_name.
 * We can prompt user, as such operation may make some serious
 * troubles later, when user will try to continue such devices.
 */
static int _check_all_dm_devices(const char *vg_name, unsigned *found)
{
	struct dm_task *dmt;
	struct dm_names *names;
	char vgname_buf[DM_NAME_LEN * 2];
	char *vgname, *lvname, *lvlayer;
	unsigned next = 0;
	int r = 1;

	if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
		return_0;

	if (!dm_task_run(dmt)) {
		r = 0;
		goto_out;
	}

	if (!(names = dm_task_get_names(dmt))) {
		r = 0;
		goto_out;
	}

	if (!names->dev) {
		log_verbose("No devices found.");
		goto out;
	}

	do {
		/* TODO: Do we want to validate UUID LVM- prefix as well ? */
		names = (struct dm_names *)((char *) names + next);
		if (!dm_strncpy(vgname_buf, names->name, sizeof(vgname_buf))) {
			r = 0;
			goto_out;
		}
		vgname = vgname_buf;
		if (!dm_split_lvm_name(NULL, NULL, &vgname, &lvname, &lvlayer)) {
			r = 0;
			goto_out;
		}
		if (strcmp(vgname, vg_name) == 0) {
			log_print("Volume group %s has active volume: %s.", vgname, lvname);
			(*found)++;
		}
		next = names->next;
	} while (next);

out:
	dm_task_destroy(dmt);
	return r;
}

int vgcfgrestore(struct cmd_context *cmd, int argc, char **argv)
{
	const char *vg_name = NULL;
	int lvmetad_rescan = 0;
	unsigned found = 0;
	int ret;

	if (argc == 1) {
@@ -47,6 +105,21 @@ int vgcfgrestore(struct cmd_context *cmd, int argc, char **argv)
		return ECMD_PROCESSED;
	}

	if (!_check_all_dm_devices(vg_name, &found)) {
		log_warn("WARNING: Failed to check for active volumes in volume group \"%s\".", vg_name);
	} else if (found) {
		log_warn("WARNING: Found %u active volume(s) in volume group \"%s\".",
			 found, vg_name);
		log_print("Restoring VG with active LVs, may cause mismatch with its metadata.");
		if (!arg_is_set(cmd, yes_ARG) &&
		    yes_no_prompt("Do you really want to proceed with restore of volume group \"%s\", "
				  "while %u volume(s) are active? [y/n]: ",
				  vg_name, found) == 'n') {
			log_error("Restore aborted.");
			return ECMD_FAILED;
		}
	}

	/*
	 * lvmetad does not handle a VG being restored, which would require
	 * vg_remove of the existing VG, then vg_update of the restored VG.  A
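The prompt added above pairs with the vgcfgrestore test change near the top of this comparison; a minimal sketch of the affected workflow, where the VG name and backup file are illustrative and --force reflects that the test VG contains a thin pool:

vgcfgbackup -f backup vg
# restoring while LVs in "vg" are still active now warns and asks for confirmation;
# -y answers that prompt non-interactively
vgcfgrestore -y --force -f backup vg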
 
@@ -199,7 +199,7 @@ int vgchange_activate(struct cmd_context *cmd, struct volume_group *vg,
	    strcmp(vg->system_id, cmd->system_id) &&
	    do_activate) {
		log_error("Cannot activate LVs in a foreign VG.");
		return ECMD_FAILED;
		return 0;
	}

	/*
@@ -1189,6 +1189,7 @@ int vgchange_locktype_cmd(struct cmd_context *cmd, int argc, char **argv)
		cmd->lockd_vg_disable = 1;
		cmd->lockd_lv_disable = 1;
		cmd->handles_missing_pvs = 1;
		cmd->force_access_clustered = 1;
		goto process;
	}

 