
generate man pages

David Teigland 2017-02-10 16:20:19 -06:00
parent 13a6368522
commit 698abdde16
125 changed files with 1056 additions and 6580 deletions

configure.ac

@@ -86,6 +86,8 @@ AC_PROG_RANLIB
AC_PATH_TOOL(CFLOW_CMD, cflow)
AC_PATH_TOOL(CSCOPE_CMD, cscope)
AC_PATH_TOOL(CHMOD, chmod)
AC_PATH_TOOL(WC, wc)
AC_PATH_TOOL(SORT, sort)
################################################################################
dnl -- Check for header files.

doc/license.txt (new file, 14 lines)

@@ -0,0 +1,14 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

man/Makefile.in

@@ -31,18 +31,20 @@ LVMRAIDMAN = lvmraid.7
MAN5=lvm.conf.5
MAN7=lvmsystemid.7 lvmreport.7
MAN8=lvm-config.8 lvm-dumpconfig.8 lvm-fullreport.8 lvm-lvpoll.8 \
lvchange.8 lvmconfig.8 lvconvert.8 lvcreate.8 lvdisplay.8 lvextend.8 \
lvm.8 lvmchange.8 lvmconf.8 lvmdiskscan.8 lvmdump.8 lvmsadc.8 lvmsar.8 \
MAN8=lvm.8 lvmconf.8 lvmdump.8
MAN8DM=dmsetup.8 dmstats.8
MAN8CLUSTER=
MAN8SYSTEMD_GENERATORS=lvm2-activation-generator.8
MAN8GEN=lvm-config.8 lvm-dumpconfig.8 lvm-fullreport.8 lvm-lvpoll.8 \
lvcreate.8 lvchange.8 lvmconfig.8 lvconvert.8 lvdisplay.8 lvextend.8 \
lvreduce.8 lvremove.8 lvrename.8 lvresize.8 lvs.8 \
lvscan.8 pvchange.8 pvck.8 pvcreate.8 pvdisplay.8 pvmove.8 pvremove.8 \
pvresize.8 pvs.8 pvscan.8 vgcfgbackup.8 vgcfgrestore.8 vgchange.8 \
vgck.8 vgcreate.8 vgconvert.8 vgdisplay.8 vgexport.8 vgextend.8 \
vgimport.8 vgimportclone.8 vgmerge.8 vgmknodes.8 vgreduce.8 vgremove.8 \
vgrename.8 vgs.8 vgscan.8 vgsplit.8
MAN8DM=dmsetup.8 dmstats.8
MAN8CLUSTER=
MAN8SYSTEMD_GENERATORS=lvm2-activation-generator.8
vgrename.8 vgs.8 vgscan.8 vgsplit.8 \
lvmsar.8 lvmsadc.8 lvmdiskscan.8 lvmchange.8
ifeq ($(MAKECMDGOALS),all_man)
MAN_ALL="yes"
@@ -113,8 +115,8 @@ MAN8DIR=$(mandir)/man8
include $(top_builddir)/make.tmpl
CLEAN_TARGETS+=$(MAN5) $(MAN7) $(MAN8) $(MAN8CLUSTER) \
$(MAN8SYSTEMD_GENERATORS) $(MAN8DM)
CLEAN_TARGETS+=$(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8CLUSTER) \
$(MAN8SYSTEMD_GENERATORS) $(MAN8DM) *.gen man-generator
DISTCLEAN_TARGETS+=$(FSADMMAN) $(BLKDEACTIVATEMAN) $(DMEVENTDMAN) \
$(LVMETADMAN) $(LVMPOLLDMAN) $(LVMLOCKDMAN) $(CLVMDMAN) $(CMIRRORDMAN) \
$(LVMCACHEMAN) $(LVMTHINMAN) $(LVMDBUSDMAN) $(LVMRAIDMAN)
@@ -125,11 +127,11 @@ all: man device-mapper
device-mapper: $(MAN8DM)
man: $(MAN5) $(MAN7) $(MAN8) $(MAN8CLUSTER) $(MAN8SYSTEMD_GENERATORS)
man: $(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8CLUSTER) $(MAN8SYSTEMD_GENERATORS)
all_man: man
$(MAN5) $(MAN7) $(MAN8) $(MAN8DM) $(MAN8CLUSTER): Makefile
$(MAN5) $(MAN7) $(MAN8) $(MAN8GEN) $(MAN8DM) $(MAN8CLUSTER): Makefile
Makefile: Makefile.in
@:
@@ -140,6 +142,18 @@ Makefile: Makefile.in
*) echo "Creating $@" ; $(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $< > $@ ;; \
esac
man-generator:
$(CC) -DMAN_PAGE_GENERATOR -I$(top_builddir)/tools $(CFLAGS) $(top_srcdir)/tools/command.c -o $@
- ./man-generator lvmconfig > test.gen
if [ ! -s test.gen ] ; then cp genfiles/*.gen $(top_builddir)/man; fi;
$(MAN8GEN): man-generator
echo "Generating $@" ;
if [ ! -e $@.gen ]; then ./man-generator $(basename $@) $(top_srcdir)/man/$@.des > $@.gen; fi
if [ -f $(top_srcdir)/man/$@.end ]; then cat $(top_srcdir)/man/$@.end >> $@.gen; fi;
cat $(top_srcdir)/man/see_also.end >> $@.gen
$(SED) -e "s+#VERSION#+$(LVM_VERSION)+;s+#DEFAULT_SYS_DIR#+$(DEFAULT_SYS_DIR)+;s+#DEFAULT_ARCHIVE_DIR#+$(DEFAULT_ARCHIVE_DIR)+;s+#DEFAULT_BACKUP_DIR#+$(DEFAULT_BACKUP_DIR)+;s+#DEFAULT_PROFILE_DIR#+$(DEFAULT_PROFILE_DIR)+;s+#DEFAULT_CACHE_DIR#+$(DEFAULT_CACHE_DIR)+;s+#DEFAULT_LOCK_DIR#+$(DEFAULT_LOCK_DIR)+;s+#CLVMD_PATH#+@CLVMD_PATH@+;s+#LVM_PATH#+@LVM_PATH@+;s+#DEFAULT_RUN_DIR#+@DEFAULT_RUN_DIR@+;s+#DEFAULT_PID_DIR#+@DEFAULT_PID_DIR@+;s+#SYSTEMD_GENERATOR_DIR#+$(SYSTEMD_GENERATOR_DIR)+;s+#DEFAULT_MANGLING#+$(DEFAULT_MANGLING)+;" $@.gen > $@
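For illustration, the $(MAN8GEN) rule above amounts to roughly the following shell steps for a single page (a sketch using lvchange.8 as the example; paths are abbreviated and the final sed substitution pass is elided):

	make man-generator
	./man-generator lvchange man/lvchange.8.des > lvchange.8.gen
	cat man/lvchange.8.end >> lvchange.8.gen
	cat man/see_also.end >> lvchange.8.gen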
install_man5: $(MAN5)
$(INSTALL) -d $(MAN5DIR)
$(INSTALL_DATA) $(MAN5) $(MAN5DIR)/
@@ -148,9 +162,10 @@ install_man7: $(MAN7)
$(INSTALL) -d $(MAN7DIR)
$(INSTALL_DATA) $(MAN7) $(MAN7DIR)/
install_man8: $(MAN8)
install_man8: $(MAN8) $(MAN8GEN)
$(INSTALL) -d $(MAN8DIR)
$(INSTALL_DATA) $(MAN8) $(MAN8DIR)/
$(INSTALL_DATA) $(MAN8GEN) $(MAN8DIR)/
install_lvm2: install_man5 install_man7 install_man8

man/clvmd.8.in

@@ -154,6 +154,32 @@ This timeout will be ignored if you start \fBclvmd\fP with the \fB\-d\fP.
.br
Display the version of the cluster LVM daemon.
.
.SH NOTES
.
.SS Activation
.
In a clustered VG, clvmd is used for activation, and the following values are
possible with \fBlvchange/vgchange -a\fP:
.IP \fBy\fP|\fBsy\fP
clvmd activates the LV in shared mode (with a shared lock),
allowing multiple nodes to activate the LV concurrently.
If the LV type prohibits shared access, such as an LV with a snapshot,
an exclusive lock is automatically used instead.
clvmd attempts to activate the LV concurrently on all nodes.
.IP \fBey\fP
clvmd activates the LV in exclusive mode (with an exclusive lock),
allowing a single node to activate the LV.
clvmd attempts to activate the LV concurrently on all nodes, but only
one will succeed.
.IP \fBly\fP
clvmd attempts to activate the LV only on the local node.
If the LV type allows concurrent access, then shared mode is used,
otherwise exclusive.
.IP \fBn\fP
clvmd deactivates the LV on all nodes.
.IP \fBln\fP
clvmd deactivates the LV on the local node.
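.P
For example, with a hypothetical LV vg00/lvol1, the values above
correspond to commands such as:
.br
.B lvchange \-asy vg00/lvol1
.br
.B lvchange \-aey vg00/lvol1
.br
.B lvchange \-aln vg00/lvol1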
.
.SH ENVIRONMENT VARIABLES
.TP
.B LVM_CLVMD_BINARY

man/lvchange.8.des (new file, 2 lines)

@@ -0,0 +1,2 @@
lvchange changes LV attributes in the VG, changes LV activation in the
kernel, and includes other utilities for LV maintenance.

man/lvchange.8.end (new file, 6 lines)

@@ -0,0 +1,6 @@
.SH EXAMPLES
Change LV permission to read-only:
.sp
.B lvchange \-pr vg00/lvol1

man/lvchange.8.in (deleted)

@@ -1,491 +0,0 @@
.TH LVCHANGE 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
.de UNITS
..
.
.SH NAME
.
lvchange \(em change attributes of a logical volume
.
.SH SYNOPSIS
.
.ad l
.B lvchange
.RB [ \-a | \-\-activate
.RB [ a ][ e | s | l ]{ y | n }]
.RB [ \-\-activationmode
.RB { complete | degraded | partial }]
.RB [ \-\-addtag
.IR Tag ]
.RB [ \-K | \-\-ignoreactivationskip ]
.RB [ \-k | \-\-setactivationskip
.RB { y | n }]
.RB [ \-\-alloc
.IR AllocationPolicy ]
.RB [ \-A | \-\-autobackup
.RB { y | n }]
.RB [ \-\-rebuild
.IR PhysicalVolume ]
.RB [ \-\-cachemode
.RB { passthrough | writeback | writethrough }]
.RB [ \-\-cachepolicy
.IR Policy ]
.RB [ \-\-cachesettings
.IR Key \fB= Value ]
.RB [ \-\-commandprofile
.IR ProfileName ]
.RB [ \-C | \-\-contiguous
.RB { y | n }]
.RB [ \-d | \-\-debug ]
.RB [ \-\-deltag
.IR Tag ]
.RB [ \-\-detachprofile ]
.RB [ \-\-discards
.RB { ignore | nopassdown | passdown }]
.RB [ \-\-errorwhenfull
.RB { y | n }]
.RB [ \-h | \-? | \-\-help ]
.RB \%[ \-\-ignorelockingfailure ]
.RB \%[ \-\-ignoremonitoring ]
.RB \%[ \-\-ignoreskippedcluster ]
.RB \%[ \-\-metadataprofile
.IR ProfileName ]
.RB [ \-\-monitor
.RB { y | n }]
.RB [ \-\-noudevsync ]
.RB [ \-P | \-\-partial ]
.RB [ \-p | \-\-permission
.RB { r | rw }]
.RB [ \-M | \-\-persistent
.RB { y | n }
.RB [ \-\-major
.IR Major ]
.RB [ \-\-minor
.IR Minor ]]
.RB [ \-\-poll
.RB { y | n }]
.RB [ \-\- [ raid ] maxrecoveryrate
.IR Rate ]
.RB [ \-\- [ raid ] minrecoveryrate
.IR Rate ]
.RB [ \-\- [ raid ] syncaction
.RB { check | repair }]
.RB [ \-\- [ raid ] writebehind
.IR IOCount ]
.RB [ \-\- [ raid ] writemostly
.BR \fIPhysicalVolume [ : { y | n | t }]]
.RB [ \-r | \-\-readahead
.RB { \fIReadAheadSectors | auto | none }]
.RB [ \-\-refresh ]
.RB [ \-\-reportformat
.RB { basic | json }]
.RB [ \-\-resync ]
.RB [ \-S | \-\-select
.IR Selection ]
.RB [ \-\-sysinit ]
.RB [ \-t | \-\-test ]
.RB [ \-v | \-\-verbose ]
.RB [ \-Z | \-\-zero
.RB { y | n }]
.RI [ LogicalVolumePath ...]
.ad b
.
.SH DESCRIPTION
.
lvchange allows you to change the attributes of a logical volume
including making them known to the kernel ready for use.
.
.SH OPTIONS
.
See \fBlvm\fP(8) for common options.
.
.HP
.BR \-a | \-\-activate
.RB [ a ][ e | s | l ]{ y | n }
.br
Controls the availability of the logical volumes for use.
Communicates with the kernel device-mapper driver via
libdevmapper to activate (\fB\-ay\fP) or deactivate (\fB\-an\fP) the
logical volumes.
.br
Activation of a logical volume creates a symbolic link
\fI/dev/VolumeGroupName/LogicalVolumeName\fP pointing to the device node.
This link is removed on deactivation.
All software and scripts should access the device through
this symbolic link and present this as the name of the device.
The location and name of the underlying device node may depend on
the distribution and configuration (e.g. udev) and might change
from release to release.
.br
If autoactivation option is used (\fB\-aay\fP),
the logical volume is activated only if it matches an item in
the \fBactivation/auto_activation_volume_list\fP
set in \fBlvm.conf\fP(5).
If this list is not set, then all volumes are considered for
activation. The \fB\-aay\fP option should be also used during system
boot so it's possible to select which volumes to activate using
the \fBactivation/auto_activation_volume_list\fP setting.
.br
In a clustered VG, clvmd is used for activation, and the
following options are possible:
With \fB\-aey\fP, clvmd activates the LV in exclusive mode
(with an exclusive lock), allowing a single node to activate the LV.
With \fB\-asy\fP, clvmd activates the LV in shared mode
(with a shared lock), allowing multiple nodes to activate the LV concurrently.
If the LV type prohibits shared access, such as an LV with a snapshot,
the '\fBs\fP' option is ignored and an exclusive lock is used.
With \fB\-ay\fP (no mode specified), clvmd activates the LV in shared mode
if the LV type allows concurrent access, such as a linear LV.
Otherwise, clvmd activates the LV in exclusive mode.
With \fB\-aey\fP, \fB\-asy\fP, and \fB\-ay\fP, clvmd attempts to activate the LV
on all nodes. If exclusive mode is used, then only one of the
nodes will be successful.
With \fB\-an\fP, clvmd attempts to deactivate the LV on all nodes.
With \fB\-aly\fP, clvmd activates the LV only on the local node, and \fB\-aln\fP
deactivates only on the local node. If the LV type allows concurrent
access, then shared mode is used, otherwise exclusive.
LVs with snapshots are always activated exclusively because they can only
be used on one node at once.
For local VGs \fB\-ay\fP, \fB\-aey\fP, and \fB\-asy\fP are all equivalent.
.
.HP
.BR \-\-activationmode
.RB { complete | degraded | partial }
.br
The activation mode determines whether logical volumes are allowed to
activate when there are physical volumes missing (e.g. due to a device
failure). \fBcomplete\fP is the most restrictive; allowing only those
logical volumes to be activated that are not affected by the missing
PVs. \fBdegraded\fP allows RAID logical volumes to be activated even if
they have PVs missing. (Note that the "\fImirror\fP" segment type is not
considered a RAID logical volume. The "\fIraid1\fP" segment type should
be used instead.) Finally, \fBpartial\fP allows any logical volume to
be activated even if portions are missing due to a missing or failed
PV. This last option should only be used when performing recovery or
repair operations. \fBdegraded\fP is the default mode. To change it,
modify \fBactivation_mode\fP in \fBlvm.conf\fP(5).
.
.HP
.BR \-K | \-\-ignoreactivationskip
.br
Ignore the flag to skip Logical Volumes during activation.
.
.HP
.BR \-k | \-\-setactivationskip
.RB { y | n }
.br
Controls whether Logical Volumes are persistently flagged to be
skipped during activation. By default, thin snapshot volumes are
flagged for activation skip. To activate such volumes,
an extra \fB\-\-ignoreactivationskip\fP option must be used.
The flag is not applied during deactivation. To see whether
the flag is attached, use the \fBlvs\fP(8) command, where the state
of the flag is reported within \fBlv_attr\fP bits.
.
.HP
.BR \-\-cachemode
.RB { passthrough | writeback | writethrough }
.br
Specifying a cache mode determines when the writes to a cache LV
are considered complete. When \fBwriteback\fP is specified, a write is
considered complete as soon as it is stored in the cache pool LV.
If \fBwritethrough\fP is specified, a write is considered complete only
when it has been stored in the cache pool LV and on the origin LV.
While \fBwritethrough\fP may be slower for writes, it is more
resilient if something should happen to a device associated with the
cache pool LV. With \fBpassthrough\fP mode, all reads are served
from origin LV (all reads miss the cache) and all writes are
forwarded to the origin LV; additionally, write hits cause cache
block invalidates. See \fBlvmcache(7)\fP for more details.
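.br
For example, to switch a cached LV (vg00/lvol1 is a hypothetical name)
to writethrough mode:
.br
.B lvchange \-\-cachemode writethrough vg00/lvol1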
.
.HP
.BR \-\-cachepolicy
.IR Policy ,
.BR \-\-cachesettings
.IR Key \fB= Value
.br
Only applicable to cached LVs; see also \fBlvmcache(7)\fP. Sets
the cache policy and its associated tunable settings. In most use-cases,
default values should be adequate.
.
.HP
.BR \-C | \-\-contiguous
.RB { y | n }
.br
Tries to set or reset the contiguous allocation policy for
logical volumes. It's only possible to change a non-contiguous
logical volume's allocation policy to contiguous, if all of the
allocated physical extents are already contiguous.
.
.HP
.BR \-\-detachprofile
.br
Detach any metadata configuration profiles attached to given
Logical Volumes. See \fBlvm.conf\fP(5) for more information
about metadata profiles.
.
.HP
.BR \-\-discards
.RB { ignore | nopassdown | passdown }
.br
Set this to \fBignore\fP to ignore any discards received by a
thin pool Logical Volume. Set to \fBnopassdown\fP to process such
discards within the thin pool itself and allow the no-longer-needed
extents to be overwritten by new data. Set to \fBpassdown\fP (the
default) to process them both within the thin pool itself and to
pass them down the underlying device.
.
.HP
.BR \-\-errorwhenfull
.RB { y | n }
.br
Sets thin pool behavior when data space is exhausted. See
.BR lvcreate (8)
for information.
.
.HP
.BR \-\-ignoremonitoring
.br
Make no attempt to interact with dmeventd unless \fB\-\-monitor\fP
is specified.
Do not use this if dmeventd is already monitoring a device.
.
.HP
.BR \-\-major
.IR Major
.br
Sets the major number. This option is supported only on older systems
(kernel version 2.4) and is ignored on modern Linux systems where major
numbers are dynamically assigned.
.
.HP
.BR \-\-minor
.IR Minor
.br
Set the minor number.
.
.HP
.BR \-\-metadataprofile
.IR ProfileName
.br
Uses and attaches \fIProfileName\fP configuration profile to the logical
volume metadata. Whenever the logical volume is processed next time,
the profile is automatically applied. If the volume group has another
profile attached, the logical volume profile is preferred.
See \fBlvm.conf\fP(5) for more information about metadata profiles.
.
.HP
.BR \-\-monitor
.RB { y | n }
.br
Start or stop monitoring a mirrored or snapshot logical volume with
dmeventd, if it is installed.
If a device used by a monitored mirror reports an I/O error,
the failure is handled according to
\%\fBmirror_image_fault_policy\fP and \fBmirror_log_fault_policy\fP
set in \fBlvm.conf\fP(5).
.
.HP
.BR \-\-noudevsync
.br
Disable udev synchronisation. The
process will not wait for notification from udev.
It will continue irrespective of any possible udev processing
in the background. You should only use this if udev is not running
or has rules that ignore the devices LVM2 creates.
.
.HP
.BR \-p | \-\-permission
.RB { r | rw }
.br
Change access permission to read-only or read/write.
.
.HP
.BR \-M | \-\-persistent
.RB { y | n }
.br
Set to \fBy\fP to make the minor number specified persistent.
Change of persistent numbers is not supported for pool volumes.
.
.HP
.BR \-\-poll
.RB { y | n }
.br
Without polling a logical volume's backgrounded transformation process
will never complete. If there is an incomplete pvmove or lvconvert (for
example, on rebooting after a crash), use \fB\-\-poll y\fP to restart the
process from its last checkpoint. However, it may not be appropriate to
immediately poll a logical volume when it is activated, use
\fB\-\-poll n\fP to defer and then \fB\-\-poll y\fP to restart the process.
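.br
For example, to resume an interrupted pvmove after rebooting
(vg00/lvol1 is a hypothetical LV name):
.br
.B lvchange \-\-poll y vg00/lvol1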
.
.HP
.BR \-\- [ raid ] rebuild
.BR \fIPhysicalVolume
.br
Option can be repeated multiple times.
Selects PhysicalVolume(s) to be rebuilt in a RaidLV.
Use this option instead of
.BR \-\-resync
or
.BR \-\- [ raid ] syncaction
\fBrepair\fP in case the PVs with corrupted data are known and their data
should be reconstructed rather than reconstructing default (rotating) data.
.br
E.g. in a raid1 mirror, the master leg on /dev/sda may hold corrupt data due
to a known transient disk error, thus
.br
\fBlvchange --rebuild /dev/sda LV\fP
.br
will request the master leg to be rebuilt rather than rebuilding
all other legs from the master.
On a raid5 with rotating data and parity
.br
\fBlvchange --rebuild /dev/sda LV\fP
.br
will rebuild all data and parity blocks in the stripe on /dev/sda.
.HP
.BR \-\- [ raid ] maxrecoveryrate
.BR \fIRate [ b | B | s | S | k | K | m | M | g | G ]
.br
Sets the maximum recovery rate for a RAID logical volume. \fIRate\fP
is specified as an amount per second for each device in the array.
If no suffix is given, then KiB/sec/device is assumed. Setting the
recovery rate to \fB0\fP means it will be unbounded.
.
.HP
.BR \-\- [ raid ] minrecoveryrate
.BR \fIRate [ b | B | s | S | k | K | m | M | g | G ]
.br
Sets the minimum recovery rate for a RAID logical volume. \fIRate\fP
is specified as an amount per second for each device in the array.
If no suffix is given, then KiB/sec/device is assumed. Setting the
recovery rate to \fB0\fP means it will be unbounded.
.
.HP
.BR \-\- [ raid ] syncaction
.RB { check | repair }
.br
This argument is used to initiate various RAID synchronization operations.
The \fBcheck\fP and \fBrepair\fP options provide a way to check the
integrity of a RAID logical volume (often referred to as "scrubbing").
These options cause the RAID logical volume to
read all of the data and parity blocks in the array and check for any
discrepancies (e.g. mismatches between mirrors or incorrect parity values).
If \fBcheck\fP is used, the discrepancies will be counted but not repaired.
If \fBrepair\fP is used, the discrepancies will be corrected as they are
encountered. The \fBlvs\fP(8) command can be used to show the number of
discrepancies found or repaired.
.
.HP
.BR \-\- [ raid ] writebehind
.IR IOCount
.br
Specify the maximum number of outstanding writes that are allowed to
devices in a RAID1 logical volume that are marked as write-mostly.
Once this value is exceeded, writes become synchronous (i.e. all writes
to the constituent devices must complete before the array signals the
write has completed). Setting the value to zero clears the preference
and allows the system to choose the value arbitrarily.
.
.HP
.BR \-\- [ raid ] writemostly
.BR \fIPhysicalVolume [ : { y | n | t }]
.br
Mark a device in a RAID1 logical volume as write-mostly. All reads
to these drives will be avoided unless absolutely necessary. This keeps
the number of I/Os to the drive to a minimum. The default behavior is to
set the write-mostly attribute for the specified physical volume in the
logical volume. It is possible to also remove the write-mostly flag by
appending a "\fB:n\fP" to the physical volume or to toggle the value by specifying
"\fB:t\fP". The \fB\-\-writemostly\fP argument can be specified more than one time
in a single command; making it possible to toggle the write-mostly attributes
for all the physical volumes in a logical volume at once.
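.br
For example, to toggle the write-mostly attribute of a hypothetical PV
/dev/sdb in LV vg00/lvol1:
.br
.B lvchange \-\-writemostly /dev/sdb:t vg00/lvol1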
.
.HP
.BR \-r | \-\-readahead
.RB { \fIReadAheadSectors | auto | none }
.br
Set read ahead sector count of this logical volume.
For volume groups with metadata in lvm1 format, this must
be a value between 2 and 120 sectors.
The default value is "\fBauto\fP" which allows the kernel to choose
a suitable value automatically.
"\fBnone\fP" is equivalent to specifying zero.
.
.HP
.BR \-\-refresh
.br
If the logical volume is active, reload its metadata.
This is not necessary in normal operation, but may be useful
if something has gone wrong or if you're doing clustering
manually without a clustered lock manager.
.
.HP
.BR \-\-resync
.br
Forces the complete resynchronization of a mirror. In normal
circumstances you should not need this option because synchronization
happens automatically. Data is read from the primary mirror device
and copied to the others, so this can take a considerable amount of
time - and during this time you are without a complete redundant copy
of your data.
.
.HP
.BR \-\-sysinit
.br
Indicates that \fBlvchange\fP(8) is being invoked from early system
initialisation scripts (e.g. rc.sysinit or an initrd),
before writeable filesystems are available. As such,
some functionality needs to be disabled and this option
acts as a shortcut which selects an appropriate set of options. Currently
this is equivalent to using \fB\-\-ignorelockingfailure\fP,
\fB\-\-ignoremonitoring\fP, \fB\-\-poll n\fP and setting
\fBLVM_SUPPRESS_LOCKING_FAILURE_MESSAGES\fP
environment variable.
If \fB\-\-sysinit\fP is used in conjunction with
\fBlvmetad\fP(8) enabled and running,
autoactivation is preferred over manual activation via direct lvchange call.
Logical volumes are autoactivated according to
\fBauto_activation_volume_list\fP set in \fBlvm.conf\fP(5).
.
.HP
.BR \-Z | \-\-zero
.RB { y | n }
.br
Set zeroing mode for thin pool. Note: already provisioned blocks from pool
in non-zero mode are not cleared in unwritten parts when setting zero to
\fBy\fP.
.
.SH ENVIRONMENT VARIABLES
.
.TP
.B LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES
Suppress locking failure messages.
.
.SH EXAMPLES
.
Changes the permission on volume lvol1 in volume group vg00 to be read-only:
.sp
.B lvchange \-pr vg00/lvol1
.
.SH SEE ALSO
.
.nh
.BR lvm (8),
.BR lvmetad (8),
.BR lvs (8),
.BR lvcreate (8),
.BR vgchange (8),
.BR lvmcache (7),
.BR lvmthin (7),
.BR lvm.conf (5)

man/lvconvert.8.des (new file, 32 lines)

@@ -0,0 +1,32 @@
lvconvert changes the LV type and includes utilities for LV data
maintenance. The LV type controls data layout and redundancy.
The LV type is also called the segment type or segtype.
To display the current LV type, run the command:
.B lvs \-o name,segtype
.I LV
The
.B linear
type is equivalent to the
.B striped
type when one stripe exists.
In that case, the types can sometimes be used interchangeably.
In most cases, the
.B mirror
type is deprecated and the
.B raid1
type should be used. They are both implementations of mirroring.
In some cases, an LV is a single device mapper (dm) layer above physical
devices. In other cases, hidden LVs (dm devices) are layered between the
visible LV and physical devices. LVs in the middle layers are called sub LVs.
A command run on a visible LV sometimes operates on a sub LV rather than
the specified LV. In other cases, a sub LV must be specified directly on
the command line.
Sub LVs can be displayed with the command
.B lvs -a

man/lvconvert.8.end (new file, 95 lines)

@@ -0,0 +1,95 @@
.SH EXAMPLES
Convert a linear LV to a two-way mirror LV.
.br
.B lvconvert \-\-type mirror \-\-mirrors 1 vg/lvol1
Convert a linear LV to a two-way RAID1 LV.
.br
.B lvconvert \-\-type raid1 \-\-mirrors 1 vg/lvol1
Convert a mirror LV to use an in\-memory log.
.br
.B lvconvert \-\-mirrorlog core vg/lvol1
Convert a mirror LV to use a disk log.
.br
.B lvconvert \-\-mirrorlog disk vg/lvol1
Convert a mirror or raid1 LV to a linear LV.
.br
.B lvconvert --type linear vg/lvol1
Convert a mirror LV to a raid1 LV with the same number of images.
.br
.B lvconvert \-\-type raid1 vg/lvol1
Convert a linear LV to a two-way mirror LV, allocating new extents from specific
PV ranges.
.br
.B lvconvert \-\-mirrors 1 vg/lvol1 /dev/sda:0\-15 /dev/sdb:0\-15
Convert a mirror LV to a linear LV, freeing physical extents from a specific PV.
.br
.B lvconvert \-\-type linear vg/lvol1 /dev/sda
Split one image from a mirror or raid1 LV, making it a new LV.
.br
.B lvconvert \-\-splitmirrors 1 \-\-name lv_split vg/lvol1
Split one image from a raid1 LV, and track changes made to the raid1 LV
while the split image remains detached.
.br
.B lvconvert \-\-splitmirrors 1 \-\-trackchanges vg/lvol1
Merge an image (that was previously created with \-\-splitmirrors and
\-\-trackchanges) back into the original raid1 LV.
.br
.B lvconvert \-\-mergemirrors vg/lvol1_rimage_1
Replace PV /dev/sdb1 with PV /dev/sdf1 in a raid1/4/5/6/10 LV.
.br
.B lvconvert \-\-replace /dev/sdb1 vg/lvol1 /dev/sdf1
Replace 3 PVs /dev/sd[b-d]1 with PVs /dev/sd[f-h]1 in a raid1 LV.
.br
.B lvconvert \-\-replace /dev/sdb1 \-\-replace /dev/sdc1 \-\-replace /dev/sdd1
.RS
.B vg/lvol1 /dev/sd[fgh]1
.RE
Replace the maximum of 2 PVs /dev/sd[bc]1 with PVs /dev/sd[gh]1 in a raid6 LV.
.br
.B lvconvert \-\-replace /dev/sdb1 \-\-replace /dev/sdc1 vg/lvol1 /dev/sd[gh]1
Convert an LV into a thin LV in the specified thin pool. The existing LV
is used as an external read\-only origin for the new thin LV.
.br
.B lvconvert \-\-type thin \-\-thinpool vg/tpool1 vg/lvol1
Convert an LV into a thin LV in the specified thin pool. The existing LV
is used as an external read\-only origin for the new thin LV, and is
renamed "external".
.br
.B lvconvert \-\-type thin \-\-thinpool vg/tpool1
.RS
.B \-\-originname external vg/lvol1
.RE
Convert an LV to a cache pool LV using another specified LV for cache pool
metadata.
.br
.B lvconvert \-\-type cache-pool \-\-poolmetadata vg/poolmeta1 vg/lvol1
Convert an LV to a cache LV using the specified cache pool and chunk size.
.br
.B lvconvert \-\-type cache \-\-cachepool vg/cpool1 \-c 128 vg/lvol1
Detach and keep the cache pool from a cache LV.
.br
.B lvconvert \-\-splitcache vg/lvol1
Detach and remove the cache pool from a cache LV.
.br
.B lvconvert \-\-uncache vg/lvol1

man/lvconvert.8.in: file diff suppressed because it is too large

man/lvcreate.8.des (new file, 28 lines)

@@ -0,0 +1,28 @@
lvcreate creates a new LV in a VG. For standard LVs, this requires
allocating logical extents from the VG's free physical extents. If there
is not enough free space, then the VG can be extended (see
\fBvgextend\fP(8)) with other PVs, or existing LVs can be reduced or
removed (see \fBlvremove\fP(8), \fBlvreduce\fP(8)).
To control which PVs a new LV will use, specify one or more PVs as
position args at the end of the command line. lvcreate will allocate
physical extents only from the specified PVs.
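For example, a new LV allocated only from two hypothetical PVs:
.B lvcreate \-L 1g \-n mylv vg00 /dev/sdb /dev/sdc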
lvcreate can also create snapshots of existing LVs, e.g. for backup
purposes. The data in a new snapshot LV represents the content of the
original LV from the time the snapshot was created.
RAID LVs can be created by specifying an LV type when creating the LV (see
\fBlvmraid\fP(7)). Different RAID levels require different numbers of
unique PVs be available in the VG for allocation.
Thin pools (for thin provisioning) and cache pools (for caching) are
represented by special LVs with types thin-pool and cache-pool (see
\fBlvmthin\fP(7) and \fBlvmcache\fP(7)). The pool LVs are not usable as
standard block devices, but the LV names act as references to the pools.
Thin LVs are thinly provisioned from a thin pool, and are created with a
virtual size rather than a physical size. A cache LV is the combination of
a standard LV with a cache pool, used to cache active portions of the LV
to improve performance.

man/lvcreate.8.end (new file, 98 lines)

@@ -0,0 +1,98 @@
.SH EXAMPLES
Create a striped LV with 3 stripes, a stripe size of 8KiB and a size of 100MiB.
The LV name is chosen by lvcreate.
.br
.B lvcreate \-i 3 \-I 8 \-L 100m vg00
Create a raid1 LV with two images, and a usable size of 500 MiB. This
operation requires two devices, one for each mirror image. RAID metadata
(superblock and bitmap) is also included on the two devices.
.br
.B lvcreate \-\-type raid1 \-m1 \-L 500m \-n mylv vg00
Create a mirror LV with two images, and a usable size of 500 MiB.
This operation requires three devices: two for mirror images and
one for a disk log.
.br
.B lvcreate \-\-type mirror \-m1 \-L 500m \-n mylv vg00
Create a mirror LV with 2 images, and a usable size of 500 MiB.
This operation requires 2 devices because the log is in memory.
.br
.B lvcreate \-\-type mirror \-m1 \-\-mirrorlog core \-L 500m \-n mylv vg00
Create a copy\-on\-write snapshot of an LV:
.br
.B lvcreate \-\-snapshot \-\-size 100m \-\-name mysnap vg00/mylv
Create a copy\-on\-write snapshot with a size sufficient
for overwriting 20% of the size of the original LV.
.br
.B lvcreate \-s \-l 20%ORIGIN \-n mysnap vg00/mylv
Create a sparse LV with 1TiB of virtual space, and actual space just under
100MiB.
.br
.B lvcreate \-\-snapshot \-\-virtualsize 1t \-\-size 100m \-\-name mylv vg00
Create a linear LV with a usable size of 64MiB on specific physical extents.
.br
.B lvcreate \-L 64m \-n mylv vg00 /dev/sda:0\-7 /dev/sdb:0\-7
Create a RAID5 LV with a usable size of 5GiB, 3 stripes, a stripe size of
64KiB, using a total of 4 devices (including one for parity).
.br
.B lvcreate \-\-type raid5 \-L 5G \-i 3 \-I 64 \-n mylv vg00
Create a RAID5 LV using all of the free space in the VG and spanning all the
PVs in the VG (note that the command will fail if there are more than 8 PVs in
the VG, in which case \fB\-i 7\fP must be used to get to the current maximum of
8 devices including parity for RaidLVs).
.br
.B lvcreate \-\-config allocation/raid_stripe_all_devices=1
.RS
.B \-\-type raid5 \-l 100%FREE \-n mylv vg00
.RE
Create RAID10 LV with a usable size of 5GiB, using 2 stripes, each on
a two-image mirror. (Note that the \fB-i\fP and \fB-m\fP arguments behave
differently:
\fB-i\fP specifies the total number of stripes,
but \fB-m\fP specifies the number of images in addition
to the first image).
.br
.B lvcreate \-\-type raid10 \-L 5G \-i 2 \-m 1 \-n mylv vg00
Create a 1TiB thin LV, first creating a new thin pool for it, where
the thin pool has 100MiB of space, uses 2 stripes, has a 64KiB stripe
size, and 256KiB chunk size.
.br
.B lvcreate \-\-type thin \-\-name mylv \-\-thinpool mypool
.RS
.B \-V 1t \-L 100m \-i 2 \-I 64 \-c 256 vg00
.RE
Create a thin snapshot of a thin LV (the size option must not be
used, otherwise a copy-on-write snapshot would be created).
.br
.B lvcreate \-\-snapshot \-\-name mysnap vg00/thinvol
Create a thin snapshot of the read-only inactive LV named "origin"
which becomes an external origin for the thin snapshot LV.
.br
.B lvcreate \-\-snapshot \-\-name mysnap \-\-thinpool mypool vg00/origin
Create a cache pool from a fast physical device. The cache pool can
then be used to cache an LV.
.br
.B lvcreate \-\-type cache-pool \-L 1G \-n my_cpool vg00 /dev/fast1
Create a cache LV, first creating a new origin LV on a slow physical device,
then combining the new origin LV with an existing cache pool.
.br
.B lvcreate \-\-type cache \-\-cachepool my_cpool
.RS
.B \-L 100G \-n mylv vg00 /dev/slow1
.RE

man/lvcreate.8.in (deleted)

@@ -1,914 +0,0 @@
.TH LVCREATE 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
.
.\" Use 1st. parameter with \% to fix 'man2html' rendeing on same line!
.de SIZE_G
. IR \\$1 \c
. RB [ b | B | s | S | k | K | m | M | g | G ]
..
.de SIZE_E
. IR \\$1 \c
. RB [ b | B | s | S | k | K | m | M | \c
. BR g | G | t | T | p | P | e | E ]
..
.
.SH NAME
.
lvcreate \- create a logical volume in an existing volume group
.
.SH SYNOPSIS
.
.ad l
.B lvcreate
.RB [ \-a | \-\-activate
.RB [ a ][ e | l | s ]{ y | n }]
.RB [ \-\-addtag
.IR Tag ]
.RB [ \-\-alloc
.IR Allocation\%Policy ]
.RB [ \-A | \-\-autobackup
.RB { y | n }]
.RB [ \-H | \-\-cache ]
.RB [ \-\-cachemode
.RB { passthrough | writeback | writethrough }]
.RB [ \-\-cachepolicy
.IR Policy ]
.RB \%[ \-\-cachepool
.IR CachePoolLogicalVolume ]
.RB [ \-\-cachesettings
.IR Key \fB= Value ]
.RB [ \-c | \-\-chunksize
.IR ChunkSize ]
.RB [ \-\-commandprofile
.IR ProfileName ]
.RB \%[ \-C | \-\-contiguous
.RB { y | n }]
.RB [ \-d | \-\-debug ]
.RB [ \-\-discards
.RB \%{ ignore | nopassdown | passdown }]
.RB [ \-\-errorwhenfull
.RB { y | n }]
.RB [{ \-l | \-\-extents
.BR \fILogicalExtents\%Number [ % { FREE | PVS | VG }]
.RB |
.BR \-L | \-\-size
.BR \fILogicalVolumeSize }
.RB [ \-i | \-\-stripes
.IR Stripes
.RB [ \-I | \-\-stripesize
.IR StripeSize ]]]
.RB [ \-h | \-? | \-\-help ]
.RB [ \-K | \-\-ignoreactivationskip ]
.RB [ \-\-ignoremonitoring ]
.RB [ \-\-minor
.IR Minor
.RB [ \-j | \-\-major
.IR Major ]]
.RB [ \-\-metadataprofile
.IR Profile\%Name ]
.RB [ \-m | \-\-mirrors
.IR Mirrors
.RB [ \-\-corelog | \-\-mirrorlog
.RB { disk | core | mirrored }]
.RB [ \-\-nosync ]
.RB [ \-R | \-\-regionsize
.BR \fIMirrorLogRegionSize ]]
.RB [ \-\-monitor
.RB { y | n }]
.RB [ \-n | \-\-name
.IR Logical\%Volume ]
.RB [ \-\-noudevsync ]
.RB [ \-p | \-\-permission
.RB { r | rw }]
.RB [ \-M | \-\-persistent
.RB { y | n }]
.\" .RB [ \-\-pooldatasize
.\" .I DataVolumeSize
.RB \%[ \-\-poolmetadatasize
.IR MetadataVolumeSize ]
.RB [ \-\-poolmetadataspare
.RB { y | n }]
.RB [ \-\- [ raid ] maxrecoveryrate
.IR Rate ]
.RB [ \-\- [ raid ] minrecoveryrate
.IR Rate ]
.RB [ \-r | \-\-readahead
.RB { \fIReadAheadSectors | auto | none }]
.RB [ \-\-reportformat
.RB {basic | json}]
.RB \%[ \-k | \-\-setactivationskip
.RB { y | n }]
.RB [ \-s | \-\-snapshot ]
.RB [ \-V | \-\-virtualsize
.IR VirtualSize ]
.RB [ \-t | \-\-test ]
.RB [ \-T | \-\-thin ]
.RB [ \-\-thinpool
.IR ThinPoolLogicalVolume ]
.RB [ \-\-type
.IR SegmentType ]
.RB [ \-v | \-\-verbose ]
.RB [ \-W | \-\-wipesignatures
.RB { y | n }]
.RB [ \-Z | \-\-zero
.RB { y | n }]
.RI [ VolumeGroup
.RI |
.RI \%{ ExternalOrigin | Origin | Pool } LogicalVolume
.RI \%[ PhysicalVolumePath [ \fB: \fIPE \fR[ \fB\- PE ]]...]]
.LP
.B lvcreate
.RB [ \-l | \-\-extents
.BR \fILogicalExtentsNumber [ % { FREE | ORIGIN | PVS | VG }]
|
.BR \-L | \-\-size
.\" | \-\-pooldatasize
.IR LogicalVolumeSize ]
.RB [ \-c | \-\-chunksize
.IR ChunkSize ]
.RB \%[ \-\-commandprofile
.IR Profile\%Name ]
.RB [ \-\-noudevsync ]
.RB [ \-\-ignoremonitoring ]
.RB [ \-\-metadataprofile
.IR Profile\%Name ]
.RB \%[ \-\-monitor
.RB { y | n }]
.RB [ \-n | \-\-name
.IR SnapshotLogicalVolumeName ]
.RB [ \-\-reportformat
.RB {basic | json}]
.BR \-s | \-\-snapshot | \-H | \-\-cache
.RI \%{[ VolumeGroup \fB/\fP] OriginalLogicalVolume
.RB \%[ \-V | \-\-virtualsize
.IR VirtualSize ]}
.ad b
.
.SH DESCRIPTION
.
lvcreate creates a new logical volume in a volume group (see
.BR vgcreate "(8), " vgchange (8))
by allocating logical extents from the free physical extent pool
of that volume group. If there are not enough free physical extents then
the volume group can be extended (see
.BR vgextend (8))
with other physical volumes or by reducing existing logical volumes
of this volume group in size (see
.BR lvreduce (8)).
If you specify one or more PhysicalVolumes, allocation of physical
extents will be restricted to these volumes.
.br
.br
The second form supports the creation of snapshot logical volumes which
keep the contents of the original logical volume for backup purposes.
.
.SH OPTIONS
.
See
.BR lvm (8)
for common options.
.
.HP
.BR \-a | \-\-activate
.RB [ a ][ l | e | s ]{ y | n }
.br
Controls the availability of the Logical Volumes for immediate use after
the command finishes running.
By default, new Logical Volumes are activated (\fB\-ay\fP).
If it is possible technically, \fB\-an\fP will leave the new Logical
Volume inactive. But for example, snapshots of active origin can only be
created in the active state so \fB\-an\fP cannot be used with
\fB\-\-type snapshot\fP. This does not apply to thin volume snapshots,
which are by default created with the activation skip flag set
(\fB\-ky\fP).
Normally the \fB\-\-zero n\fP argument has to be supplied too because
zeroing (the default behaviour) also requires activation.
If autoactivation option is used (\fB\-aay\fP), the logical volume is
activated only if it matches an item in the
\fBactivation/auto_activation_volume_list\fP
set in \fBlvm.conf\fP(5).
For autoactivated logical volumes, \fB\-\-zero n\fP and
\fB\-\-wipesignatures n\fP is always assumed and it can't
be overridden. If the clustered locking is enabled,
\fB\-aey\fP will activate exclusively on one node and
.BR \-a { a | l } y
will activate only on the local node.
.
.HP
.BR \-H | \-\-cache
.br
Creates a cache or cache pool logical volume.
.\" or both.
Specifying the optional argument \fB\-\-extents\fP or \fB\-\-size\fP
will cause the creation of the cache logical volume.
.\" Specifying the optional argument \fB\-\-pooldatasize\fP will cause
.\" the creation of the cache pool logical volume.
.\" Specifying both arguments will cause the creation of cache with its
.\" cache pool volume.
When the volume group name is specified together with an existing logical
volume name which is NOT a cache pool name, that volume is treated
as the cache origin volume and a cache pool is created. In this case
\fB\-\-extents\fP or \fB\-\-size\fP specifies the size of the cache pool volume.
See \fBlvmcache\fP(7) for more info about caching support.
Note that the cache segment type requires a dm-cache kernel module version
1.3.0 or greater.
.
.HP
.BR \-\-cachemode
.RB { passthrough | writeback | writethrough }
.br
Specifying a cache mode determines when the writes to a cache LV
are considered complete. When \fBwriteback\fP is specified, a write is
considered complete as soon as it is stored in the cache pool LV.
If \fBwritethrough\fP is specified, a write is considered complete only
when it has been stored in the cache pool LV and on the origin LV.
While \fBwritethrough\fP may be slower for writes, it is more
resilient if something should happen to a device associated with the
cache pool LV. With \fBpassthrough\fP mode, all reads are served
from origin LV (all reads miss the cache) and all writes are
forwarded to the origin LV; additionally, write hits cause cache
block invalidates. See \fBlvmcache(7)\fP for more details.
.
.HP
.BR \-\-cachepolicy
.IR Policy
.br
Only applicable to cached LVs; see also \fBlvmcache(7)\fP. Sets
the cache policy. \fBmq\fP is the basic policy name. \fBsmq\fP is a more advanced
version available in newer kernels.
.
.HP
.BR \-\-cachepool
.IR CachePoolLogicalVolume { Name | Path }
.br
Specifies the name of the cache pool volume. Alternatively, the pool name
can be appended to the volume group name argument.
.
.HP
.BR \-\-cachesettings
.IB Key = Value
.br
Only applicable to cached LVs; see also \fBlvmcache(7)\fP. Sets
the cache tunable settings. In most use-cases, default values should be adequate.
Special string value \fBdefault\fP switches setting back to its default kernel value
and removes it from the list of settings stored in lvm2 metadata.
.
.HP
.BR \-c | \-\-chunksize
.SIZE_G \%ChunkSize
.br
Gives the size of chunk for snapshot, cache pool and thin pool logical volumes.
Default unit is in kilobytes.
.br
For snapshots the value must be power of 2 between 4KiB and 512KiB
and the default value is 4KiB.
.br
For cache pools the value must be a multiple of 32KiB
between 32KiB and 1GiB. The default is 64KiB.
When the size is specified while caching a volume, it may not be smaller
than the chunk size used when the cache pool was created.
.br
For thin pools the value must be a multiple of 64KiB
between 64KiB and 1GiB.
Default value starts with 64KiB and grows up to
fit the pool metadata size within 128MiB,
if the pool metadata size is not specified.
See
.BR lvm.conf (5)
setting \fBallocation/thin_pool_chunk_size_policy\fP
to select different calculation policy.
Thin pool target version <1.4 requires this value to be a power of 2.
For target version <1.5, discard is not supported for non-power-of-2 values.
.
.HP
.BR \-C | \-\-contiguous
.RB { y | n }
.br
Sets or resets the contiguous allocation policy for
logical volumes. Default is no contiguous allocation based
on a next free principle.
.
.HP
.BR \-\-corelog
.br
This is a shortcut for the option \fB\-\-mirrorlog core\fP.
.
.HP
.BR \-\-discards
.RB { ignore | nopassdown | passdown }
.br
Sets discards behavior for thin pool.
Default is \fBpassdown\fP.
.
.HP
.BR \-\-errorwhenfull
.RB { y | n }
.br
Configures thin pool behaviour when data space is exhausted.
Default is \fBn\fPo.
The device will queue I/O operations until the target timeout
(see the dm-thin-pool kernel module option \fIno_space_timeout\fP)
expires, giving the system time to e.g. extend
the size of the thin pool data device.
When set to \fBy\fPes, the I/O operation is immediately errored.
.
.HP
.BR \-K | \-\-ignoreactivationskip
.br
Ignore the flag to skip Logical Volumes during activation.
Use \fB\-\-setactivationskip\fP option to set or reset
activation skipping flag persistently for logical volume.
.
.HP
.BR \-\-ignoremonitoring
.br
Make no attempt to interact with dmeventd unless \fB\-\-monitor\fP
is specified.
.
.HP
.BR -l | \-\-extents
.IR LogicalExtentsNumber \c
.RB [ % { VG | PVS | FREE | ORIGIN }]
.br
Specifies the size of the new LV in logical extents. The number of
physical extents allocated may be different, and depends on the LV type.
Certain LV types require more physical extents for data redundancy or
metadata. An alternate syntax allows the size to be determined indirectly
as a percentage of the size of a related VG, LV, or set of PVs. The
suffix \fB%VG\fP denotes the total size of the VG, the suffix \fB%FREE\fP
the remaining free space in the VG, and the suffix \fB%PVS\fP the free
space in the specified Physical Volumes. For a snapshot, the size
can be expressed as a percentage of the total size of the Origin Logical
Volume with the suffix \fB%ORIGIN\fP (\fB100%ORIGIN\fP provides space for
the whole origin).
When expressed as a percentage, the size defines an upper limit for the
number of logical extents in the new LV. The precise number of logical
extents in the new LV is not determined until the command has completed.
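.br
For example, to use half of the VG's remaining free space for a new LV
(hypothetical names):
.br
.B lvcreate \-l 50%FREE \-n mylv vg00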
.
.HP
.BR \-j | \-\-major
.IR Major
.br
Sets the major number.
Major numbers are not supported with pool volumes.
This option is supported only on older systems
(kernel version 2.4) and is ignored on modern Linux systems where major
numbers are dynamically assigned.
.
.HP
.BR \-\-metadataprofile
.IR ProfileName
.br
Uses and attaches the \fIProfileName\fP configuration profile to the logical
volume metadata. Whenever the logical volume is processed next time,
the profile is automatically applied. If the volume group has another
profile attached, the logical volume profile is preferred.
See \fBlvm.conf\fP(5) for more information about \fBmetadata profiles\fP.
.
.HP
.BR \-\-minor
.IR Minor
.br
Sets the minor number.
Minor numbers are not supported with pool volumes.
.
.HP
.BR \-m | \-\-mirrors
.IR mirrors
.br
Creates a mirrored logical volume with \fImirrors\fP copies.
For example, specifying \fB\-m 1\fP
would result in a mirror with two-sides; that is,
a linear volume plus one copy.
Specifying the optional argument \fB\-\-nosync\fP will cause the creation
of the mirror LV to skip the initial resynchronization. Any data written
afterwards will be mirrored, but the original contents will not be copied.
This is useful for skipping a potentially long and resource intensive initial
sync of an empty mirrored RaidLV.
There are two implementations of mirroring which can be used and correspond
to the "\fIraid1\fP" and "\fImirror\fP" segment types.
The default is "\fIraid1\fP". See the
\fB\-\-type\fP option for more information if you would like to use the
legacy "\fImirror\fP" segment type. See
.BR lvm.conf (5)
settings \fBglobal/mirror_segtype_default\fP
and \fBglobal/raid10_segtype_default\fP
to configure default mirror segment type.
The options
\fB\-\-mirrorlog\fP and \fB\-\-corelog\fP apply
to the legacy "\fImirror\fP" segment type only.
Note the current maxima for mirrors are 7 for "mirror" providing
8 mirror legs and 9 for "raid1" providing 10 legs.
.
.HP
.BR \-\-mirrorlog
.RB { disk | core | mirrored }
.br
Specifies the type of log to be used for logical volumes utilizing
the legacy "\fImirror\fP" segment type.
.br
The default is \fBdisk\fP, which is persistent and requires
a small amount of storage space, usually on a separate device from the
data being mirrored.
.br
Using \fBcore\fP means the mirror is regenerated by copying the data
from the first device each time the logical volume is activated,
like after every reboot.
.br
Using \fBmirrored\fP will create a persistent log that is itself mirrored.
.
.HP
.BR \-\-monitor
.RB { y | n }
.br
Starts or avoids monitoring a mirrored, snapshot or thin pool logical volume with
dmeventd, if it is installed.
If a device used by a monitored mirror reports an I/O error,
the failure is handled according to
\fBactivation/mirror_image_fault_policy\fP
and \fBactivation/mirror_log_fault_policy\fP
set in \fBlvm.conf\fP(5).
.
.HP
.BR \-n | \-\-name
.IR LogicalVolume { Name | Path }
.br
Sets the name for the new logical volume.
.br
Without this option a default name of "lvol#" will be generated where
# is the LVM internal number of the logical volume.
.
.HP
.BR \-\-nosync
.br
Causes the creation of mirror, raid1, raid4, raid5 and raid10 to skip the
initial resynchronization. In case of mirror, raid1 and raid10, any data
written afterwards will be mirrored, but the original contents will not be
copied. In case of raid4 and raid5, no parity blocks will be written,
though any data written afterwards will cause parity blocks to be stored.
.br
This is useful for skipping a potentially long and resource intensive initial
sync of an empty mirror/raid1/raid4/raid5 and raid10 LV.
.br
This option is not valid for raid6, because raid6 relies on proper parity
(P and Q Syndromes) being created during initial synchronization in order
to reconstruct proper user data in case of device failures.
raid0 and raid0_meta don't provide any data copies or parity support
and thus don't support initial resynchronization.
.
.HP
.BR \-\-noudevsync
.br
Disables udev synchronisation. The
process will not wait for notification from udev.
It will continue irrespective of any possible udev processing
in the background. You should only use this if udev is not running
or has rules that ignore the devices LVM2 creates.
.
.HP
.BR \-p | \-\-permission
.RB { r | rw }
.br
Sets access permissions to read only (\fBr\fP) or read and write (\fBrw\fP).
.br
Default is read and write.
.
.HP
.BR \-M | \-\-persistent
.RB { y | n }
.br
Set to \fBy\fP to make the minor number specified persistent.
Pool volumes cannot have persistent major and minor numbers.
Defaults to \fBy\fPes only when major or minor number is specified.
Otherwise it is \fBn\fPo.
.\" .HP
.\" .IR \fB\-\-pooldatasize " " PoolDataVolumeSize [ bBsSkKmMgGtTpPeE ]
.\" Sets the size of pool's data logical volume.
.\" For thin pools you may also specify the size
.\" with the option \fB\-\-size\fP.
.\"
.
.HP
.BR \-\-poolmetadatasize
.SIZE_G \%MetadataVolumeSize
.br
Sets the size of pool's metadata logical volume.
Supported values are in range between 2MiB and 16GiB for thin pool,
and up to 16GiB for cache pool. The minimum value is computed from the pool's
data size.
Default value for thin pool is (Pool_LV_size / Pool_LV_chunk_size * 64b).
To work with a thin pool, there should be at least 25% of free space
when the size of metadata is smaller than 16MiB,
or at least 4MiB of free space otherwise.
Default unit is megabytes.
.
.HP
.BR \-\-poolmetadataspare
.RB { y | n }
.br
Controls creation and maintenance of pool metadata spare logical volume
that will be used for automated pool recovery.
Only one such volume is maintained within a volume group
with the size of the biggest pool metadata volume.
Default is \fBy\fPes.
.
.HP
.BR \-\- [ raid ] maxrecoveryrate
.SIZE_G \%Rate
.br
Sets the maximum recovery rate for a RAID logical volume. \fIRate\fP
is specified as an amount per second for each device in the array.
If no suffix is given, then KiB/sec/device is assumed. Setting the
recovery rate to 0 means it will be unbounded.
.
.HP
.BR \-\- [ raid ] minrecoveryrate
.SIZE_G \%Rate
.br
Sets the minimum recovery rate for a RAID logical volume. \fIRate\fP
is specified as an amount per second for each device in the array.
If no suffix is given, then KiB/sec/device is assumed. Setting the
recovery rate to 0 means it will be unbounded.
.
.HP
.BR \-r | \-\-readahead
.RB { \fIReadAheadSectors | auto | none }
.br
Sets read ahead sector count of this logical volume.
For volume groups with metadata in lvm1 format, this must
be a value between 2 and 120.
The default value is \fBauto\fP which allows the kernel to choose
a suitable value automatically.
\fBnone\fP is equivalent to specifying zero.
.
.HP
.BR \-R | \-\-regionsize
.SIZE_G \%MirrorLogRegionSize
.br
A mirror is divided into regions of this size (in MiB), and the mirror log
uses this granularity to track which regions are in sync.
.
.HP
.BR \-k | \-\-setactivationskip
.RB { y | n }
.br
Controls whether Logical Volumes are persistently flagged to be skipped during
activation. By default, thin snapshot volumes are flagged for activation skip.
See
.BR lvm.conf (5)
\fBactivation/auto_set_activation_skip\fP
for how to change its default behaviour.
To activate such volumes, an extra \fB\-\-ignoreactivationskip\fP
option must be used. The flag is not applied during deactivation. Use
\fBlvchange \-\-setactivationskip\fP
command to change the skip flag for existing volumes.
To see whether the flag is attached, use the \fBlvs\fP command
where the state of the flag is reported within \fBlv_attr\fP bits.
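.br
For example, to create an LV with activation skip set, then activate it
anyway (hypothetical names):
.br
.B lvcreate \-ky \-L 1g \-n mylv vg00
.br
.B lvchange \-ay \-K vg00/mylv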
.
.HP
.BR \-L | \-\-size
.SIZE_E \%LogicalVolumeSize
.br
Gives the size to allocate for the new logical volume.
A size suffix of \fBB\fP for bytes, \fBS\fP for sectors as 512 bytes,
\fBK\fP for kilobytes, \fBM\fP for megabytes,
\fBG\fP for gigabytes, \fBT\fP for terabytes, \fBP\fP for petabytes
or \fBE\fP for exabytes is optional.
.br
Default unit is megabytes.
.
.HP
.BR \-s | \fB\-\-snapshot
.IR OriginalLogicalVolume { Name | Path }
.br
Creates a snapshot logical volume (or snapshot) for an existing, so-called
original logical volume (or origin).
Snapshots provide a 'frozen image' of the contents of the origin
while the origin can still be updated. They enable consistent
backups and online recovery of removed/overwritten data/files.
.br
A thin snapshot is created when the origin is a thin volume and
the size IS NOT specified. A thin snapshot shares the same blocks within
the thin pool volume.
A non-thin snapshot with a specified size does not need
the same amount of storage the origin has. In a typical scenario,
15-20% might be enough. In case the snapshot runs out of storage, use
.BR lvextend (8)
to grow it. Shrinking a snapshot is supported by
.BR lvreduce (8)
as well. Run
.BR lvs (8)
on the snapshot in order to check how much data is allocated to it.
Note: a small amount of the space you allocate to the snapshot is
used to track the locations of the chunks of data, so you should
allocate slightly more space than you actually need and monitor
(\fB\-\-monitor\fP) the rate at which the snapshot data is growing
so you can \fBavoid\fP running out of space.
If \fB\-\-thinpool\fP is specified, a thin volume is created that will
use the given original logical volume as an external origin that
serves unprovisioned blocks.
Only read-only volumes can be used as external origins.
To make the volume an external origin, lvm expects the volume to be inactive.
An external origin volume can be used/shared for many thin volumes
even from different thin pools. See
.BR lvconvert (8)
for online conversion to thin volumes with external origin.
.
.HP
.BR \-i | \-\-stripes
.IR Stripes
.br
Gives the number of stripes.
This is equal to the number of physical volumes across which to scatter
the logical volume data. When creating a RAID 4/5/6 logical volume,
the extra devices which are necessary for parity are
internally accounted for. Specifying \fB\-i 3\fP
would use 3 devices for striped and RAID 0 logical volumes,
4 devices for RAID 4/5, 5 devices for RAID 6 and 6 devices for RAID 10.
Alternatively, RAID 0 will stripe across 2 devices,
RAID 4/5 across 3 PVs, RAID 6 across 5 PVs and RAID 10 across
4 PVs in the volume group if the \fB\-i\fP argument is omitted.
In order to stripe across all PVs of the VG if the \fB\-i\fP argument is
omitted, set raid_stripe_all_devices=1 in the allocation
section of \fBlvm.conf (5)\fP or add
.br
\fB\-\-config allocation/raid_stripe_all_devices=1\fP
.br
to the command.
Note the current maxima for stripes depend on the created RAID type.
For raid10, the maximum of stripes is 32,
for raid0, it is 64,
for raid4/5, it is 63
and for raid6 it is 62.
See the \fB\-\-nosync\fP option to optionally avoid initial synchronization of RaidLVs.
Two implementations of basic striping are available in the kernel.
The original device-mapper implementation is the default and should
normally be used. The alternative implementation using MD, available
since version 1.7 of the RAID device-mapper kernel target (kernel
version 4.2) is provided to facilitate the development of new RAID
features. It may be accessed with \fB--type raid0[_meta]\fP, but is best
avoided at present because of assorted restrictions on resizing and converting
such devices.
.HP
.BR \-I | \-\-stripesize
.IR StripeSize
.br
Gives the number of kilobytes for the granularity of the stripes.
.br
StripeSize must be 2^n (n = 2 to 9) for metadata in LVM1 format.
For metadata in LVM2 format, the stripe size may be a larger
power of 2 but must not exceed the physical extent size.
.
.HP
.BR \-T | \-\-thin
.br
Creates thin pool or thin logical volume or both.
Specifying the optional argument \fB\-\-size\fP or \fB\-\-extents\fP
will cause the creation of the thin pool logical volume.
Specifying the optional argument \fB\-\-virtualsize\fP will cause
the creation of the thin logical volume from given thin pool volume.
Specifying both arguments will cause the creation of both
thin pool and thin volume using this pool.
See \fBlvmthin\fP(7) for more info about thin provisioning support.
Thin provisioning requires device mapper kernel driver
from kernel 3.2 or greater.
.
.HP
.BR \-\-thinpool
.IR ThinPoolLogicalVolume { Name | Path }
.br
Specifies the name of the thin pool volume. Alternatively, the pool name
can be appended to the volume group name argument.
.
.HP
.BR \-\-type
.IR SegmentType
.br