.TH LVCREATE 8 "LVM TOOLS #VERSION#" "Sistina Software UK" \" -*- nroff -*-
.
.\" Use 1st parameter with \% to fix 'man2html' rendering on same line!
.de SIZE_G
. IR \\$1 \c
. RB [ b | B | s | S | k | K | m | M | g | G ]
..
.de SIZE_E
. IR \\$1 \c
. RB [ b | B | s | S | k | K | m | M | \c
. BR g | G | t | T | p | P | e | E ]
..
.
.SH NAME
.
lvcreate \- create a logical volume in an existing volume group
.
.SH SYNOPSIS
.
.ad l
.B lvcreate
.RB [ \-a | \-\-activate
.RB [ a ][ e | l | s ]{ y | n }]
.RB [ \-\-addtag
.IR Tag ]
.RB [ \-\-alloc
.IR Allocation\%Policy ]
.RB [ \-A | \-\-autobackup
.RB { y | n }]
.RB [ \-H | \-\-cache ]
.RB [ \-\-cachemode
.RB { passthrough | writeback | writethrough }]
.RB [ \-\-cachepolicy
.IR Policy ]
.RB \%[ \-\-cachepool
.IR CachePoolLogicalVolume ]
.RB [ \-\-cachesettings
.IR Key \fB= Value ]
.RB [ \-c | \-\-chunksize
.IR ChunkSize ]
.RB [ \-\-commandprofile
.IR ProfileName ]
.RB \%[ \-C | \-\-contiguous
.RB { y | n }]
.RB [ \-d | \-\-debug ]
.RB [ \-\-discards
.RB \%{ ignore | nopassdown | passdown }]
.RB [ \-\-errorwhenfull
.RB { y | n }]
.RB [{ \-l | \-\-extents
.BR \fILogicalExtents\%Number [ % { FREE | PVS | VG }]
.RB |
.BR \-L | \-\-size
.BR \fILogicalVolumeSize }
.RB [ \-i | \-\-stripes
.IR Stripes
.RB [ \-I | \-\-stripesize
.IR StripeSize ]]]
.RB [ \-h | \-? | \-\-help ]
.RB [ \-K | \-\-ignoreactivationskip ]
.RB [ \-\-ignoremonitoring ]
.RB [ \-\-minor
.IR Minor
.RB [ \-j | \-\-major
.IR Major ]]
.RB [ \-\-metadataprofile
.IR Profile\%Name ]
.RB [ \-m | \-\-mirrors
.IR Mirrors
.RB [ \-\-corelog | \-\-mirrorlog
.RB { disk | core | mirrored }]
.RB [ \-\-nosync ]
.RB [ \-R | \-\-regionsize
.BR \fIMirrorLogRegionSize ]]
.RB [ \-\-monitor
.RB { y | n }]
.RB [ \-n | \-\-name
.IR Logical\%Volume ]
.RB [ \-\-noudevsync ]
.RB [ \-p | \-\-permission
.RB { r | rw }]
.RB [ \-M | \-\-persistent
.RB { y | n }]
.\" .RB [ \-\-pooldatasize
.\" .I DataVolumeSize
.RB \%[ \-\-poolmetadatasize
.IR MetadataVolumeSize ]
.RB [ \-\-poolmetadataspare
.RB { y | n }]
.RB [ \-\- [ raid ] maxrecoveryrate
.IR Rate ]
.RB [ \-\- [ raid ] minrecoveryrate
.IR Rate ]
.RB [ \-r | \-\-readahead
.RB { \fIReadAheadSectors | auto | none }]
.RB [ \-\-reportformat
.RB { basic | json }]
.RB \%[ \-k | \-\-setactivationskip
.RB { y | n }]
.RB [ \-s | \-\-snapshot ]
.RB [ \-V | \-\-virtualsize
.IR VirtualSize ]
.RB [ \-t | \-\-test ]
.RB [ \-T | \-\-thin ]
.RB [ \-\-thinpool
.IR ThinPoolLogicalVolume ]
.RB [ \-\-type
.IR SegmentType ]
.RB [ \-v | \-\-verbose ]
.RB [ \-W | \-\-wipesignatures
.RB { y | n }]
.RB [ \-Z | \-\-zero
.RB { y | n }]
.RI [ VolumeGroup
.RI |
.RI \%{ ExternalOrigin | Origin | Pool } LogicalVolume
.RI \%[ PhysicalVolumePath [ \fB: \fIPE \fR[ \fB\- PE ]]...]]
.LP
.B lvcreate
.RB [ \-l | \-\-extents
.BR \fILogicalExtentsNumber [ % { FREE | ORIGIN | PVS | VG }]
|
.BR \-L | \-\-size
.\" | \-\-pooldatasize
.IR LogicalVolumeSize ]
.RB [ \-c | \-\-chunksize
.IR ChunkSize ]
.RB \%[ \-\-commandprofile
.IR Profile\%Name ]
.RB [ \-\-noudevsync ]
.RB [ \-\-ignoremonitoring ]
.RB [ \-\-metadataprofile
.IR Profile\%Name ]
.RB \%[ \-\-monitor
.RB { y | n }]
.RB [ \-n | \-\-name
.IR SnapshotLogicalVolumeName ]
.RB [ \-\-reportformat
.RB { basic | json }]
.BR \-s | \-\-snapshot | \-H | \-\-cache
.RI \%{[ VolumeGroup \fB/\fP] OriginalLogicalVolume
.RB \%[ \-V | \-\-virtualsize
.IR VirtualSize ]}
.ad b
.
.SH DESCRIPTION
.
lvcreate creates a new logical volume in a volume group (see
.BR vgcreate "(8), " vgchange (8))
by allocating logical extents from the free physical extent pool
of that volume group. If there are not enough free physical extents then
the volume group can be extended (see
.BR vgextend (8))
with other physical volumes or by reducing existing logical volumes
of this volume group in size (see
.BR lvreduce (8)).
If you specify one or more PhysicalVolumes, allocation of physical
extents will be restricted to these volumes.
.br
.br
The second form supports the creation of snapshot logical volumes which
keep the contents of the original logical volume for backup purposes.
.
.SH OPTIONS
.
See
.BR lvm (8)
for common options.
.
.HP
.BR \-a | \-\-activate
.RB [ a ][ l | e | s ]{ y | n }
.br
Controls the availability of the Logical Volumes for immediate use after
the command finishes running.
By default, new Logical Volumes are activated (\fB\-ay\fP).
If it is technically possible, \fB\-an\fP will leave the new Logical
Volume inactive. But, for example, snapshots of an active origin can only be
created in the active state, so \fB\-an\fP cannot be used with
\fB\-\-type snapshot\fP. This does not apply to thin volume snapshots,
which are by default created with a flag to skip their activation
(\fB\-ky\fP).
Normally the \fB\-\-zero n\fP argument has to be supplied too, because
zeroing (the default behaviour) also requires activation.
If the autoactivation option is used (\fB\-aay\fP), the logical volume is
activated only if it matches an item in the
\fBactivation/auto_activation_volume_list\fP
set in \fBlvm.conf\fP(5).
For autoactivated logical volumes, \fB\-\-zero n\fP and
\fB\-\-wipesignatures n\fP are always assumed and cannot
be overridden. If clustered locking is enabled,
\fB\-aey\fP will activate exclusively on one node and
.BR \-a { a | l } y
will activate only on the local node.
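.br
For example, an illustrative command (the volume group \fIvg00\fP and the
volume name \fIlv1\fP are placeholders) that creates a new volume which is
left inactive and therefore not zeroed:
.sp
.B lvcreate \-an \-\-zero n \-L 1G \-n lv1 vg00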
.
.HP
.BR \-H | \-\-cache
.br
Creates a cache or cache pool logical volume.
.\" or both.
Specifying the optional argument \fB\-\-extents\fP or \fB\-\-size\fP
will cause the creation of the cache logical volume.
.\" Specifying the optional argument \fB\-\-pooldatasize\fP will cause
.\" the creation of the cache pool logical volume.
.\" Specifying both arguments will cause the creation of cache with its
.\" cache pool volume.
When the volume group name is specified together with an existing logical
volume name which is NOT a cache pool name, such a volume is treated
as the cache origin volume and a cache pool is created. In this case,
\fB\-\-extents\fP or \fB\-\-size\fP is used to specify the size of the cache pool volume.
See \fBlvmcache\fP(7) for more info about caching support.
Note that the cache segment type requires a dm-cache kernel module version
1.3.0 or greater.
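.br
For example, an illustrative command (assuming \fIvg00/lv1\fP is an existing
logical volume that is not a cache pool) creating a 1GiB cache pool and
using it to cache \fIvg00/lv1\fP:
.sp
.B lvcreate \-H \-L 1G vg00/lv1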
.
.HP
.BR \-\-cachemode
.RB { passthrough | writeback | writethrough }
.br
The specified cache mode determines when writes to a cache LV
are considered complete. When \fBwriteback\fP is specified, a write is
considered complete as soon as it is stored in the cache pool LV.
If \fBwritethrough\fP is specified, a write is considered complete only
when it has been stored in the cache pool LV and on the origin LV.
While \fBwritethrough\fP may be slower for writes, it is more
resilient if something should happen to a device associated with the
cache pool LV. With \fBpassthrough\fP mode, all reads are served
from the origin LV (all reads miss the cache) and all writes are
forwarded to the origin LV; additionally, write hits cause cache
block invalidates. See \fBlvmcache\fP(7) for more details.
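.br
For example, an illustrative command (the volume \fIvg00/lv1\fP and the size
are placeholders) caching an existing volume in \fBwritethrough\fP mode:
.sp
.B lvcreate \-H \-\-cachemode writethrough \-L 1G vg00/lv1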
.
.HP
.BR \-\-cachepolicy
.IR Policy
.br
Only applicable to cached LVs; see also \fBlvmcache\fP(7). Sets
the cache policy. \fBmq\fP is the basic policy name. \fBsmq\fP is a more
advanced version available in newer kernels.
.
.HP
.BR \-\-cachepool
.IR CachePoolLogicalVolume { Name | Path }
.br
Specifies the name of the cache pool volume. Alternatively, the pool name
can be appended to the volume group name argument.
.
.HP
.BR \-\-cachesettings
.IB Key = Value
.br
Only applicable to cached LVs; see also \fBlvmcache\fP(7). Sets
the cache tunable settings. In most use-cases, the default values should be adequate.
The special string value \fBdefault\fP switches a setting back to its default kernel value
and removes it from the list of settings stored in lvm2 metadata.
.
.HP
.BR \-c | \-\-chunksize
.SIZE_G \%ChunkSize
.br
Gives the chunk size for snapshot, cache pool and thin pool logical volumes.
The default unit is kilobytes.
.br
For snapshots the value must be a power of 2 between 4KiB and 512KiB;
the default value is 4KiB.
.br
For cache pools the value must be a multiple of 32KiB
between 32KiB and 1GiB. The default is 64KiB.
When a size is specified while caching a volume, it may not be smaller
than the chunk size used when the cache pool was created.
.br
For thin pools the value must be a multiple of 64KiB
between 64KiB and 1GiB.
If the pool metadata size is not specified, the default value starts at
64KiB and grows as needed to fit the pool metadata within 128MiB.
See the
.BR lvm.conf (5)
setting \fBallocation/thin_pool_chunk_size_policy\fP
to select a different calculation policy.
Thin pool target versions <1.4 require this value to be a power of 2.
For target versions <1.5, discard is not supported for non-power-of-2 values.
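.br
For example, an illustrative command (the pool name and sizes are
placeholders) creating a thin pool with an explicit 256KiB chunk size:
.sp
.B lvcreate \-T \-L 10G \-c 256 vg00/pool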
.
.HP
.BR \-C | \-\-contiguous
.RB { y | n }
.br
Sets or resets the contiguous allocation policy for
logical volumes. The default is non-contiguous allocation based
on a next-free principle.
.
.HP
.BR \-\-corelog
.br
This is a shortcut for the option \fB\-\-mirrorlog core\fP.
.
.HP
.BR \-\-discards
.RB { ignore | nopassdown | passdown }
.br
Sets the discards behavior of a thin pool.
The default is \fBpassdown\fP.
.
.HP
.BR \-\-errorwhenfull
.RB { y | n }
.br
Configures thin pool behaviour when the data space is exhausted.
The default is \fBn\fPo.
The device will queue I/O operations until the target timeout
(see the dm-thin-pool kernel module option \fBno_space_timeout\fP)
expires. This gives the system time to e.g. extend
the size of the thin pool data device.
When set to \fBy\fPes, the I/O operation is immediately errored.
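.br
For example, an illustrative command (the pool name and size are
placeholders) creating a thin pool that errors I/O immediately once its
data space is exhausted:
.sp
.B lvcreate \-T \-L 10G \-\-errorwhenfull y vg00/pool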
.
.HP
.BR \-K | \-\-ignoreactivationskip
.br
Ignore the flag to skip Logical Volumes during activation.
Use the \fB\-\-setactivationskip\fP option to set or reset the
activation skip flag persistently for a logical volume.
.
.HP
.BR \-\-ignoremonitoring
.br
Make no attempt to interact with dmeventd unless \fB\-\-monitor\fP
is specified.
.
.HP
.BR \-l | \-\-extents
.IR LogicalExtentsNumber \c
.RB [ % { VG | PVS | FREE | ORIGIN }]
.br
Specifies the size of the new LV in logical extents. The number of
physical extents allocated may be different, and depends on the LV type.
Certain LV types require more physical extents for data redundancy or
metadata. An alternate syntax allows the size to be determined indirectly
as a percentage of the size of a related VG, LV, or set of PVs. The
suffix \fB%VG\fP denotes the total size of the VG, the suffix \fB%FREE\fP
the remaining free space in the VG, and the suffix \fB%PVS\fP the free
space in the specified Physical Volumes. For a snapshot, the size
can be expressed as a percentage of the total size of the Origin Logical
Volume with the suffix \fB%ORIGIN\fP (\fB100%ORIGIN\fP provides space for
the whole origin).
When expressed as a percentage, the size defines an upper limit for the
number of logical extents in the new LV. The precise number of logical
extents in the new LV is not determined until the command has completed.
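.br
For example, an illustrative command (the volume group and volume name are
placeholders) allocating all remaining free space of the volume group:
.sp
.B lvcreate \-l 100%FREE \-n lv1 vg00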
.
.HP
.BR \-j | \-\-major
.IR Major
.br
Sets the major number.
Major numbers are not supported with pool volumes.
This option is supported only on older systems
(kernel version 2.4) and is ignored on modern Linux systems where major
numbers are dynamically assigned.
.
.HP
.BR \-\-metadataprofile
.IR ProfileName
.br
Uses and attaches the \fIProfileName\fP configuration profile to the logical
volume metadata. Whenever the logical volume is processed next time,
the profile is automatically applied. If the volume group has another
profile attached, the logical volume profile is preferred.
See \fBlvm.conf\fP(5) for more information about \fBmetadata profiles\fP.
.
.HP
.BR \-\-minor
.IR Minor
.br
Sets the minor number.
Minor numbers are not supported with pool volumes.
.
.HP
.BR \-m | \-\-mirrors
.IR mirrors
.br
Creates a mirrored logical volume with \fImirrors\fP copies.
For example, specifying \fB\-m 1\fP
would result in a mirror with two sides; that is,
a linear volume plus one copy.

Specifying the optional argument \fB\-\-nosync\fP will cause the creation
of the mirror LV to skip the initial resynchronization. Any data written
afterwards will be mirrored, but the original contents will not be copied.

This is useful for skipping a potentially long and resource-intensive initial
sync of an empty mirrored RaidLV.

There are two implementations of mirroring which can be used and correspond
to the "\fIraid1\fP" and "\fImirror\fP" segment types.
The default is "\fIraid1\fP". See the
\fB\-\-type\fP option for more information if you would like to use the
legacy "\fImirror\fP" segment type. See the
.BR lvm.conf (5)
settings \fBglobal/mirror_segtype_default\fP
and \fBglobal/raid10_segtype_default\fP
to configure the default mirror segment type.
The options
\fB\-\-mirrorlog\fP and \fB\-\-corelog\fP apply
to the legacy "\fImirror\fP" segment type only.

Note the current maxima for mirrors are 7 for "mirror", providing
8 mirror legs, and 9 for "raid1", providing 10 legs.
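.br
For example, an illustrative command (names and size are placeholders)
creating a two-sided mirror with the legacy "\fImirror\fP" segment type
instead of the default "\fIraid1\fP":
.sp
.B lvcreate \-\-type mirror \-m 1 \-L 1G \-n lv1 vg00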
.
.HP
.BR \-\-mirrorlog
.RB { disk | core | mirrored }
.br
Specifies the type of log to be used for logical volumes utilizing
the legacy "\fImirror\fP" segment type.
.br
The default is \fBdisk\fP, which is persistent and requires
a small amount of storage space, usually on a separate device from the
data being mirrored.
.br
Using \fBcore\fP means the mirror is regenerated by copying the data
from the first device each time the logical volume is activated,
e.g. after every reboot.
.br
Using \fBmirrored\fP will create a persistent log that is itself mirrored.
.
.HP
.BR \-\-monitor
.RB { y | n }
.br
Starts or avoids monitoring a mirrored, snapshot or thin pool logical volume with
dmeventd, if it is installed.
If a device used by a monitored mirror reports an I/O error,
the failure is handled according to
\fBactivation/mirror_image_fault_policy\fP
and \fBactivation/mirror_log_fault_policy\fP
set in \fBlvm.conf\fP(5).
.
.HP
.BR \-n | \-\-name
.IR LogicalVolume { Name | Path }
.br
Sets the name for the new logical volume.
.br
Without this option a default name of "lvol#" will be generated, where
# is the LVM internal number of the logical volume.
.
.HP
.BR \-\-nosync
.br
Causes the creation of mirror, raid1, raid4, raid5 and raid10 volumes to skip the
initial resynchronization. In case of mirror, raid1 and raid10, any data
written afterwards will be mirrored, but the original contents will not be
copied. In case of raid4 and raid5, no parity blocks will be written,
though any data written afterwards will cause parity blocks to be stored.
.br
This is useful for skipping a potentially long and resource-intensive initial
sync of an empty mirror/raid1/raid4/raid5 or raid10 LV.
.br
This option is not valid for raid6, because raid6 relies on proper parity
(P and Q syndromes) being created during initial synchronization in order
to reconstruct proper user data in case of device failures.

raid0 and raid0_meta don't provide any data copies or parity support
and thus don't support initial resynchronization.
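.br
For example, an illustrative command (names and size are placeholders)
creating an empty raid1 volume while skipping the initial synchronization:
.sp
.B lvcreate \-\-type raid1 \-m 1 \-\-nosync \-L 1G \-n lv1 vg00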
.
.HP
.BR \-\-noudevsync
.br
Disables udev synchronisation. The
process will not wait for notification from udev.
It will continue irrespective of any possible udev processing
in the background. You should only use this if udev is not running
or has rules that ignore the devices LVM2 creates.
.
.HP
.BR \-p | \-\-permission
.RB { r | rw }
.br
Sets access permissions to read only (\fBr\fP) or read and write (\fBrw\fP).
.br
Default is read and write.
.
.HP
.BR \-M | \-\-persistent
.RB { y | n }
.br
Set to \fBy\fP to make the specified minor number persistent.
Pool volumes cannot have persistent major and minor numbers.
Defaults to \fBy\fPes only when a major or minor number is specified.
Otherwise it is \fBn\fPo.
.\" .HP
.\" .IR \fB\-\-pooldatasize " " PoolDataVolumeSize [ bBsSkKmMgGtTpPeE ]
.\" Sets the size of pool's data logical volume.
.\" For thin pools you may also specify the size
.\" with the option \fB\-\-size\fP.
.\"
.
.HP
.BR \-\-poolmetadatasize
.SIZE_G \%MetadataVolumeSize
.br
Sets the size of the pool's metadata logical volume.
Supported values are in the range between 2MiB and 16GiB for a thin pool,
and up to 16GiB for a cache pool. The minimum value is computed from the
pool's data size.
The default value for a thin pool is (Pool_LV_size / Pool_LV_chunk_size * 64b).
To work with a thin pool, there should be at least 25% of the metadata volume
free when the metadata size is smaller than 16MiB,
or at least 4MiB of free space otherwise.
The default unit is megabytes.
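.br
For example, an illustrative command (the pool name and sizes are
placeholders) creating a thin pool with an explicitly sized 128MiB
metadata volume:
.sp
.B lvcreate \-T \-L 10G \-\-poolmetadatasize 128M vg00/pool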
.
.HP
.BR \-\-poolmetadataspare
.RB { y | n }
.br
Controls creation and maintenance of the pool metadata spare logical volume
that will be used for automated pool recovery.
Only one such volume is maintained within a volume group,
with the size of the biggest pool metadata volume.
Default is \fBy\fPes.
.
.HP
.BR \-\- [ raid ] maxrecoveryrate
.SIZE_G \%Rate
.br
Sets the maximum recovery rate for a RAID logical volume. \fIRate\fP
is specified as an amount per second for each device in the array.
If no suffix is given, then KiB/sec/device is assumed. Setting the
recovery rate to 0 means it will be unbounded.
.
.HP
.BR \-\- [ raid ] minrecoveryrate
.SIZE_G \%Rate
.br
Sets the minimum recovery rate for a RAID logical volume. \fIRate\fP
is specified as an amount per second for each device in the array.
If no suffix is given, then KiB/sec/device is assumed. Setting the
recovery rate to 0 means it will be unbounded.
.
.HP
.BR \-r | \-\-readahead
.RB { \fIReadAheadSectors | auto | none }
.br
Sets the read ahead sector count of this logical volume.
For volume groups with metadata in lvm1 format, this must
be a value between 2 and 120.
The default value is \fBauto\fP which allows the kernel to choose
a suitable value automatically.
\fBnone\fP is equivalent to specifying zero.
.
.HP
.BR \-R | \-\-regionsize
.SIZE_G \%MirrorLogRegionSize
.br
A mirror is divided into regions of this size (in MiB), and the mirror log
uses this granularity to track which regions are in sync.
.
.HP
.BR \-k | \-\-setactivationskip
.RB { y | n }
.br
Controls whether Logical Volumes are persistently flagged to be skipped during
activation. By default, thin snapshot volumes are flagged for activation skip.
See the
.BR lvm.conf (5)
setting \fBactivation/auto_set_activation_skip\fP
for how to change this default behaviour.
To activate such volumes, an extra \fB\-\-ignoreactivationskip\fP
option must be used. The flag is not applied during deactivation. Use the
\fBlvchange \-\-setactivationskip\fP
command to change the skip flag for existing volumes.
To see whether the flag is attached, use the \fBlvs\fP command,
where the state of the flag is reported within the \fBlv_attr\fP bits.
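.br
For example, an illustrative pair of commands (names and size are
placeholders) creating a snapshot flagged for activation skip and then
activating it explicitly:
.sp
.B lvcreate \-s \-k y \-L 100M \-n snap vg00/lvol1
.br
.B lvchange \-a y \-K vg00/snap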
.
.HP
.BR \-L | \-\-size
.SIZE_E \%LogicalVolumeSize
.br
Gives the size to allocate for the new logical volume.
A size suffix of \fBB\fP for bytes, \fBS\fP for sectors as 512 bytes,
\fBK\fP for kilobytes, \fBM\fP for megabytes,
\fBG\fP for gigabytes, \fBT\fP for terabytes, \fBP\fP for petabytes
or \fBE\fP for exabytes is optional.
.br
Default unit is megabytes.
.
.HP
.BR \-s | \-\-snapshot
.IR OriginalLogicalVolume { Name | Path }
.br
Creates a snapshot logical volume (or snapshot) for an existing, so-called
original logical volume (or origin).
Snapshots provide a 'frozen image' of the contents of the origin
while the origin can still be updated. They enable consistent
backups and online recovery of removed/overwritten data/files.
.br
A thin snapshot is created when the origin is a thin volume and
the size IS NOT specified. A thin snapshot shares the same blocks within
the thin pool volume.
A non-thin snapshot with the specified size does not need
the same amount of storage the origin has. In a typical scenario,
15-20% might be enough. In case the snapshot runs out of storage, use
.BR lvextend (8)
to grow it. Shrinking a snapshot is supported by
.BR lvreduce (8)
as well. Run
.BR lvs (8)
on the snapshot in order to check how much data is allocated to it.
Note: a small amount of the space you allocate to the snapshot is
used to track the locations of the chunks of data, so you should
allocate slightly more space than you actually need and monitor
(\fB\-\-monitor\fP) the rate at which the snapshot data is growing
so you can \fBavoid\fP running out of space.
If \fB\-\-thinpool\fP is specified, a thin volume is created that will
use the given original logical volume as an external origin that
serves unprovisioned blocks.
Only read-only volumes can be used as external origins.
To make the volume an external origin, lvm expects the volume to be inactive.
An external origin volume can be used/shared by many thin volumes,
even from different thin pools. See
.BR lvconvert (8)
for online conversion to thin volumes with an external origin.
.
.HP
.BR \-i | \-\-stripes
.IR Stripes
.br
Gives the number of stripes.
This is equal to the number of physical volumes over which to scatter
the logical volume data. When creating a RAID 4/5/6 logical volume,
the extra devices which are necessary for parity are
internally accounted for. Specifying \fB\-i 3\fP
would use 3 devices for striped and RAID 0 logical volumes,
4 devices for RAID 4/5, 5 devices for RAID 6 and 6 devices for RAID 10.
Alternatively, RAID 0 will stripe across 2 devices,
RAID 4/5 across 3 PVs, RAID 6 across 5 PVs and RAID 10 across
4 PVs in the volume group if the \fB\-i\fP argument is omitted.
In order to stripe across all PVs of the VG if the \fB\-i\fP argument is
omitted, set raid_stripe_all_devices=1 in the allocation
section of \fBlvm.conf\fP(5) or add
.br
\fB\-\-config allocation/raid_stripe_all_devices=1\fP
.br
to the command.

Note the current maxima for stripes depend on the created RAID type.
For raid10, the maximum number of stripes is 32,
for raid0, it is 64,
for raid4/5, it is 63
and for raid6 it is 62.

See the \fB\-\-nosync\fP option to optionally avoid the initial synchronization of RaidLVs.

Two implementations of basic striping are available in the kernel.
The original device-mapper implementation is the default and should
normally be used. The alternative implementation using MD, available
since version 1.7 of the RAID device-mapper kernel target (kernel
version 4.2), is provided to facilitate the development of new RAID
features. It may be accessed with \fB\-\-type raid0[_meta]\fP, but is best
avoided at present because of assorted restrictions on resizing and converting
such devices.
.
.HP
.BR \-I | \-\-stripesize
.IR StripeSize
.br
Gives the number of kilobytes for the granularity of the stripes.
.br
StripeSize must be 2^n (n = 2 to 9) for metadata in LVM1 format.
For metadata in LVM2 format, the stripe size may be a larger
power of 2 but must not exceed the physical extent size.
.
.HP
.BR \-T | \-\-thin
.br
Creates a thin pool or a thin logical volume, or both.
Specifying the optional argument \fB\-\-size\fP or \fB\-\-extents\fP
will cause the creation of the thin pool logical volume.
Specifying the optional argument \fB\-\-virtualsize\fP will cause
the creation of the thin logical volume from the given thin pool volume.
Specifying both arguments will cause the creation of both
a thin pool and a thin volume using this pool.
See \fBlvmthin\fP(7) for more info about thin provisioning support.
Thin provisioning requires the device-mapper kernel driver
from kernel 3.2 or greater.
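.br
For example, an illustrative command (assuming the thin pool \fIvg00/pool\fP
already exists) creating a 10GiB thin volume in it:
.sp
.B lvcreate \-T vg00/pool \-V 10G \-n thin_lv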
.
.HP
.BR \-\-thinpool
.IR ThinPoolLogicalVolume { Name | Path }
.br
Specifies the name of the thin pool volume. Alternatively, the pool name
can be appended to the volume group name argument.
.
.HP
.BR \-\-type
.IR SegmentType
.br
Creates a logical volume with the specified segment type.
Supported types are:
.BR cache ,
.BR cache-pool ,
.BR error ,
.BR linear ,
.BR mirror ,
.BR raid0 ,
.BR raid1 ,
.BR raid4 ,
.BR raid5_la ,
.BR raid5_ls
.RB (=
.BR raid5 ),
.BR raid5_ra ,
.BR raid5_rs ,
.BR raid6_nc ,
.BR raid6_nr ,
.BR raid6_zr
.RB (=
.BR raid6 ),
.BR raid10 ,
.BR snapshot ,
.BR striped ,
.BR thin ,
.BR thin-pool
or
.BR zero .
A segment type may have a command-line switch alias that
enables its use.
When the type is not explicitly specified, an implicit type
is selected from a combination of options:
.BR \-H | \-\-cache | \-\-cachepool
(cache or cachepool),
.BR \-T | \-\-thin | \-\-thinpool
(thin or thinpool),
.BR \-m | \-\-mirrors
(raid1 or mirror),
.BR \-s | \-\-snapshot | \-V | \-\-virtualsize
(snapshot or thin),
.BR \-i | \-\-stripes
(striped).
The default segment type is \fBlinear\fP.
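.br
For example, an illustrative command (names and sizes are placeholders)
explicitly requesting a raid6 volume with 3 stripes, i.e. 5 devices
including the two parity devices:
.sp
.B lvcreate \-\-type raid6 \-i 3 \-I 64 \-L 10G \-n lv1 vg00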
.
.HP
.BR \-V | \-\-virtualsize
.SIZE_E \%VirtualSize
.br
Creates a thinly provisioned device or a sparse device of the given size (in MiB by default).
See the
.BR lvm.conf (5)
setting \fBglobal/sparse_segtype_default\fP
to configure the default sparse segment type.
See \fBlvmthin\fP(7) for more info about thin provisioning support.
Anything written to a sparse snapshot will be returned when reading from it.
Reading from other areas of the device will return blocks of zeros.
A virtual snapshot (sparse snapshot) is implemented by creating
a hidden virtual device of the requested size using the zero target.
A suffix of _vorigin is used for this device.
Note: using sparse snapshots is not efficient for larger
device sizes (GiB); thin provisioning should be used in this case.
.
.HP
.BR \-W | \-\-wipesignatures
.RB { y | n }
.br
Controls detection and subsequent wiping of signatures on a newly created
Logical Volume. There's a prompt for each signature detected to confirm
its wiping (unless \fB\-\-yes\fP is used, in which case LVM assumes a 'yes' answer
for each prompt automatically). If this option is not specified, then by
default \fB\-W\fP | \fB\-\-wipesignatures y\fP is assumed each time
zeroing is done (\fB\-Z\fP | \fB\-\-zero y\fP). This default behaviour
can be controlled by the \fB\%allocation/wipe_signatures_when_zeroing_new_lvs\fP
setting found in
.BR lvm.conf (5).
.br
If blkid wiping is used (the \fBallocation/use_blkid_wiping\fP setting in
.BR lvm.conf (5))
and LVM2 is compiled with blkid wiping support, then the \fBblkid\fP(8) library is used
to detect the signatures (use the \fBblkid \-k\fP command to list the signatures that are recognized).
Otherwise, native LVM2 code is used to detect signatures (MD RAID, swap and LUKS
signatures are detected only in this case).
.br
The logical volume is not wiped if the read-only flag is set.
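.br
For example, an illustrative command (names and size are placeholders)
wiping any detected signatures without prompting:
.sp
.B lvcreate \-W y \-\-yes \-L 1G \-n lv1 vg00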
.
.HP
.BR \-Z | \-\-zero
.RB { y | n }
.br
Controls zeroing of the first 4KiB of data in the new logical volume.
Default is \fBy\fPes.
Snapshot COW volumes are always zeroed.
The logical volume is not zeroed if the read-only flag is set.
.br
Warning: trying to mount an unzeroed logical volume can cause the system to
hang.
.
.SH EXAMPLES
.
Creates a striped logical volume with 3 stripes, a stripe size of 8KiB
and a size of 100MiB in the volume group named vg00.
The logical volume name will be chosen by lvcreate:
.sp
.B lvcreate \-i 3 \-I 8 \-L 100M vg00

Creates a mirror logical volume with 2 sides with a usable size of 500 MiB.
This operation would require 3 devices (or the option
\fB\-\-alloc \%anywhere\fP) - two for the mirror
devices and one for the disk log:
.sp
.B lvcreate \-m1 \-L 500M vg00

Creates a mirror logical volume with 2 sides with a usable size of 500 MiB.
This operation would require 2 devices - the log is "in-memory":
.sp
.B lvcreate \-m1 \-\-mirrorlog core \-L 500M vg00

Creates a snapshot logical volume named "vg00/snap" which has access to the
contents of the original logical volume named "vg00/lvol1"
at snapshot logical volume creation time. If the original logical volume
contains a file system, you can mount the snapshot logical volume on an
arbitrary directory in order to access the contents of the filesystem to run
a backup while the original filesystem continues to get updated:
.sp
.B lvcreate \-\-size 100m \-\-snapshot \-\-name snap /dev/vg00/lvol1

Creates a snapshot logical volume named "vg00/snap" with a size sufficient
for overwriting 20% of the original logical volume named "vg00/lvol1":
.sp
.B lvcreate \-s \-l 20%ORIGIN \-\-name snap vg00/lvol1

Creates a sparse device named /dev/vg1/sparse of size 1TiB with space for just
under 100MiB of actual data on it:
.sp
.B lvcreate \-\-virtualsize 1T \-\-size 100M \-\-snapshot \-\-name sparse vg1

Creates a linear logical volume "vg00/lvol1" using physical extents
/dev/sda:0\-7 and /dev/sdb:0\-7 for allocation of extents:
.sp
.B lvcreate \-L 64M \-n lvol1 vg00 /dev/sda:0\-7 /dev/sdb:0\-7

Creates a 5GiB RAID5 logical volume "vg00/my_lv", with 3 stripes (plus
a parity drive for a total of 4 devices) and a stripesize of 64KiB:
.sp
.B lvcreate \-\-type raid5 \-L 5G \-i 3 \-I 64 \-n my_lv vg00

Creates a RAID5 logical volume "vg00/my_lv", using all of the free
space in the VG and spanning all the PVs in the VG (note that the command
will fail if there are more than 8 PVs in the VG, in which case \fB\-i 7\fP
has to be used to get to the currently possible maximum of
8 devices including parity for RaidLVs):
.sp
.B lvcreate \-\-config allocation/raid_stripe_all_devices=1 \-\-type raid5 \-l 100%FREE \-n my_lv vg00

Creates a 5GiB RAID10 logical volume "vg00/my_lv", with 2 stripes on
2 2-way mirrors. Note that the \fB\-i\fP and \fB\-m\fP arguments behave
differently.
The \fB\-i\fP specifies the number of stripes.
The \fB\-m\fP specifies the number of
.B additional
copies:
.sp
.B lvcreate \-\-type raid10 \-L 5G \-i 2 \-m 1 \-n my_lv vg00

Creates a 100MiB thin pool logical volume for thin provisioning,
built with 2 stripes of 64KiB and a chunk size of 256KiB, together with
a 1TiB thinly provisioned logical volume "vg00/thin_lv":
.sp
.B lvcreate \-i 2 \-I 64 \-c 256 \-L100M \-T vg00/pool \-V 1T \-\-name thin_lv

Creates a thin snapshot volume "thinsnap" of the thin volume "thinvol" that
will share the same blocks within the thin pool.
Note: the size MUST NOT be specified, otherwise a non-thin snapshot
is created instead:
.sp
.B lvcreate \-s vg00/thinvol \-\-name thinsnap

Creates a thin snapshot volume of the read-only inactive volume "origin",
which then becomes the thin external origin for the thin snapshot volume
in vg00 that will use an existing thin pool "vg00/pool":
.sp
.B lvcreate \-s \-\-thinpool vg00/pool origin

Creates a cache pool LV that can later be used to cache one
logical volume:
.sp
.B lvcreate \-\-type cache-pool \-L 1G \-n my_lv_cachepool vg /dev/fast1

If there is an existing cache pool LV, create the large slow
device (i.e. the origin LV) and link it to the supplied cache pool LV,
creating a cache LV:
.sp
.B lvcreate \-\-cache \-L 100G \-n my_lv vg/my_lv_cachepool /dev/slow1

If there is an existing logical volume, create the small and fast
cache pool LV and link it to the supplied existing logical
volume (i.e. the origin LV), creating a cache LV:
.sp
.B lvcreate \-\-type cache \-L 1G \-n my_lv_cachepool vg/my_lv /dev/fast1

.\" Create a 1G cached LV "lvol1" with 10M cache pool "vg00/pool".
.\" .sp
.\" .B lvcreate \-\-cache \-L 1G \-n lv \-\-pooldatasize 10M vg00/pool
.
.SH SEE ALSO
.
.nh
.BR lvm (8),
.BR lvm.conf (5),
.BR lvmcache (7),
.BR lvmthin (7),
.BR lvconvert (8),
.BR lvchange (8),
.BR lvextend (8),
.BR lvreduce (8),
.BR lvremove (8),
.BR lvrename (8),
.BR lvs (8),
.BR lvscan (8),
.BR vgcreate (8),
.BR blkid (8)