#!/bin/sh
# Copyright (C) 2011-2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/test
# lv_devices <vg> <lv> <count>: succeed only if <vg>/<lv> is built from exactly
# <count> devices (counted from the comma-separated devices column of lvs).
lv_devices() {
	local local_vg=$1
	local local_lv=$2
	local count=$3

	[ "$count" = "$(lvs --noheadings -o devices $local_vg/$local_lv | sed 's/,/ /g' | wc -w)" ]
}
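
# Illustrative usage only (the LV name and count mirror the checks further below):
#   lv_devices $vg raid5 3   # passes when the devices column of vg/raid5 lists exactly 3 entries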
2011-08-11 22:24:40 +04:00
########################################################
# MAIN
########################################################
aux target_at_least dm-raid 1 1 0 || skip
aux prepare_pvs 6 20 # 6 devices for RAID10 (2-mirror,3-stripe) test
vgcreate -s 512k $vg $(cat DEVICES)
###########################################
# Create, wait for sync, remove tests
###########################################
# Create RAID1 (implicit 2-way)
lvcreate --type raid1 -l 2 -n $lv1 $vg
aux wait_for_sync $vg $lv1
lvremove -ff $vg
# Create RAID1 (explicit 2-way)
lvcreate --type raid1 -m 1 -l 2 -n $lv1 $vg
aux wait_for_sync $vg $lv1
lvremove -ff $vg
# Create RAID1 (explicit 3-way)
lvcreate --type raid1 -m 2 -l 2 -n $lv1 $vg
aux wait_for_sync $vg $lv1
lvremove -ff $vg
# Create RAID1 (explicit 3-way) - Set min/max recovery rate
lvcreate --type raid1 -m 2 -l 2 \
--minrecoveryrate 50 --maxrecoveryrate 100 \
-n $lv1 $vg
check lv_field $vg/$lv1 raid_min_recovery_rate 50
check lv_field $vg/$lv1 raid_max_recovery_rate 100
aux wait_for_sync $vg $lv1
lvremove -ff $vg
# Create RAID 4/5/6 (explicit 3-stripe + parity devs)
for i in raid4 \
raid5 raid5_ls raid5_la raid5_rs raid5_ra \
raid6 raid6_zr raid6_nr raid6_nc; do
lvcreate --type $i -l 3 -i 3 -n $lv1 $vg
aux wait_for_sync $vg $lv1
lvremove -ff $vg
done
# Create RAID 4/5/6 (explicit 3-stripe + parity devs) - Set min/max recovery
for i in raid4 \
raid5 raid5_ls raid5_la raid5_rs raid5_ra \
raid6 raid6_zr raid6_nr raid6_nc; do
lvcreate --type $i -l 3 -i 3 \
--minrecoveryrate 50 --maxrecoveryrate 100 \
-n $lv1 $vg
check lv_field $vg/$lv1 raid_min_recovery_rate 50
check lv_field $vg/$lv1 raid_max_recovery_rate 100
aux wait_for_sync $vg $lv1
lvremove -ff $vg
done
# Create RAID using 100%FREE
############################
# 6 PVs with 18.5m in each PV.
# 1 metadata LV = 1 extent = .5m
# 1 image = 36+37+37 extents = 55.00m = lv_size
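# (With the 512k extents used above, 18.5m per PV is 37 extents; the PV that
#  also holds an image's rmeta LV contributes one extent less to that image.)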
lvcreate --type raid1 -m 1 -l 100%FREE -n raid1 $vg
check lv_field $vg /raid1 size "55.00m"
lvremove -ff $vg
# 1 metadata LV = 1 extent
# 1 image = 36 extents
# 5 images = 180 extents = 90.00m = lv_size
lvcreate --type raid5 -i 5 -l 100%FREE -n raid5 $vg
should check lv_field $vg/raid5 size "90.00m"
#FIXME: Currently allocates incorrectly at 87.50m
lvremove -ff $vg
# 1 image = 36+37 extents
# 2 images = 146 extents = 73.00m = lv_size
lvcreate --type raid5 -i 2 -l 100%FREE -n raid5 $vg
check lv_field $vg /raid5 size "73.00m"
lvremove -ff $vg
# 1 image = 36 extents
# 4 images = 144 extents = 72.00m = lv_size
lvcreate --type raid6 -i 4 -l 100%FREE -n raid6 $vg
should check lv_field $vg/raid6 size "72.00m"
#FIXME: Currently allocates incorrectly at 70.00m
lvremove -ff $vg
# Eat 18 of 37 extents from dev1, leaving 19
lvcreate -l 18 -n lv $vg "$dev1"
# Using 100% free should take the rest of dev1 and an equal amount from dev2
# 1 meta takes 1 extent
# 1 image = 18 extents = 9.00m = lv_size
lvcreate --type raid1 -m 1 -l 100%FREE -n raid1 $vg "$dev1" "$dev2"
check lv_field $vg /raid1 size "9.00m"
# Ensure image size is the same as the RAID1 size
check lv_field $vg/raid1 size $(get lv_field $vg/raid1_rimage_0 size -a)
# Amount remaining in dev2 should equal the amount taken by 'lv' in dev1
check pv_field " $dev2 " pv_free $( get lv_field $vg /lv size)
lvremove -ff $vg
# Eat 18 of 37 extents from dev1, leaving 19
lvcreate -l 18 -n lv $vg "$dev1"
# Using 100% free should take the rest of dev1 and an equal amount from the rest
# 1 meta takes 1 extent
# 1 image = 18 extents = 9.00m
# 5 images = 90 extents = 45.00m = lv_size
lvcreate --type raid5 -i 5 -l 100%FREE -n raid5 $vg
check lv_field $vg /raid5 size "45.00m"
# Amount remaining in dev6 should equal the amount taken by 'lv' in dev1
check pv_field " $dev6 " pv_free ` lvs --noheadings -o size $vg /lv`
lvremove -ff $vg
# Eat 18 of 37 extents from dev1, leaving 19
lvcreate -l 18 -n lv $vg "$dev1"
# Using 100% free should take the rest of dev1, an equal amount
# from 2 more devs, and all extents from 3 additional devs
# 1 meta takes 1 extent
# 1 image = 18+37 extents
# 2 images = 110 extents = 55.00m = lv_size
lvcreate --type raid5 -i 2 -l 100%FREE -n raid5 $vg
check lv_field $vg /raid5 size "55.00m"
lvremove -ff $vg
# Let's do some stripe tests too
# Eat 18 of 37 extents from dev1, leaving 19
lvcreate -l 18 -n lv $vg "$dev1"
# Using 100% free should take the rest of dev1 and an equal amount from the rest
# 1 image = 19 extents
# 6 images = 114 extents = 57.00m = lv_size
lvcreate -i 6 -l 100%FREE -n stripe $vg
check lv_field $vg /stripe size "57.00m"
lvremove -ff $vg
# Eat 18 of 37 extents from dev1, leaving 19
lvcreate -l 18 -n lv $vg "$dev1"
# Using 100% free should take the rest of dev1, an equal amount from
# one more dev, and all of the remaining 4
# 1 image = 19+37+37 extents
# 2 images = 186 extents = 93.00m = lv_size
lvcreate -i 2 -l 100%FREE -n stripe $vg
check lv_field $vg /stripe size "93.00m"
lvremove -ff $vg
# RAID: Allow implicit stripe (and parity) when creating RAID LVs
#
# There are typically two functions for the more advanced segment types that
# deal with parameters in lvcreate.c: _get_*_params() and _check_*_params().
# (Not all segment types name their functions according to this scheme.)
# The former is responsible for reading parameters before the VG has been
# read; the latter is for sanity checking and possibly setting parameters
# after the VG has been read.
#
# The change under test adds a _check_raid_parameters() function that
# determines whether the user has specified 'stripe' or 'mirror' parameters.
# If not, the proper number is computed from the list of PVs the user has
# supplied, or from the number available in the VG.  With
# _check_raid_parameters() available, the check for the proper number of
# stripes moves from _get_* to _check_*.
#
# This gives the user the ability to create RAID LVs as follows:
#   # 5-device RAID5, 4 data + 1 parity (i.e. implicit '-i 4')
#   ~> lvcreate --type raid5 -L 100G -n lv vg /dev/sd[abcde]1
#   # 5-device RAID6, 3 data + 2 parity (i.e. implicit '-i 3')
#   ~> lvcreate --type raid6 -L 100G -n lv vg /dev/sd[abcde]1
#   # With 5 PVs in the VG: 4 data + 1 parity RAID5
#   ~> lvcreate --type raid5 -L 100G -n lv vg
#
# Considerations:
# This only affects RAID.  It might also be useful to apply it to the
# 'stripe' segment type.  LVM RAID may include RAID0 at some point in the
# future, and implicit stripe counts would apply there; it would be odd for
# RAID0 to be able to auto-determine the stripe count while 'stripe' could
# not.
#
# The only drawback is that there may be less error checking.  Rather than
# informing users that they forgot to supply an argument (e.g. '-i'), the
# value is computed and may differ from what they actually wanted.  This is
# acceptable, because users can check the device count after creation and
# remove the LV if they made an error.
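#
# For example, the implicitly chosen stripe/device count can be inspected after
# creation (illustrative command only; not executed by this test):
#   lvs --noheadings -o lv_name,segtype,stripes,devices vg/lv
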
# Create RAID (implicit stripe count based on PV count)
#######################################################
# Not enough drives
not lvcreate --type raid1 -l1 $vg "$dev1"
not lvcreate --type raid5 -l2 $vg "$dev1" "$dev2"
not lvcreate --type raid6 -l3 $vg "$dev1" "$dev2" "$dev3" "$dev4"
# Implicit count comes from #PVs given (always 2 for mirror though)
lvcreate --type raid1 -l1 -n raid1 $vg "$dev1" "$dev2"
lv_devices $vg raid1 2
lvcreate --type raid5 -l2 -n raid5 $vg "$dev1" "$dev2" "$dev3"
lv_devices $vg raid5 3
lvcreate --type raid6 -l3 -n raid6 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
lv_devices $vg raid6 5
lvremove -ff $vg
# Implicit count comes from total #PVs in VG (always 2 for mirror though)
lvcreate --type raid1 -l1 -n raid1 $vg
lv_devices $vg raid1 2
lvcreate --type raid5 -l2 -n raid5 $vg
lv_devices $vg raid5 6
lvcreate --type raid6 -l3 -n raid6 $vg
lv_devices $vg raid6 6
vgremove -ff $vg