#!/usr/bin/env bash
# Copyright (C) 2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

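# Default the thin repair command to /bin/false when the test environment does not set one.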
export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false}
. lib/inittest
prepare_lvs() {
lvremove -f $vg
lvcreate -L10M -n $lv1 $vg
lvcreate -L8M -n $lv2 $vg
}
#
# Main
#
aux have_thin 1 0 0 || skip
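# four small test PVs (64M each)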
aux prepare_pvs 4 64
get_devs
# build one large PV
vgcreate $vg1 "$dev1" "$dev2" "$dev3"
# 32bit linux kernels are fragile with device size >= 16T
# maybe uname -m [ x86_64 | i686 ]
TSIZE=64T
aux can_use_16T || TSIZE=15T
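# Stack a large sparse (virtual snapshot) LV on $vg1 and use it below as a PV,
# so $vg can cover far more space than the small physical test devices provide.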
lvcreate --type snapshot -l 100%FREE -n $lv $vg1 --virtualsize $TSIZE
aux extend_filter_LVMTEST
pvcreate "$DM_DEV_DIR/$vg1/$lv"
vgcreate $vg -s 64K "$dev4" "$DM_DEV_DIR/$vg1/$lv"
lvcreate -L1T -n $lv1 $vg
lvconvert --yes -c 8M --type thin --poolmetadatasize 1G $vg/$lv1
# needs some --cachepool or --thinpool
invalid lvconvert --yes --poolmetadatasize 1G $vg/$lv1
lvremove -f $vg
# create mirrored LVs for data and metadata volumes
lvcreate -aey -L10M --type mirror -m1 --mirrorlog core -n $lv1 $vg
lvcreate -aey -L10M -n $lv2 $vg
lvchange -an $vg/$lv1
# conversion fails for mirror segment type
fail lvconvert --thinpool $vg/$lv1
# FIXME: temporarily we return error code 5
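# so accept any failure ('not') instead of requiring 'invalid' for now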
INVALID=not
# cannot use same LV
$INVALID lvconvert --yes --thinpool $vg/$lv2 --poolmetadata $vg/$lv2
prepare_lvs
# conversion fails for internal volumes
# can't use --readahead with --poolmetadata
invalid lvconvert --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 --readahead 512
lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $vg/$lv2
prepare_lvs
lvconvert --yes -c 64 --stripes 2 --thinpool $vg/$lv1 --readahead 48
lvremove -f $vg
# Swapping of metadata volume
lvcreate -L1T -n $lv1 $vg
lvcreate -L32 -n $lv2 $vg
lvconvert --yes -c 8M --type thin-pool $vg/$lv1 2>&1 | tee err
# Check there is a warning for large chunk size and zeroing enabled
grep "WARNING: Pool zeroing and" err
UUID=$(get lv_field $vg/${lv1}_tmeta uuid)
# Fail if the pool is active
# TODO maybe detect inactive pool and deactivate
fail lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $lv2
lvchange -an $vg
lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $lv2
check lv_field $vg/${lv1}_tmeta uuid "$UUID"
# and swap again with new command --swapmetadata
lvconvert --yes --swapmetadata $vg/$lv1 --poolmetadata $lv2
check lv_field $vg/${lv1}_tmeta uuid "$UUID"
lvremove -f $vg
# test with bigger sizes
lvcreate -L1T -n $lv1 $vg
lvcreate -L8M -n $lv2 $vg
lvcreate -L1M -n $lv3 $vg
# chunk size is bigger than the size of the thin pool data
fail lvconvert --yes -c 1G --thinpool $vg/$lv3
# stripes can't be used with poolmetadata
invalid lvconvert --stripes 2 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2
# too small metadata (<2M)
fail lvconvert --yes -c 64 --thinpool $vg/$lv1 --poolmetadata $vg/$lv3
# too small chunk size fails
$INVALID lvconvert -c 4 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2
# too big chunk size fails
$INVALID lvconvert -c 2G --thinpool $vg/$lv1 --poolmetadata $vg/$lv2
# negative chunk size fails
$INVALID lvconvert -c -256 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2
# non-multiple of 64KiB fails
$INVALID lvconvert -c 88 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2
# cannot use same LV for pool and conversion
$INVALID lvconvert --yes --thinpool $vg/$lv3 -T $vg/$lv3
# Warning about chunk size smaller than suggested
lvconvert --yes -c 256 --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 2>&1 | tee err
grep "WARNING: Chunk size is smaller" err
lvremove -f $vg
lvcreate -L1T -n $lv1 $vg
lvcreate -L32G -n $lv2 $vg
# Warning about metadata size bigger than needed
lvconvert --yes --thinpool $vg/$lv1 --poolmetadata $vg/$lv2 2>&1 | tee err
grep -i "maximum" err
lvremove -f $vg
if test "$TSIZE" = 64T; then
lvcreate -L24T -n $lv1 $vg
# Warning about chunk size bigger than needed (24T data and 16G -> 128K chunk)
fail lvconvert --yes -c 64 --thinpool $vg/$lv1 2>&1 | tee err
grep "WARNING: Chunk size is too small" err
lvremove -f $vg
fi
#lvs -a -o+chunk_size,stripe_size,seg_pe_ranges
2012-10-09 18:32:11 +04:00
2014-10-06 13:56:09 +04:00
####################################
# Prohibited thin pool conversions #
####################################
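# Once $lv1 has become a thin pool, conversion to any other type must be rejected.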
lvcreate -L32 -n $lv1 $vg
lvcreate -L16 -n $lv2 $vg
lvconvert --yes --thinpool $vg/$lv1
not aux have_cache 1 3 0 || fail lvconvert --yes --type cache-pool $vg/$lv1
fail lvconvert --yes --type mirror -m1 $vg/$lv1
not aux have_raid 1 0 0 || fail lvconvert --yes --type raid1 -m1 $vg/$lv1
fail lvconvert --yes --type snapshot $vg/$lv1 $vg/$lv2
fail lvconvert --yes --type snapshot $vg/$lv2 $vg/$lv1
fail lvconvert --yes --type thin-pool $vg/$lv1
vgremove -ff $vg
vgremove -ff $vg1