/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib.h"
#include "metadata.h"
#include "locking.h"
#include "pv_map.h"
#include "lvm-string.h"
#include "toolcontext.h"
#include "lv_alloc.h"
#include "pv_alloc.h"
#include "display.h"
#include "segtype.h"
#include "archiver.h"
#include "activate.h"
#include "str_list.h"
#include "defaults.h"
#include "lvm-exec.h"
#include "memlock.h"
#include "lvmlockd.h"

typedef enum {
	PREFERRED,
	USE_AREA,
	NEXT_PV,
	NEXT_AREA
} area_use_t;

/* FIXME: remove RAID_METADATA_AREA_LEN macro after defining 'raid_log_extents' */
#define RAID_METADATA_AREA_LEN 1

/* FIXME These ended up getting used differently from first intended. Refactor. */
/* Only one of A_CONTIGUOUS_TO_LVSEG, A_CLING_TO_LVSEG, A_CLING_TO_ALLOCED may be set */
#define A_CONTIGUOUS_TO_LVSEG	0x01	/* Must be contiguous to an existing segment */
#define A_CLING_TO_LVSEG	0x02	/* Must use same disks as existing LV segment */
#define A_CLING_TO_ALLOCED	0x04	/* Must use same disks as already-allocated segment */
#define A_CLING_BY_TAGS	0x08	/* Must match tags against existing segment */

#define A_CAN_SPLIT		0x10
#define A_AREA_COUNT_MATCHES	0x20	/* Existing lvseg has same number of areas as new segment */

#define A_POSITIONAL_FILL	0x40	/* Slots are positional and filled using PREFERRED */
#define A_PARTITION_BY_TAGS	0x80	/* No allocated area may share any tag with any other */
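
/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * comment above says only one of the three contiguous/cling flags may be
 * set; a validity check for that rule could look like this.  The helper
 * name is hypothetical.
 */
static int _example_at_most_one_cling_flag(unsigned flags)
{
	unsigned cling = flags & (A_CONTIGUOUS_TO_LVSEG | A_CLING_TO_LVSEG |
				  A_CLING_TO_ALLOCED);

	/* Zero or one bit set: n & (n - 1) clears the lowest set bit. */
	return !(cling & (cling - 1));
}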

/*
 * Constant parameters during a single allocation attempt.
 */
struct alloc_parms {
	alloc_policy_t alloc;
	unsigned flags;		/* Holds A_* */
	struct lv_segment *prev_lvseg;
	uint32_t extents_still_needed;
};

/*
 * Holds varying state of each allocation attempt.
 */
struct alloc_state {
	const struct alloc_parms *alloc_parms;
	struct pv_area_used *areas;
	uint32_t areas_size;
	uint32_t log_area_count_still_needed;	/* Number of areas still needing to be allocated for the log */
	uint32_t allocated;			/* Total number of extents allocated so far */
	uint32_t num_positional_areas;		/* Number of parallel allocations that must be contiguous/cling */
};
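
/*
 * Illustrative sketch (an assumption, not part of the original file): how
 * the two allocation structures above relate at the start of an attempt.
 * ALLOC_NORMAL is one of the alloc_policy_t values from the metadata
 * headers; the helper name is hypothetical.
 */
static void _example_init_alloc(struct alloc_parms *parms,
				struct alloc_state *state,
				uint32_t extents_needed)
{
	parms->alloc = ALLOC_NORMAL;
	parms->flags = A_CAN_SPLIT;		/* areas may be split */
	parms->prev_lvseg = NULL;		/* no existing segment to extend */
	parms->extents_still_needed = extents_needed;

	state->alloc_parms = parms;	/* constant for the whole attempt */
	state->allocated = 0;		/* updated as areas are claimed */
	/* state->areas/areas_size would be sized per parallel area here. */
}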

struct lv_names {
	const char *old;
	const char *new;
};
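
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * lv_names pairs an LV's old and new name while its sub-LVs are renamed.
 * dm_snprintf() is libdevmapper's bounded snprintf wrapper; the helper
 * name and suffix handling here are hypothetical.
 */
static int _example_rename_sub_lv(char *buf, size_t len,
				  const struct lv_names *names,
				  const char *suffix)
{
	/* e.g. names->old "lvol0", names->new "lvol1", suffix "_mimage_0"
	 * gives "lvol1_mimage_0" in buf. */
	return dm_snprintf(buf, len, "%s%s", names->new, suffix) >= 0;
}
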
enum {
	LV_TYPE_UNKNOWN,
	LV_TYPE_NONE,
	LV_TYPE_PUBLIC,
	LV_TYPE_PRIVATE,
	LV_TYPE_HISTORY,
	LV_TYPE_LINEAR,
	LV_TYPE_STRIPED,
	LV_TYPE_MIRROR,
	LV_TYPE_RAID,
	LV_TYPE_THIN,
	LV_TYPE_CACHE,
	LV_TYPE_SPARSE,
	LV_TYPE_ORIGIN,
	LV_TYPE_THINORIGIN,
	LV_TYPE_MULTITHINORIGIN,
	LV_TYPE_THICKORIGIN,
	LV_TYPE_MULTITHICKORIGIN,
	LV_TYPE_CACHEORIGIN,
	LV_TYPE_EXTTHINORIGIN,
	LV_TYPE_MULTIEXTTHINORIGIN,
	LV_TYPE_SNAPSHOT,
	LV_TYPE_THINSNAPSHOT,
	LV_TYPE_THICKSNAPSHOT,
	LV_TYPE_PVMOVE,
	LV_TYPE_IMAGE,
	LV_TYPE_LOG,
	LV_TYPE_METADATA,
	LV_TYPE_POOL,
	LV_TYPE_DATA,
	LV_TYPE_SPARE,
	LV_TYPE_VIRTUAL,
	LV_TYPE_RAID0,
	LV_TYPE_RAID0_META,
	LV_TYPE_RAID1,
	LV_TYPE_RAID10,
	LV_TYPE_RAID4,
	LV_TYPE_RAID5,
	LV_TYPE_RAID5_N,
	LV_TYPE_RAID5_LA,
	LV_TYPE_RAID5_RA,
	LV_TYPE_RAID5_LS,
	LV_TYPE_RAID5_RS,
	LV_TYPE_RAID6,
	LV_TYPE_RAID6_ZR,
	LV_TYPE_RAID6_NR,
	LV_TYPE_RAID6_NC,
	LV_TYPE_LOCKD,
	LV_TYPE_SANLOCK
};
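
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * turning a few of the enum values above into the strings reported in the
 * lv_layout/lv_role fields.  The real code indexes the _lv_type_names[]
 * table defined below rather than using a switch.
 */
static const char *_example_lv_type_name(int type)
{
	switch (type) {
	case LV_TYPE_LINEAR:	return "linear";
	case LV_TYPE_STRIPED:	return "striped";
	case LV_TYPE_MIRROR:	return "mirror";
	case LV_TYPE_RAID:	return "raid";
	default:		return "unknown";
	}
}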

static const char *_lv_type_names[] = {
	[LV_TYPE_UNKNOWN] = "unknown",
	[LV_TYPE_NONE] = "none",
	[LV_TYPE_PUBLIC] = "public",
	[LV_TYPE_PRIVATE] = "private",
	[LV_TYPE_HISTORY] = "history",
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to the lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use of the LV.
These two new fields are both string lists, so selection (-S/--select)
criteria can be defined easily using the list operators:
[] for strict matching
{} for subset matching (see the illustrative sketch after this message).
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
This is a situation with a thin pool, thin volumes and thin snapshots.
We can see that the internal 'pool_tdata' volume that makes up the thin
pool actually has a level10 raid layout and the internal 'pool_tmeta'
has a level1 raid layout. Also, we can see that 'thin_snap1' and
'thin_snap2' are both thin snapshots while 'thin_vol2' is a thin origin
(having multiple snapshots).
Such a reporting scheme provides a much better basis for selection criteria,
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
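(Editor's illustrative sketch, not part of the source: conceptually,
'{}' subset matching succeeds when every requested keyword is present
in the field's string list, while '[]' strict matching additionally
requires that the lists match exactly. The helper name below is
hypothetical; dm_list_iterate_items(), struct dm_str_list and
str_list_match_item() are the existing libdevmapper/LVM facilities.)

/* Hypothetical sketch: the essence of '{}' subset matching. */
static int _keywords_subset(const struct dm_list *wanted, const struct dm_list *have)
{
	const struct dm_str_list *sl;

	/* Succeed only if every requested keyword is found. */
	dm_list_iterate_items(sl, wanted)
		if (!str_list_match_item(have, sl->str))
			return 0;

	return 1;
}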
2014-08-13 12:03:45 +04:00
	[LV_TYPE_LINEAR] = "linear",
	[LV_TYPE_STRIPED] = "striped",
	[LV_TYPE_MIRROR] = "mirror",
	[LV_TYPE_RAID] = "raid",
	[LV_TYPE_THIN] = "thin",
	[LV_TYPE_CACHE] = "cache",
2014-08-25 12:02:32 +04:00
	[LV_TYPE_SPARSE] = "sparse",
2014-08-13 12:03:45 +04:00
	[LV_TYPE_ORIGIN] = "origin",
2014-08-25 12:02:32 +04:00
	[LV_TYPE_THINORIGIN] = "thinorigin",
	[LV_TYPE_MULTITHINORIGIN] = "multithinorigin",
	[LV_TYPE_THICKORIGIN] = "thickorigin",
	[LV_TYPE_MULTITHICKORIGIN] = "multithickorigin",
	[LV_TYPE_CACHEORIGIN] = "cacheorigin",
	[LV_TYPE_EXTTHINORIGIN] = "extthinorigin",
	[LV_TYPE_MULTIEXTTHINORIGIN] = "multiextthinorigin",
2014-08-13 12:03:45 +04:00
	[LV_TYPE_SNAPSHOT] = "snapshot",
2014-08-25 12:02:32 +04:00
	[LV_TYPE_THINSNAPSHOT] = "thinsnapshot",
	[LV_TYPE_THICKSNAPSHOT] = "thicksnapshot",
2014-08-13 12:03:45 +04:00
	[LV_TYPE_PVMOVE] = "pvmove",
	[LV_TYPE_IMAGE] = "image",
	[LV_TYPE_LOG] = "log",
	[LV_TYPE_METADATA] = "metadata",
	[LV_TYPE_POOL] = "pool",
	[LV_TYPE_DATA] = "data",
	[LV_TYPE_SPARE] = "spare",
	[LV_TYPE_VIRTUAL] = "virtual",
2016-05-23 18:46:38 +03:00
	[LV_TYPE_RAID0] = SEG_TYPE_NAME_RAID0,
2016-07-02 00:20:54 +03:00
	[LV_TYPE_RAID0_META] = SEG_TYPE_NAME_RAID0_META,
2014-09-24 17:24:41 +04:00
	[LV_TYPE_RAID1] = SEG_TYPE_NAME_RAID1,
	[LV_TYPE_RAID10] = SEG_TYPE_NAME_RAID10,
	[LV_TYPE_RAID4] = SEG_TYPE_NAME_RAID4,
	[LV_TYPE_RAID5] = SEG_TYPE_NAME_RAID5,
2016-08-05 16:00:08 +03:00
	[LV_TYPE_RAID5_N] = SEG_TYPE_NAME_RAID5_N,
2014-09-24 17:24:41 +04:00
	[LV_TYPE_RAID5_LA] = SEG_TYPE_NAME_RAID5_LA,
	[LV_TYPE_RAID5_RA] = SEG_TYPE_NAME_RAID5_RA,
	[LV_TYPE_RAID5_LS] = SEG_TYPE_NAME_RAID5_LS,
	[LV_TYPE_RAID5_RS] = SEG_TYPE_NAME_RAID5_RS,
	[LV_TYPE_RAID6] = SEG_TYPE_NAME_RAID6,
	[LV_TYPE_RAID6_ZR] = SEG_TYPE_NAME_RAID6_ZR,
	[LV_TYPE_RAID6_NR] = SEG_TYPE_NAME_RAID6_NR,
	[LV_TYPE_RAID6_NC] = SEG_TYPE_NAME_RAID6_NC,
2015-10-08 14:44:21 +03:00
	[LV_TYPE_LOCKD] = "lockd",
	[LV_TYPE_SANLOCK] = "sanlock",
2014-07-01 12:37:01 +04:00
};
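(Editor's illustrative sketch, not part of the source: how the layout
and role string lists built from _lv_type_names[] might be consumed.
The helper name is hypothetical; dm_list_iterate_items(), struct
dm_str_list and log_print() are the existing facilities used here.)

/* Hypothetical sketch: dump each keyword accumulated in a string list. */
static void _log_keywords(const char *field, const struct dm_list *kws)
{
	const struct dm_str_list *sl;

	dm_list_iterate_items(sl, kws)
		log_print("%s: %s", field, sl->str);
}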
2014-08-25 11:07:03 +04:00
static int _lv_layout_and_role_mirror(struct dm_pool *mem,
				      const struct logical_volume *lv,
				      struct dm_list *layout,
2014-08-25 12:02:32 +04:00
				      struct dm_list *role,
				      int *public_lv)
2014-08-13 12:03:45 +04:00
{
2014-08-25 12:02:32 +04:00
	int top_level = 0;
2014-08-13 12:03:45 +04:00
2014-08-25 12:02:32 +04:00
/* non-top-level LVs */
2014-08-13 12:03:45 +04:00
	if (lv_is_mirror_image(lv)) {
2014-08-25 12:02:32 +04:00
		if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_MIRROR]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_IMAGE]))
2014-08-13 12:03:45 +04:00
			goto_bad;
	} else if (lv_is_mirror_log(lv)) {
cleanup: consolidate lv_layout and lv_role reporting
This patch makes the keyword combinations found in "lv_layout" and
"lv_role" much more understandable - there were some ambiguities
in some of the combinations which led to confusion before.
Now, the scheme used is:
LAYOUTS ("how the LV is laid out"):
===================================
[linear] (all segments have number of stripes = 1)
[striped] (all segments have number of stripes > 1)
[linear,striped] (mixed linear and striped)
raid (raid layout always reported together with raid level, raid layout == image + metadata LVs underneath that make up raid LV)
[raid,raid1]
[raid,raid10]
[raid,raid4]
[raid,raid5] (exact sublayout not specified during creation - default one used - raid5_ls)
[raid,raid5,raid5_ls]
[raid,raid5,raid5_rs]
[raid,raid5,raid5_la]
[raid,raid5,raid5_ra]
[raid,raid6] (exact sublayout not specified during creation - default one used - raid6_zr)
[raid,raid6,raid6_zr]
[raid,raid6,raid6_nc]
[raid,raid6,raid6_ns]
[mirror] (mirror layout == log + image LVs underneath that make up mirror LV)
thin (thin layout always reported together with sublayout)
[thin,sparse] (thin layout == allocated out of thin pool)
[thin,pool] (thin pool layout == data + metadata volumes underneath that make up thin pool LV; not intended for direct use!!!)
[cache] (cache layout == allocated out of cache pool in conjunction with cache origin)
[cache,pool] (cache pool layout == data + metadata volumes underneath that make up cache pool LV; not intended for direct use!!!)
[virtual] (virtual layout == not backed by a disk underneath; currently this layout denotes only the 'zero' device used for the origin,thickorigin role)
[unknown] (either error state or missing recognition for such layout)
ROLES ("what's the purpose or use of the LV - what is its role"):
=================================================================
- each LV has at least one of these two roles:
[public] (public LV that users may use freely to write their data to)
[private] (private LV that LVM maintains; not intended to be written to directly by the user)
- and then some special-purpose roles in addition to that (see the selection sketch after this list):
[origin,thickorigin] (origin for thick-style snapshot; "thick" as opposed to "thin")
[origin,multithickorigin] (there is more than one thick-style snapshot of this origin)
[origin,thinorigin] (origin for thin snapshot)
[origin,multithinorigin] (there is more than one thin snapshot of this origin)
[origin,extorigin] (external origin for thin snapshot)
[origin,multiextorigin] (there is more than one thin snapshot using this external origin)
[origin,cacheorigin] (cache origin)
[snapshot,thicksnapshot] (thick-style snapshot; "thick" as opposed to "thin")
[snapshot,thinsnapshot] (thin-style snapshot)
[raid,metadata] (raid metadata LV)
[raid,image] (raid image LV)
[mirror,image] (mirror image LV)
[mirror,log] (mirror log LV)
[pvmove] (pvmove LV)
[thin,pool,data] (thin pool data LV)
[thin,pool,metadata] (thin pool metadata LV)
[cache,pool,data] (cache pool data LV)
[cache,pool,metadata] (cache pool metadata LV)
[pool,spare] (pool spare LV - a common role for an LV that is used for both thin and cache repair)
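As a hedged usage sketch (the output is hypothetical), the role list can be
used in selection criteria just like layout and type, e.g. to list only the
LVs that LVM maintains internally:
$ lvs -a -o name,lv_role -S 'lv_role=private'
(this would select all the bracketed internal LVs from the examples above,
since every such LV carries the "private" role)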
2014-08-25 12:02:32 +04:00
if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_MIRROR]) ||
    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_LOG]))
2014-08-13 12:03:45 +04:00
goto_bad;
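(As a hedged aside: str_list_add_no_dup_check() is used throughout these
branches because each layout/role keyword is added at most once per LV by
construction, so the duplicate scan performed by the plain str_list_add()
helper can be skipped.)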
report: also display "mirror" keyword in lv_layout for mirrored mirror log and "cache" keyword in lv_layout for cached cache pool
$ lvs -a -o name,vg_name,attr,layout,type
LV VG Attr Layout Type
lvol0 vg mwi-a-m--- mirror mirror
[lvol0_mimage_0] vg iwi-aom--- linear image,mirror
[lvol0_mimage_1] vg iwi-aom--- linear image,mirror
[lvol0_mlog] vg mwi-aom--- mirror log,mirror
[lvol0_mlog_mimage_0] vg iwi-aom--- linear image,mirror
[lvol0_mlog_mimage_1] vg iwi-aom--- linear image,mirror
(lvol0_mlog properly displayed as "mirror" layout for mirrored mirror log)
$ lvs -a -o name,vg_name,attr,layout,type
LV VG Attr Layout Type
lvol0 vg Cwi---C--- cache,pool cache,pool
[lvol0_cdata] vg Cwi------- linear cache,data,pool
[lvol0_cmeta] vg ewi------- linear cache,metadata,pool
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
lvol2 vg Cwi---C--- cache,pool cache,pool
[lvol2_cdata] vg Cwi---C--- cache cache,data,pool
[lvol2_cdata_corig] vg owi---C--- linear cache,origin
[lvol2_cmeta] vg ewi------- linear cache,metadata,pool
(lvol2_cdata properly displayed as cached cache pool data)
2014-08-19 15:58:32 +04:00
if (lv_is_mirrored(lv) &&
    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_MIRROR]))
goto_bad;
2014-09-16 00:33:53 +04:00
} else if (lv_is_pvmove(lv)) {
2014-08-25 11:07:03 +04:00
if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_PVMOVE]) ||
2014-08-25 12:02:32 +04:00
    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_MIRROR]))
2014-08-13 12:03:45 +04:00
goto_bad;
2014-08-25 12:02:32 +04:00
} else
top_level = 1;
2014-07-01 12:37:01 +04:00
2014-08-25 12:02:32 +04:00
if (!top_level) {
*public_lv = 0;
return 1;
2014-07-01 12:37:01 +04:00
}
2014-08-25 12:02:32 +04:00
/* top-level LVs */
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_MIRROR]))
goto_bad;
2014-08-13 12:03:45 +04:00
return 1;
bad:
return 0;
2014-07-01 12:37:01 +04:00
}
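The layout and role lists built by these helpers are ordinary libdevmapper
string lists. A minimal hedged sketch of how a caller could walk such a list
(assuming the stock struct dm_str_list and dm_list_iterate_items() from
libdevmapper; _print_keywords() is a hypothetical helper, not part of this
file):

/* Hypothetical helper: log every keyword accumulated in a layout/role list. */
static void _print_keywords(const char *field, struct dm_list *keywords)
{
	struct dm_str_list *sl;

	dm_list_iterate_items(sl, keywords)
		log_print("%s: %s", field, sl->str);
}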
2014-08-25 11:07:03 +04:00
static int _lv_layout_and_role_raid(struct dm_pool *mem,
                                    const struct logical_volume *lv,
                                    struct dm_list *layout,
2014-08-25 12:02:32 +04:00
                                    struct dm_list *role,
                                    int *public_lv)
2014-08-13 12:03:45 +04:00
{
2014-08-25 12:02:32 +04:00
int top_level = 0;
2015-09-24 16:59:07 +03:00
const struct segment_type *segtype;
2014-08-13 12:03:45 +04:00
2014-08-25 12:02:32 +04:00
/* non-top-level LVs */
2014-08-13 12:03:45 +04:00
if (lv_is_raid_image(lv)) {
2014-08-25 12:02:32 +04:00
if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_RAID]) ||
    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_IMAGE]))
2014-08-13 12:03:45 +04:00
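To make the two list operators concrete, here is a standalone sketch of
the difference between strict ([]) and subset ({}) matching over such
keyword lists (illustrative code only, not the lvm2 selection engine;
it assumes the lists carry no duplicate keywords):

#include <stdio.h>
#include <string.h>

static int contains(const char **list, size_t n, const char *value)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!strcmp(list[i], value))
			return 1;

	return 0;
}

/* {} subset matching: every selected keyword must be in the LV's list. */
static int match_subset(const char **lv, size_t nlv,
			const char **sel, size_t nsel)
{
	size_t i;

	for (i = 0; i < nsel; i++)
		if (!contains(lv, nlv, sel[i]))
			return 0;

	return 1;
}

/* [] strict matching: the two keyword sets must be equal. */
static int match_strict(const char **lv, size_t nlv,
			const char **sel, size_t nsel)
{
	return nlv == nsel && match_subset(lv, nlv, sel, nsel);
}

int main(void)
{
	const char *type[] = { "metadata", "pool", "thin" }; /* pool_tmeta */
	const char *sel[] = { "metadata", "thin" };          /* {metadata,thin} */

	printf("subset: %d\n", match_subset(type, 3, sel, 2)); /* 1: selected */
	printf("strict: %d\n", match_strict(type, 3, sel, 2)); /* 0: not selected */

	return 0;
}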
		if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_RAID]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_IMAGE]))
			goto_bad;
	} else if (lv_is_raid_metadata(lv)) {
		if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_RAID]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_METADATA]))
			goto_bad;
	} else if (lv_is_pvmove(lv)) {
		if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_PVMOVE]) ||
		    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID]))
			goto_bad;
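	/*
	 * Comment added for clarity: each branch above tagged an internal
	 * sub-LV; an LV matching none of them is treated as top-level below,
	 * and only internal sub-LVs are marked non-public.
	 */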
	} else
		top_level = 1;

	if (!top_level) {
		*public_lv = 0;
		return 1;
	}

	/* top-level LVs */
	if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID]))
		goto_bad;

	segtype = first_seg(lv)->segtype;
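	/*
	 * Comment added for clarity: the chain below maps the segment type
	 * onto a specific raid level (and, for raid5, the exact sublayout),
	 * extending the layout list per the [raid,raidN,...] scheme above.
	 */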
	if (segtype_is_raid0(segtype)) {
		if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID0]))
			goto_bad;
	} else if (segtype_is_raid1(segtype)) {
		if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID1]))
			goto_bad;
	} else if (segtype_is_raid10(segtype)) {
		if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID10]))
			goto_bad;
	} else if (segtype_is_raid4(segtype)) {
		if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID4]))
			goto_bad;
	} else if (segtype_is_any_raid5(segtype)) {
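		/*
		 * Comment added for clarity: the raid5 sublayout suffixes
		 * follow the usual MD naming - _la/_ra for left/right
		 * asymmetric and _ls/_rs for left/right symmetric parity
		 * placement.
		 */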
		if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5]))
			goto_bad;

		if (segtype_is_raid5_la(segtype)) {
			if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_LA]))
				goto_bad;
		} else if (segtype_is_raid5_ra(segtype)) {
			if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_RA]))
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field, which provides
a very condensed view. In contrast, the new lv_layout and lv_type
fields provide more detailed information on the exact layout and type
used for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to the lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists, so selection (-S/--select)
criteria can be defined easily using the list operators:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
  LV                    VG Attr       Layout       Type
  [lvol1_pmspare]       vg ewi------- linear       metadata,pool,spare
  pool                  vg twi-a-tz-- pool,thin    pool,thin
  [pool_tdata]          vg rwi-aor--- level10,raid data,pool,thin
  [pool_tdata_rimage_0] vg iwi-aor--- linear       image,raid
  [pool_tdata_rimage_1] vg iwi-aor--- linear       image,raid
  [pool_tdata_rimage_2] vg iwi-aor--- linear       image,raid
  [pool_tdata_rimage_3] vg iwi-aor--- linear       image,raid
  [pool_tdata_rmeta_0]  vg ewi-aor--- linear       metadata,raid
  [pool_tdata_rmeta_1]  vg ewi-aor--- linear       metadata,raid
  [pool_tdata_rmeta_2]  vg ewi-aor--- linear       metadata,raid
  [pool_tdata_rmeta_3]  vg ewi-aor--- linear       metadata,raid
  [pool_tmeta]          vg ewi-aor--- level1,raid  metadata,pool,thin
  [pool_tmeta_rimage_0] vg iwi-aor--- linear       image,raid
  [pool_tmeta_rimage_1] vg iwi-aor--- linear       image,raid
  [pool_tmeta_rmeta_0]  vg ewi-aor--- linear       metadata,raid
  [pool_tmeta_rmeta_1]  vg ewi-aor--- linear       metadata,raid
  thin_snap1            vg Vwi---tz-k thin         snapshot,thin
  thin_snap2            vg Vwi---tz-k thin         snapshot,thin
  thin_vol1             vg Vwi-a-tz-- thin         thin
  thin_vol2             vg Vwi-a-tz-- thin         multiple,origin,thin
This is a setup with a thin pool, thin volumes and thin snapshots.
We can see that the internal 'pool_tdata' volume that makes up the
thin pool actually has a level10 raid layout and the internal
'pool_tmeta' has a level1 raid layout. Also, we can see that
'thin_snap1' and 'thin_snap2' are both thin snapshots while
'thin_vol2' is a thin origin (having multiple snapshots).
Such a reporting scheme provides a much better basis for selection
criteria, in addition to providing more detailed information. For example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
  LV                    VG Attr       Layout       Type
  [lvol1_pmspare]       vg ewi------- linear       metadata,pool,spare
  [pool_tdata_rmeta_0]  vg ewi-aor--- linear       metadata,raid
  [pool_tdata_rmeta_1]  vg ewi-aor--- linear       metadata,raid
  [pool_tdata_rmeta_2]  vg ewi-aor--- linear       metadata,raid
  [pool_tdata_rmeta_3]  vg ewi-aor--- linear       metadata,raid
  [pool_tmeta]          vg ewi-aor--- level1,raid  metadata,pool,thin
  [pool_tmeta_rmeta_0]  vg ewi-aor--- linear       metadata,raid
  [pool_tmeta_rmeta_1]  vg ewi-aor--- linear       metadata,raid
(selected all LVs which are related to metadata of any type)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
  LV                    VG Attr       Layout       Type
  [pool_tmeta]          vg ewi-aor--- level1,raid  metadata,pool,thin
(selected all LVs which hold metadata related to thin)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
  LV                    VG Attr       Layout       Type
  thin_snap1            vg Vwi---tz-k thin         snapshot,thin
  thin_snap2            vg Vwi---tz-k thin         snapshot,thin
(selected all LVs which are thin snapshots)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
  LV                    VG Attr       Layout       Type
  [pool_tdata]          vg rwi-aor--- level10,raid data,pool,thin
  [pool_tmeta]          vg ewi-aor--- level1,raid  metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
  LV                    VG Attr       Layout       Type
  [pool_tmeta]          vg ewi-aor--- level1,raid  metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
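The {} subset operator matches when every requested keyword is present in the reported list, in any order. A minimal sketch of that test, assuming the existing str_list_match_item() helper from lib/datastruct/str_list.h; the _list_is_subset() wrapper itself is hypothetical:

/*
 * Sketch of the {} subset semantics: every requested keyword must be
 * present in the LV's reported string list (order does not matter).
 */
static int _list_is_subset(const struct dm_list *reported,
                           const char * const *wanted, unsigned count)
{
        unsigned i;

        for (i = 0; i < count; i++)
                if (!str_list_match_item(reported, wanted[i]))
                        return 0;       /* a requested keyword is missing */

        return 1;       /* all requested keywords found */
}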
2014-08-13 12:03:45 +04:00
                                goto_bad;
2015-09-24 16:59:07 +03:00
                } else if (segtype_is_raid5_ls(segtype)) {
2014-08-25 12:02:32 +04:00
                        if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_LS]))
2014-08-13 12:03:45 +04:00
                                goto_bad;
2015-09-24 16:59:07 +03:00
                } else if (segtype_is_raid5_rs(segtype)) {
2014-08-25 12:02:32 +04:00
                        if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_RS]))
2014-08-13 12:03:45 +04:00
                                goto_bad;
                }
2015-09-24 16:59:07 +03:00
        } else if (segtype_is_any_raid6(segtype)) {
2014-08-25 12:02:32 +04:00
                if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6]))
2014-08-13 12:03:45 +04:00
                        goto_bad;
2015-09-24 16:59:07 +03:00
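                /* Distinguish the exact raid6 variant: zr (zero restart), nr (N restart) or nc (N continue). */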
                if (segtype_is_raid6_zr(segtype)) {
2014-08-25 12:02:32 +04:00
                        if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_ZR]))
2014-08-13 12:03:45 +04:00
                                goto_bad;
2015-09-24 16:59:07 +03:00
                } else if (segtype_is_raid6_nr(segtype)) {
2014-08-25 12:02:32 +04:00
                        if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_NR]))
2014-08-13 12:03:45 +04:00
                                goto_bad;
2015-09-24 16:59:07 +03:00
                } else if (segtype_is_raid6_nc(segtype)) {
2014-08-25 12:02:32 +04:00
                        if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_NC]))
2014-08-13 12:03:45 +04:00
                                goto_bad;
                }
        }

        return 1;
bad:
        return 0;
2014-07-01 12:37:01 +04:00
}
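For reference, the goto_bad used throughout this function is LVM2's error-path helper; it is defined (roughly) alongside the other stack-logging macros as:

/*
 * #define goto_bad do { stack; goto bad; } while (0)
 *
 * where 'stack' logs the current source position, so each failure
 * leaves a backtrace entry before the function returns 0 via bad:.
 */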
2014-08-25 11:07:03 +04:00
static int _lv_layout_and_role_thin(struct dm_pool *mem,
                                    const struct logical_volume *lv,
                                    struct dm_list *layout,
2014-08-25 12:02:32 +04:00
                                    struct dm_list *role,
                                    int *public_lv)
2014-07-01 12:37:01 +04:00
{
2014-08-25 12:02:32 +04:00
        int top_level = 0;
2014-08-15 17:32:04 +04:00
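        /* Presumably filled in via lv_is_thin_origin(): the number of thin
         * snapshots using this LV as their origin, driving the thinorigin
         * vs. multithinorigin role selection. */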
        unsigned snap_count;
2014-08-13 12:03:45 +04:00
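A minimal, self-contained sketch of the two list-operator semantics described above (not LVM's actual selection code; the helper names and fixed arrays here are invented for illustration): {} succeeds when every selected item appears in the field's value list, while [] additionally requires the two sets to be the same size, i.e. to match exactly.

#include <stdio.h>
#include <string.h>

static int _list_contains(const char **list, int n, const char *s)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(list[i], s))
			return 1;
	return 0;
}

/* {} semantics: every selected item must appear in the value list */
static int _match_subset(const char **values, int nv, const char **sel, int ns)
{
	int i;

	for (i = 0; i < ns; i++)
		if (!_list_contains(values, nv, sel[i]))
			return 0;
	return 1;
}

/* [] semantics: same items on both sides, order-independent */
static int _match_strict(const char **values, int nv, const char **sel, int ns)
{
	return nv == ns && _match_subset(values, nv, sel, ns);
}

int main(void)
{
	const char *type[] = { "metadata", "pool", "thin" };
	const char *sel[] = { "metadata", "thin" };

	/* like 'type={metadata,thin}' vs 'type=[metadata,thin]' on pool_tmeta */
	printf("subset: %d, strict: %d\n",
	       _match_subset(type, 3, sel, 2),
	       _match_strict(type, 3, sel, 2));	/* prints "subset: 1, strict: 0" */
	return 0;
}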
cleanup: consolidate lv_layout and lv_role reporting
This patch makes the keyword combinations found in "lv_layout" and
"lv_role" much more understandable - there were some ambiguities
for some of the combinations, which led to confusion before.
Now, the scheme used is:
LAYOUTS ("how the LV is laid out"):
===================================
[linear] (all segments have number of stripes = 1)
[striped] (all segments have number of stripes > 1)
[linear,striped] (mixed linear and striped)
raid (raid layout always reported together with raid level, raid layout == image + metadata LVs underneath that make up raid LV)
[raid,raid1]
[raid,raid10]
[raid,raid4]
[raid,raid5] (exact sublayout not specified during creation - default one used - raid5_ls)
[raid,raid5,raid5_ls]
[raid,raid5,raid5_rs]
[raid,raid5,raid5_la]
[raid,raid5,raid5_ra]
[raid,raid6] (exact sublayout not specified during creation - default one used - raid6_zr)
[raid,raid6,raid6_zr]
[raid,raid6,raid6_nc]
[raid,raid6,raid6_ns]
[mirror] (mirror layout == log + image LVs underneath that make up mirror LV)
thin (thin layout always reported together with sublayout)
[thin,sparse] (thin layout == allocated out of thin pool)
[thin,pool] (thin pool layout == data + metadata volumes underneath that make up thin pool LV; not intended for direct use)
[cache] (cache layout == allocated out of cache pool in conjunction with cache origin)
[cache,pool] (cache pool layout == data + metadata volumes underneath that make up cache pool LV; not intended for direct use)
[virtual] (virtual layout == not hitting disk underneath; currently this layout denotes only the 'zero' device used for the origin,thickorigin role)
[unknown] (either error state or missing recognition for such layout)
ROLES ("what's the purpose or use of the LV - what is its role"):
=================================================================
- each LV has at least one of these two roles:
[public] (public LV that users may use freely to write their data to)
[private] (private LV that LVM maintains; not supposed to be used directly by the user to write data to)
- and then some special-purpose roles in addition to that:
[origin,thickorigin] (origin for thick-style snapshot; "thick" as opposed to "thin")
[origin,multithickorigin] (there are 2 or more thick-style snapshots for this origin)
[origin,thinorigin] (origin for thin snapshot)
[origin,multithinorigin] (there are 2 or more thin snapshots for this origin)
[origin,extorigin] (external origin for thin snapshot)
[origin,multiextorigin] (there are 2 or more thin snapshots using this external origin)
[origin,cacheorigin] (cache origin)
[snapshot,thicksnapshot] (thick-style snapshot; "thick" as opposed to "thin")
[snapshot,thinsnapshot] (thin-style snapshot)
[raid,metadata] (raid metadata LV)
[raid,image] (raid image LV)
[mirror,image] (mirror image LV)
[mirror,log] (mirror log LV)
[pvmove] (pvmove LV)
[thin,pool,data] (thin pool data LV)
[thin,pool,metadata] (thin pool metadata LV)
[cache,pool,data] (cache pool data LV)
[cache,pool,metadata] (cache pool metadata LV)
[pool,spare] (pool spare LV - a common role; such an LV can be used for both thin and cache repairs)
2014-08-25 12:02:32 +04:00
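As a rough illustration of the public/private rule above, here is a hedged, self-contained sketch. This is not the LVM2 code: struct lv_stub and _public_or_private are stand-ins invented for this example; the real decision in the code below also considers many more LV flags than visibility alone.

#include <stdio.h>

struct lv_stub {
	const char *name;
	int visible;		/* LV exposed to the user as a device */
	int internal_role;	/* already classified as e.g. raid image or pool metadata */
};

static const char *_public_or_private(const struct lv_stub *lv)
{
	/* anything hidden, or claimed by an internal role, is private */
	return (lv->visible && !lv->internal_role) ? "public" : "private";
}

int main(void)
{
	const struct lv_stub lvs[] = {
		{ "thin_vol1", 1, 0 },	/* user-facing thin volume */
		{ "pool_tmeta", 0, 1 },	/* hidden thin pool metadata */
	};
	unsigned i;

	for (i = 0; i < sizeof(lvs) / sizeof(lvs[0]); i++)
		printf("%s: %s\n", lvs[i].name, _public_or_private(&lvs[i]));
	return 0;
}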
/* non-top-level LVs */
if (lv_is_thin_pool_metadata(lv)) {
2014-08-25 11:07:03 +04:00
if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_THIN]) ||
    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_POOL]) ||
    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_METADATA]))
2014-08-13 12:03:45 +04:00
goto_bad;
} else if (lv_is_thin_pool_data(lv)) {
2014-08-25 12:02:32 +04:00
if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_THIN]) ||
    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_POOL]) ||
2014-08-25 11:07:03 +04:00
    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_DATA]))
2014-08-13 12:03:45 +04:00
goto_bad;
2014-08-25 12:02:32 +04:00
} else
	top_level = 1;

if (!top_level) {
	*public_lv = 0;
	return 1;
}

/* top-level LVs */
if (lv_is_thin_volume(lv)) {
	if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_THIN]) ||
	    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_SPARSE]))
2014-08-13 12:03:45 +04:00
goto_bad;
2014-08-25 12:02:32 +04:00
if (lv_is_thin_origin(lv, &snap_count)) {
	if (!str_list_add(mem, role, _lv_type_names[LV_TYPE_ORIGIN]) ||
	    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_THINORIGIN]))
2014-08-13 12:03:45 +04:00
goto_bad;
2014-08-25 12:02:32 +04:00
/* snap_count > 1 means 2 or more thin snapshots -> also report "multithinorigin" */
if (snap_count > 1 &&
    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_MULTITHINORIGIN]))
	goto_bad;
}
commands: new method for defining commands
. Define a prototype for every lvm command.
. Match every user command with one definition.
. Generate help text and man pages from them.
The new file command-lines.in defines a prototype for every
unique lvm command. A unique lvm command is a unique
combination of: command name + required option args +
required positional args. Each of these prototypes also
includes the optional option args and optional positional
args that the command will accept, a description, and a
unique string ID for the definition. Any valid command
will match one of the prototypes.
Here's an example of the lvresize command definitions from
command-lines.in; there are three unique lvresize commands:
lvresize --size SizeMB LV
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync, --reportformat String, --resizefs,
--stripes Number, --stripesize SizeKB, --poolmetadatasize SizeMB
OP: PV ...
ID: lvresize_by_size
DESC: Resize an LV by a specified size.
lvresize LV PV ...
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync,
--reportformat String, --resizefs, --stripes Number, --stripesize SizeKB
ID: lvresize_by_pv
DESC: Resize an LV by specified PV extents.
FLAGS: SECONDARY_SYNTAX
lvresize --poolmetadatasize SizeMB LV_thinpool
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync,
--reportformat String, --stripes Number, --stripesize SizeKB
OP: PV ...
ID: lvresize_pool_metadata_by_size
DESC: Resize a pool metadata SubLV by a specified size.
The three commands have separate definitions because they have
different required parameters. Required parameters are specified
on the first line of the definition. Optional options are
listed after OO, and optional positional args are listed after OP.
This data is used to generate corresponding command definition
structures for lvm in command-lines.h. Usage/help output is also
auto-generated, so it is always in sync with the definitions.
Every user-entered command is compared against the set of
command structures, and matched with one. An error is
reported if an entered command does not have the required
parameters for any definition. The closest match is printed
as a suggestion, and running lvresize --help will display
the usage for each possible lvresize command.
The prototype syntax used for help/man output includes
required --option and positional args on the first line,
and optional --option and positional args enclosed in [ ]
on subsequent lines.
command_name <required_opt_args> <required_pos_args>
[ <optional_opt_args> ]
[ <optional_pos_args> ]
Command definitions that are not to be advertised/suggested
have the flag SECONDARY_SYNTAX. These commands will not be
printed in the normal help output.
Man page prototypes are also generated from the same original
command definitions, and are always in sync with the code
and help text.
Very early in command execution, a matching command definition
is found. lvm then knows the operation being done, and that
the provided args conform to the definition. This will allow
lots of ad hoc checking/validation to be removed throughout
the code.
Each command definition can also be routed to a specific
function to implement it. The function is associated with
an enum value for the command definition (generated from
the ID string). These per-command-definition implementation
functions have not yet been created, so all commands
currently fall back to the existing per-command-name
implementation functions.
Using per-command-definition functions will allow lots of
code to be removed which tries to figure out what the
command is meant to do. This is currently based on ad hoc
and complicated option analysis. When using the new
functions, what the command is doing is already known
from the associated command definition.
2016-08-12 23:52:18 +03:00
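To make the matching idea concrete, here is a hedged C sketch of what a generated definition table and lookup could look like. Every name in it (struct cmd_def, _defs, _find_command) is invented for illustration; the real generated structures in command-lines.h also carry optional args, positional args and value types, which this sketch omits, and the real matcher scores positional args as well.

#include <stdio.h>
#include <string.h>

struct cmd_def {
	const char *id;			/* unique string ID, e.g. "lvresize_by_size" */
	const char *name;		/* command name, e.g. "lvresize" */
	const char *required_opts[4];	/* required option args, NULL-terminated */
};

static const struct cmd_def _defs[] = {
	{ "lvresize_by_size", "lvresize", { "--size", NULL } },
	{ "lvresize_pool_metadata_by_size", "lvresize", { "--poolmetadatasize", NULL } },
};

/* first definition whose name matches and whose required opts were all given */
static const struct cmd_def *_find_command(const char *name,
					   const char **opts, int nopts)
{
	unsigned i;
	int j, k, have_all;

	for (i = 0; i < sizeof(_defs) / sizeof(_defs[0]); i++) {
		if (strcmp(_defs[i].name, name))
			continue;
		have_all = 1;
		for (j = 0; _defs[i].required_opts[j]; j++) {
			int have = 0;
			for (k = 0; k < nopts; k++)
				if (!strcmp(opts[k], _defs[i].required_opts[j]))
					have = 1;
			if (!have)
				have_all = 0;
		}
		if (have_all)
			return &_defs[i];
	}
	return NULL;	/* caller would report the closest match as a suggestion */
}

int main(void)
{
	const char *opts[] = { "--size" };
	const struct cmd_def *def = _find_command("lvresize", opts, 1);

	printf("matched: %s\n", def ? def->id : "(none)");	/* lvresize_by_size */
	return 0;
}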
if (lv_is_thin_snapshot(lv))
2014-08-25 12:02:32 +04:00
if (!str_list_add(mem, role, _lv_type_names[LV_TYPE_SNAPSHOT]) ||
    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_THINSNAPSHOT]))
2014-08-13 12:03:45 +04:00
goto_bad;
2014-08-25 12:02:32 +04:00
} else if (lv_is_thin_pool(lv)) {
	if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_THIN]) ||
	    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_POOL]))
		goto_bad;
	*public_lv = 0;
2014-08-13 12:03:45 +04:00
}
cleanup: consolidate lv_layout and lv_role reporting
This patch makes the keyword combinations found in "lv_layout" and
"lv_role" much more understandable - there were some ambiguities
for some of the combinations which lead to confusion before.
Now, the scheme used is:
LAYOUTS ("how the LV is laid out"):
===================================
[linear] (all segments have number of stripes = 1)
[striped] (all segments have number of stripes > 1)
[linear,striped] (mixed linear and striped)
raid (raid layout always reported together with raid level, raid layout == image + metadata LVs underneath that make up raid LV)
[raid,raid1]
[raid,raid10]
[raid,raid4]
[raid,raid5] (exact sublayout not specified during creation - default one used - raid5_ls)
[raid,raid5,raid5_ls]
[raid,raid5,raid6_rs]
[raid,raid5,raid5_la]
[raid,raid5,raid5_ra]
[raid6,raid] (exact sublayout not specified during creation - default one used - raid6_zr)
[raid,raid6,raid6_zr]
[raid,raid6,raid6_nc]
[raid,raid6,raid6_ns]
[mirror] (mirror layout == log + image LVs underneath that make up mirror LV)
thin (thin layout always reported together with sublayout)
[thin,sparse] (thin layout == allocated out of thin pool)
[thin,pool] (thin pool layout == data + metadata volumes underneath that make up thin pool LV, not supposed to be used for direct use!!!)
[cache] (cache layout == allocated out of cache pool in conjunction with cache origin)
[cache,pool] (cache pool layout == data + metadata volumes underneath that make up cache pool LV, not supposed to be used for direct use!!!)
[virtual] (virtual layout == not hitting disk underneath, currently this layout denotes only 'zero' device used for origin,thickorigin role)
[unknown] (either error state or missing recognition for such layout)
ROLES ("what's the purpose or use of the LV - what is its role"):
=================================================================
- each LV has either of these two roles at least: [public] (public LV that users may use freely to write their data to)
[public] (public LV that users may use freely to write their data to)
[private] (private LV that LVM maintains; not supposed to be directly used by user to write his data to)
- and then some special-purpose roles in addition to that:
[origin,thickorigin] (origin for thick-style snapshot; "thick" as opposed to "thin")
[origin,multithickorigin] (there are more than 2 thick-style snapshots for this origin)
[origin,thinorigin] (origin for thin snapshot)
[origin,multithinorigin] (there are more than 2 thin snapshots for this origin)
[origin,extorigin] (external origin for thin snapshot)
[origin,multiextoriginl (there are more than 2 thin snapshots using this external origin)
[origin,cacheorigin] (cache origin)
[snapshot,thicksnapshot] (thick-style snapshot; "thick" as opposed to "thin")
[snapshot,thinsnapshot] (thin-style snapshot)
[raid,metadata] (raid metadata LV)
[raid,image] (raid image LV)
[mirror,image] (mirror image LV)
[mirror,log] (mirror log LV)
[pvmove] (pvmove LV)
[thin,pool,data] (thin pool data LV)
[thin,pool,metadata] (thin pool metadata LV)
[cache,pool,data] (cache pool data LV)
[cache,pool,metadata] (cache pool metadata LV)
[pool,spare] (pool spare LV - a common role for an LV that is used for both thin and cache repairs)
2014-08-25 12:02:32 +04:00
	if (lv_is_external_origin(lv)) {
		if (!str_list_add(mem, role, _lv_type_names[LV_TYPE_ORIGIN]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_EXTTHINORIGIN]))
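The hunk above shows the allocation pattern used throughout these helpers: str_list_add() checks for duplicates because a keyword (like "origin") may already have been added by another branch, while str_list_add_no_dup_check() appends keywords known to be unique. A minimal standalone model of that distinction (plain C with a toy fixed-size list, not the lvm2 str_list implementation):

#include <stdio.h>
#include <string.h>

#define MAX_KEYWORDS 16

struct keyword_list {
	const char *items[MAX_KEYWORDS];
	unsigned count;
};

/* Add only if not already present - models str_list_add(). */
static int kw_add(struct keyword_list *kl, const char *kw)
{
	unsigned i;

	for (i = 0; i < kl->count; i++)
		if (!strcmp(kl->items[i], kw))
			return 1; /* already listed - success, no duplicate */

	if (kl->count >= MAX_KEYWORDS)
		return 0;

	kl->items[kl->count++] = kw;
	return 1;
}

/* Append unconditionally - models str_list_add_no_dup_check(). */
static int kw_add_no_dup_check(struct keyword_list *kl, const char *kw)
{
	if (kl->count >= MAX_KEYWORDS)
		return 0;

	kl->items[kl->count++] = kw;
	return 1;
}

int main(void)
{
	struct keyword_list role = { .count = 0 };
	unsigned i;

	/* "origin" may already be set by another branch: check for dups.
	 * "extthinorigin" can only be added here: skip the check. */
	if (!kw_add(&role, "origin") ||
	    !kw_add_no_dup_check(&role, "extthinorigin"))
		return 1;

	for (i = 0; i < role.count; i++)
		printf("%s%s", i ? "," : "", role.items[i]);
	putchar('\n'); /* prints: origin,extthinorigin */

	return 0;
}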
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field, which provides
a very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detailed information on the exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
This is a situation with a thin pool, thin volumes and thin snapshots.
We can see that the internal 'pool_tdata' volume that makes up the thin
pool actually has a level10 raid layout and that the internal 'pool_tmeta'
has a level1 raid layout. Also, we can see that 'thin_snap1' and
'thin_snap2' are both thin snapshots while 'thin_vol2' is a thin origin
(having multiple snapshots).
Such a reporting scheme provides a much better basis for selection
criteria, in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
			goto_bad;
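Since both fields are string lists, the two selection operators reduce to simple set predicates: [] is set equality, {} is set inclusion. A self-contained sketch of the two predicates (toy code illustrating the semantics, not the lvm2 selection engine itself):

#include <stdio.h>
#include <string.h>

static int contains(const char **list, int n, const char *kw)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(list[i], kw))
			return 1;
	return 0;
}

/* {a,b,...}: every requested keyword appears in the field value */
static int match_subset(const char **value, int vn,
			const char **want, int wn)
{
	int i;

	for (i = 0; i < wn; i++)
		if (!contains(value, vn, want[i]))
			return 0;
	return 1;
}

/* [a,b,...]: keyword sets are equal (order-insensitive) */
static int match_strict(const char **value, int vn,
			const char **want, int wn)
{
	return vn == wn && match_subset(value, vn, want, wn);
}

int main(void)
{
	const char *layout[] = { "raid", "raid1" }; /* e.g. an lv_layout value */
	const char *sub[] = { "raid" };
	const char *exact[] = { "raid1", "raid" };

	printf("layout={raid}: %d\n", match_subset(layout, 2, sub, 1));         /* 1 */
	printf("layout=[raid]: %d\n", match_strict(layout, 2, sub, 1));         /* 0 */
	printf("layout=[raid1,raid]: %d\n", match_strict(layout, 2, exact, 2)); /* 1 */

	return 0;
}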
2014-08-25 12:02:32 +04:00
		if (lv->external_count > 1 &&
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_MULTIEXTTHINORIGIN]))
2014-08-13 12:03:45 +04:00
			goto_bad;
	}

	return 1;
bad:
	return 0;
2014-07-01 12:37:01 +04:00
}
2014-08-25 11:07:03 +04:00
static int _lv_layout_and_role_cache(struct dm_pool *mem,
				     const struct logical_volume *lv,
				     struct dm_list *layout,
2014-08-25 12:02:32 +04:00
				     struct dm_list *role,
				     int *public_lv)
2014-07-01 12:37:01 +04:00
{
2014-08-25 12:02:32 +04:00
	int top_level = 0;
2014-08-25 12:02:32 +04:00
	/* non-top-level LVs */
	if (lv_is_cache_pool_metadata(lv)) {
2014-08-25 11:07:03 +04:00
		if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_CACHE]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_POOL]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_METADATA]))
2014-08-13 12:03:45 +04:00
			goto_bad;
	} else if (lv_is_cache_pool_data(lv)) {
2014-08-25 12:02:32 +04:00
		if (!str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_CACHE]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_POOL]) ||
2014-08-25 11:07:03 +04:00
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_DATA]))
2014-08-13 12:03:45 +04:00
			goto_bad;
report: also display "mirror" keyword in lv_layout for mirrored mirror log and "cache" keyword in lv_layout for cached cache pool
$ lvs -a -o name,vg_name,attr,layout,type
LV VG Attr Layout Type
lvol0 vg mwi-a-m--- mirror mirror
[lvol0_mimage_0] vg iwi-aom--- linear image,mirror
[lvol0_mimage_1] vg iwi-aom--- linear image,mirror
[lvol0_mlog] vg mwi-aom--- mirror log,mirror
[lvol0_mlog_mimage_0] vg iwi-aom--- linear image,mirror
[lvol0_mlog_mimage_1] vg iwi-aom--- linear image,mirror
(lvol0_mlog properly displayed as "mirror" layout for mirrored mirror log)
$ lvs -a -o name,vg_name,attr,layout,type
LV VG Attr Layout Type
lvol0 vg Cwi---C--- cache,pool cache,pool
[lvol0_cdata] vg Cwi------- linear cache,data,pool
[lvol0_cmeta] vg ewi------- linear cache,metadata,pool
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
lvol2 vg Cwi---C--- cache,pool cache,pool
[lvol2_cdata] vg Cwi---C--- cache cache,data,pool
[lvol2_cdata_corig] vg owi---C--- linear cache,origin
[lvol2_cmeta] vg ewi------- linear cache,metadata,pool
(lvol2_cdata properly displayed as cached cache pool data)
2014-08-19 15:58:32 +04:00
		if (lv_is_cache(lv) &&
		    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_CACHE]))
			goto_bad;
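The check just above is what makes a stacked sub-LV report the outer layout keyword too: cache pool data that is itself cached gains "cache" in lv_layout, exactly as a mirrored mirror log gains "mirror". A toy illustration of that stacking rule (hypothetical flags, not the real struct logical_volume):

#include <stdio.h>

/* Hypothetical, simplified LV descriptor. */
struct toy_lv {
	int is_mirror_log; /* role: log of a mirror LV */
	int is_mirrored;   /* layout: itself made of mirror images */
};

static void report(const struct toy_lv *lv)
{
	if (lv->is_mirror_log)
		printf("role: log,mirror\n");
	/* the outer layout keyword is added when the sub-LV is itself stacked */
	printf("layout: %s\n", lv->is_mirrored ? "mirror" : "linear");
}

int main(void)
{
	struct toy_lv plain_log = { .is_mirror_log = 1, .is_mirrored = 0 };
	struct toy_lv mirrored_log = { .is_mirror_log = 1, .is_mirrored = 1 };

	report(&plain_log);    /* layout: linear */
	report(&mirrored_log); /* layout: mirror */

	return 0;
}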
2014-08-25 12:02:32 +04:00
	} else if (lv_is_cache_origin(lv)) {
		if (!str_list_add(mem, role, _lv_type_names[LV_TYPE_CACHE]) ||
		    !str_list_add(mem, role, _lv_type_names[LV_TYPE_ORIGIN]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_CACHEORIGIN]))
			goto_bad;
		if (lv_is_cache(lv) &&
		    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_CACHE]))
			goto_bad;
	} else
		top_level = 1;

	if (!top_level) {
		*public_lv = 0;
		return 1;
2014-08-13 12:03:45 +04:00
	}
2014-08-25 12:02:32 +04:00
	/* top-level LVs */
	if (lv_is_cache(lv) &&
	    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_CACHE]))
		goto_bad;
	else if (lv_is_cache_pool(lv)) {
		if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_CACHE]) ||
		    !str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_POOL]))
2014-08-13 12:03:45 +04:00
			goto_bad;
cleanup: consolidate lv_layout and lv_role reporting
This patch makes the keyword combinations found in "lv_layout" and
"lv_role" much more understandable - there were some ambiguities
for some of the combinations which led to confusion before.
Now, the scheme used is:
LAYOUTS ("how the LV is laid out"):
===================================
[linear] (all segments have number of stripes = 1)
[striped] (all segments have number of stripes > 1)
[linear,striped] (mixed linear and striped)
raid (raid layout always reported together with raid level, raid layout == image + metadata LVs underneath that make up raid LV)
[raid,raid1]
[raid,raid10]
[raid,raid4]
[raid,raid5] (exact sublayout not specified during creation - default one used - raid5_ls)
[raid,raid5,raid5_ls]
[raid,raid5,raid5_rs]
[raid,raid5,raid5_la]
[raid,raid5,raid5_ra]
[raid,raid6] (exact sublayout not specified during creation - default one used - raid6_zr)
[raid,raid6,raid6_zr]
[raid,raid6,raid6_nc]
[raid,raid6,raid6_ns]
[mirror] (mirror layout == log + image LVs underneath that make up mirror LV)
thin (thin layout always reported together with sublayout)
[thin,sparse] (thin layout == allocated out of thin pool)
[thin,pool] (thin pool layout == data + metadata volumes underneath that make up thin pool LV, not supposed to be used directly)
[cache] (cache layout == allocated out of cache pool in conjunction with cache origin)
[cache,pool] (cache pool layout == data + metadata volumes underneath that make up cache pool LV, not supposed to be used directly)
[virtual] (virtual layout == not hitting disk underneath, currently this layout denotes only 'zero' device used for origin,thickorigin role)
[unknown] (either error state or missing recognition for such layout)
ROLES ("what's the purpose or use of the LV - what is its role"):
=================================================================
- each LV has at least one of these two roles:
[public] (public LV that users may use freely to write their data to)
[private] (private LV that LVM maintains; not supposed to be used directly by the user to write data to)
- and then some special-purpose roles in addition to that:
[origin,thickorigin] (origin for thick-style snapshot; "thick" as opposed to "thin")
[origin,multithickorigin] (there is more than one thick-style snapshot for this origin)
[origin,thinorigin] (origin for thin snapshot)
[origin,multithinorigin] (there is more than one thin snapshot for this origin)
[origin,extorigin] (external origin for thin snapshot)
[origin,multiextorigin] (there is more than one thin snapshot using this external origin)
[origin,cacheorigin] (cache origin)
[snapshot,thicksnapshot] (thick-style snapshot; "thick" as opposed to "thin")
[snapshot,thinsnapshot] (thin-style snapshot)
[raid,metadata] (raid metadata LV)
[raid,image] (raid image LV)
[mirror,image] (mirror image LV)
[mirror,log] (mirror log LV)
[pvmove] (pvmove LV)
[thin,pool,data] (thin pool data LV)
[thin,pool,metadata] (thin pool metadata LV)
[cache,pool,data] (cache pool data LV)
[cache,pool,metadata] (cache pool metadata LV)
[pool,spare] (pool spare LV - common role for an LV that can be used for both thin and cache repairs)
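For instance, with the consolidated field names, the same kind of selection can be
written directly against "role" (an illustrative sketch reusing the hypothetical
'vg' setup from the examples in these notes):
$ lvs -a -o name,vg_name,lv_attr,layout,role -S 'role={snapshot,thinsnapshot}'
(would select the thin snapshots, i.e. LVs whose role list contains both
"snapshot" and "thinsnapshot")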
2014-08-25 12:02:32 +04:00
		*public_lv = 0;
	}

	return 1;
bad:
	return 0;
}
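/*
 * Set layout and role keywords for old-style (thick) snapshots: the origin
 * gets "origin"/"thickorigin" (plus "multithickorigin" when it has more
 * than one snapshot), the COW volume gets "snapshot"/"thicksnapshot".
 */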
static int _lv_layout_and_role_thick_origin_snapshot(struct dm_pool *mem,
						     const struct logical_volume *lv,
						     struct dm_list *layout,
						     struct dm_list *role,
						     int *public_lv)
{
	if (lv_is_origin(lv)) {
		if (!str_list_add(mem, role, _lv_type_names[LV_TYPE_ORIGIN]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_THICKORIGIN]))
			goto_bad;
		/*
		 * Thin volumes are also marked with the virtual flag, but we don't show
		 * "virtual" layout for thin LVs as they have their own keyword for
		 * layout - "thin"!  So rule thin LVs out here!
		 */
		if (lv_is_virtual(lv) && !lv_is_thin_volume(lv)) {
			if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_VIRTUAL]))
				goto_bad;
			*public_lv = 0;
		}
		if (lv->origin_count > 1 &&
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_MULTITHICKORIGIN]))
			goto_bad;
	} else if (lv_is_cow(lv)) {
		if (!str_list_add(mem, role, _lv_type_names[LV_TYPE_SNAPSHOT]) ||
		    !str_list_add_no_dup_check(mem, role, _lv_type_names[LV_TYPE_THICKSNAPSHOT]))
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
a very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detailed information on the exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to the lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists, so selection (-S/--select)
criteria can be defined easily using the list operators:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
This is a situation with a thin pool, thin volumes and thin snapshots.
We can see that the internal 'pool_tdata' volume that makes up the thin
pool actually has a level10 raid layout and the internal 'pool_tmeta' has
a level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol2' is a thin origin (having
multiple snapshots).
Such a reporting scheme provides a much better basis for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs whose layout includes both raid and level1)
And so on...
2014-08-13 12:03:45 +04:00
			goto_bad;
	}

	return 1;
bad:
	return 0;
}
2014-08-25 11:07:03 +04:00
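/*
 * Determine the layout and role string lists for @lv.  Returns 1 on success
 * with *layout and *role set, 0 on allocation failure.  A minimal caller
 * sketch (illustrative only, assuming libdevmapper's struct dm_str_list
 * items on the returned lists):
 *
 *	struct dm_list *layout, *role;
 *	struct dm_str_list *sl;
 *
 *	if (!lv_layout_and_role(lv->vg->vgmem, lv, &layout, &role))
 *		return 0;
 *	dm_list_iterate_items(sl, layout)
 *		log_debug("layout: %s", sl->str);
 */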
int lv_layout_and_role(struct dm_pool *mem, const struct logical_volume *lv,
		       struct dm_list **layout, struct dm_list **role)
{
2015-02-19 16:03:45 +03:00
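	/* Set while scanning segments to classify linear, striped or mixed layout. */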
	int linear, striped;
2014-08-13 12:03:45 +04:00
	struct lv_segment *seg;
2014-08-25 12:02:32 +04:00
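	/* Assume a public LV until one of the role helpers below marks it private. */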
	int public_lv = 1;
2014-08-13 12:03:45 +04:00
2014-08-25 11:07:03 +04:00
	*layout = *role = NULL;
2014-08-13 12:03:45 +04:00
	if (!(*layout = str_list_create(mem))) {
		log_error("LV layout list allocation failed");
2015-02-19 16:06:17 +03:00
		return 0;
2014-08-13 12:03:45 +04:00
	}
2014-08-25 11:07:03 +04:00
	if (!(*role = str_list_create(mem))) {
		log_error("LV role list allocation failed");
2014-08-13 12:03:45 +04:00
		goto bad;
	}
2016-03-01 17:25:04 +03:00
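	/* Historical (removed but still tracked) LVs: layout "none", role "history". */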
	if (lv_is_historical(lv)) {
		if (!str_list_add_no_dup_check(mem, *layout, _lv_type_names[LV_TYPE_NONE]) ||
		    !str_list_add_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_HISTORY]))
			goto_bad;
	}
2014-08-13 12:03:45 +04:00
	/* Mirrors and related */
2014-09-16 03:13:46 +04:00
	if ((lv_is_mirror_type(lv) || lv_is_pvmove(lv)) &&
2014-08-25 12:02:32 +04:00
	    !_lv_layout_and_role_mirror(mem, lv, *layout, *role, &public_lv))
2014-08-13 12:03:45 +04:00
		goto_bad;

	/* RAIDs and related */
	if (lv_is_raid_type(lv) &&
2014-08-25 12:02:32 +04:00
	    !_lv_layout_and_role_raid(mem, lv, *layout, *role, &public_lv))
2014-08-13 12:03:45 +04:00
		goto_bad;

	/* Thins and related */
2014-08-25 12:02:32 +04:00
	if ((lv_is_thin_type(lv) || lv_is_external_origin(lv)) &&
	    !_lv_layout_and_role_thin(mem, lv, *layout, *role, &public_lv))
2014-08-13 12:03:45 +04:00
		goto_bad;

	/* Caches and related */
cleanup: consolidate lv_layout and lv_role reporting
This patch makes the keyword combinations found in "lv_layout" and
"lv_role" much more understandable - there were some ambiguities
in some of the combinations which led to confusion before.
Now, the scheme used is:
LAYOUTS ("how the LV is laid out"):
===================================
[linear] (all segments have number of stripes = 1)
[striped] (all segments have number of stripes > 1)
[linear,striped] (mixed linear and striped)
raid (raid layout always reported together with raid level, raid layout == image + metadata LVs underneath that make up raid LV)
[raid,raid1]
[raid,raid10]
[raid,raid4]
[raid,raid5] (exact sublayout not specified during creation - default one used - raid5_ls)
[raid,raid5,raid5_ls]
[raid,raid5,raid5_rs]
[raid,raid5,raid5_la]
[raid,raid5,raid5_ra]
[raid,raid6] (exact sublayout not specified during creation - default one used - raid6_zr)
[raid,raid6,raid6_zr]
[raid,raid6,raid6_nc]
[raid,raid6,raid6_ns]
[mirror] (mirror layout == log + image LVs underneath that make up mirror LV)
thin (thin layout always reported together with sublayout)
[thin,sparse] (thin layout == allocated out of thin pool)
[thin,pool] (thin pool layout == data + metadata volumes underneath that make up thin pool LV, not supposed to be used for direct use!!!)
[cache] (cache layout == allocated out of cache pool in conjunction with cache origin)
[cache,pool] (cache pool layout == data + metadata volumes underneath that make up cache pool LV, not supposed to be used for direct use!!!)
[virtual] (virtual layout == not hitting disk underneath, currently this layout denotes only 'zero' device used for origin,thickorigin role)
[unknown] (either error state or missing recognition for such layout)
ROLES ("what's the purpose or use of the LV - what is its role"):
=================================================================
- each LV has at least one of these two roles:
[public] (public LV that users may use freely to write their data to)
[private] (private LV that LVM maintains; not supposed to be used directly by the user to write data to)
- and then some special-purpose roles in addition to that:
[origin,thickorigin] (origin for thick-style snapshot; "thick" as opposed to "thin")
[origin,multithickorigin] (there are more than 2 thick-style snapshots for this origin)
[origin,thinorigin] (origin for thin snapshot)
[origin,multithinorigin] (there are more than 2 thin snapshots for this origin)
[origin,extorigin] (external origin for thin snapshot)
[origin,multiextorigin] (there are more than 2 thin snapshots using this external origin)
[origin,cacheorigin] (cache origin)
[snapshot,thicksnapshot] (thick-style snapshot; "thick" as opposed to "thin")
[snapshot,thinsnapshot] (thin-style snapshot)
[raid,metadata] (raid metadata LV)
[raid,image] (raid image LV)
[mirror,image] (mirror image LV)
[mirror,log] (mirror log LV)
[pvmove] (pvmove LV)
[thin,pool,data] (thin pool data LV)
[thin,pool,metadata] (thin pool metadata LV)
[cache,pool,data] (cache pool data LV)
[cache,pool,metadata] (cache pool metadata LV)
[pool,spare] (pool spare LV - a common role for an LV that is used for both thin and cache repairs)
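Because both fields are plain string lists, role checks inside the code can stay trivial. A hedged sketch follows: the helper below is hypothetical, and it assumes str_list_match_item() from the same str_list helpers this file already uses (alongside str_list_add_no_dup_check()).
/* Hypothetical helper, sketch only: under the scheme above an LV is
 * internal to LVM exactly when its role list carries "private". */
static int _lv_role_is_private(const struct dm_list *role)
{
	return str_list_match_item(role, "private");
}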
2014-08-25 12:02:32 +04:00
	if ((lv_is_cache_type(lv) || lv_is_cache_origin(lv)) &&
	    !_lv_layout_and_role_cache(mem, lv, *layout, *role, &public_lv))
2014-08-13 12:03:45 +04:00
		goto_bad;
2014-08-25 12:02:32 +04:00
/* Pool-specific */
	if (lv_is_pool_metadata_spare(lv)) {
		if (!str_list_add_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_POOL]) ||
		    !str_list_add_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_SPARE]))
2014-08-13 12:03:45 +04:00
			goto_bad;
2014-08-25 12:02:32 +04:00
		public_lv = 0;
2014-08-13 12:03:45 +04:00
	}
/* Old-style origins/snapshots, virtual origins */
2014-08-25 12:02:32 +04:00
	if (!_lv_layout_and_role_thick_origin_snapshot(mem, lv, *layout, *role, &public_lv))
		goto_bad;
2014-08-13 12:03:45 +04:00
2015-10-08 14:44:21 +03:00
	if (lv_is_lockd_sanlock_lv(lv)) {
		if (!str_list_add_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_LOCKD]) ||
		    !str_list_add_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_SANLOCK]))
			goto_bad;
		public_lv = 0;
	}
2014-08-13 12:03:45 +04:00
	/*
	 * If layout not yet determined, it must be either
	 * linear or striped or a mixture of these two.
	 */
	if (dm_list_empty(*layout)) {
2015-02-19 16:03:45 +03:00
		linear = striped = 0;
2014-08-13 12:03:45 +04:00
		dm_list_iterate_items(seg, &lv->segments) {
			if (seg_is_linear(seg))
				linear = 1;
			else if (seg_is_striped(seg))
				striped = 1;
			else {
				/*
				 * This should not happen but if it does
				 * we'll see that there's "unknown" layout
				 * present. This means we forgot to detect
2014-08-25 11:07:03 +04:00
				 * the role above and we need to add proper
				 * detection for such a role!
				 */
2015-02-19 16:03:45 +03:00
			log_warn(INTERNAL_ERROR "WARNING: Failed to properly detect "
				 "layout and role for LV %s/%s.",
				 lv->vg->name, lv->name);
2014-08-13 12:03:45 +04:00
			}
		}

		if (linear &&
		    !str_list_add_no_dup_check(mem, *layout, _lv_type_names[LV_TYPE_LINEAR]))
			goto_bad;

		if (striped &&
		    !str_list_add_no_dup_check(mem, *layout, _lv_type_names[LV_TYPE_STRIPED]))
			goto_bad;

		if (!linear && !striped &&
		    !str_list_add_no_dup_check(mem, *layout, _lv_type_names[LV_TYPE_UNKNOWN]))
			goto_bad;
	}
2014-08-25 12:02:32 +04:00
	/* finally, add either 'public' or 'private' role to the LV */
	if (public_lv) {
		if (!str_list_add_h_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_PUBLIC]))
			goto_bad;
	} else {
		if (!str_list_add_h_no_dup_check(mem, *role, _lv_type_names[LV_TYPE_PRIVATE]))
			goto_bad;
	}
2014-08-13 12:03:45 +04:00
	return 1;
bad:
2015-02-19 16:06:17 +03:00
	dm_pool_free(mem, *layout);
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol2' is a thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
return 0 ;
2014-07-01 12:37:01 +04:00
}
2013-08-23 17:40:13 +04:00
struct dm_list_and_mempool {
struct dm_list * list ;
struct dm_pool * mem ;
} ;
2014-03-27 13:35:07 +04:00
static int _get_pv_list_for_lv ( struct logical_volume * lv , void * data )
2013-08-23 17:40:13 +04:00
{
int dup_found ;
uint32_t s ;
struct pv_list * pvl ;
struct lv_segment * seg ;
struct dm_list * pvs = ( ( struct dm_list_and_mempool * ) data ) - > list ;
struct dm_pool * mem = ( ( struct dm_list_and_mempool * ) data ) - > mem ;
dm_list_iterate_items ( seg , & lv - > segments ) {
for ( s = 0 ; s < seg - > area_count ; s + + ) {
dup_found = 0 ;
if ( seg_type ( seg , s ) ! = AREA_PV )
continue ;
/* do not add duplicates */
dm_list_iterate_items ( pvl , pvs )
if ( pvl - > pv = = seg_pv ( seg , s ) )
dup_found = 1 ;
if ( dup_found )
continue ;
if ( ! ( pvl = dm_pool_zalloc ( mem , sizeof ( * pvl ) ) ) ) {
log_error ( " Failed to allocate memory " ) ;
return 0 ;
}
pvl - > pv = seg_pv ( seg , s ) ;
log_debug_metadata ( " %s/%s uses %s " , lv - > vg - > name ,
lv - > name , pv_dev_name ( pvl - > pv ) ) ;
dm_list_add ( pvs , & pvl - > list ) ;
}
}
return 1 ;
}
/*
* get_pv_list_for_lv
* @ mem - mempool to allocate the list from .
* @ lv
* @ pvs - The list to add pv_list items to .
*
* ' pvs ' is filled with ' pv_list ' items for PVs that compose the LV .
* If the ' pvs ' list already has items in it , duplicates will not be
* added . So , it is safe to repeatedly call this function for different
* LVs and build up a list of PVs for them all .
*
* Memory to create the list is obtained from the mempool provided .
*
* Returns : 1 on success , 0 on error
*/
int get_pv_list_for_lv ( struct dm_pool * mem ,
struct logical_volume * lv , struct dm_list * pvs )
{
struct dm_list_and_mempool context = { pvs , mem } ;
log_debug_metadata ( " Generating list of PVs that %s/%s uses: " ,
lv - > vg - > name , lv - > name ) ;
2014-03-27 13:35:07 +04:00
if ( ! _get_pv_list_for_lv ( lv , & context ) )
2013-08-23 17:40:13 +04:00
return_0 ;
2014-03-27 13:35:07 +04:00
return for_each_sub_lv ( lv , & _get_pv_list_for_lv , & context ) ;
2013-08-23 17:40:13 +04:00
}
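A minimal usage sketch for get_pv_list_for_lv(): accumulate the PVs behind two LVs into one de-duplicated list. This is a hypothetical helper, not part of the tree; the LV pointers are assumed to come from the caller.
/* Hypothetical helper: print every PV name backing either of two LVs.
 * Duplicates are skipped by get_pv_list_for_lv(), so one shared list is safe. */
static int _print_pvs_for_two_lvs(struct logical_volume *lv1,
				  struct logical_volume *lv2)
{
	struct dm_pool *mem;
	struct dm_list pvs;
	struct pv_list *pvl;
	int r = 1;

	if (!(mem = dm_pool_create("pvlist", 1024)))
		return_0;

	dm_list_init(&pvs);

	if (!get_pv_list_for_lv(mem, lv1, &pvs) ||
	    !get_pv_list_for_lv(mem, lv2, &pvs))
		r = 0;
	else
		dm_list_iterate_items(pvl, &pvs)
			log_print("%s", pv_dev_name(pvl->pv));

	dm_pool_destroy(mem);

	return r;
}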
2013-02-21 00:40:17 +04:00
/*
* get_default_region_size
* @ cmd
*
* ' mirror_region_size ' and ' raid_region_size ' are effectively the same thing .
* However , " raid " is more inclusive than " mirror " , so the name has been
* changed . This function checks for the old setting and warns the user if
* it is being overridden by the new setting ( i . e . warn if both settings are
* present ) .
*
* Note that the config files give defaults in kiB terms , but we
* return the value in terms of sectors .
*
* Returns : default region_size in sectors
*/
2014-12-04 01:47:08 +03:00
static int _get_default_region_size ( struct cmd_context * cmd )
2013-02-21 00:40:17 +04:00
{
int mrs , rrs ;
/*
* ' mirror_region_size ' is the old setting . It is overridden
* by the new setting , ' raid_region_size ' .
*/
2013-06-25 14:30:34 +04:00
mrs = 2 * find_config_tree_int ( cmd , activation_mirror_region_size_CFG , NULL ) ;
rrs = 2 * find_config_tree_int ( cmd , activation_raid_region_size_CFG , NULL ) ;
2013-02-21 00:40:17 +04:00
if ( ! mrs & & ! rrs )
return DEFAULT_RAID_REGION_SIZE * 2 ;
if ( ! mrs )
return rrs ;
if ( ! rrs )
return mrs ;
if ( mrs ! = rrs )
log_verbose ( " Overriding default 'mirror_region_size' setting "
" with 'raid_region_size' setting of %u kiB " ,
rrs / 2 ) ;
return rrs ;
}
2014-12-04 01:47:08 +03:00
static int _round_down_pow2 ( int r )
{
/* Set all bits to the right of the leftmost set bit */
r | = ( r > > 1 ) ;
r | = ( r > > 2 ) ;
r | = ( r > > 4 ) ;
r | = ( r > > 8 ) ;
r | = ( r > > 16 ) ;
/* Pull out the leftmost set bit */
return r & ~ ( r > > 1 ) ;
}
int get_default_region_size ( struct cmd_context * cmd )
{
2017-02-08 00:12:24 +03:00
int pagesize = lvm_getpagesize ( ) ;
2014-12-04 01:47:08 +03:00
int region_size = _get_default_region_size ( cmd ) ;
2016-06-30 19:59:44 +03:00
if ( ! is_power_of_2 ( region_size ) ) {
2014-12-04 01:47:08 +03:00
region_size = _round_down_pow2 ( region_size ) ;
2015-09-23 16:37:52 +03:00
log_verbose ( " Reducing region size to %u kiB (power of 2). " ,
2014-12-04 01:47:08 +03:00
region_size / 2 ) ;
}
2017-02-08 00:12:24 +03:00
if ( region_size % ( pagesize > > SECTOR_SHIFT ) ) {
region_size = DEFAULT_RAID_REGION_SIZE * 2 ;
log_verbose ( " Using default region size %u kiB (multiple of page size). " ,
region_size / 2 ) ;
}
2014-12-04 01:47:08 +03:00
return region_size ;
}
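The two constraints above (power of 2, multiple of the page size in sectors) can be exercised in isolation. A standalone sketch with assumed values: 4 KiB pages and a 1000-sector configured region size.
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Same bit-smearing trick as _round_down_pow2() above. */
static int _pow2_round_down(int r)
{
	r |= (r >> 1);
	r |= (r >> 2);
	r |= (r >> 4);
	r |= (r >> 8);
	r |= (r >> 16);
	return r & ~(r >> 1);
}

int main(void)
{
	int pagesize = 4096;		/* stand-in for lvm_getpagesize() */
	int region_size = 1000;		/* sectors; deliberately not a power of 2 */

	if (region_size & (region_size - 1)) {	/* equivalent of !is_power_of_2() */
		region_size = _pow2_round_down(region_size);	/* 1000 -> 512 */
		printf("Reduced region size to %d KiB.\n", region_size / 2);
	}

	/* 4096 >> 9 = 8 sectors per page; 512 % 8 == 0, so this passes. */
	if (region_size % (pagesize >> SECTOR_SHIFT))
		printf("Would fall back to the default region size.\n");

	return 0;
}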
2008-01-17 20:17:09 +03:00
int add_seg_to_segs_using_this_lv ( struct logical_volume * lv ,
struct lv_segment * seg )
2008-01-16 22:00:59 +03:00
{
struct seg_list * sl ;
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( sl , & lv - > segs_using_this_lv ) {
2008-01-16 22:00:59 +03:00
if ( sl - > seg = = seg ) {
sl - > count + + ;
return 1 ;
}
}
2016-11-25 16:08:39 +03:00
log_very_verbose ( " Adding %s: " FMTu32 " as an user of %s. " ,
display_lvname ( seg - > lv ) , seg - > le , display_lvname ( lv ) ) ;
2008-01-19 01:00:46 +03:00
2010-03-31 21:23:18 +04:00
if ( ! ( sl = dm_pool_zalloc ( lv - > vg - > vgmem , sizeof ( * sl ) ) ) ) {
2016-11-25 16:08:39 +03:00
log_error ( " Failed to allocate segment list. " ) ;
2008-01-16 22:00:59 +03:00
return 0 ;
}
sl - > count = 1 ;
sl - > seg = seg ;
2008-11-04 01:14:30 +03:00
dm_list_add ( & lv - > segs_using_this_lv , & sl - > list ) ;
2008-01-16 22:00:59 +03:00
return 1 ;
}
2008-01-17 20:17:09 +03:00
int remove_seg_from_segs_using_this_lv ( struct logical_volume * lv ,
struct lv_segment * seg )
2008-01-16 22:00:59 +03:00
{
struct seg_list * sl ;
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( sl , & lv - > segs_using_this_lv ) {
2008-01-16 22:00:59 +03:00
if ( sl - > seg ! = seg )
continue ;
if ( sl - > count > 1 )
sl - > count - - ;
2008-01-19 01:00:46 +03:00
else {
2016-11-25 16:08:39 +03:00
log_very_verbose ( " %s: " FMTu32 " is no longer a user of %s. " ,
display_lvname ( seg - > lv ) , seg - > le ,
display_lvname ( lv ) ) ;
2008-11-04 01:14:30 +03:00
dm_list_del ( & sl - > list ) ;
2008-01-19 01:00:46 +03:00
}
2008-01-16 22:00:59 +03:00
return 1 ;
}
2016-11-25 16:08:39 +03:00
log_error ( INTERNAL_ERROR " Segment %s: " FMTu32 " is not a user of %s. " ,
display_lvname ( seg - > lv ) , seg - > le , display_lvname ( lv ) ) ;
2008-01-16 22:00:59 +03:00
return 0 ;
}
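The add/remove pair above maintains a reference-counted back-pointer list: an entry only drops off segs_using_this_lv once its count reaches zero. A hypothetical illustration of that invariant, not in the tree:
/* Hypothetical illustration: linking the same (lv, seg) pair twice bumps
 * sl->count to 2, so two removes are needed before the entry is unlinked. */
static int _link_twice_then_unlink(struct logical_volume *lv,
				   struct lv_segment *seg)
{
	if (!add_seg_to_segs_using_this_lv(lv, seg) ||
	    !add_seg_to_segs_using_this_lv(lv, seg))	/* count == 2 */
		return_0;

	if (!remove_seg_from_segs_using_this_lv(lv, seg))	/* count == 1 */
		return_0;

	/* The seg_list entry still exists here; seg still "uses" lv. */

	return remove_seg_from_segs_using_this_lv(lv, seg);	/* entry unlinked */
}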
/*
* This is a function specialized for the common case where there is
* only one segment which uses the LV .
* e . g . the LV is a layer inserted by insert_layer_for_lv ( ) .
*
* In general , walk through lv - > segs_using_this_lv .
*/
2014-07-04 02:49:34 +04:00
struct lv_segment * get_only_segment_using_this_lv ( const struct logical_volume * lv )
2008-01-16 22:00:59 +03:00
{
struct seg_list * sl ;
2014-02-22 04:26:01 +04:00
if ( ! lv ) {
log_error ( INTERNAL_ERROR " get_only_segment_using_this_lv() called with NULL LV. " ) ;
return NULL ;
}
2014-11-05 01:06:21 +03:00
dm_list_iterate_items ( sl , & lv - > segs_using_this_lv ) {
/* Needs to be the only item in the list */
if ( ! dm_list_end ( & lv - > segs_using_this_lv , & sl - > list ) )
break ;
if ( sl - > count ! = 1 ) {
log_error ( " %s is expected to have only one segment using it, "
2016-11-25 16:08:39 +03:00
" while %s: " FMTu32 " uses it %d times. " ,
display_lvname ( lv ) , display_lvname ( sl - > seg - > lv ) ,
sl - > seg - > le , sl - > count ) ;
2014-11-05 01:06:21 +03:00
return NULL ;
}
2008-01-16 22:00:59 +03:00
2014-11-05 01:06:21 +03:00
return sl - > seg ;
2008-01-16 23:00:01 +03:00
}
2014-11-05 01:06:21 +03:00
log_error ( " %s is expected to have only one segment using it, while it has %d. " ,
display_lvname ( lv ) , dm_list_size ( & lv - > segs_using_this_lv ) ) ;
return NULL ;
2008-01-16 22:00:59 +03:00
}
2005-11-28 23:01:00 +03:00
/*
* PVs used by a segment of an LV
*/
struct seg_pvs {
2008-11-04 01:14:30 +03:00
struct dm_list list ;
2005-11-28 23:01:00 +03:00
2008-11-04 01:14:30 +03:00
struct dm_list pvs ; /* struct pv_list */
2005-11-28 23:01:00 +03:00
uint32_t le ;
uint32_t len ;
} ;
2008-11-04 01:14:30 +03:00
static struct seg_pvs * _find_seg_pvs_by_le ( struct dm_list * list , uint32_t le )
2007-12-20 18:42:55 +03:00
{
struct seg_pvs * spvs ;
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( spvs , list )
2007-12-20 18:42:55 +03:00
if ( le > = spvs - > le & & le < spvs - > le + spvs - > len )
return spvs ;
return NULL ;
}
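The lookup above treats each seg_pvs entry as a half-open range [le, le + len). A standalone model with illustrative numbers:
#include <assert.h>
#include <stddef.h>

struct range { unsigned le, len; };

/* Standalone model of _find_seg_pvs_by_le(): first range containing le. */
static const struct range *_find_by_le(const struct range *r, unsigned n,
				       unsigned le)
{
	unsigned i;

	for (i = 0; i < n; i++)
		if (le >= r[i].le && le < r[i].le + r[i].len)
			return &r[i];

	return NULL;
}

int main(void)
{
	struct range rs[] = { { 0, 100 }, { 100, 50 } };

	assert(_find_by_le(rs, 2, 99) == &rs[0]);
	assert(_find_by_le(rs, 2, 100) == &rs[1]);	/* boundary belongs to the next range */
	assert(_find_by_le(rs, 2, 150) == NULL);	/* past the end */

	return 0;
}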
2001-11-27 19:37:33 +03:00
/*
2005-06-01 20:51:55 +04:00
* Find first unused LV number .
2001-11-27 19:37:33 +03:00
*/
2005-06-01 20:51:55 +04:00
uint32_t find_free_lvnum ( struct logical_volume * lv )
2001-11-27 19:37:33 +03:00
{
2012-06-21 23:19:28 +04:00
int lvnum_used [ MAX_RESTRICTED_LVS + 1 ] = { 0 } ;
2005-06-01 20:51:55 +04:00
uint32_t i = 0 ;
struct lv_list * lvl ;
int lvnum ;
2001-11-27 19:37:33 +03:00
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( lvl , & lv - > vg - > lvs ) {
2005-06-01 20:51:55 +04:00
lvnum = lvnum_from_lvid ( & lvl - > lv - > lvid ) ;
if ( lvnum < = MAX_RESTRICTED_LVS )
lvnum_used [ lvnum ] = 1 ;
2001-11-27 19:37:33 +03:00
}
2005-06-01 20:51:55 +04:00
while ( lvnum_used [ i ] )
i + + ;
2003-04-25 02:23:24 +04:00
2005-06-01 20:51:55 +04:00
/* FIXME What if none are free? */
2001-11-27 19:37:33 +03:00
2005-06-01 20:51:55 +04:00
return i ;
2001-11-27 19:37:33 +03:00
}
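The scan above is a presence bitmap followed by a first-gap search. A standalone model; MAX_RESTRICTED_LVS is assumed to match the internal value, but any bound works for the sketch, and the "none free" question stays open here too.
#include <assert.h>

#define MAX_RESTRICTED_LVS 255	/* assumed to match the internal value */

/* Standalone model of find_free_lvnum(): mark the numbers in use,
 * then scan for the first gap. */
static unsigned _first_free(const int *in_use, unsigned n)
{
	int used[MAX_RESTRICTED_LVS + 1] = { 0 };
	unsigned i;

	for (i = 0; i < n; i++)
		if (in_use[i] >= 0 && in_use[i] <= MAX_RESTRICTED_LVS)
			used[in_use[i]] = 1;

	for (i = 0; i <= MAX_RESTRICTED_LVS && used[i]; i++)
		;

	return i;
}

int main(void)
{
	int nums[] = { 0, 1, 3 };

	assert(_first_free(nums, 3) == 2);	/* 2 is the first unused number */

	return 0;
}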
2014-06-09 14:08:27 +04:00
dm_percent_t copy_percent ( const struct logical_volume * lv )
2012-10-24 05:33:54 +04:00
{
uint32_t numerator = 0u , denominator = 0u ;
struct lv_segment * seg ;
dm_list_iterate_items ( seg , & lv - > segments ) {
denominator + = seg - > area_len ;
2015-09-23 16:37:52 +03:00
/* FIXME Generalise name of 'extents_copied' field */
2016-05-23 18:46:38 +03:00
if ( ( ( seg_is_raid ( seg ) & & ! seg_is_any_raid0 ( seg ) ) | | seg_is_mirrored ( seg ) ) & &
2013-07-09 15:34:48 +04:00
( seg - > area_count > 1 ) )
2012-10-24 05:33:54 +04:00
numerator + = seg - > extents_copied ;
else
numerator + = seg - > area_len ;
}
2016-05-27 14:48:30 +03:00
return denominator ? dm_make_percent ( numerator , denominator ) : DM_PERCENT_100 ;
2012-10-24 05:33:54 +04:00
}
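In copy_percent(), only mirrored/RAID segments contribute extents_copied; every other segment counts as fully in sync. A toy model with made-up extent counts:
#include <stdio.h>

struct toy_seg { unsigned area_len, extents_copied, mirrored; };

int main(void)
{
	/* One mirrored segment 40% synced, one linear segment. */
	struct toy_seg segs[] = {
		{ 100, 40, 1 },		/* mirrored: counts extents_copied */
		{  50,  0, 0 },		/* linear: counts its full length */
	};
	unsigned numerator = 0, denominator = 0, i;

	for (i = 0; i < 2; i++) {
		denominator += segs[i].area_len;
		numerator += segs[i].mirrored ? segs[i].extents_copied
					      : segs[i].area_len;
	}

	/* (40 + 50) / 150 => 60% */
	printf("%.0f%%\n", denominator ? 100.0 * numerator / denominator : 100.0);

	return 0;
}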
2015-09-24 19:50:53 +03:00
/* Round up extents to next stripe boundary for number of stripes */
static uint32_t _round_to_stripe_boundary ( struct volume_group * vg , uint32_t extents ,
uint32_t stripes , int extend )
{
uint32_t size_rest , new_extents = extents ;
if ( ! stripes )
return extents ;
/* Round up extents to stripe divisible amount */
if ( ( size_rest = extents % stripes ) ) {
new_extents + = extend ? stripes - size_rest : - size_rest ;
2017-02-10 00:41:28 +03:00
log_print_unless_silent ( " Rounding size %s (%u extents) %s to stripe boundary size %s(%u extents). " ,
2015-11-07 19:16:49 +03:00
display_size ( vg - > cmd , ( uint64_t ) extents * vg - > extent_size ) , extents ,
2017-02-10 00:41:28 +03:00
new_extents < extents ? " down " : " up " ,
2015-11-07 19:16:49 +03:00
display_size ( vg - > cmd , ( uint64_t ) new_extents * vg - > extent_size ) , new_extents ) ;
2015-09-24 19:50:53 +03:00
}
return new_extents ;
}
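The rounding direction depends on whether the caller is extending or reducing. A standalone restatement with example values:
#include <assert.h>

/* Standalone restatement of the rule above: extending rounds up to the next
 * multiple of 'stripes', reducing rounds down. */
static unsigned _round_to_stripes(unsigned extents, unsigned stripes, int extend)
{
	unsigned rest = stripes ? extents % stripes : 0;

	if (!rest)
		return extents;

	return extend ? extents + (stripes - rest) : extents - rest;
}

int main(void)
{
	assert(_round_to_stripes(100, 3, 1) == 102);	/* extend: round up */
	assert(_round_to_stripes(100, 3, 0) == 99);	/* reduce: round down */
	assert(_round_to_stripes(99, 3, 1) == 99);	/* already aligned */

	return 0;
}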
2005-06-01 20:51:55 +04:00
/*
* All lv_segments get created here .
*/
2011-10-23 20:02:01 +04:00
struct lv_segment * alloc_lv_segment ( const struct segment_type * segtype ,
2005-04-22 19:44:00 +04:00
struct logical_volume * lv ,
uint32_t le , uint32_t len ,
2017-02-24 02:50:00 +03:00
uint32_t reshape_len ,
2009-11-25 01:55:55 +03:00
uint64_t status ,
2005-04-22 19:44:00 +04:00
uint32_t stripe_size ,
2005-06-01 20:51:55 +04:00
struct logical_volume * log_lv ,
2005-04-22 19:44:00 +04:00
uint32_t area_count ,
uint32_t area_len ,
2017-02-24 02:50:00 +03:00
uint32_t data_copies ,
2005-04-22 19:44:00 +04:00
uint32_t chunk_size ,
2005-06-01 20:51:55 +04:00
uint32_t region_size ,
2010-04-08 04:28:57 +04:00
uint32_t extents_copied ,
struct lv_segment * pvmove_source_seg )
2001-11-29 21:45:35 +03:00
{
2002-11-18 17:04:08 +03:00
struct lv_segment * seg ;
2011-10-23 20:02:01 +04:00
struct dm_pool * mem = lv - > vg - > vgmem ;
2005-10-18 17:43:40 +04:00
uint32_t areas_sz = area_count * sizeof ( * seg - > areas ) ;
2001-11-29 21:45:35 +03:00
2011-08-03 02:07:20 +04:00
if ( ! segtype ) {
log_error ( INTERNAL_ERROR " alloc_lv_segment: Missing segtype. " ) ;
return NULL ;
}
2008-01-30 16:19:47 +03:00
if ( ! ( seg = dm_pool_zalloc ( mem , sizeof ( * seg ) ) ) )
return_NULL ;
2005-10-18 17:43:40 +04:00
if ( ! ( seg - > areas = dm_pool_zalloc ( mem , areas_sz ) ) ) {
dm_pool_free ( mem , seg ) ;
2008-01-30 16:19:47 +03:00
return_NULL ;
2001-11-29 21:45:35 +03:00
}
2017-11-01 01:16:13 +03:00
if ( segtype_is_raid_with_meta ( segtype ) & &
2011-08-03 02:07:20 +04:00
! ( seg - > meta_areas = dm_pool_zalloc ( mem , areas_sz ) ) ) {
dm_pool_free ( mem , seg ) ; /* frees everything alloced since seg */
return_NULL ;
2005-06-03 18:49:51 +04:00
}
2005-04-22 19:44:00 +04:00
seg - > segtype = segtype ;
seg - > lv = lv ;
seg - > le = le ;
seg - > len = len ;
2017-02-24 02:50:00 +03:00
seg - > reshape_len = reshape_len ;
2005-04-22 19:44:00 +04:00
seg - > status = status ;
seg - > stripe_size = stripe_size ;
seg - > area_count = area_count ;
seg - > area_len = area_len ;
2017-02-24 03:57:04 +03:00
seg - > data_copies = data_copies ? : lv_raid_data_copies ( segtype , area_count ) ;
2005-04-22 19:44:00 +04:00
seg - > chunk_size = chunk_size ;
2005-06-01 20:51:55 +04:00
seg - > region_size = region_size ;
2005-04-22 19:44:00 +04:00
seg - > extents_copied = extents_copied ;
2010-04-08 04:28:57 +04:00
seg - > pvmove_source_seg = pvmove_source_seg ;
2008-11-04 01:14:30 +03:00
dm_list_init ( & seg - > tags ) ;
2016-07-27 19:17:29 +03:00
dm_list_init ( & seg - > origin_list ) ;
2011-10-17 18:17:09 +04:00
dm_list_init ( & seg - > thin_messages ) ;
2004-03-08 20:19:15 +03:00
2011-09-08 20:41:18 +04:00
if ( log_lv & & ! attach_mirror_log ( seg , log_lv ) )
return_NULL ;
2011-09-06 04:26:42 +04:00
2014-09-16 03:13:46 +04:00
if ( segtype_is_mirror ( segtype ) )
lv - > status | = MIRROR ;
if ( segtype_is_mirrored ( segtype ) )
lv - > status | = MIRRORED ;
2005-06-01 20:51:55 +04:00
return seg ;
}
2017-02-10 00:41:28 +03:00
/*
* Temporary helper to return number of data copies for
* RAID segment @ seg until seg - > data_copies got added
*/
static uint32_t _raid_data_copies ( struct lv_segment * seg )
{
/*
* FIXME : needs to change once more than 2 are supported .
* I . e . use seg - > data_copies then
*/
if ( seg_is_raid10 ( seg ) )
return 2 ;
2017-07-19 17:16:12 +03:00
if ( seg_is_raid1 ( seg ) )
2017-02-10 00:41:28 +03:00
return seg - > area_count ;
return seg - > segtype - > parity_devs + 1 ;
}
/* Data image count for RAID segment @seg */
static uint32_t _raid_stripes_count ( struct lv_segment * seg )
{
/*
* FIXME : raid10 needs to change once more than
* 2 data_copies and odd # of legs supported .
*/
if ( seg_is_raid10 ( seg ) )
return seg - > area_count / _raid_data_copies ( seg ) ;
return seg - > area_count - seg - > segtype - > parity_devs ;
}
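Worked examples of the two helpers, restated standalone; the fixed value of 2 copies for raid10 mirrors the current limitation noted in the FIXMEs above.
#include <assert.h>

enum rtype { RAID1, RAID5, RAID10 };

struct toy_rseg { enum rtype type; unsigned area_count, parity_devs; };

/* Standalone restatement of _raid_data_copies() / _raid_stripes_count(). */
static unsigned _copies(const struct toy_rseg *s)
{
	if (s->type == RAID10)
		return 2;		/* only 2-way raid10 supported so far */

	if (s->type == RAID1)
		return s->area_count;	/* every leg is a full copy */

	return s->parity_devs + 1;
}

static unsigned _stripes(const struct toy_rseg *s)
{
	if (s->type == RAID10)
		return s->area_count / _copies(s);

	return s->area_count - s->parity_devs;
}

int main(void)
{
	struct toy_rseg raid10 = { RAID10, 4, 0 };	/* 4 legs, 2 copies */
	struct toy_rseg raid5  = { RAID5,  4, 1 };	/* 4 legs, 1 parity dev */

	assert(_stripes(&raid10) == 2);	/* 4 / 2 data copies */
	assert(_stripes(&raid5) == 3);	/* 4 - 1 parity */

	return 0;
}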
2012-06-28 00:53:02 +04:00
static int _release_and_discard_lv_segment_area ( struct lv_segment * seg , uint32_t s ,
uint32_t area_reduction , int with_discard )
2005-06-14 21:54:48 +04:00
{
2014-02-05 02:50:16 +04:00
struct lv_segment * cache_seg ;
2015-09-23 16:37:52 +03:00
struct logical_volume * lv = seg_lv ( seg , s ) ;
2014-02-05 02:50:16 +04:00
2005-06-14 21:54:48 +04:00
if ( seg_type ( seg , s ) = = AREA_UNASSIGNED )
2012-06-27 22:37:54 +04:00
return 1 ;
2005-06-14 21:54:48 +04:00
if ( seg_type ( seg , s ) = = AREA_PV ) {
2012-06-28 00:53:02 +04:00
if ( with_discard & & ! discard_pv_segment ( seg_pvseg ( seg , s ) , area_reduction ) )
return_0 ;
2012-06-27 22:37:54 +04:00
if ( ! release_pv_segment ( seg_pvseg ( seg , s ) , area_reduction ) )
return_0 ;
2012-06-28 00:53:02 +04:00
2012-06-27 22:37:54 +04:00
if ( seg - > area_len = = area_reduction )
2007-12-20 18:42:55 +03:00
seg_type ( seg , s ) = AREA_UNASSIGNED ;
2012-06-28 00:53:02 +04:00
2012-06-27 22:37:54 +04:00
return 1 ;
2005-06-14 21:54:48 +04:00
}
2015-09-23 16:37:52 +03:00
if ( lv_is_mirror_image ( lv ) | |
lv_is_thin_pool_data ( lv ) | |
lv_is_cache_pool_data ( lv ) ) {
if ( ! lv_reduce ( lv , area_reduction ) )
2012-06-27 22:37:54 +04:00
return_0 ; /* FIXME: any upper level reporting */
return 1 ;
2011-09-08 20:41:18 +04:00
}
2014-02-05 02:50:16 +04:00
if ( seg_is_cache_pool ( seg ) & &
! dm_list_empty ( & seg - > lv - > segs_using_this_lv ) ) {
if ( ! ( cache_seg = get_only_segment_using_this_lv ( seg - > lv ) ) )
return_0 ;
if ( ! lv_cache_remove ( cache_seg - > lv ) )
return_0 ;
}
2015-09-23 16:37:52 +03:00
if ( lv_is_raid_image ( lv ) ) {
2017-02-10 00:41:28 +03:00
/* Calculate the number of extents to reduce per rmeta/rimage LV */
uint32_t rimage_extents ;
2017-02-24 02:50:00 +03:00
struct lv_segment * seg1 = first_seg ( lv ) ;
2017-02-10 00:41:28 +03:00
2017-02-24 02:50:00 +03:00
/* FIXME: avoid extra seg_is_*() conditionals here */
rimage_extents = raid_rimage_extents ( seg1 - > segtype , area_reduction , seg_is_any_raid0 ( seg ) ? 0 : _raid_stripes_count ( seg ) ,
2017-02-10 00:41:28 +03:00
seg_is_raid10 ( seg ) ? 1 : _raid_data_copies ( seg ) ) ;
if ( ! rimage_extents )
2016-11-25 15:46:06 +03:00
return 0 ;
2011-08-03 02:07:20 +04:00
2017-02-10 00:41:28 +03:00
if ( seg - > meta_areas ) {
uint32_t meta_area_reduction ;
struct logical_volume * mlv ;
struct volume_group * vg = lv - > vg ;
if ( seg_metatype ( seg , s ) ! = AREA_LV | |
! ( mlv = seg_metalv ( seg , s ) ) )
2012-06-27 22:37:54 +04:00
return 0 ;
2017-02-10 00:41:28 +03:00
meta_area_reduction = raid_rmeta_extents_delta ( vg - > cmd , lv - > le_count , lv - > le_count - rimage_extents ,
seg - > region_size , vg - > extent_size ) ;
/* Limit for raid0_meta not having region size set */
if ( meta_area_reduction > mlv - > le_count | |
! ( lv - > le_count - rimage_extents ) )
meta_area_reduction = mlv - > le_count ;
if ( meta_area_reduction & &
! lv_reduce ( mlv , meta_area_reduction ) )
return_0 ; /* FIXME: any upper level reporting */
2011-08-03 02:07:20 +04:00
}
2016-11-25 16:21:34 +03:00
2017-02-10 00:41:28 +03:00
if ( ! lv_reduce ( lv , rimage_extents ) )
return_0 ; /* FIXME: any upper level reporting */
2012-06-27 22:37:54 +04:00
return 1 ;
2011-08-03 02:07:20 +04:00
}
2005-06-14 21:54:48 +04:00
if ( area_reduction = = seg - > area_len ) {
2016-11-25 16:08:39 +03:00
log_very_verbose ( " Remove %s: " FMTu32 " [ " FMTu32 " ] from "
" the top of LV %s: " FMTu32 " . " ,
display_lvname ( seg - > lv ) , seg - > le , s ,
display_lvname ( lv ) , seg_le ( seg , s ) ) ;
2008-01-19 01:00:46 +03:00
2015-09-23 16:37:52 +03:00
if ( ! remove_seg_from_segs_using_this_lv ( lv , seg ) )
2013-04-21 15:18:53 +04:00
return_0 ;
2016-11-25 16:08:39 +03:00
2005-06-14 21:54:48 +04:00
seg_lv ( seg , s ) = NULL ;
seg_le ( seg , s ) = 0 ;
seg_type ( seg , s ) = AREA_UNASSIGNED ;
}
2012-06-27 22:37:54 +04:00
return 1 ;
2005-06-14 21:54:48 +04:00
}
2012-06-28 00:53:02 +04:00
int release_and_discard_lv_segment_area ( struct lv_segment * seg , uint32_t s , uint32_t area_reduction )
{
return _release_and_discard_lv_segment_area ( seg , s , area_reduction , 1 ) ;
}
int release_lv_segment_area ( struct lv_segment * seg , uint32_t s , uint32_t area_reduction )
{
return _release_and_discard_lv_segment_area ( seg , s , area_reduction , 0 ) ;
}
2005-06-14 21:54:48 +04:00
/*
* Move a segment area from one segment to another
*/
int move_lv_segment_area ( struct lv_segment * seg_to , uint32_t area_to ,
struct lv_segment * seg_from , uint32_t area_from )
{
struct physical_volume * pv ;
struct logical_volume * lv ;
uint32_t pe , le ;
switch ( seg_type ( seg_from , area_from ) ) {
case AREA_PV :
pv = seg_pv ( seg_from , area_from ) ;
pe = seg_pe ( seg_from , area_from ) ;
2012-06-27 22:37:54 +04:00
if ( ! release_lv_segment_area ( seg_from , area_from , seg_from - > area_len ) )
return_0 ;
if ( ! release_lv_segment_area ( seg_to , area_to , seg_to - > area_len ) )
return_0 ;
2005-06-14 21:54:48 +04:00
2008-01-30 16:19:47 +03:00
if ( ! set_lv_segment_area_pv ( seg_to , area_to , pv , pe ) )
return_0 ;
2005-06-14 21:54:48 +04:00
break ;
case AREA_LV :
lv = seg_lv ( seg_from , area_from ) ;
le = seg_le ( seg_from , area_from ) ;
2012-06-27 22:37:54 +04:00
if ( ! release_lv_segment_area ( seg_from , area_from , seg_from - > area_len ) )
return_0 ;
if ( ! release_lv_segment_area ( seg_to , area_to , seg_to - > area_len ) )
return_0 ;
2005-06-14 21:54:48 +04:00
2008-01-16 22:00:59 +03:00
if ( ! set_lv_segment_area_lv ( seg_to , area_to , lv , le , 0 ) )
return_0 ;
2005-06-14 21:54:48 +04:00
break ;
case AREA_UNASSIGNED :
2012-06-27 22:37:54 +04:00
if ( ! release_lv_segment_area ( seg_to , area_to , seg_to - > area_len ) )
return_0 ;
2005-06-14 21:54:48 +04:00
}
return 1 ;
}
2005-06-01 20:51:55 +04:00
/*
* Link part of a PV to an LV segment .
*/
2005-05-03 21:28:23 +04:00
int set_lv_segment_area_pv ( struct lv_segment * seg , uint32_t area_num ,
struct physical_volume * pv , uint32_t pe )
2005-04-22 19:43:02 +04:00
{
2005-10-18 17:43:40 +04:00
seg - > areas [ area_num ] . type = AREA_PV ;
2005-05-03 21:28:23 +04:00
2005-06-01 20:51:55 +04:00
if ( ! ( seg_pvseg ( seg , area_num ) =
2008-01-30 16:19:47 +03:00
assign_peg_to_lvseg ( pv , pe , seg - > area_len , seg , area_num ) ) )
return_0 ;
2005-05-03 21:28:23 +04:00
return 1 ;
2005-04-22 19:43:02 +04:00
}
2005-06-01 20:51:55 +04:00
/*
* Link one LV segment to another . Assumes sizes already match .
*/
2008-01-16 22:00:59 +03:00
int set_lv_segment_area_lv ( struct lv_segment * seg , uint32_t area_num ,
struct logical_volume * lv , uint32_t le ,
2009-12-04 20:48:32 +03:00
uint64_t status )
2005-04-22 19:43:02 +04:00
{
2016-11-16 17:11:07 +03:00
log_very_verbose ( " Stack %s: " FMTu32 " [ " FMTu32 " ] on LV %s: " FMTu32 " . " ,
display_lvname ( seg - > lv ) , seg - > le , area_num ,
display_lvname ( lv ) , le ) ;
2008-01-19 01:00:46 +03:00
2016-12-13 02:09:15 +03:00
lv - > status | = status ;
if ( lv_is_raid_metadata ( lv ) ) {
2011-08-03 02:07:20 +04:00
seg - > meta_areas [ area_num ] . type = AREA_LV ;
seg_metalv ( seg , area_num ) = lv ;
if ( le ) {
2016-11-16 17:11:07 +03:00
log_error ( INTERNAL_ERROR " Meta le != 0. " ) ;
2011-08-03 02:07:20 +04:00
return 0 ;
}
seg_metale ( seg , area_num ) = 0 ;
} else {
seg - > areas [ area_num ] . type = AREA_LV ;
seg_lv ( seg , area_num ) = lv ;
seg_le ( seg , area_num ) = le ;
}
2008-01-16 22:00:59 +03:00
if ( ! add_seg_to_segs_using_this_lv ( lv , seg ) )
return_0 ;
return 1 ;
2005-04-22 19:43:02 +04:00
}
2005-10-18 17:43:40 +04:00
/*
* Prepare for adding parallel areas to an existing segment .
*/
static int _lv_segment_add_areas ( struct logical_volume * lv ,
struct lv_segment * seg ,
uint32_t new_area_count )
{
struct lv_segment_area * newareas ;
uint32_t areas_sz = new_area_count * sizeof ( * newareas ) ;
2008-01-30 16:19:47 +03:00
if ( ! ( newareas = dm_pool_zalloc ( lv - > vg - > cmd - > mem , areas_sz ) ) )
return_0 ;
2005-10-18 17:43:40 +04:00
memcpy ( newareas , seg - > areas , seg - > area_count * sizeof ( * seg - > areas ) ) ;
seg - > areas = newareas ;
seg - > area_count = new_area_count ;
return 1 ;
}
2015-09-24 19:56:19 +03:00
static uint32_t _calc_area_multiple ( const struct segment_type * segtype ,
const uint32_t area_count ,
const uint32_t stripes )
{
if ( ! area_count )
return 1 ;
/* Striped */
if ( segtype_is_striped ( segtype ) )
return area_count ;
/* Parity RAID (e.g. RAID 4/5/6) */
if ( segtype_is_raid ( segtype ) & & segtype - > parity_devs ) {
/*
* As articulated in _alloc_init , we can tell by
* the area_count whether a replacement drive is
* being allocated ; and if this is the case , then
* there is no area_multiple that should be used .
*/
if ( area_count < = segtype - > parity_devs )
return 1 ;
return area_count - segtype - > parity_devs ;
}
/*
* RAID10 - only has 2 - way mirror right now .
* If we are to move beyond 2 - way RAID10 , then
* the ' stripes ' argument will always need to
* be given .
*/
2017-02-24 02:50:00 +03:00
if ( segtype_is_raid10 ( segtype ) ) {
2015-09-24 19:56:19 +03:00
if ( ! stripes )
return area_count / 2 ;
return stripes ;
}
/* Mirrored stripes */
if ( stripes )
return stripes ;
/* Mirrored */
return 1 ;
}
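The relation seg->len = area_len * area_multiple works out to these values for the common segment types. A standalone restatement:
#include <assert.h>

enum toy_type { STRIPED, PARITY_RAID, RAID10, MIRROR };

/* Standalone restatement of _calc_area_multiple() for the common cases. */
static unsigned _area_multiple(enum toy_type type, unsigned area_count,
			       unsigned parity_devs, unsigned stripes)
{
	if (!area_count)
		return 1;

	if (type == STRIPED)
		return area_count;

	if (type == PARITY_RAID) {
		if (area_count <= parity_devs)	/* replacement-drive allocation */
			return 1;
		return area_count - parity_devs;
	}

	if (type == RAID10)
		return stripes ? stripes : area_count / 2;

	return stripes ? stripes : 1;	/* (striped) mirrors */
}

int main(void)
{
	assert(_area_multiple(STRIPED, 3, 0, 0) == 3);	/* LV len = 3 * area_len */
	assert(_area_multiple(PARITY_RAID, 4, 1, 0) == 3);	/* raid5: parity excluded */
	assert(_area_multiple(RAID10, 4, 0, 0) == 2);	/* 2-way copies */
	assert(_area_multiple(MIRROR, 2, 0, 0) == 1);	/* each leg is full length */

	return 0;
}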
2005-06-01 20:51:55 +04:00
/*
* Reduce the size of an lv_segment . New size can be zero .
*/
static int _lv_segment_reduce ( struct lv_segment * seg , uint32_t reduction )
2005-05-03 21:28:23 +04:00
{
2005-06-01 20:51:55 +04:00
uint32_t area_reduction , s ;
2017-02-24 02:50:00 +03:00
uint32_t areas = ( seg - > area_count / ( seg_is_raid10 ( seg ) ? seg - > data_copies : 1 ) ) - seg - > segtype - > parity_devs ;
2005-06-01 20:51:55 +04:00
/* Caller must ensure exact divisibility */
2017-02-24 02:50:00 +03:00
if ( seg_is_striped ( seg ) | | seg_is_striped_raid ( seg ) ) {
if ( reduction % areas ) {
2005-06-01 20:51:55 +04:00
log_error ( " Segment extent reduction % " PRIu32
2011-06-06 16:08:42 +04:00
" not divisible by #stripes % " PRIu32 ,
2005-06-01 20:51:55 +04:00
reduction , seg - > area_count ) ;
return 0 ;
}
2017-02-24 02:50:00 +03:00
area_reduction = reduction / areas ;
2005-06-01 20:51:55 +04:00
} else
area_reduction = reduction ;
2005-06-14 21:54:48 +04:00
for ( s = 0 ; s < seg - > area_count ; s + + )
2012-06-28 00:53:02 +04:00
if ( ! release_and_discard_lv_segment_area ( seg , s , area_reduction ) )
2012-06-27 22:37:54 +04:00
return_0 ;
2005-06-14 21:54:48 +04:00
2005-06-01 20:51:55 +04:00
seg - > len - = reduction ;
2017-02-24 02:50:00 +03:00
if ( seg_is_raid ( seg ) )
seg - > area_len = seg - > len ;
else
seg - > area_len - = area_reduction ;
2005-05-03 21:28:23 +04:00
2005-06-01 20:51:55 +04:00
return 1 ;
2005-05-03 21:28:23 +04:00
}
2005-05-17 17:49:45 +04:00
/*
2005-06-01 20:51:55 +04:00
* Entry point for all LV reductions in size .
2005-05-17 17:49:45 +04:00
*/
2005-06-14 21:54:48 +04:00
static int _lv_reduce ( struct logical_volume * lv , uint32_t extents , int delete )
2001-11-29 21:45:35 +03:00
{
2017-10-30 16:35:31 +03:00
struct lv_segment * seg = NULL ;
2005-06-01 20:51:55 +04:00
uint32_t count = extents ;
uint32_t reduction ;
2014-10-04 01:36:11 +04:00
struct logical_volume * pool_lv ;
2016-12-17 23:55:02 +03:00
struct logical_volume * external_lv = NULL ;
2017-10-30 16:35:31 +03:00
int is_raid10 = 0 ;
uint32_t data_copies = 0 ;
if ( ! dm_list_empty ( & lv - > segments ) ) {
seg = first_seg ( lv ) ;
is_raid10 = seg_is_any_raid10 ( seg ) & & seg - > reshape_len ;
data_copies = seg - > data_copies ;
}
2005-06-01 20:51:55 +04:00
2013-11-30 00:21:00 +04:00
if ( lv_is_merging_origin ( lv ) ) {
log_debug_metadata ( " Dropping snapshot merge of %s to removed origin %s. " ,
find_snapshot ( lv ) - > lv - > name , lv - > name ) ;
clear_snapshot_merge ( lv ) ;
}
2008-11-04 01:14:30 +03:00
dm_list_iterate_back_items ( seg , & lv - > segments ) {
2005-06-01 20:51:55 +04:00
if ( ! count )
break ;
2016-12-17 23:55:02 +03:00
if ( seg - > external_lv )
external_lv = seg - > external_lv ;
2005-06-01 20:51:55 +04:00
if ( seg - > len < = count ) {
2013-11-30 00:21:00 +04:00
if ( seg - > merge_lv ) {
log_debug_metadata ( " Dropping snapshot merge of removed %s to origin %s. " ,
seg - > lv - > name , seg - > merge_lv - > name ) ;
clear_snapshot_merge ( seg - > merge_lv ) ;
}
2005-06-01 20:51:55 +04:00
/* remove this segment completely */
/* FIXME Check this is safe */
2008-01-30 16:19:47 +03:00
if ( seg - > log_lv & & ! lv_remove ( seg - > log_lv ) )
return_0 ;
2011-10-19 20:36:39 +04:00
2012-01-19 19:23:50 +04:00
if ( seg - > metadata_lv & & ! lv_remove ( seg - > metadata_lv ) )
2011-09-08 20:41:18 +04:00
return_0 ;
2014-04-01 19:57:14 +04:00
/* Remove cache origin only when removing (not on lv_empty()) */
2014-11-11 15:31:25 +03:00
if ( delete & & seg_is_cache ( seg ) ) {
if ( lv_is_pending_delete ( seg - > lv ) ) {
/* Just dropping reference on origin when pending delete */
if ( ! remove_seg_from_segs_using_this_lv ( seg_lv ( seg , 0 ) , seg ) )
return_0 ;
seg_lv ( seg , 0 ) = NULL ;
seg_le ( seg , 0 ) = 0 ;
seg_type ( seg , 0 ) = AREA_UNASSIGNED ;
if ( seg - > pool_lv & & ! detach_pool_lv ( seg ) )
return_0 ;
} else if ( ! lv_remove ( seg_lv ( seg , 0 ) ) )
return_0 ;
}
2014-04-01 19:57:14 +04:00
2014-10-04 01:36:11 +04:00
if ( ( pool_lv = seg - > pool_lv ) ) {
if ( ! detach_pool_lv ( seg ) )
return_0 ;
/* When removing cached LV, remove pool as well */
if ( seg_is_cache ( seg ) & & ! lv_remove ( pool_lv ) )
return_0 ;
}
2011-09-08 20:41:18 +04:00
2008-11-04 01:14:30 +03:00
dm_list_del ( & seg - > list ) ;
2005-06-01 20:51:55 +04:00
reduction = seg - > len ;
} else
reduction = count ;
2008-01-30 16:19:47 +03:00
if ( ! _lv_segment_reduce ( seg , reduction ) )
return_0 ;
2005-06-01 20:51:55 +04:00
count - = reduction ;
}
2017-02-24 02:50:00 +03:00
seg = first_seg ( lv ) ;
if ( is_raid10 ) {
lv - > le_count - = extents * data_copies ;
if ( seg )
seg - > len = seg - > area_len = lv - > le_count ;
} else
lv - > le_count - = extents ;
2005-06-01 20:51:55 +04:00
lv - > size = ( uint64_t ) lv - > le_count * lv - > vg - > extent_size ;
2017-03-08 01:28:09 +03:00
if ( seg )
seg - > extents_copied = seg - > len ;
2005-06-01 20:51:55 +04:00
2005-06-14 21:54:48 +04:00
if ( ! delete )
return 1 ;
2014-11-13 15:09:07 +03:00
if ( lv = = lv - > vg - > pool_metadata_spare_lv ) {
lv - > status & = ~ POOL_METADATA_SPARE ;
lv - > vg - > pool_metadata_spare_lv = NULL ;
}
2005-06-03 19:44:12 +04:00
/* Remove the LV if it is now empty */
2009-05-14 01:25:01 +04:00
if ( ! lv - > le_count & & ! unlink_lv_from_vg ( lv ) )
return_0 ;
else if ( lv - > vg - > fid - > fmt - > ops - > lv_setup & &
2008-01-30 16:19:47 +03:00
! lv - > vg - > fid - > fmt - > ops - > lv_setup ( lv - > vg - > fid , lv ) )
return_0 ;
2005-06-01 20:51:55 +04:00
2016-12-17 23:55:02 +03:00
/* Removal of last user enforces refresh */
if ( external_lv & & ! lv_is_external_origin ( external_lv ) & &
lv_is_active ( external_lv ) & &
! lv_update_and_reload ( external_lv ) )
return_0 ;
2005-06-01 20:51:55 +04:00
return 1 ;
}
2005-06-14 21:54:48 +04:00
/*
2005-11-24 23:58:44 +03:00
* Empty an LV .
2005-06-14 21:54:48 +04:00
*/
int lv_empty ( struct logical_volume * lv )
{
2005-11-24 21:46:51 +03:00
return _lv_reduce ( lv , lv - > le_count , 0 ) ;
2005-06-14 21:54:48 +04:00
}
2008-01-17 16:13:54 +03:00
/*
* Empty an LV and add error segment .
*/
2008-01-17 16:54:05 +03:00
int replace_lv_with_error_segment ( struct logical_volume * lv )
2008-01-17 16:13:54 +03:00
{
uint32_t len = lv - > le_count ;
2011-06-11 04:03:06 +04:00
if ( len & & ! lv_empty ( lv ) )
2008-01-17 16:13:54 +03:00
return_0 ;
2011-06-11 04:03:06 +04:00
/* Minimum size required for a table. */
if ( ! len )
len = 1 ;
2010-10-15 00:03:12 +04:00
/*
* Since we are replacing the whatever - was - there with
* an error segment , we should also clear any flags
* that suggest it is anything other than " error " .
*/
2014-09-16 03:13:46 +04:00
/* FIXME Check for other flags that need removing */
lv - > status & = ~ ( MIRROR | MIRRORED | PVMOVE | LOCKED ) ;
2010-10-15 00:03:12 +04:00
2014-09-16 03:13:46 +04:00
/* FIXME Check for any attached LVs that will become orphans e.g. mirror logs */
2010-10-15 00:03:12 +04:00
2015-09-22 21:04:12 +03:00
if ( ! lv_add_virtual_segment ( lv , 0 , len , get_segtype_from_string ( lv - > vg - > cmd , SEG_TYPE_NAME_ERROR ) ) )
2008-01-17 16:13:54 +03:00
return_0 ;
return 1 ;
}
2016-12-01 00:56:37 +03:00
static int _lv_refresh_suspend_resume ( const struct logical_volume * lv )
2015-06-16 21:38:40 +03:00
{
2016-05-20 11:55:05 +03:00
struct cmd_context * cmd = lv - > vg - > cmd ;
2016-05-20 14:20:54 +03:00
int r = 1 ;
2016-05-20 11:55:05 +03:00
2016-03-02 22:59:03 +03:00
if ( ! cmd - > partial_activation & & lv_is_partial ( lv ) ) {
2015-06-16 21:38:40 +03:00
log_error ( " Refusing refresh of partial LV %s. "
" Use '--activationmode partial' to override. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
if ( ! suspend_lv ( cmd , lv ) ) {
log_error ( " Failed to suspend %s. " , display_lvname ( lv ) ) ;
2016-05-20 14:20:54 +03:00
r = 0 ;
2015-06-16 21:38:40 +03:00
}
if ( ! resume_lv ( cmd , lv ) ) {
log_error ( " Failed to reactivate %s. " , display_lvname ( lv ) ) ;
2016-05-20 14:20:54 +03:00
r = 0 ;
2015-06-16 21:38:40 +03:00
}
2016-05-20 14:20:54 +03:00
return r ;
2015-06-16 21:38:40 +03:00
}
2016-12-01 00:56:37 +03:00
int lv_refresh_suspend_resume ( const struct logical_volume * lv )
{
2016-12-23 05:35:13 +03:00
if ( ! _lv_refresh_suspend_resume ( lv ) )
return 0 ;
2016-12-01 00:56:37 +03:00
/*
2016-12-23 05:35:13 +03:00
* Remove any transiently activated error
* devices which aren ' t used any more .
2016-12-01 00:56:37 +03:00
*/
2016-12-23 05:35:13 +03:00
if ( lv_is_raid ( lv ) & & ! lv_deactivate_any_missing_subdevs ( lv ) ) {
log_error ( " Failed to remove temporary SubLVs from %s " , display_lvname ( lv ) ) ;
return 0 ;
2016-12-01 00:56:37 +03:00
}
2016-12-23 05:35:13 +03:00
return 1 ;
2016-12-01 00:56:37 +03:00
}
2005-11-24 21:46:51 +03:00
/*
2005-11-24 23:58:44 +03:00
* Remove given number of extents from LV .
2005-11-24 21:46:51 +03:00
*/
2005-06-14 21:54:48 +04:00
int lv_reduce ( struct logical_volume * lv , uint32_t extents )
{
2017-02-10 00:41:28 +03:00
struct lv_segment * seg = first_seg ( lv ) ;
2017-03-08 00:05:23 +03:00
/* Ensure stripe boundary extents on RAID LVs */
2017-02-10 00:41:28 +03:00
if ( lv_is_raid ( lv ) & & extents ! = lv - > le_count )
extents = _round_to_stripe_boundary ( lv - > vg , extents ,
seg_is_raid1 ( seg ) ? 0 : _raid_stripes_count ( seg ) , 0 ) ;
2005-06-14 21:54:48 +04:00
return _lv_reduce ( lv , extents , 1 ) ;
}
2016-03-01 17:26:57 +03:00
int historical_glv_remove ( struct generic_logical_volume * glv )
{
struct generic_logical_volume * origin_glv ;
struct glv_list * glvl , * user_glvl ;
struct historical_logical_volume * hlv ;
int reconnected ;
if ( ! glv | | ! glv - > is_historical )
return_0 ;
hlv = glv - > historical ;
if ( ! ( glv = find_historical_glv ( hlv - > vg , hlv - > name , 0 , & glvl ) ) ) {
if ( ! ( find_historical_glv ( hlv - > vg , hlv - > name , 1 , NULL ) ) ) {
log_error ( INTERNAL_ERROR " historical_glv_remove: historical LV %s/-%s not found " ,
hlv - > vg - > name , hlv - > name ) ;
return 0 ;
}
2017-07-19 17:16:12 +03:00
log_verbose ( " Historical LV %s/-%s already on removed list " ,
hlv - > vg - > name , hlv - > name ) ;
return 1 ;
2016-03-01 17:26:57 +03:00
}
if ( ( origin_glv = hlv - > indirect_origin ) & &
! remove_glv_from_indirect_glvs ( origin_glv , glv ) )
return_0 ;
dm_list_iterate_items ( user_glvl , & hlv - > indirect_glvs ) {
reconnected = 0 ;
if ( ( origin_glv & & ! origin_glv - > is_historical ) & & ! user_glvl - > glv - > is_historical )
log_verbose ( " Removing historical connection between %s and %s. " ,
origin_glv - > live - > name , user_glvl - > glv - > live - > name ) ;
else if ( hlv - > vg - > cmd - > record_historical_lvs ) {
if ( ! add_glv_to_indirect_glvs ( hlv - > vg - > vgmem , origin_glv , user_glvl - > glv ) )
return_0 ;
reconnected = 1 ;
}
if ( ! reconnected ) {
/*
* Break the ancestry chain if we ' re removing a historical LV and tracking
* of historical LVs is switched off either via :
* - " metadata/record_lvs_history=0 " config
* - " --nohistory " cmd line option
*
* Also , break the chain if we ' re unable to store such a connection at all
* because we ' re removing the very last historical LV that was in between
* live LVs - pure live LVs can ' t store any indirect origin relation in
* metadata - we need at least one historical LV to do that !
*/
if ( user_glvl - > glv - > is_historical )
user_glvl - > glv - > historical - > indirect_origin = NULL ;
else
first_seg ( user_glvl - > glv - > live ) - > indirect_origin = NULL ;
}
}
dm_list_move ( & hlv - > vg - > removed_historical_lvs , & glvl - > list ) ;
return 1 ;
}
2005-06-01 20:51:55 +04:00
/*
* Completely remove an LV .
*/
int lv_remove ( struct logical_volume * lv )
{
2016-03-01 17:26:57 +03:00
if ( lv_is_historical ( lv ) )
return historical_glv_remove ( lv - > this_glv ) ;
2005-06-01 20:51:55 +04:00
2008-01-30 16:19:47 +03:00
if ( ! lv_reduce ( lv , lv - > le_count ) )
return_0 ;
2005-06-01 20:51:55 +04:00
return 1 ;
}
/*
* A set of contiguous physical extents allocated
*/
struct alloced_area {
2008-11-04 01:14:30 +03:00
struct dm_list list ;
2005-06-01 20:51:55 +04:00
struct physical_volume * pv ;
uint32_t pe ;
uint32_t len ;
} ;
/*
* Details of an allocation attempt
*/
struct alloc_handle {
2006-10-08 03:40:36 +04:00
struct cmd_context * cmd ;
2005-10-17 03:03:59 +04:00
struct dm_pool * mem ;
2005-06-01 20:51:55 +04:00
alloc_policy_t alloc ; /* Overall policy */
2015-09-23 16:37:52 +03:00
int approx_alloc ; /* get as much as possible up to new_extents */
2010-03-01 23:00:20 +03:00
uint32_t new_extents ; /* Number of new extents required */
2005-06-01 20:51:55 +04:00
uint32_t area_count ; /* Number of parallel areas */
2015-09-23 16:37:52 +03:00
uint32_t parity_count ; /* Adds to area_count, but not area_multiple */
2005-06-01 20:51:55 +04:00
uint32_t area_multiple ; /* seg->len = area_len * area_multiple */
2010-03-01 23:00:20 +03:00
uint32_t log_area_count ; /* Number of parallel logs */
2011-08-03 02:07:20 +04:00
uint32_t metadata_area_count ; /* Number of parallel metadata areas */
uint32_t log_len ; /* Length of log/metadata_area */
2010-03-01 23:00:20 +03:00
uint32_t region_size ; /* Mirror region size */
2005-06-03 18:49:51 +04:00
uint32_t total_area_len ; /* Total number of parallel extents */
2005-06-01 20:51:55 +04:00
2011-02-27 03:38:31 +03:00
unsigned maximise_cling ;
2011-08-03 02:07:20 +04:00
unsigned mirror_logs_separate ; /* Force mirror logs on separate PVs? */
/*
* RAID devices require a metadata area that accompanies each
* device . During initial creation , it is best to look for space
* that is new_extents + log_len and then split that between two
* allocated areas when found . ' alloc_and_split_meta ' indicates
* that this is the desired dynamic .
2014-01-28 22:25:02 +04:00
*
* This same idea is used by cache LVs to get the metadata device
* and data device allocated together .
2011-08-03 02:07:20 +04:00
*/
unsigned alloc_and_split_meta ;
2014-02-22 04:26:01 +04:00
unsigned split_metadata_is_allocated ; /* Metadata has been allocated */
2011-02-27 03:38:31 +03:00
2011-08-30 18:55:15 +04:00
const struct dm_config_node * cling_tag_list_cn ;
2010-11-09 15:34:40 +03:00
2008-11-04 01:14:30 +03:00
struct dm_list * parallel_areas ; /* PVs to avoid */
2005-11-24 23:58:44 +03:00
2010-03-01 23:00:20 +03:00
/*
* Contains area_count lists of areas allocated to data stripes
* followed by log_area_count lists of areas allocated to log stripes .
*/
struct dm_list alloced_areas [ 0 ] ;
2005-06-01 20:51:55 +04:00
} ;
2010-03-01 23:00:20 +03:00
/*
* Returns log device size in extents , algorithm from kernel code
*/
# define BYTE_SHIFT 3
2015-11-06 02:40:47 +03:00
static uint32_t _mirror_log_extents ( uint32_t region_size , uint32_t pe_size , uint32_t area_len )
2010-03-01 23:00:20 +03:00
{
size_t area_size , bitset_size , log_size , region_count ;
2012-08-16 22:07:30 +04:00
area_size = ( size_t ) area_len * pe_size ;
2010-03-01 23:00:20 +03:00
region_count = dm_div_up ( area_size , region_size ) ;
/* Work out how many "unsigned long"s we need to hold the bitset. */
bitset_size = dm_round_up ( region_count , sizeof ( uint32_t ) < < BYTE_SHIFT ) ;
bitset_size > > = BYTE_SHIFT ;
/* Log device holds both header and bitset. */
log_size = dm_round_up ( ( MIRROR_LOG_OFFSET < < SECTOR_SHIFT ) + bitset_size , 1 < < SECTOR_SHIFT ) ;
log_size > > = SECTOR_SHIFT ;
2011-09-13 22:42:57 +04:00
log_size = dm_div_up ( log_size , pe_size ) ;
2010-03-01 23:00:20 +03:00
2011-09-13 22:42:57 +04:00
/*
* Kernel requires a mirror to be at least 1 region large . So ,
* if our mirror log is itself a mirror , it must be at least
* 1 region large . This restriction may not be necessary for
* non - mirrored logs , but we apply the rule anyway .
*
* ( The other option is to make the region size of the log
* mirror smaller than the mirror it is acting as a log for ,
* but that really complicates things . It ' s much easier to
* keep the region_size the same for both . )
*/
return ( log_size > ( region_size / pe_size ) ) ? log_size :
( region_size / pe_size ) ;
2010-03-01 23:00:20 +03:00
}
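Plugging representative numbers through the calculation above: 2 MiB extents (4096 sectors), 512 KiB regions (1024 sectors) and a 1000-extent mirror area; the MIRROR_LOG_OFFSET of 2 sectors is assumed to match the internal header value.
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9
#define BYTE_SHIFT 3
#define MIRROR_LOG_OFFSET 2	/* sectors; assumed to match the internal value */

static size_t _div_up(size_t n, size_t d) { return (n + d - 1) / d; }
static size_t _round_up(size_t n, size_t d) { return _div_up(n, d) * d; }

int main(void)
{
	size_t pe_size = 4096, region_size = 1024, area_len = 1000; /* sectors/extents */
	size_t area_size = area_len * pe_size;
	size_t region_count = _div_up(area_size, region_size);	/* 4000 regions */
	size_t bitset_size, log_size;

	bitset_size = _round_up(region_count, sizeof(uint32_t) << BYTE_SHIFT);
	bitset_size >>= BYTE_SHIFT;				/* 500 bytes */

	log_size = _round_up((MIRROR_LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
			     1 << SECTOR_SHIFT);
	log_size >>= SECTOR_SHIFT;				/* 3 sectors */
	log_size = _div_up(log_size, pe_size);			/* 1 extent */

	if (log_size < region_size / pe_size)	/* at least 1 region large */
		log_size = region_size / pe_size;

	printf("mirror log: %zu extent(s)\n", log_size);	/* prints 1 */

	return 0;
}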
2011-02-27 03:38:31 +03:00
/* Is there enough total space or should we give up immediately? */
2011-08-03 02:07:20 +04:00
static int _sufficient_pes_free ( struct alloc_handle * ah , struct dm_list * pvms ,
uint32_t allocated , uint32_t extents_still_needed )
2011-02-27 03:38:31 +03:00
{
2011-08-03 02:07:20 +04:00
uint32_t area_extents_needed = ( extents_still_needed - allocated ) * ah - > area_count / ah - > area_multiple ;
uint32_t parity_extents_needed = ( extents_still_needed - allocated ) * ah - > parity_count / ah - > area_multiple ;
2015-09-24 15:43:58 +03:00
uint32_t metadata_extents_needed = ah - > alloc_and_split_meta ? 0 : ah - > metadata_area_count * RAID_METADATA_AREA_LEN ; /* One each */
2011-08-03 02:07:20 +04:00
uint32_t total_extents_needed = area_extents_needed + parity_extents_needed + metadata_extents_needed ;
2011-02-27 03:38:31 +03:00
uint32_t free_pes = pv_maps_size ( pvms ) ;
if ( total_extents_needed > free_pes ) {
2014-02-22 04:26:01 +04:00
log_error ( " Insufficient free space: % " PRIu32 " extents needed, "
" but only % " PRIu32 " available " ,
total_extents_needed , free_pes ) ;
return 0 ;
2011-02-27 03:38:31 +03:00
}
return 1 ;
}
/* For striped mirrors, all the areas are counted, through the mirror layer */
static uint32_t _stripes_per_mimage ( struct lv_segment * seg )
{
struct lv_segment * last_lvseg ;
if ( seg_is_mirrored ( seg ) & & seg - > area_count & & seg_type ( seg , 0 ) = = AREA_LV ) {
last_lvseg = dm_list_item ( dm_list_last ( & seg_lv ( seg , 0 ) - > segments ) , struct lv_segment ) ;
if ( seg_is_striped ( last_lvseg ) )
return last_lvseg - > area_count ;
}
return 1 ;
}
2013-09-25 06:32:53 +04:00
static void _init_alloc_parms ( struct alloc_handle * ah ,
struct alloc_parms * alloc_parms ,
alloc_policy_t alloc ,
2011-02-27 03:38:31 +03:00
struct lv_segment * prev_lvseg , unsigned can_split ,
uint32_t allocated , uint32_t extents_still_needed )
{
alloc_parms - > alloc = alloc ;
alloc_parms - > prev_lvseg = prev_lvseg ;
alloc_parms - > flags = 0 ;
alloc_parms - > extents_still_needed = extents_still_needed ;
2013-09-25 06:32:53 +04:00
/*
* Only attempt contiguous / cling allocation to previous segment
* areas if the number of areas matches .
*/
2013-09-25 06:32:10 +04:00
if ( alloc_parms - > prev_lvseg & &
2014-04-15 05:34:35 +04:00
( ( ah - > area_count + ah - > parity_count ) = = prev_lvseg - > area_count ) ) {
2013-07-29 22:35:45 +04:00
alloc_parms - > flags | = A_AREA_COUNT_MATCHES ;
2014-04-15 05:34:35 +04:00
/* Are there any preceding segments we must follow on from? */
if ( alloc_parms - > alloc = = ALLOC_CONTIGUOUS ) {
2012-05-11 22:59:01 +04:00
alloc_parms - > flags | = A_CONTIGUOUS_TO_LVSEG ;
2014-04-15 05:34:35 +04:00
alloc_parms - > flags | = A_POSITIONAL_FILL ;
} else if ( ( alloc_parms - > alloc = = ALLOC_CLING ) | |
( alloc_parms - > alloc = = ALLOC_CLING_BY_TAGS ) ) {
2012-05-11 22:59:01 +04:00
alloc_parms - > flags | = A_CLING_TO_LVSEG ;
2014-04-15 05:34:35 +04:00
alloc_parms - > flags | = A_POSITIONAL_FILL ;
}
2012-05-12 02:53:13 +04:00
} else
/*
2013-09-25 06:32:53 +04:00
* A cling allocation that follows a successful contiguous
* allocation must use the same PVs ( or else fail ) .
2012-05-12 02:53:13 +04:00
*/
2013-09-25 06:32:53 +04:00
if ( ( alloc_parms - > alloc = = ALLOC_CLING ) | |
2014-04-15 04:13:47 +04:00
( alloc_parms - > alloc = = ALLOC_CLING_BY_TAGS ) ) {
2012-05-12 02:53:13 +04:00
alloc_parms - > flags | = A_CLING_TO_ALLOCED ;
2014-04-15 04:13:47 +04:00
alloc_parms - > flags | = A_POSITIONAL_FILL ;
}
2012-05-12 02:53:13 +04:00
if ( alloc_parms - > alloc = = ALLOC_CLING_BY_TAGS )
alloc_parms - > flags | = A_CLING_BY_TAGS ;
2011-02-27 03:38:31 +03:00
2015-04-10 23:57:52 +03:00
if ( ! ( alloc_parms - > flags & A_POSITIONAL_FILL ) & &
( alloc_parms - > alloc = = ALLOC_CONTIGUOUS ) & &
ah - > cling_tag_list_cn )
alloc_parms - > flags | = A_PARTITION_BY_TAGS ;
2011-02-27 03:38:31 +03:00
/*
2016-01-11 16:11:37 +03:00
* For normal allocations , if any extents have already been found
2011-02-27 03:38:31 +03:00
* for allocation , prefer to place further extents on the same disks as
* have already been used .
*/
2013-09-25 06:32:53 +04:00
if ( ah - > maximise_cling & &
( alloc_parms - > alloc = = ALLOC_NORMAL ) & &
( allocated ! = alloc_parms - > extents_still_needed ) )
2011-02-27 03:38:31 +03:00
alloc_parms - > flags | = A_CLING_TO_ALLOCED ;
if ( can_split )
alloc_parms - > flags | = A_CAN_SPLIT ;
}
2016-03-16 01:13:28 +03:00
/* Handles also stacking */
static int _setup_lv_size ( struct logical_volume * lv , uint32_t extents )
{
struct lv_segment * thin_pool_seg ;
lv - > le_count = extents ;
lv - > size = ( uint64_t ) extents * lv - > vg - > extent_size ;
if ( lv_is_thin_pool_data ( lv ) ) {
if ( ! ( thin_pool_seg = get_only_segment_using_this_lv ( lv ) ) )
return_0 ;
/* Update thin pool segment from the layered LV */
thin_pool_seg - > lv - > le_count =
thin_pool_seg - > len =
thin_pool_seg - > area_len = lv - > le_count ;
thin_pool_seg - > lv - > size = lv - > size ;
}
return 1 ;
}
2009-11-25 01:55:55 +03:00
static int _setup_alloced_segment ( struct logical_volume * lv , uint64_t status ,
2005-06-01 20:51:55 +04:00
uint32_t area_count ,
uint32_t stripe_size ,
2006-05-10 01:23:51 +04:00
const struct segment_type * segtype ,
2005-06-01 20:51:55 +04:00
struct alloced_area * aa ,
2010-03-01 23:00:20 +03:00
uint32_t region_size )
2005-06-01 20:51:55 +04:00
{
2007-12-20 18:42:55 +03:00
uint32_t s , extents , area_multiple ;
2016-03-16 01:13:28 +03:00
struct lv_segment * seg ;
2004-03-26 23:35:14 +03:00
2010-04-09 05:00:10 +04:00
area_multiple = _calc_area_multiple ( segtype , area_count , 0 ) ;
2015-09-24 15:43:58 +03:00
extents = aa [ 0 ] . len * area_multiple ;
2001-11-29 21:45:35 +03:00
2017-02-24 02:50:00 +03:00
if ( ! ( seg = alloc_lv_segment ( segtype , lv , lv - > le_count , extents , 0 ,
2014-10-26 10:13:59 +03:00
status , stripe_size , NULL ,
2007-12-20 18:42:55 +03:00
area_count ,
2017-02-24 02:50:00 +03:00
aa [ 0 ] . len , 0 , 0u , region_size , 0u , NULL ) ) ) {
2005-05-17 17:49:45 +04:00
log_error ( " Couldn't allocate new LV segment. " ) ;
2001-11-29 21:45:35 +03:00
return 0 ;
}
2008-01-30 16:19:47 +03:00
for ( s = 0 ; s < area_count ; s + + )
if ( ! set_lv_segment_area_pv ( seg , s , aa [ s ] . pv , aa [ s ] . pe ) )
return_0 ;
2001-11-29 21:45:35 +03:00
2008-11-04 01:14:30 +03:00
dm_list_add ( & lv - > segments , & seg - > list ) ;
2004-05-05 01:25:57 +04:00
2005-06-01 20:51:55 +04:00
extents = aa [ 0 ] . len * area_multiple ;
2016-02-23 23:36:51 +03:00
2016-03-16 01:13:28 +03:00
if ( ! _setup_lv_size ( lv , lv - > le_count + extents ) )
return_0 ;
2016-01-19 01:22:48 +03:00
2001-11-29 21:45:35 +03:00
return 1 ;
}
2005-06-01 20:51:55 +04:00
static int _setup_alloced_segments ( struct logical_volume * lv ,
2008-11-04 01:14:30 +03:00
struct dm_list * alloced_areas ,
2005-06-01 20:51:55 +04:00
uint32_t area_count ,
2009-11-25 01:55:55 +03:00
uint64_t status ,
2005-06-01 20:51:55 +04:00
uint32_t stripe_size ,
2006-05-10 01:23:51 +04:00
const struct segment_type * segtype ,
2010-03-01 23:00:20 +03:00
uint32_t region_size )
2005-06-01 20:51:55 +04:00
{
struct alloced_area * aa ;
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( aa , & alloced_areas [ 0 ] ) {
2005-06-01 20:51:55 +04:00
if ( ! _setup_alloced_segment ( lv , status , area_count ,
stripe_size , segtype , aa ,
2010-03-01 23:00:20 +03:00
region_size ) )
2008-01-30 16:19:47 +03:00
return_0 ;
2005-06-01 20:51:55 +04:00
}
return 1 ;
}
/*
* This function takes a list of pv_areas and adds them to allocated_areas .
* If the complete area is not needed then it gets split .
* The part used is removed from the pv_map so it can ' t be allocated twice .
*/
2011-02-27 03:38:31 +03:00
static int _alloc_parallel_area ( struct alloc_handle * ah , uint32_t max_to_allocate ,
struct alloc_state * alloc_state , uint32_t ix_log_offset )
2005-06-01 20:51:55 +04:00
{
2011-02-27 03:38:31 +03:00
uint32_t area_len , len ;
2014-04-10 23:48:59 +04:00
uint32_t s , smeta ;
2010-03-01 23:00:20 +03:00
uint32_t ix_log_skip = 0 ; /* How many areas to skip in middle of array to reach log areas */
2011-08-03 02:07:20 +04:00
uint32_t total_area_count ;
2005-06-01 20:51:55 +04:00
struct alloced_area * aa ;
2011-08-03 02:07:20 +04:00
struct pv_area * pva ;
2005-06-01 20:51:55 +04:00
2014-05-14 19:25:43 +04:00
total_area_count = ah - > area_count + ah - > parity_count + alloc_state - > log_area_count_still_needed ;
2010-03-25 21:16:54 +03:00
if ( ! total_area_count ) {
2012-10-16 12:14:41 +04:00
log_warn ( INTERNAL_ERROR " _alloc_parallel_area called without any allocation to do. " ) ;
2010-03-25 21:16:54 +03:00
return 1 ;
}
2011-02-27 03:38:31 +03:00
area_len = max_to_allocate / ah - > area_multiple ;
2005-06-01 20:51:55 +04:00
2006-12-12 22:30:10 +03:00
/* Reduce area_len to the smallest of the areas */
2011-08-03 02:07:20 +04:00
for ( s = 0 ; s < ah - > area_count + ah - > parity_count ; s + + )
2011-02-27 03:38:31 +03:00
if ( area_len > alloc_state - > areas [ s ] . used )
area_len = alloc_state - > areas [ s ] . used ;
2005-06-01 20:51:55 +04:00
2014-02-22 04:26:01 +04:00
len = ( ah - > alloc_and_split_meta & & ! ah - > split_metadata_is_allocated ) ? total_area_count * 2 : total_area_count ;
2011-08-03 02:07:20 +04:00
len * = sizeof ( * aa ) ;
if ( ! ( aa = dm_pool_alloc ( ah - > mem , len ) ) ) {
2005-06-01 20:51:55 +04:00
log_error ( " alloced_area allocation failed " ) ;
return 0 ;
}
2010-03-01 23:00:20 +03:00
/*
* Areas consist of area_count areas for data stripes , then
* ix_log_skip areas to skip , then log_area_count areas to use for the
* log , then some areas too small for the log .
*/
len = area_len ;
for ( s = 0 ; s < total_area_count ; s + + ) {
2011-08-03 02:07:20 +04:00
if ( s = = ( ah - > area_count + ah - > parity_count ) ) {
2010-03-01 23:00:20 +03:00
ix_log_skip = ix_log_offset - ah - > area_count ;
len = ah - > log_len ;
}
2005-06-01 20:51:55 +04:00
2011-08-03 02:07:20 +04:00
pva = alloc_state - > areas [ s + ix_log_skip ] . pva ;
2014-02-22 04:26:01 +04:00
if ( ah - > alloc_and_split_meta & & ! ah - > split_metadata_is_allocated ) {
2011-08-03 02:07:20 +04:00
/*
* The metadata area goes at the front of the allocated
* space for now , but could easily go at the end ( or
* middle ! ) .
*
* Even though we split these two from the same
* allocation , we store the images at the beginning
* of the areas array and the metadata at the end .
*/
2014-04-10 23:48:59 +04:00
smeta = s + ah - > area_count + ah - > parity_count ;
aa [ smeta ] . pv = pva - > map - > pv ;
aa [ smeta ] . pe = pva - > start ;
aa [ smeta ] . len = ah - > log_len ;
2011-08-03 02:07:20 +04:00
2013-01-08 02:30:29 +04:00
log_debug_alloc ( " Allocating parallel metadata area % " PRIu32
" on %s start PE % " PRIu32
" length % " PRIu32 " . " ,
2014-04-10 23:48:59 +04:00
( smeta - ( ah - > area_count + ah - > parity_count ) ) ,
pv_dev_name ( aa [ smeta ] . pv ) , aa [ smeta ] . pe ,
2013-01-08 02:30:29 +04:00
ah - > log_len ) ;
2011-08-03 02:07:20 +04:00
consume_pv_area ( pva , ah - > log_len ) ;
2014-04-10 23:48:59 +04:00
dm_list_add ( & ah - > alloced_areas [ smeta ] , & aa [ smeta ] . list ) ;
2011-08-03 02:07:20 +04:00
}
2014-02-22 04:26:01 +04:00
aa [ s ] . len = ( ah - > alloc_and_split_meta & & ! ah - > split_metadata_is_allocated ) ? len - ah - > log_len : len ;
2012-06-29 02:26:42 +04:00
/* Skip empty allocations */
if ( ! aa [ s ] . len )
continue ;
2011-08-03 02:07:20 +04:00
aa [ s ] . pv = pva - > map - > pv ;
aa [ s ] . pe = pva - > start ;
2005-06-03 18:49:51 +04:00
2013-01-08 02:30:29 +04:00
log_debug_alloc ( " Allocating parallel area % " PRIu32
" on %s start PE % " PRIu32 " length % " PRIu32 " . " ,
s , pv_dev_name ( aa [ s ] . pv ) , aa [ s ] . pe , aa [ s ] . len ) ;
2010-03-25 05:31:48 +03:00
2011-08-03 02:07:20 +04:00
consume_pv_area ( pva , aa [ s ] . len ) ;
2005-06-01 20:51:55 +04:00
2010-03-01 23:00:20 +03:00
dm_list_add ( & ah - > alloced_areas [ s ] , & aa [ s ] . list ) ;
2005-06-01 20:51:55 +04:00
}
2011-08-03 02:07:20 +04:00
/* Only need to alloc metadata from the first batch */
2014-02-22 04:26:01 +04:00
if ( ah - > alloc_and_split_meta )
ah - > split_metadata_is_allocated = 1 ;
2011-08-03 02:07:20 +04:00
2010-03-01 23:00:20 +03:00
ah - > total_area_len + = area_len ;
2011-02-27 03:38:31 +03:00
alloc_state - > allocated + = area_len * ah - > area_multiple ;
2010-04-09 05:00:10 +04:00
return 1 ;
}
/*
 * Call fn for each AREA_PV used by the LV segment at lv:le of length *max_seg_len.
 * If any constituent area contains more than one segment, max_seg_len is
 * reduced to cover only the first.
 * fn should return 0 on error, 1 to continue scanning or >1 to terminate without error.
 * In the last case, this function passes on the return code.
 *
 * FIXME I think some callers are expecting this to check all PV segments used by an LV.
 */
static int _for_each_pv(struct cmd_context *cmd, struct logical_volume *lv,
			uint32_t le, uint32_t len, struct lv_segment *seg,
			uint32_t *max_seg_len,
			uint32_t first_area, uint32_t max_areas,
			int top_level_area_index,
			int only_single_area_segments,
			int (*fn)(struct cmd_context *cmd,
				  struct pv_segment *peg, uint32_t s,
				  void *data),
			void *data)
{
	uint32_t s;
	uint32_t remaining_seg_len, area_len, area_multiple;
	uint32_t stripes_per_mimage = 1;
	int r = 1;

	if (!seg && !(seg = find_seg_by_le(lv, le))) {
		log_error("Failed to find segment for %s extent %" PRIu32,
			  lv->name, le);
		return 0;
	}

	/* Remaining logical length of segment */
	remaining_seg_len = seg->len - (le - seg->le);

	if (remaining_seg_len > len)
		remaining_seg_len = len;

	if (max_seg_len && *max_seg_len > remaining_seg_len)
		*max_seg_len = remaining_seg_len;

	area_multiple = _calc_area_multiple(seg->segtype, seg->area_count, 0);
	area_len = (remaining_seg_len / area_multiple) ? : 1;

	/* For striped mirrors, all the areas are counted, through the mirror layer */
	if (top_level_area_index == -1)
		stripes_per_mimage = _stripes_per_mimage(seg);

	for (s = first_area;
	     s < seg->area_count && (!max_areas || s <= max_areas);
	     s++) {
		if (seg_type(seg, s) == AREA_LV) {
			if (!(r = _for_each_pv(cmd, seg_lv(seg, s),
					       seg_le(seg, s) +
					       (le - seg->le) / area_multiple,
					       area_len, NULL, max_seg_len, 0,
					       (stripes_per_mimage == 1) && only_single_area_segments ? 1U : 0U,
					       (top_level_area_index != -1) ? top_level_area_index : (int) (s * stripes_per_mimage),
					       only_single_area_segments, fn,
					       data)))
				stack;
		} else if (seg_type(seg, s) == AREA_PV)
			if (!(r = fn(cmd, seg_pvseg(seg, s), top_level_area_index != -1 ? (uint32_t) top_level_area_index + s : s, data)))
				stack;

		if (r != 1)
			return r;
	}

	/* FIXME only_single_area_segments used as workaround to skip log LV - needs new param? */
	if (!only_single_area_segments && seg_is_mirrored(seg) && seg->log_lv) {
		if (!(r = _for_each_pv(cmd, seg->log_lv, 0, seg->log_lv->le_count, NULL,
				       NULL, 0, 0, 0, only_single_area_segments,
				       fn, data)))
			stack;
		if (r != 1)
			return r;
	}

	/* FIXME Add snapshot cow, thin meta etc. */
/*
	if (!only_single_area_segments && !max_areas && seg_is_raid(seg)) {
		for (s = first_area; s < seg->area_count; s++) {
			if (seg_metalv(seg, s))
				if (!(r = _for_each_pv(cmd, seg_metalv(seg, s), 0, seg_metalv(seg, s)->le_count, NULL,
						       NULL, 0, 0, 0, 0, fn, data)))
					stack;
			if (r != 1)
				return r;
		}
	}
*/

	return 1;
}
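
/*
 * Illustrative sketch (not part of the build): a minimal _for_each_pv()
 * callback that counts the PV segments visited.  Per the contract above,
 * it returns 0 on error, 1 to keep scanning, or >1 to stop early.
 * The helper name _count_pvsegs is hypothetical, not an existing function.
 *
 *	static int _count_pvsegs(struct cmd_context *cmd __attribute__((unused)),
 *				 struct pv_segment *peg __attribute__((unused)),
 *				 uint32_t s __attribute__((unused)), void *data)
 *	{
 *		unsigned *count = data;
 *
 *		(*count)++;
 *		return 1;
 *	}
 *
 * which could be driven over a whole LV as:
 *
 *	unsigned count = 0;
 *	if (!_for_each_pv(cmd, lv, 0, lv->le_count, NULL, NULL, 0, 0, -1, 0,
 *			  _count_pvsegs, &count))
 *		stack;
 */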
static int _comp_area(const void *l, const void *r)
{
	const struct pv_area_used *lhs = (const struct pv_area_used *) l;
	const struct pv_area_used *rhs = (const struct pv_area_used *) r;

	if (lhs->used < rhs->used)
		return 1;

	if (lhs->used > rhs->used)
		return -1;

	return 0;
}
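
/*
 * Note the inverted comparison: qsort(3) called with this comparator
 * orders pv_area_used entries by decreasing "used", so the biggest area
 * comes first.  A minimal sketch of the call, as made further down:
 *
 *	qsort(alloc_state->areas, ix, sizeof(*alloc_state->areas), _comp_area);
 */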
/*
 * Search for pvseg that matches condition
 */
struct pv_match {
	int (*condition)(struct pv_match *pvmatch, struct pv_segment *pvseg, struct pv_area *pva);

	struct alloc_handle *ah;
	struct alloc_state *alloc_state;
	struct pv_area *pva;
	const struct dm_config_node *cling_tag_list_cn;
	int s;	/* Area index of match */
};

/*
 * Is PV area on the same PV?
 */
static int _is_same_pv(struct pv_match *pvmatch __attribute((unused)), struct pv_segment *pvseg, struct pv_area *pva)
{
	if (pvseg->pv != pva->map->pv)
		return 0;

	return 1;
}
/*
 * Does PV area have a tag listed in allocation/cling_tag_list that
 * matches EITHER a tag of the PV of the existing segment OR a tag in pv_tags?
 * If mem is set, then instead we append a list of matching tags for printing to the object there.
 */
static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
			  struct physical_volume *pv1, uint32_t pv1_start_pe, uint32_t area_num,
			  struct physical_volume *pv2, struct dm_list *pv_tags, unsigned validate_only,
			  struct dm_pool *mem, unsigned parallel_pv)
{
	const struct dm_config_value *cv;
	const char *str;
	const char *tag_matched;
	struct dm_list *tags_to_match = mem ? NULL : pv_tags ? : &pv2->tags;
	struct dm_str_list *sl;
	unsigned first_tag = 1;

	for (cv = cling_tag_list_cn->v; cv; cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			if (validate_only)
				log_warn("WARNING: Ignoring invalid string in config file entry "
					 "allocation/cling_tag_list");
			continue;
		}

		str = cv->v.str;
		if (!*str) {
			if (validate_only)
				log_warn("WARNING: Ignoring empty string in config file entry "
					 "allocation/cling_tag_list");
			continue;
		}

		if (*str != '@') {
			if (validate_only)
				log_warn("WARNING: Ignoring string not starting with @ in config file entry "
					 "allocation/cling_tag_list: %s", str);
			continue;
		}

		str++;

		if (!*str) {
			if (validate_only)
				log_warn("WARNING: Ignoring empty tag in config file entry "
					 "allocation/cling_tag_list");
			continue;
		}

		if (validate_only)
			continue;

		/* Wildcard matches any tag against any tag. */
		if (!strcmp(str, "*")) {
			if (mem) {
				dm_list_iterate_items(sl, &pv1->tags) {
					if (!first_tag && !dm_pool_grow_object(mem, ",", 0)) {
						log_error("PV tags string extension failed.");
						return 0;
					}
					first_tag = 0;
					if (!dm_pool_grow_object(mem, sl->str, 0)) {
						log_error("PV tags string extension failed.");
						return 0;
					}
				}
				continue;
			}

			if (!str_list_match_list(&pv1->tags, tags_to_match, &tag_matched))
				continue;

			if (!pv_tags) {
				if (parallel_pv)
					log_debug_alloc("Not using free space on %s: Matched allocation PV tag %s on existing parallel PV %s.",
							pv_dev_name(pv1), tag_matched, pv2 ? pv_dev_name(pv2) : "-");
				else
					log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
							tag_matched, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
			} else
				log_debug_alloc("Eliminating allocation area %" PRIu32 " at PV %s start PE %" PRIu32
						" from consideration: PV tag %s already used.",
						area_num, pv_dev_name(pv1), pv1_start_pe, tag_matched);

			return 1;
		}

		if (!str_list_match_item(&pv1->tags, str) ||
		    (tags_to_match && !str_list_match_item(tags_to_match, str)))
			continue;

		if (mem) {
			if (!first_tag && !dm_pool_grow_object(mem, ",", 0)) {
				log_error("PV tags string extension failed.");
				return 0;
			}
			first_tag = 0;
			if (!dm_pool_grow_object(mem, str, 0)) {
				log_error("PV tags string extension failed.");
				return 0;
			}
			continue;
		}

		if (!pv_tags) {
			if (parallel_pv)
				log_debug_alloc("Not using free space on %s: Matched allocation PV tag %s on existing parallel PV %s.",
						pv2 ? pv_dev_name(pv2) : "-", str, pv_dev_name(pv1));
			else
				log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
						str, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
		} else
			log_debug_alloc("Eliminating allocation area %" PRIu32 " at PV %s start PE %" PRIu32
					" from consideration: PV tag %s already used.",
					area_num, pv_dev_name(pv1), pv1_start_pe, str);

		return 1;
	}

	if (mem)
		return 1;

	return 0;
}
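
/*
 * For reference, the lvm.conf entry this function parses looks like the
 * following (tag names here are only examples); each list entry must be
 * a string starting with '@', and "@*" is the wildcard that matches any
 * tag against any tag:
 *
 *	allocation {
 *		cling_tag_list = [ "@site_a", "@site_b" ]
 *	}
 */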
static int _validate_tag_list(const struct dm_config_node *cling_tag_list_cn)
{
	return _match_pv_tags(cling_tag_list_cn, NULL, 0, 0, NULL, NULL, 1, NULL, 0);
}

static int _tags_list_str(struct dm_pool *mem, struct physical_volume *pv1, const struct dm_config_node *cling_tag_list_cn)
{
	if (!_match_pv_tags(cling_tag_list_cn, pv1, 0, 0, NULL, NULL, 0, mem, 0)) {
		dm_pool_abandon_object(mem);
		return_0;
	}

	return 1;
}

/*
 * Does PV area have a tag listed in allocation/cling_tag_list that
 * matches a tag in the pv_tags list?
 */
static int _pv_has_matching_tag(const struct dm_config_node *cling_tag_list_cn,
				struct physical_volume *pv1, uint32_t pv1_start_pe, uint32_t area_num,
				struct dm_list *pv_tags)
{
	return _match_pv_tags(cling_tag_list_cn, pv1, pv1_start_pe, area_num, NULL, pv_tags, 0, NULL, 0);
}

/*
 * Does PV area have a tag listed in allocation/cling_tag_list that
 * matches a tag of the PV of the existing segment?
 */
static int _pvs_have_matching_tag(const struct dm_config_node *cling_tag_list_cn,
				  struct physical_volume *pv1, struct physical_volume *pv2,
				  unsigned parallel_pv)
{
	return _match_pv_tags(cling_tag_list_cn, pv1, 0, 0, pv2, NULL, 0, NULL, parallel_pv);
}

static int _has_matching_pv_tag(struct pv_match *pvmatch, struct pv_segment *pvseg, struct pv_area *pva)
{
	return _pvs_have_matching_tag(pvmatch->cling_tag_list_cn, pvseg->pv, pva->map->pv, 0);
}
static int _log_parallel_areas(struct dm_pool *mem, struct dm_list *parallel_areas,
			       const struct dm_config_node *cling_tag_list_cn)
{
	struct seg_pvs *spvs;
	struct pv_list *pvl;
	char *pvnames;
	unsigned first;

	if (!parallel_areas)
		return 1;

	dm_list_iterate_items(spvs, parallel_areas) {
		first = 1;

		if (!dm_pool_begin_object(mem, 256)) {
			log_error("dm_pool_begin_object failed");
			return 0;
		}

		dm_list_iterate_items(pvl, &spvs->pvs) {
			if (!first && !dm_pool_grow_object(mem, " ", 1)) {
				log_error("dm_pool_grow_object failed");
				dm_pool_abandon_object(mem);
				return 0;
			}

			if (!dm_pool_grow_object(mem, pv_dev_name(pvl->pv), strlen(pv_dev_name(pvl->pv)))) {
				log_error("dm_pool_grow_object failed");
				dm_pool_abandon_object(mem);
				return 0;
			}

			if (cling_tag_list_cn) {
				if (!dm_pool_grow_object(mem, "(", 1)) {
					log_error("dm_pool_grow_object failed");
					dm_pool_abandon_object(mem);
					return 0;
				}

				if (!_tags_list_str(mem, pvl->pv, cling_tag_list_cn)) {
					dm_pool_abandon_object(mem);
					return_0;
				}

				if (!dm_pool_grow_object(mem, ")", 1)) {
					log_error("dm_pool_grow_object failed");
					dm_pool_abandon_object(mem);
					return 0;
				}
			}

			first = 0;
		}

		if (!dm_pool_grow_object(mem, "\0", 1)) {
			log_error("dm_pool_grow_object failed");
			dm_pool_abandon_object(mem);
			return 0;
		}

		pvnames = dm_pool_end_object(mem);
		log_debug_alloc("Parallel PVs at LE %" PRIu32 " length %" PRIu32 ": %s",
				spvs->le, spvs->len, pvnames);
		dm_pool_free(mem, pvnames);
	}

	return 1;
}
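
/*
 * A hypothetical sample of the resulting debug line, assuming two
 * parallel PVs and a cling_tag_list configured (matching tags appear
 * in parentheses after each PV name):
 *
 *	Parallel PVs at LE 0 length 256: /dev/sda(site_a) /dev/sdb(site_b)
 */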
/*
 * Is PV area contiguous to PV segment?
 */
static int _is_contiguous(struct pv_match *pvmatch __attribute((unused)), struct pv_segment *pvseg, struct pv_area *pva)
{
	if (pvseg->pv != pva->map->pv)
		return 0;

	if (pvseg->pe + pvseg->len != pva->start)
		return 0;

	return 1;
}
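
/*
 * Worked example with illustrative numbers: an existing segment covering
 * PEs 0..99 of /dev/sda has pe = 0 and len = 100, so only a free area on
 * the same PV with start == 100 is treated as contiguous.
 */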
static void _reserve_area(struct alloc_handle *ah, struct alloc_state *alloc_state, struct pv_area *pva,
			  uint32_t required, uint32_t ix_pva, uint32_t unreserved)
{
	struct pv_area_used *area_used = &alloc_state->areas[ix_pva];
	const char *pv_tag_list = NULL;

	if (ah->cling_tag_list_cn) {
		if (!dm_pool_begin_object(ah->mem, 256))
			log_error("PV tags string allocation failed");
		else if (!_tags_list_str(ah->mem, pva->map->pv, ah->cling_tag_list_cn))
			dm_pool_abandon_object(ah->mem);
		else if (!dm_pool_grow_object(ah->mem, "\0", 1)) {
			dm_pool_abandon_object(ah->mem);
			log_error("PV tags string extension failed.");
		} else
			pv_tag_list = dm_pool_end_object(ah->mem);
	}

	log_debug_alloc("%s allocation area %" PRIu32 " %s %s start PE %" PRIu32
			" length %" PRIu32 " leaving %" PRIu32 " %s%s.",
			area_used->pva ? "Changing   " : "Considering",
			ix_pva, area_used->pva ? "to" : "as",
			dev_name(pva->map->pv->dev), pva->start, required, unreserved,
			pv_tag_list ? "with PV tags: " : "",
			pv_tag_list ? : "");

	if (pv_tag_list)
		dm_pool_free(ah->mem, (void *) pv_tag_list);

	area_used->pva = pva;
	area_used->used = required;
}

static int _reserve_required_area(struct alloc_handle *ah, struct alloc_state *alloc_state, struct pv_area *pva,
				  uint32_t required, uint32_t ix_pva, uint32_t unreserved)
{
	uint32_t s;

	/* Expand areas array if needed after an area was split. */
	if (ix_pva >= alloc_state->areas_size) {
		alloc_state->areas_size *= 2;
		if (!(alloc_state->areas = dm_realloc(alloc_state->areas, sizeof(*alloc_state->areas) * (alloc_state->areas_size)))) {
			log_error("Memory reallocation for parallel areas failed.");
			return 0;
		}
		for (s = alloc_state->areas_size / 2; s < alloc_state->areas_size; s++)
			alloc_state->areas[s].pva = NULL;
	}

	_reserve_area(ah, alloc_state, pva, required, ix_pva, unreserved);

	return 1;
}
static int _is_condition(struct cmd_context *cmd __attribute__((unused)),
			 struct pv_segment *pvseg, uint32_t s,
			 void *data)
{
	struct pv_match *pvmatch = data;
	int positional = pvmatch->alloc_state->alloc_parms->flags & A_POSITIONAL_FILL;

	if (positional && pvmatch->alloc_state->areas[s].pva)
		return 1;	/* Area already assigned */

	if (!pvmatch->condition(pvmatch, pvseg, pvmatch->pva))
		return 1;	/* Continue */

	if (positional && (s >= pvmatch->alloc_state->num_positional_areas))
		return 1;

	/* FIXME The previous test should make this one redundant. */
	if (positional && (s >= pvmatch->alloc_state->areas_size))
		return 1;

	/*
	 * Only used for cling and contiguous policies (which only make one allocation per PV)
	 * so it's safe to say all the available space is used.
	 */
	if (positional)
		_reserve_required_area(pvmatch->ah, pvmatch->alloc_state, pvmatch->pva, pvmatch->pva->count, s, 0);

	return 2;	/* Finished */
}
/*
 * Is pva on same PV as any existing areas?
 */
static int _check_cling(struct alloc_handle *ah,
			const struct dm_config_node *cling_tag_list_cn,
			struct lv_segment *prev_lvseg, struct pv_area *pva,
			struct alloc_state *alloc_state)
{
	struct pv_match pvmatch;
	int r;
	uint32_t le, len;

	pvmatch.ah = ah;
	pvmatch.condition = cling_tag_list_cn ? _has_matching_pv_tag : _is_same_pv;
	pvmatch.alloc_state = alloc_state;
	pvmatch.pva = pva;
	pvmatch.cling_tag_list_cn = cling_tag_list_cn;

	if (ah->maximise_cling) {
		/* Check entire LV */
		le = 0;
		len = prev_lvseg->le + prev_lvseg->len;
	} else {
		/* Only check 1 LE at end of previous LV segment */
		le = prev_lvseg->le + prev_lvseg->len - 1;
		len = 1;
	}

	/* FIXME Cope with stacks by flattening */
	if (!(r = _for_each_pv(ah->cmd, prev_lvseg->lv, le, len, NULL, NULL,
			       0, 0, -1, 1,
			       _is_condition, &pvmatch)))
		stack;

	if (r != 2)
		return 0;

	return 1;
}
/*
 * Is pva contiguous to any existing areas or on the same PV?
 */
static int _check_contiguous(struct alloc_handle *ah,
			     struct lv_segment *prev_lvseg, struct pv_area *pva,
			     struct alloc_state *alloc_state)
{
	struct pv_match pvmatch;
	int r;

	pvmatch.ah = ah;
	pvmatch.condition = _is_contiguous;
	pvmatch.alloc_state = alloc_state;
	pvmatch.pva = pva;
	pvmatch.cling_tag_list_cn = NULL;

	/* FIXME Cope with stacks by flattening */
	if (!(r = _for_each_pv(ah->cmd, prev_lvseg->lv,
			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL, NULL,
			       0, 0, -1, 1,
			       _is_condition, &pvmatch)))
		stack;

	if (r != 2)
		return 0;

	return 1;
}
/*
 * Is pva on same PV as any areas already used in this allocation attempt?
 */
static int _check_cling_to_alloced(struct alloc_handle *ah, const struct dm_config_node *cling_tag_list_cn,
				   struct pv_area *pva, struct alloc_state *alloc_state)
{
	unsigned s;
	struct alloced_area *aa;
	int positional = alloc_state->alloc_parms->flags & A_POSITIONAL_FILL;

	/*
	 * Ignore log areas.  They are always allocated whole as part of the
	 * first allocation.  If they aren't yet set, we know we've nothing to do.
	 */
	if (alloc_state->log_area_count_still_needed)
		return 0;

	for (s = 0; s < ah->area_count; s++) {
		if (positional && alloc_state->areas[s].pva)
			continue;	/* Area already assigned */
		dm_list_iterate_items(aa, &ah->alloced_areas[s]) {
			if ((!cling_tag_list_cn && (pva->map->pv == aa[0].pv)) ||
			    (cling_tag_list_cn && _pvs_have_matching_tag(cling_tag_list_cn, pva->map->pv, aa[0].pv, 0))) {
				if (positional)
					_reserve_required_area(ah, alloc_state, pva, pva->count, s, 0);
				return 1;
			}
		}
	}

	return 0;
}
static int _pv_is_parallel(struct physical_volume *pv, struct dm_list *parallel_pvs, const struct dm_config_node *cling_tag_list_cn)
{
	struct pv_list *pvl;

	dm_list_iterate_items(pvl, parallel_pvs) {
		if (pv == pvl->pv) {
			log_debug_alloc("Not using free space on existing parallel PV %s.",
					pv_dev_name(pvl->pv));
			return 1;
		}
		if (cling_tag_list_cn && _pvs_have_matching_tag(cling_tag_list_cn, pvl->pv, pv, 1))
			return 1;
	}

	return 0;
}
/*
 * Decide whether or not to try allocation from supplied area pva.
 * alloc_state->areas may get modified.
 */
static area_use_t _check_pva(struct alloc_handle *ah, struct pv_area *pva, uint32_t still_needed,
			     struct alloc_state *alloc_state,
			     unsigned already_found_one, unsigned iteration_count, unsigned log_iteration_count)
{
	const struct alloc_parms *alloc_parms = alloc_state->alloc_parms;
	unsigned s;

	/* Skip fully-reserved areas (which are not currently removed from the list). */
	if (!pva->unreserved)
		return NEXT_AREA;

	/* FIXME Should this test be removed? */
	if (iteration_count)
		/*
		 * Don't use an area twice.
		 */
		for (s = 0; s < alloc_state->areas_size; s++)
			if (alloc_state->areas[s].pva == pva)
				return NEXT_AREA;

	/* If maximise_cling is set, perform several checks, otherwise perform exactly one. */
	if (!iteration_count && !log_iteration_count && alloc_parms->flags & (A_CONTIGUOUS_TO_LVSEG | A_CLING_TO_LVSEG | A_CLING_TO_ALLOCED)) {
		/* Contiguous? */
		if (((alloc_parms->flags & A_CONTIGUOUS_TO_LVSEG) ||
		     (ah->maximise_cling && (alloc_parms->flags & A_AREA_COUNT_MATCHES))) &&
		    _check_contiguous(ah, alloc_parms->prev_lvseg, pva, alloc_state))
			goto found;

		/* Try next area on same PV if looking for contiguous space */
		if (alloc_parms->flags & A_CONTIGUOUS_TO_LVSEG)
			return NEXT_AREA;

		/* Cling to prev_lvseg? */
		if (((alloc_parms->flags & A_CLING_TO_LVSEG) ||
		     (ah->maximise_cling && (alloc_parms->flags & A_AREA_COUNT_MATCHES))) &&
		    _check_cling(ah, NULL, alloc_parms->prev_lvseg, pva, alloc_state))
			/* If this PV is suitable, use this first area */
			goto found;

		/* Cling_to_alloced? */
		if ((alloc_parms->flags & A_CLING_TO_ALLOCED) &&
		    _check_cling_to_alloced(ah, NULL, pva, alloc_state))
			goto found;

		/* Cling_by_tags? */
		if (!(alloc_parms->flags & A_CLING_BY_TAGS) || !ah->cling_tag_list_cn)
			return NEXT_PV;

		if ((alloc_parms->flags & A_AREA_COUNT_MATCHES)) {
			if (_check_cling(ah, ah->cling_tag_list_cn, alloc_parms->prev_lvseg, pva, alloc_state))
				goto found;
		} else if (_check_cling_to_alloced(ah, ah->cling_tag_list_cn, pva, alloc_state))
			goto found;

		/* All areas on this PV give same result so pointless checking more */
		return NEXT_PV;
	}

	/* Normal/Anywhere */

	/* Is it big enough on its own? */
	if (pva->unreserved * ah->area_multiple < still_needed &&
	    ((!(alloc_parms->flags & A_CAN_SPLIT) && !ah->log_area_count) ||
	     (already_found_one && alloc_parms->alloc != ALLOC_ANYWHERE)))
		return NEXT_PV;

found:
	if (alloc_parms->flags & A_POSITIONAL_FILL)
		return PREFERRED;

	return USE_AREA;
}
/*
 * Decide how many extents we're trying to obtain from a given area.
 * Removes the extents from further consideration.
 */
static uint32_t _calc_required_extents(struct alloc_handle *ah, struct pv_area *pva, unsigned ix_pva, uint32_t max_to_allocate, alloc_policy_t alloc)
{
	uint32_t required = max_to_allocate / ah->area_multiple;

	/*
	 * Update amount unreserved - effectively splitting an area
	 * into two or more parts.  If the whole stripe doesn't fit,
	 * reduce amount we're looking for.
	 */
	if (alloc == ALLOC_ANYWHERE) {
		if (ix_pva >= ah->area_count + ah->parity_count)
			required = ah->log_len;
	} else if (required < ah->log_len)
		required = ah->log_len;

	if (required >= pva->unreserved) {
		required = pva->unreserved;
		pva->unreserved = 0;
	} else {
		pva->unreserved -= required;
		reinsert_changed_pv_area(pva);
	}

	return required;
}
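
/*
 * Worked example with illustrative numbers: for a 2-stripe request,
 * area_multiple is 2, so max_to_allocate = 100 asks this area for
 * required = 50 extents.  If only 30 are unreserved, required drops to
 * 30 and the area becomes fully reserved; otherwise 50 extents are
 * deducted and the area is reinserted into its size-ordered list.
 */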
static void _clear_areas(struct alloc_state *alloc_state)
{
	uint32_t s;

	alloc_state->num_positional_areas = 0;

	for (s = 0; s < alloc_state->areas_size; s++)
		alloc_state->areas[s].pva = NULL;
}

static void _reset_unreserved(struct dm_list *pvms)
{
	struct pv_map *pvm;
	struct pv_area *pva;

	dm_list_iterate_items(pvm, pvms)
		dm_list_iterate_items(pva, &pvm->areas)
			if (pva->unreserved != pva->count) {
				pva->unreserved = pva->count;
				reinsert_changed_pv_area(pva);
			}
}
static void _report_needed_allocation_space(struct alloc_handle *ah,
					    struct alloc_state *alloc_state,
					    struct dm_list *pvms)
{
	const char *metadata_type;
	uint32_t parallel_areas_count, parallel_area_size;
	uint32_t metadata_count, metadata_size;

	parallel_area_size = ah->new_extents - alloc_state->allocated;
	parallel_area_size /= ah->area_multiple;
	parallel_area_size -= (ah->alloc_and_split_meta && !ah->split_metadata_is_allocated) ? ah->log_len : 0;

	parallel_areas_count = ah->area_count + ah->parity_count;

	metadata_size = ah->log_len;
	if (ah->alloc_and_split_meta) {
		metadata_type = "metadata area";
		metadata_count = parallel_areas_count;
		if (ah->split_metadata_is_allocated)
			metadata_size = 0;
	} else {
		metadata_type = "mirror log";
		metadata_count = alloc_state->log_area_count_still_needed;
	}

	log_debug_alloc("Still need %s%" PRIu32 " total extents from %" PRIu32 " remaining (%" PRIu32 " positional slots):",
			ah->approx_alloc ? "up to " : "",
			parallel_area_size * parallel_areas_count + metadata_size * metadata_count, pv_maps_size(pvms),
			alloc_state->num_positional_areas);
	log_debug_alloc("  %" PRIu32 " (%" PRIu32 " data/%" PRIu32
			" parity) parallel areas of %" PRIu32 " extents each",
			parallel_areas_count, ah->area_count, ah->parity_count, parallel_area_size);
	log_debug_alloc("  %" PRIu32 " %s%s of %" PRIu32 " extents each",
			metadata_count, metadata_type,
			(metadata_count == 1) ? "" : "s",
			metadata_size);
}
/* Work through the array, removing any entries with tags already used by previous areas. */
static int _limit_to_one_area_per_tag(struct alloc_handle *ah, struct alloc_state *alloc_state,
				      uint32_t ix_log_offset, unsigned *ix)
{
	uint32_t s = 0, u = 0;
	DM_LIST_INIT(pv_tags);

	while (s < alloc_state->areas_size && alloc_state->areas[s].pva) {
		/* Start again with an empty tag list when we reach the log devices */
		if (u == ix_log_offset)
			dm_list_init(&pv_tags);
		if (!_pv_has_matching_tag(ah->cling_tag_list_cn, alloc_state->areas[s].pva->map->pv, alloc_state->areas[s].pva->start, s, &pv_tags)) {
			/* The comparison fn will ignore any non-cling tags so just add everything */
			if (!str_list_add_list(ah->mem, &pv_tags, &alloc_state->areas[s].pva->map->pv->tags))
				return_0;

			if (s != u)
				alloc_state->areas[u] = alloc_state->areas[s];

			u++;
		} else
			(*ix)--;	/* One area removed */

		s++;
	}

	alloc_state->areas[u].pva = NULL;

	return 1;
}
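
/*
 * Illustrative effect (hypothetical tags): with cling_tag_list matching
 * @site_a and @site_b, candidate areas on PVs tagged
 * [site_a, site_a, site_b] compact to one area per tag,
 * [site_a, site_b], and *ix drops by one for the eliminated area.
 */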
/*
 * Returns 1 regardless of whether any space was found, except on error.
 */
static int _find_some_parallel_space(struct alloc_handle *ah,
				     struct dm_list *pvms, struct alloc_state *alloc_state,
				     struct dm_list *parallel_pvs, uint32_t max_to_allocate)
{
	const struct alloc_parms *alloc_parms = alloc_state->alloc_parms;
	unsigned ix = 0;
	unsigned last_ix;
	struct pv_map *pvm;
	struct pv_area *pva;
	unsigned preferred_count = 0;
	unsigned already_found_one;
	unsigned ix_log_offset; /* Offset to start of areas to use for log */
	unsigned too_small_for_log_count; /* How many too small for log? */
	unsigned iteration_count = 0; /* cling_to_alloced may need 2 iterations */
	unsigned log_iteration_count = 0; /* extra iteration for logs on data devices */
	struct alloced_area *aa;
	uint32_t s;
	uint32_t devices_needed = ah->area_count + ah->parity_count;
	uint32_t required;

	_clear_areas(alloc_state);
	_reset_unreserved(pvms);

	/* num_positional_areas holds the number of parallel allocations that must be contiguous/cling */
	/* These appear first in the array, so it is also the offset to the non-preferred allocations */
	/* At most one of A_CONTIGUOUS_TO_LVSEG, A_CLING_TO_LVSEG or A_CLING_TO_ALLOCED may be set */
	if (!(alloc_parms->flags & A_POSITIONAL_FILL))
		alloc_state->num_positional_areas = 0;
	else if (alloc_parms->flags & (A_CONTIGUOUS_TO_LVSEG | A_CLING_TO_LVSEG))
		alloc_state->num_positional_areas = _stripes_per_mimage(alloc_parms->prev_lvseg) * alloc_parms->prev_lvseg->area_count;
	else if (alloc_parms->flags & A_CLING_TO_ALLOCED)
		alloc_state->num_positional_areas = ah->area_count;

	if (alloc_parms->alloc == ALLOC_NORMAL || (alloc_parms->flags & A_CLING_TO_ALLOCED))
		log_debug_alloc("Cling_to_allocated is %sset",
				alloc_parms->flags & A_CLING_TO_ALLOCED ? "" : "not ");

	if (alloc_parms->flags & A_POSITIONAL_FILL)
		log_debug_alloc("%u preferred area(s) to be filled positionally.", alloc_state->num_positional_areas);
	else
		log_debug_alloc("Areas to be sorted and filled sequentially.");

	_report_needed_allocation_space(ah, alloc_state, pvms);

	/* ix holds the number of areas found on other PVs */
	do {
		if (log_iteration_count) {
			log_debug_alloc("Found %u areas for %" PRIu32 " parallel areas and %" PRIu32 " log areas so far.", ix, devices_needed, alloc_state->log_area_count_still_needed);
		} else if (iteration_count)
			log_debug_alloc("Filled %u out of %u preferred areas so far.", preferred_count, alloc_state->num_positional_areas);

		/*
		 * Provide for escape from the loop if no progress is made.
		 * This should not happen: ALLOC_ANYWHERE should be able to use
		 * all available space.  (If there aren't enough extents, the code
		 * should not reach this point.)
		 */
		last_ix = ix;

		/*
		 * Put the smallest area of each PV that is at least the
		 * size we need into areas array.  If there isn't one
		 * that fits completely and we're allowed more than one
		 * LV segment, then take the largest remaining instead.
		 */
		dm_list_iterate_items(pvm, pvms) {
			/* PV-level checks */
			if (dm_list_empty(&pvm->areas))
				continue;	/* Next PV */

			if (alloc_parms->alloc != ALLOC_ANYWHERE) {
				/* Don't allocate onto the log PVs */
				if (ah->log_area_count)
					dm_list_iterate_items(aa, &ah->alloced_areas[ah->area_count])
						for (s = 0; s < ah->log_area_count; s++)
							if (!aa[s].pv)
								goto next_pv;

				/* FIXME Split into log and non-log parallel_pvs and only check the log ones if log_iteration? */
				/* (I've temporarily disabled the check.) */
				/* Avoid PVs used by existing parallel areas */
				if (!log_iteration_count && parallel_pvs && _pv_is_parallel(pvm->pv, parallel_pvs, ah->cling_tag_list_cn))
					goto next_pv;

				/*
				 * Avoid PVs already set aside for log.
				 * We only reach here if there were enough PVs for the main areas but
				 * not enough for the logs.
				 */
				if (log_iteration_count) {
					for (s = devices_needed; s < ix + alloc_state->num_positional_areas; s++)
						if (alloc_state->areas[s].pva && alloc_state->areas[s].pva->map->pv == pvm->pv)
							goto next_pv;
				/* On a second pass, avoid PVs already used in an uncommitted area */
				} else if (iteration_count)
					for (s = 0; s < devices_needed; s++)
						if (alloc_state->areas[s].pva && alloc_state->areas[s].pva->map->pv == pvm->pv)
							goto next_pv;
			}

			already_found_one = 0;
			/* First area in each list is the largest */
			dm_list_iterate_items(pva, &pvm->areas) {
				/*
				 * There are two types of allocations, which can't be mixed at present:
				 *
				 * PREFERRED are stored immediately in a specific parallel slot.
				 *   This is only used if the A_POSITIONAL_FILL flag is set.
				 *   This requires the number of slots to match, so if comparing with
				 *   prev_lvseg then A_AREA_COUNT_MATCHES must be set.
				 *
				 * USE_AREA are stored for later, then sorted and chosen from.
				 */
				switch (_check_pva(ah, pva, max_to_allocate,
						   alloc_state, already_found_one, iteration_count, log_iteration_count)) {

				case PREFERRED:
					preferred_count++;
					/* Fall through */

				case NEXT_PV:
					goto next_pv;

				case NEXT_AREA:
					continue;

				case USE_AREA:
					/*
					 * Except with ALLOC_ANYWHERE, replace first area with this
					 * one which is smaller but still big enough.
					 */
					if (!already_found_one ||
					    alloc_parms->alloc == ALLOC_ANYWHERE) {
						ix++;
						already_found_one = 1;
					}

					/* Reserve required amount of pva */
					required = _calc_required_extents(ah, pva, ix + alloc_state->num_positional_areas - 1, max_to_allocate, alloc_parms->alloc);
					if (!_reserve_required_area(ah, alloc_state, pva, required, ix + alloc_state->num_positional_areas - 1, pva->unreserved))
						return_0;
				}

			}

		next_pv:
			/* With ALLOC_ANYWHERE we ignore further PVs once we have at least enough areas */
			/* With cling and contiguous we stop if we found a match for *all* the areas */
			/* FIXME Rename these variables! */
			if ((alloc_parms->alloc == ALLOC_ANYWHERE &&
			     ix + alloc_state->num_positional_areas >= devices_needed + alloc_state->log_area_count_still_needed) ||
			    (preferred_count == alloc_state->num_positional_areas &&
			     (alloc_state->num_positional_areas == devices_needed + alloc_state->log_area_count_still_needed)))
				break;
		}
	} while ((alloc_parms->alloc == ALLOC_ANYWHERE && last_ix != ix && ix < devices_needed + alloc_state->log_area_count_still_needed) ||
		 /* With cling_to_alloced and normal, if there were gaps in the preferred areas, have a second iteration */
		 (alloc_parms->alloc == ALLOC_NORMAL && preferred_count &&
		  (preferred_count < alloc_state->num_positional_areas || alloc_state->log_area_count_still_needed) &&
		  (alloc_parms->flags & A_CLING_TO_ALLOCED) && !iteration_count++) ||
		 /* Extra iteration needed to fill log areas on PVs already used? */
		 (alloc_parms->alloc == ALLOC_NORMAL && preferred_count == alloc_state->num_positional_areas && !ah->mirror_logs_separate &&
		  (ix + preferred_count >= devices_needed) &&
		  (ix + preferred_count < devices_needed + alloc_state->log_area_count_still_needed) && !log_iteration_count++));

	/* Non-zero ix means at least one USE_AREA was returned */
	if (preferred_count < alloc_state->num_positional_areas && !(alloc_parms->flags & A_CLING_TO_ALLOCED) && !ix)
		return 1;

	if (ix + preferred_count < devices_needed + alloc_state->log_area_count_still_needed)
		return 1;

	/* Sort the areas so we allocate from the biggest */
	if (log_iteration_count) {
		if (ix > devices_needed + 1) {
			log_debug_alloc("Sorting %u log areas", ix - devices_needed);
			qsort(alloc_state->areas + devices_needed, ix - devices_needed, sizeof(*alloc_state->areas),
			      _comp_area);
		}
	} else if (ix > 1) {
		log_debug_alloc("Sorting %u areas", ix);
		qsort(alloc_state->areas + alloc_state->num_positional_areas, ix, sizeof(*alloc_state->areas),
		      _comp_area);
	}

	/* If there are gaps in our preferred areas, fill them from the sorted part of the array */
	if (preferred_count && preferred_count != alloc_state->num_positional_areas) {
		for (s = 0; s < devices_needed; s++)
			if (!alloc_state->areas[s].pva) {
				alloc_state->areas[s].pva = alloc_state->areas[alloc_state->num_positional_areas].pva;
				alloc_state->areas[s].used = alloc_state->areas[alloc_state->num_positional_areas].used;
				alloc_state->areas[alloc_state->num_positional_areas++].pva = NULL;
			}
	}

	/*
	 * First time around, if there's a log, allocate it on the
	 * smallest device that has space for it.
	 */
	too_small_for_log_count = 0;
	ix_log_offset = 0;

	/* FIXME This logic is due to its heritage and can be simplified! */
	if (alloc_state->log_area_count_still_needed) {
		/* How many areas are too small for the log? */
		while (too_small_for_log_count < alloc_state->num_positional_areas + ix &&
		       (*(alloc_state->areas + alloc_state->num_positional_areas + ix - 1 -
			  too_small_for_log_count)).used < ah->log_len)
			too_small_for_log_count++;
		ix_log_offset = alloc_state->num_positional_areas + ix - too_small_for_log_count - ah->log_area_count;
	}

	if (ix + alloc_state->num_positional_areas < devices_needed +
	    (alloc_state->log_area_count_still_needed ? alloc_state->log_area_count_still_needed +
	     too_small_for_log_count : 0))
		return 1;

	/*
	 * FIXME We should change the code to do separate calls for the log allocation
	 * and the data allocation so that _limit_to_one_area_per_tag doesn't have to guess
	 * where the split is going to occur.
	 */

	/*
	 * This code covers the initial allocation - after that there is something to 'cling' to
	 * and we shouldn't get this far.
	 * alloc_state->num_positional_areas is assumed to be 0 with A_PARTITION_BY_TAGS.
	 *
	 * FIXME Consider a second attempt with A_PARTITION_BY_TAGS if, for example, the largest area
	 * had all the tags set, but other areas don't.
	 */
	if ((alloc_parms->flags & A_PARTITION_BY_TAGS) && !alloc_state->num_positional_areas) {
		if (!_limit_to_one_area_per_tag(ah, alloc_state, ix_log_offset, &ix))
			return_0;

		/* Recalculate log position because we might have removed some areas from consideration */
		if (alloc_state->log_area_count_still_needed) {
			/* How many areas are too small for the log? */
			too_small_for_log_count = 0;
			while (too_small_for_log_count < ix &&
			       (*(alloc_state->areas + ix - 1 - too_small_for_log_count)).pva &&
			       (*(alloc_state->areas + ix - 1 - too_small_for_log_count)).used < ah->log_len)
				too_small_for_log_count++;
			if (ix < too_small_for_log_count + ah->log_area_count)
				return 1;
			ix_log_offset = ix - too_small_for_log_count - ah->log_area_count;
		}

		if (ix < devices_needed +
		    (alloc_state->log_area_count_still_needed ? alloc_state->log_area_count_still_needed +
		     too_small_for_log_count : 0))
			return 1;
	}

	/*
	 * Finally add the space identified to the list of areas to be used.
	 */
	if (!_alloc_parallel_area(ah, max_to_allocate, alloc_state, ix_log_offset))
		return_0;

	/*
	 * Log is always allocated first time.
	 */
	alloc_state->log_area_count_still_needed = 0;

	return 1;
}
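
/*
 * In outline, a single pass of _find_some_parallel_space() therefore runs:
 * clear state; optionally fill positional (contiguous/cling) slots; gather
 * one candidate area per PV; sort the candidates by size; set aside log
 * areas; then hand the chosen set to _alloc_parallel_area().  The extra
 * loop iterations exist only to fill A_CLING_TO_ALLOCED gaps and to place
 * mirror logs on PVs already holding data.
 */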
/*
 * Choose sets of parallel areas to use, respecting any constraints
 * supplied in alloc_parms.
 */
static int _find_max_parallel_space_for_one_policy(struct alloc_handle *ah, struct alloc_parms *alloc_parms,
						   struct dm_list *pvms, struct alloc_state *alloc_state)
{
	uint32_t max_tmp;
	uint32_t max_to_allocate;	/* Maximum extents to allocate this time */
	uint32_t old_allocated;
	uint32_t next_le;
	struct seg_pvs *spvs;
	struct dm_list *parallel_pvs;

	alloc_state->alloc_parms = alloc_parms;

	/* FIXME This algorithm needs a lot of cleaning up! */
	/* FIXME anywhere doesn't find all space yet */
	do {
		parallel_pvs = NULL;
		max_to_allocate = alloc_parms->extents_still_needed - alloc_state->allocated;

		/*
		 * If there are existing parallel PVs, avoid them and reduce
		 * the maximum we can allocate in one go accordingly.
		 */
		if (ah->parallel_areas) {
			next_le = (alloc_parms->prev_lvseg ? alloc_parms->prev_lvseg->le + alloc_parms->prev_lvseg->len : 0) + alloc_state->allocated / ah->area_multiple;
			dm_list_iterate_items(spvs, ah->parallel_areas) {
				if (next_le >= spvs->le + spvs->len)
					continue;

				max_tmp = max_to_allocate +
					alloc_state->allocated;

				/*
				 * Because a request that groups metadata and
				 * data together will be split, we must adjust
				 * the comparison accordingly.
				 */
				if (ah->alloc_and_split_meta && !ah->split_metadata_is_allocated)
					max_tmp -= ah->log_len;
				if (max_tmp > (spvs->le + spvs->len) * ah->area_multiple) {
					max_to_allocate = (spvs->le + spvs->len) * ah->area_multiple - alloc_state->allocated;
					max_to_allocate += (ah->alloc_and_split_meta && !ah->split_metadata_is_allocated) ? ah->log_len : 0;
				}
				parallel_pvs = &spvs->pvs;
				break;
			}
		}

		old_allocated = alloc_state->allocated;

		if (!_find_some_parallel_space(ah, pvms, alloc_state, parallel_pvs, max_to_allocate))
			return_0;

		/*
		 * For ALLOC_CLING, if the number of areas matches and maximise_cling is
		 * set we allow two passes, first with A_POSITIONAL_FILL then without.
		 *
		 * If we didn't allocate anything this time with ALLOC_NORMAL and had
		 * A_CLING_TO_ALLOCED set, try again without it.
		 *
		 * For ALLOC_NORMAL, if we did allocate something without the
		 * flag set, set it and continue so that further allocations
		 * remain on the same disks where possible.
		 */
		if (old_allocated == alloc_state->allocated) {
			if (ah->maximise_cling && ((alloc_parms->alloc == ALLOC_CLING) || (alloc_parms->alloc == ALLOC_CLING_BY_TAGS)) &&
			    (alloc_parms->flags & A_CLING_TO_LVSEG) && (alloc_parms->flags & A_POSITIONAL_FILL))
				alloc_parms->flags &= ~A_POSITIONAL_FILL;
			else if ((alloc_parms->alloc == ALLOC_NORMAL) && (alloc_parms->flags & A_CLING_TO_ALLOCED))
				alloc_parms->flags &= ~A_CLING_TO_ALLOCED;
			else
				break;	/* Give up */
		} else if (ah->maximise_cling && alloc_parms->alloc == ALLOC_NORMAL &&
			   !(alloc_parms->flags & A_CLING_TO_ALLOCED))
			alloc_parms->flags |= A_CLING_TO_ALLOCED;
	} while ((alloc_parms->alloc != ALLOC_CONTIGUOUS) && alloc_state->allocated != alloc_parms->extents_still_needed && (alloc_parms->flags & A_CAN_SPLIT) && (!ah->approx_alloc || pv_maps_size(pvms)));

	return 1;
}
/*
 * Allocate several segments, each the same size, in parallel.
 * If mirrored_pv and mirrored_pe are supplied, it is used as
 * the first area, and additional areas are allocated parallel to it.
 */
static int _allocate(struct alloc_handle *ah,
		     struct volume_group *vg,
		     struct logical_volume *lv,
		     unsigned can_split,
		     struct dm_list *allocatable_pvs)
{
	uint32_t old_allocated;
	struct lv_segment *prev_lvseg = NULL;
	int r = 0;
	struct dm_list *pvms;
	alloc_policy_t alloc;
	struct alloc_parms alloc_parms;
	struct alloc_state alloc_state;

	alloc_state.allocated = lv ? lv->le_count : 0;

	if (alloc_state.allocated >= ah->new_extents && !ah->log_area_count) {
		log_warn("_allocate called with no work to do!");
		return 1;
	}

	if (ah->area_multiple > 1 &&
	    (ah->new_extents - alloc_state.allocated) % ah->area_multiple) {
		log_error("Number of extents requested (" FMTu32 ") needs to be divisible by " FMTu32 ".",
			  ah->new_extents - alloc_state.allocated,
			  ah->area_multiple);
		return 0;
	}

	alloc_state.log_area_count_still_needed = ah->log_area_count;

	if (ah->alloc == ALLOC_CONTIGUOUS)
		can_split = 0;

	if (lv && !dm_list_empty(&lv->segments))
		prev_lvseg = dm_list_item(dm_list_last(&lv->segments),
					  struct lv_segment);
	/*
	 * Build the sets of available areas on the pv's.
	 */
	if (!(pvms = create_pv_maps(ah->mem, vg, allocatable_pvs)))
		return_0;

	if (!_log_parallel_areas(ah->mem, ah->parallel_areas, ah->cling_tag_list_cn))
		stack;

	alloc_state.areas_size = dm_list_size(pvms);

	if (alloc_state.areas_size &&
	    alloc_state.areas_size < (ah->area_count + ah->parity_count + ah->log_area_count)) {
		if (ah->alloc != ALLOC_ANYWHERE && ah->mirror_logs_separate) {
			log_error("Not enough PVs with free space available "
				  "for parallel allocation.");
			log_error("Consider --alloc anywhere if desperate.");
			return 0;
		}
		alloc_state.areas_size = ah->area_count + ah->parity_count + ah->log_area_count;
	}

	/* Upper bound if none of the PVs in prev_lvseg is in pvms */
	/* FIXME Work size out properly */
	if (prev_lvseg)
		alloc_state.areas_size += _stripes_per_mimage(prev_lvseg) * prev_lvseg->area_count;

	/* Allocate an array of pv_areas to hold the largest space on each PV */
	if (!(alloc_state.areas = dm_malloc(sizeof(*alloc_state.areas) * alloc_state.areas_size))) {
		log_error("Couldn't allocate areas array.");
		return 0;
	}

	/*
	 * cling includes implicit cling_by_tags
	 * but it does nothing unless the lvm.conf setting is present.
	 */
	if (ah->alloc == ALLOC_CLING)
		ah->alloc = ALLOC_CLING_BY_TAGS;

	/* Attempt each defined allocation policy in turn */
	for (alloc = ALLOC_CONTIGUOUS; alloc <= ah->alloc; alloc++) {
		/* Skip cling_by_tags if no list defined */
		if (alloc == ALLOC_CLING_BY_TAGS && !ah->cling_tag_list_cn)
			continue;
		old_allocated = alloc_state.allocated;
		log_debug_alloc("Trying allocation using %s policy.", get_alloc_string(alloc));

		if (!ah->approx_alloc && !_sufficient_pes_free(ah, pvms, alloc_state.allocated, ah->new_extents))
			goto_out;

		_init_alloc_parms(ah, &alloc_parms, alloc, prev_lvseg,
				  can_split, alloc_state.allocated,
				  ah->new_extents);

		if (!_find_max_parallel_space_for_one_policy(ah, &alloc_parms, pvms, &alloc_state))
			goto_out;

		/* As a workaround, if only the log is missing now, fall through and try later policies up to normal. */
		/* FIXME Change the core algorithm so the log extents cling to parallel LVs instead of avoiding them. */
		if (alloc_state.allocated == ah->new_extents &&
		    alloc_state.log_area_count_still_needed &&
		    ah->alloc < ALLOC_NORMAL) {
			ah->alloc = ALLOC_NORMAL;
			continue;
		}

		if ((alloc_state.allocated == ah->new_extents &&
		     !alloc_state.log_area_count_still_needed) ||
		    (!can_split && (alloc_state.allocated != old_allocated)))
			break;
	}

	if (alloc_state.allocated != ah->new_extents) {
		if (!ah->approx_alloc) {
			log_error("Insufficient suitable %sallocatable extents "
				  "for logical volume %s: %u more required",
				  can_split ? "" : "contiguous ",
				  lv ? lv->name : "",
				  (ah->new_extents - alloc_state.allocated) *
				  ah->area_count / ah->area_multiple);
			goto out;
		}
		if (!alloc_state.allocated) {
			log_error("Insufficient suitable %sallocatable extents "
				  "found for logical volume %s.",
				  can_split ? "" : "contiguous ",
				  lv ? lv->name : "");
			goto out;
		}
		log_verbose("Found fewer %sallocatable extents "
			    "for logical volume %s than requested: using %" PRIu32 " extents (reduced by %u).",
			    can_split ? "" : "contiguous ",
			    lv ? lv->name : "",
			    alloc_state.allocated,
			    (ah->new_extents - alloc_state.allocated) * ah->area_count / ah->area_multiple);
		ah->new_extents = alloc_state.allocated;
	}

	if (alloc_state.log_area_count_still_needed) {
		log_error("Insufficient free space for log allocation "
			  "for logical volume %s.",
			  lv ? lv->name : "");
		goto out;
	}

	r = 1;

      out:
	dm_free(alloc_state.areas);

	return r;
}
int lv_add_virtual_segment(struct logical_volume *lv, uint64_t status,
			   uint32_t extents, const struct segment_type *segtype)
{
	struct lv_segment *seg;

	if (!dm_list_empty(&lv->segments) &&
	    (seg = last_seg(lv)) && (seg->segtype == segtype)) {
		seg->area_len += extents;
		seg->len += extents;
	} else {
		if (!(seg = alloc_lv_segment(segtype, lv, lv->le_count, extents, 0,
					     status, 0, NULL, 0,
					     extents, 0, 0, 0, 0, NULL))) {
			log_error("Couldn't allocate new %s segment.", segtype->name);
			return 0;
		}
		lv->status |= VIRTUAL;
		dm_list_add(&lv->segments, &seg->list);
	}

	lv->le_count += extents;
	lv->size += (uint64_t) extents * lv->vg->extent_size;

	return 1;
}
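/*
 * Size-accounting example for lv_add_virtual_segment() above: virtual
 * segments occupy no physical extents, so only the LV's logical accounting
 * changes.  As an illustration (assuming 512-byte sectors and a 4 MiB
 * extent size, i.e. extent_size = 8192 sectors), adding 25 extents grows
 * lv->size by 25 * 8192 = 204800 sectors, i.e. 100 MiB.
 */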
/*
 * Preparation for a specific allocation attempt
 * stripes and mirrors refer to the parallel areas used for data.
 * If log_area_count > 1 it is always mirrored (not striped).
 */
static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
					const struct segment_type *segtype,
					alloc_policy_t alloc, int approx_alloc,
					uint32_t existing_extents,
					uint32_t new_extents,
					uint32_t mirrors,
					uint32_t stripes,
					uint32_t metadata_area_count,
					uint32_t extent_size,
					uint32_t region_size,
					struct dm_list *parallel_areas)
{
	struct dm_pool *mem;
	struct alloc_handle *ah;
	uint32_t s, area_count, alloc_count, parity_count, total_extents;
	size_t size = 0;

	if (segtype_is_virtual(segtype)) {
		log_error(INTERNAL_ERROR "_alloc_init called for virtual segment.");
		return NULL;
	}

	/* FIXME Caller should ensure this */
	if (mirrors && !stripes)
		stripes = 1;

	if (mirrors > 1)
		area_count = mirrors * stripes;
	else
		area_count = stripes;

	if (!(area_count + metadata_area_count)) {
		log_error(INTERNAL_ERROR "_alloc_init called for non-virtual segment with no disk space.");
		return NULL;
	}

	size = sizeof(*ah);

	/*
	 * It is a requirement that RAID 4/5/6 are created with a number of
	 * stripes that is greater than the number of parity devices.  (e.g.
	 * RAID4/5 must have at least 2 stripes and RAID6 must have at least
	 * 3.)  It is also a constraint that, when replacing individual devices
	 * in a RAID 4/5/6 array, no more devices can be replaced than
	 * there are parity devices.  (Otherwise, there would not be enough
	 * redundancy to maintain the array.)  Understanding these two
	 * constraints allows us to infer whether the caller of this function
	 * is intending to allocate an entire array or just replacement
	 * component devices.  In the former case, we must account for the
	 * necessary parity_count.  In the latter case, we do not need to
	 * account for the extra parity devices because the array already
	 * exists and they only want replacement drives.
	 */
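	/*
	 * Illustration (hypothetical figures): raid6 has parity_devs = 2,
	 * so a request with 5 stripes (area_count = 5 > 2) is a whole-array
	 * allocation and gets parity_count = 2, i.e. 7 areas in total;
	 * a request with area_count <= 2 can only be replacing failed
	 * components, so parity_count stays 0.
	 */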
	parity_count = (area_count <= segtype->parity_devs) ? 0 : segtype->parity_devs;
	alloc_count = area_count + parity_count;
	if (segtype_is_raid(segtype) && metadata_area_count)
		/* RAID has a meta area for each device */
		alloc_count *= 2;
	else
		/* mirrors specify their exact log count */
		alloc_count += metadata_area_count;

	size += sizeof(ah->alloced_areas[0]) * alloc_count;

	if (!(mem = dm_pool_create("allocation", 1024))) {
		log_error("allocation pool creation failed");
		return NULL;
	}

	if (!(ah = dm_pool_zalloc(mem, size))) {
		log_error("allocation handle allocation failed");
		dm_pool_destroy(mem);
		return NULL;
	}

	ah->cmd = cmd;
	ah->mem = mem;
	ah->area_count = area_count;
	ah->parity_count = parity_count;
	ah->region_size = region_size;
	ah->alloc = alloc;

	/*
	 * For the purposes of allocation, area_count and parity_count are
	 * kept separately.  However, the 'area_count' field in an
	 * lv_segment includes both; and this is what '_calc_area_multiple'
	 * is calculated from.  So, we must pass in the total count to get
	 * a correct area_multiple.
	 */
	ah->area_multiple = _calc_area_multiple(segtype, area_count + parity_count, stripes);
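	/*
	 * Illustration (assuming _calc_area_multiple() returns the number
	 * of data stripes for striped and striped-RAID segments and 1 for
	 * mirrored ones): raid5 with 4 data stripes + 1 parity passes 5
	 * areas and gets area_multiple = 4, so a 400-extent allocation
	 * places 100 extents on each of the 5 areas; raid1 gets
	 * area_multiple = 1 and every image carries the full length.
	 */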
	//FIXME: s/mirror_logs_separate/metadata_separate/ so it can be used by others?
	ah->mirror_logs_separate = find_config_tree_bool(cmd, allocation_mirror_logs_require_separate_pvs_CFG, NULL);

	if (mirrors || stripes)
		total_extents = new_extents;
	else
		total_extents = 0;

	if (segtype_is_raid(segtype)) {
		if (metadata_area_count) {
			uint32_t cur_rimage_extents, new_rimage_extents;

			if (metadata_area_count != area_count)
				log_error(INTERNAL_ERROR "Bad metadata_area_count");

			/* Calculate log_len (i.e. length of each rmeta device) for RAID */
			cur_rimage_extents = raid_rimage_extents(segtype, existing_extents, stripes, mirrors);
			new_rimage_extents = raid_rimage_extents(segtype, existing_extents + new_extents, stripes, mirrors);
			ah->log_len = raid_rmeta_extents_delta(cmd, cur_rimage_extents, new_rimage_extents,
							       region_size, extent_size);
			ah->metadata_area_count = metadata_area_count;
			ah->alloc_and_split_meta = !!ah->log_len;

			/*
			 * We need 'log_len' extents for each
			 * RAID device's metadata_area
			 */
			total_extents += ah->log_len * (segtype_is_raid1(segtype) ? 1 : ah->area_multiple);
		} else {
			ah->log_area_count = 0;
			ah->log_len = 0;
		}
	} else if (segtype_is_thin_pool(segtype)) {
		/*
		 * thin_pool uses ah->region_size to
		 * pass metadata size in extents
		 */
		ah->log_len = ah->region_size;
		ah->log_area_count = metadata_area_count;
		ah->region_size = 0;
		ah->mirror_logs_separate =
			find_config_tree_bool(cmd, allocation_thin_pool_metadata_require_separate_pvs_CFG, NULL);
	} else if (segtype_is_cache_pool(segtype)) {
		/*
		 * Like thin_pool, cache_pool uses ah->region_size to
		 * pass metadata size in extents
		 */
		ah->log_len = ah->region_size;
		/* use metadata_area_count, not log_area_count */
		ah->metadata_area_count = metadata_area_count;
		ah->region_size = 0;
		ah->mirror_logs_separate =
			find_config_tree_bool(cmd, allocation_cache_pool_metadata_require_separate_pvs_CFG, NULL);
		if (!ah->mirror_logs_separate) {
			ah->alloc_and_split_meta = 1;
			total_extents += ah->log_len;
		}
	} else {
		ah->log_area_count = metadata_area_count;
		ah->log_len = !metadata_area_count ? 0 :
			_mirror_log_extents(ah->region_size, extent_size,
					    (existing_extents + new_extents) / ah->area_multiple);
	}

	log_debug("Adjusted allocation request to %" PRIu32 " logical extents. Existing size %" PRIu32 ". New size %" PRIu32 ".",
		  total_extents, existing_extents, total_extents + existing_extents);
	if (ah->log_len)
		log_debug("Mirror log of %" PRIu32 " extents of size %" PRIu32 " sectors needed for region size %" PRIu32 ".",
			  ah->log_len, extent_size, ah->region_size);

	if (mirrors || stripes)
		total_extents += existing_extents;

	ah->new_extents = total_extents;

	for (s = 0; s < alloc_count; s++)
		dm_list_init(&ah->alloced_areas[s]);

	ah->parallel_areas = parallel_areas;

	if ((ah->cling_tag_list_cn = find_config_tree_array(cmd, allocation_cling_tag_list_CFG, NULL)))
		(void) _validate_tag_list(ah->cling_tag_list_cn);

	ah->maximise_cling = find_config_tree_bool(cmd, allocation_maximise_cling_CFG, NULL);

	ah->approx_alloc = approx_alloc;

	return ah;
}

void alloc_destroy(struct alloc_handle *ah)
{
	if (ah)
		dm_pool_destroy(ah->mem);
}
/*
 * Entry point for all extent allocations.
 */
struct alloc_handle *allocate_extents(struct volume_group *vg,
				      struct logical_volume *lv,
				      const struct segment_type *segtype,
				      uint32_t stripes,
				      uint32_t mirrors, uint32_t log_count,
				      uint32_t region_size, uint32_t extents,
				      struct dm_list *allocatable_pvs,
				      alloc_policy_t alloc, int approx_alloc,
				      struct dm_list *parallel_areas)
{
	struct alloc_handle *ah;

	if (segtype_is_virtual(segtype)) {
		log_error("allocate_extents does not handle virtual segments");
		return NULL;
	}

	if (!allocatable_pvs) {
		log_error(INTERNAL_ERROR "Missing allocatable pvs.");
		return NULL;
	}

	if (vg->fid->fmt->ops->segtype_supported &&
	    !vg->fid->fmt->ops->segtype_supported(vg->fid, segtype)) {
		log_error("Metadata format (%s) does not support required "
			  "LV segment type (%s).", vg->fid->fmt->name,
			  segtype->name);
		log_error("Consider changing the metadata format by running "
			  "vgconvert.");
		return NULL;
	}

	if (alloc >= ALLOC_INHERIT)
		alloc = vg->alloc;

	if (!(ah = _alloc_init(vg->cmd, segtype, alloc, approx_alloc,
			       lv ? lv->le_count : 0, extents, mirrors, stripes, log_count,
			       vg->extent_size, region_size,
			       parallel_areas)))
		return_NULL;

	if (!_allocate(ah, vg, lv, 1, allocatable_pvs)) {
		alloc_destroy(ah);
		return_NULL;
	}

	return ah;
}
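/*
 * A minimal caller sketch (hypothetical; variable names are illustrative,
 * not taken from a real caller) showing how allocate_extents() combines
 * with lv_add_segment() (below) and alloc_destroy(): allocate, attach the
 * allocated areas as segments, then destroy the handle.  alloc_destroy()
 * is NULL-safe, so it may be called unconditionally on error paths.
 *
 *	struct alloc_handle *ah;
 *
 *	if (!(ah = allocate_extents(vg, lv, segtype, stripes, mirrors,
 *				    log_count, region_size, extents,
 *				    allocatable_pvs, alloc, 0, NULL)))
 *		return_0;
 *
 *	if (!lv_add_segment(ah, 0, ah->area_count, lv, segtype,
 *			    stripe_size, 0, 0)) {
 *		alloc_destroy(ah);
 *		return_0;
 *	}
 *
 *	alloc_destroy(ah);
 */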
/*
 * Add new segments to an LV from supplied list of areas.
 */
int lv_add_segment(struct alloc_handle *ah,
		   uint32_t first_area, uint32_t num_areas,
		   struct logical_volume *lv,
		   const struct segment_type *segtype,
		   uint32_t stripe_size,
		   uint64_t status,
		   uint32_t region_size)
{
	if (!segtype) {
		log_error("Missing segtype in lv_add_segment().");
		return 0;
	}

	if (segtype_is_virtual(segtype)) {
		log_error("lv_add_segment cannot handle virtual segments");
		return 0;
	}

	if ((status & MIRROR_LOG) && !dm_list_empty(&lv->segments)) {
		log_error("Log segments can only be added to an empty LV");
		return 0;
	}

	if (!_setup_alloced_segments(lv, &ah->alloced_areas[first_area],
				     num_areas, status,
				     stripe_size, segtype,
				     region_size))
		return_0;

	if (segtype_can_split(segtype) && !lv_merge_segments(lv)) {
		log_error("Couldn't merge segments after extending "
			  "logical volume.");
		return 0;
	}

	if (lv->vg->fid->fmt->ops->lv_setup &&
	    !lv->vg->fid->fmt->ops->lv_setup(lv->vg->fid, lv))
		return_0;

	return 1;
}
/*
 * "mirror" segment type doesn't support split.
 * So, when adding mirrors to a linear LV segment, first split it,
 * then convert it to "mirror" and add areas.
 */
static struct lv_segment *_convert_seg_to_mirror(struct lv_segment *seg,
						 uint32_t region_size,
						 struct logical_volume *log_lv)
{
	struct lv_segment *newseg;
	uint32_t s;

	if (!seg_is_striped(seg)) {
		log_error("Can't convert non-striped segment to mirrored.");
		return NULL;
	}

	if (seg->area_count > 1) {
		log_error("Can't convert striped segment with multiple areas "
			  "to mirrored.");
		return NULL;
	}

	if (!(newseg = alloc_lv_segment(get_segtype_from_string(seg->lv->vg->cmd, SEG_TYPE_NAME_MIRROR),
					seg->lv, seg->le, seg->len, 0,
					seg->status, seg->stripe_size,
					log_lv,
					seg->area_count, seg->area_len, 0,
					seg->chunk_size, region_size,
					seg->extents_copied, NULL))) {
		log_error("Couldn't allocate converted LV segment.");
		return NULL;
	}

	for (s = 0; s < seg->area_count; s++)
		if (!move_lv_segment_area(newseg, s, seg, s))
			return_NULL;

	seg->pvmove_source_seg = NULL; /* Not maintained after allocation */

	dm_list_add(&seg->list, &newseg->list);
	dm_list_del(&seg->list);

	return newseg;
}
/*
 * All-or-nothing (atomic) pvmove:
 *
 * pvmove can move single LVs by name or multiple LVs that lie within a
 * specified PV range (e.g. /dev/sdb1:0-1000).  When moving more than one
 * LV, the portions of those LVs that fall inside the range are added to a
 * temporary pvmove LV, and the original LV segments are re-pointed at the
 * corresponding ranges of that pvmove LV rather than at the PV.
 *
 * The original implementation then mirrors the temporary pvmove LV
 * segment by segment; as the pvmove LV is activated, only one of its
 * segments is actually mirrored (i.e. "moving") at a time.  If the
 * pvmove is aborted, completed segments remain on the destination while
 * unstarted or in-flight segments stay on the source PV, so a partially
 * completed move is possible - some LVs (or some segments of LVs) on the
 * source PV and some on the destination.
 *
 * Alternatively, the pvmove mirror can be created "by LV" rather than
 * "by segment": the pvmove LV becomes one image in an encapsulating
 * mirror alongside the allocated copy image.  What differentiates this
 * from a simple linear-to-mirror up-convert is the preservation of the
 * distinct segments.  A normal up-convert would allocate the necessary
 * space with no regard for segment boundaries, but pvmove must preserve
 * them because they mark the boundaries between the LVs being moved.  So
 * when the pvmove copy image is allocated, all corresponding segments
 * must be allocated too, and the code that merges adjoining segments of
 * the same LV when the metadata is written must be avoided.  This method
 * of mirroring is unique enough to warrant its own definitional macro,
 * MIRROR_BY_SEGMENTED_LV, joining the existing MIRROR_BY_SEG (original
 * pvmove) and MIRROR_BY_LV (user-created mirrors).
 *
 * The advantage of this approach is that all affected LVs move together:
 * it is all-or-nothing, leaving every LV segment on the source PV if the
 * move is aborted.  Additionally, a mirror log can (in the future) track
 * progress, allowing the copy to continue where it left off after a
 * deactivation.
 */
/*
 * Add new areas to mirrored segments
 */
int lv_add_segmented_mirror_image(struct alloc_handle *ah,
				  struct logical_volume *lv, uint32_t le,
				  uint32_t region_size)
{
	char *image_name;
	struct alloced_area *aa;
	struct lv_segment *seg, *new_seg;
	uint32_t current_le = le;
	uint32_t s;
	struct segment_type *segtype;
	struct logical_volume *orig_lv, *copy_lv;

	if (!lv_is_pvmove(lv)) {
		log_error(INTERNAL_ERROR
			  "Non-pvmove LV, %s, passed as argument.",
			  display_lvname(lv));
		return 0;
	}

	if (seg_type(first_seg(lv), 0) != AREA_PV) {
		log_error(INTERNAL_ERROR
			  "Bad segment type for first segment area.");
		return 0;
	}

	/*
	 * If the allocator provided two or more PV allocations for any
	 * single segment of the original LV, that LV segment must be
	 * split up to match, as illustrated below.
	 */
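	/*
	 * Illustration (hypothetical extents): if one LV segment spans
	 * extents 0-511 but the allocator satisfied it with two 256-extent
	 * areas, lv_split_segment() cuts the LV segment at extent 256 so
	 * that each resulting segment maps onto exactly one alloced_area.
	 */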
	dm_list_iterate_items(aa, &ah->alloced_areas[0]) {
		if (!(seg = find_seg_by_le(lv, current_le))) {
			log_error("Failed to find segment for %s extent " FMTu32 ".",
				  display_lvname(lv), current_le);
			return 0;
		}

		/* Allocator assures aa[0].len <= seg->area_len */
		if (aa[0].len < seg->area_len) {
			if (!lv_split_segment(lv, seg->le + aa[0].len)) {
				log_error("Failed to split segment at %s "
					  "extent " FMTu32 ".",
					  display_lvname(lv), le);
				return 0;
			}
		}

		current_le += seg->area_len;
	}
	current_le = le;

	if (!insert_layer_for_lv(lv->vg->cmd, lv, PVMOVE, "_mimage_0")) {
		log_error("Failed to build pvmove LV-type mirror %s.",
			  display_lvname(lv));
		return 0;
	}

	orig_lv = seg_lv(first_seg(lv), 0);
	if (!(image_name = dm_pool_strdup(lv->vg->vgmem, orig_lv->name)))
		return_0;
	image_name[strlen(image_name) - 1] = '1';

	if (!(copy_lv = lv_create_empty(image_name, NULL,
					orig_lv->status,
					ALLOC_INHERIT, lv->vg)))
		return_0;

	if (!lv_add_mirror_lvs(lv, &copy_lv, 1, MIRROR_IMAGE, region_size))
		return_0;

	if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
2014-06-18 07:59:36 +04:00
                return_0;

        dm_list_iterate_items(aa, &ah->alloced_areas[0]) {
                if (!(seg = find_seg_by_le(orig_lv, current_le))) {
2016-11-25 16:08:39 +03:00
                        log_error("Failed to find segment for %s extent " FMTu32 ".",
                                  display_lvname(lv), current_le);
2014-06-18 07:59:36 +04:00
                        return 0;
                }

                if (!(new_seg = alloc_lv_segment(segtype, copy_lv,
2017-02-24 02:50:00 +03:00
                                                 seg->le, seg->len, 0, PVMOVE, 0,
                                                 NULL, 1, seg->len, 0,
2014-06-18 07:59:36 +04:00
                                                 0, 0, 0, NULL)))
                        return_0;

                for (s = 0; s < ah->area_count; s++) {
                        if (!set_lv_segment_area_pv(new_seg, s,
                                                    aa[s].pv, aa[s].pe))
                                return_0;
                }

                dm_list_add(&copy_lv->segments, &new_seg->list);

                current_le += seg->area_len;
                copy_lv->le_count += seg->area_len;
        }

        lv->status |= MIRRORED;

        /* FIXME: add log */

        if (lv->vg->fid->fmt->ops->lv_setup &&
            !lv->vg->fid->fmt->ops->lv_setup(lv->vg->fid, lv))
                return_0;

        return 1;
}
2005-06-03 18:49:51 +04:00
/*
2007-12-20 18:42:55 +03:00
 * Add new areas to mirrored segments
2005-06-03 18:49:51 +04:00
 */
2007-12-20 18:42:55 +03:00
int lv_add_mirror_areas(struct alloc_handle *ah,
                        struct logical_volume *lv, uint32_t le,
                        uint32_t region_size)
2005-06-03 18:49:51 +04:00
{
2007-12-20 18:42:55 +03:00
        struct alloced_area *aa;
2005-06-03 18:49:51 +04:00
        struct lv_segment *seg;
2007-12-20 18:42:55 +03:00
        uint32_t current_le = le;
        uint32_t s, old_area_count, new_area_count;
2005-06-03 18:49:51 +04:00
2008-11-04 01:14:30 +03:00
        dm_list_iterate_items(aa, &ah->alloced_areas[0]) {
2007-12-20 18:42:55 +03:00
                if (!(seg = find_seg_by_le(lv, current_le))) {
2016-11-16 17:11:07 +03:00
                        log_error("Failed to find segment for %s extent " FMTu32 ".",
                                  display_lvname(lv), current_le);
2007-12-20 18:42:55 +03:00
                        return 0;
                }
2005-06-03 18:49:51 +04:00
2007-12-20 18:42:55 +03:00
                /* Allocator assures aa[0].len <= seg->area_len */
                if (aa[0].len < seg->area_len) {
                        if (!lv_split_segment(lv, seg->le + aa[0].len)) {
2016-11-16 17:11:07 +03:00
                                log_error("Failed to split segment at %s extent " FMTu32 ".",
                                          display_lvname(lv), le);
2007-12-20 18:42:55 +03:00
                                return 0;
                        }
                }
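                /*
                 * Example (illustrative): if this segment spans LEs 0-511
                 * but the allocator returned only 256 extents
                 * (aa[0].len == 256), the segment is split at LE 256 so
                 * that the areas added below cover exactly the allocated
                 * range.
                 */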
                if (!seg_is_mirrored(seg) &&
                    (!(seg = _convert_seg_to_mirror(seg, region_size, NULL))))
2008-01-17 20:17:09 +03:00
                        return_0;
2005-06-03 18:49:51 +04:00
2007-12-20 18:42:55 +03:00
                old_area_count = seg->area_count;
                new_area_count = old_area_count + ah->area_count;

                if (!_lv_segment_add_areas(lv, seg, new_area_count))
                        return_0;

                for (s = 0; s < ah->area_count; s++) {
                        if (!set_lv_segment_area_pv(seg, s + old_area_count,
                                                    aa[s].pv, aa[s].pe))
                                return_0;
                }

                current_le += seg->area_len;
2005-10-27 23:58:22 +04:00
        }
2005-06-03 18:49:51 +04:00
2007-12-20 18:42:55 +03:00
        lv->status |= MIRRORED;
2005-06-03 18:49:51 +04:00
        if (lv->vg->fid->fmt->ops->lv_setup &&
2007-12-20 18:42:55 +03:00
            !lv->vg->fid->fmt->ops->lv_setup(lv->vg->fid, lv))
                return_0;
2005-06-03 18:49:51 +04:00
        return 1;
}
2005-10-28 16:48:50 +04:00
/*
2007-12-20 18:42:55 +03:00
 * Add mirror image LVs to mirrored segments
2005-10-28 16:48:50 +04:00
 */
2007-12-20 18:42:55 +03:00
int lv_add_mirror_lvs(struct logical_volume *lv,
                      struct logical_volume **sub_lvs,
                      uint32_t num_extra_areas,
2009-11-25 01:55:55 +03:00
                      uint64_t status, uint32_t region_size)
2005-10-28 16:48:50 +04:00
{
        uint32_t m;
2016-11-23 12:48:33 +03:00
        uint32_t old_area_count, new_area_count;
2007-12-20 18:42:55 +03:00
        struct segment_type *mirror_segtype;
2016-11-23 12:48:33 +03:00
        struct lv_segment *seg = first_seg(lv);
2007-12-20 18:42:55 +03:00
2008-11-04 01:14:30 +03:00
        if (dm_list_size(&lv->segments) != 1 || seg_type(seg, 0) != AREA_LV) {
2016-11-23 19:15:10 +03:00
                log_error(INTERNAL_ERROR "Mirror layer must be inserted before adding mirrors.");
2012-02-24 02:24:47 +04:00
                return 0;
2005-10-28 16:48:50 +04:00
        }
2015-09-22 21:04:12 +03:00
        mirror_segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_MIRROR);
2007-12-20 18:42:55 +03:00
        if (seg->segtype != mirror_segtype)
                if (!(seg = _convert_seg_to_mirror(seg, region_size, NULL)))
                        return_0;

        if (region_size && region_size != seg->region_size) {
2016-11-16 17:11:07 +03:00
                log_error("Conflicting region_size.");
2007-12-20 18:42:55 +03:00
                return 0;
        }
2005-10-28 16:48:50 +04:00
        old_area_count = seg->area_count;
        new_area_count = old_area_count + num_extra_areas;

        if (!_lv_segment_add_areas(lv, seg, new_area_count)) {
                log_error("Failed to allocate widened LV segment for %s.",
2016-11-16 17:11:07 +03:00
                          display_lvname(lv));
2005-10-28 16:48:50 +04:00
                return 0;
        }
2008-01-16 22:00:59 +03:00
        for (m = 0; m < old_area_count; m++)
2007-12-20 18:42:55 +03:00
                seg_lv(seg, m)->status |= status;
2005-10-28 16:48:50 +04:00
        for (m = old_area_count; m < new_area_count; m++) {
2008-01-16 22:00:59 +03:00
                if (!set_lv_segment_area_lv(seg, m, sub_lvs[m - old_area_count],
                                            0, status))
                        return_0;
2009-05-21 07:04:52 +04:00
                lv_set_hidden(sub_lvs[m - old_area_count]);
2005-10-28 16:48:50 +04:00
        }
2007-12-21 02:12:27 +03:00
        lv->status |= MIRRORED;
2005-10-28 16:48:50 +04:00
        return 1;
}
2005-06-03 18:49:51 +04:00
2007-12-20 18:42:55 +03:00
/*
 * Turn an empty LV into a mirror log.
2010-03-27 01:15:43 +03:00
 *
 * FIXME: Mirrored logs are built inefficiently.
 * A mirrored log currently uses the same layout that a mirror
 * LV uses.  The mirror layer sits on top of AREA_LVs which form the
 * legs, rather than on AREA_PVs.  This is done to allow re-use of the
 * various mirror functions to also handle the mirrored LV that makes
 * up the log.
 *
 * If we used AREA_PVs under the mirror layer of a log, we could
 * assemble it all at once by calling 'lv_add_segment' with the
 * appropriate segtype (mirror/stripe), like this:
2011-09-06 22:49:31 +04:00
 *      lv_add_segment(ah, ah->area_count, ah->log_area_count,
2010-03-27 01:15:43 +03:00
 *                     log_lv, segtype, 0, MIRROR_LOG, 0);
 *
 * For now, we use the same mechanism to build a mirrored log as we
 * do for building a mirrored LV: 1) create initial LV, 2) add a
 * mirror layer, and 3) add the remaining copy LVs.
2007-12-20 18:42:55 +03:00
 */
2010-03-27 01:15:43 +03:00
int lv_add_log_segment(struct alloc_handle *ah, uint32_t first_area,
                       struct logical_volume *log_lv, uint64_t status)
2007-12-20 18:42:55 +03:00
{
2010-03-27 01:15:43 +03:00
        return lv_add_segment(ah, ah->area_count + first_area, 1, log_lv,
2015-09-24 16:59:07 +03:00
                              get_segtype_from_string(log_lv->vg->cmd, SEG_TYPE_NAME_STRIPED),
2010-03-27 01:15:43 +03:00
                              0, status, 0);
2007-12-20 18:42:55 +03:00
}
2011-04-07 01:32:20 +04:00
static int _lv_insert_empty_sublvs(struct logical_volume *lv,
                                   const struct segment_type *segtype,
2011-08-03 02:07:20 +04:00
                                   uint32_t stripe_size, uint32_t region_size,
2011-04-07 01:32:20 +04:00
                                   uint32_t devices)
2007-12-20 21:55:46 +03:00
{
2011-04-07 01:32:20 +04:00
        struct logical_volume *sub_lv;
        uint32_t i;
2011-09-08 20:41:18 +04:00
        uint64_t sub_lv_status = 0;
2011-08-03 02:07:20 +04:00
        const char *layer_name;
2015-11-05 13:58:03 +03:00
        char img_name[NAME_LEN];
2011-04-07 01:32:20 +04:00
        struct lv_segment *mapseg;
2012-02-28 14:08:20 +04:00
        if (lv->le_count || !dm_list_empty(&lv->segments)) {
2011-04-07 01:32:20 +04:00
                log_error(INTERNAL_ERROR
                          "Non-empty LV passed to _lv_insert_empty_sublv");
                return 0;
        }
2011-08-03 02:07:20 +04:00
        if (segtype_is_raid(segtype)) {
                lv->status |= RAID;
2011-09-08 20:41:18 +04:00
                sub_lv_status = RAID_IMAGE;
2011-08-03 02:07:20 +04:00
                layer_name = "rimage";
        } else if (segtype_is_mirrored(segtype)) {
                lv->status |= MIRRORED;
2011-09-08 20:41:18 +04:00
                sub_lv_status = MIRROR_IMAGE;
2011-08-03 02:07:20 +04:00
                layer_name = "mimage";
2011-09-06 19:39:46 +04:00
        } else
2011-04-07 01:32:20 +04:00
                return_0;

        /*
         * First, create our top-level segment for our top-level LV.
         */
2017-02-24 02:50:00 +03:00
        if (!(mapseg = alloc_lv_segment(segtype, lv, 0, 0, 0, lv->status,
2014-10-26 10:13:59 +03:00
                                        stripe_size, NULL,
2017-02-24 02:50:00 +03:00
                                        devices, 0, 0, 0, region_size, 0, NULL))) {
2015-11-05 13:58:03 +03:00
                log_error("Failed to create mapping segment for %s.",
                          display_lvname(lv));
2011-04-07 01:32:20 +04:00
                return 0;
        }

        /*
         * Next, create all of our sub_lvs and link them in.
         */
        for (i = 0; i < devices; i++) {
2011-08-03 02:07:20 +04:00
                /* Data LVs */
2011-09-08 20:41:18 +04:00
                if (devices > 1) {
2015-11-05 13:58:03 +03:00
                        if (dm_snprintf(img_name, sizeof(img_name), "%s_%s_%u",
2011-09-08 20:41:18 +04:00
                                        lv->name, layer_name, i) < 0)
2015-11-05 13:58:03 +03:00
                                goto_bad;
2011-09-08 20:41:18 +04:00
                } else {
2015-11-05 13:58:03 +03:00
                        if (dm_snprintf(img_name, sizeof(img_name), "%s_%s",
2011-09-08 20:41:18 +04:00
                                        lv->name, layer_name) < 0)
2015-11-05 13:58:03 +03:00
                                goto_bad;
2011-09-08 20:41:18 +04:00
                }
2011-08-03 02:07:20 +04:00
2011-09-08 20:41:18 +04:00
                /* FIXME Should use ALLOC_INHERIT here and inherit from parent LV */
                if (!(sub_lv = lv_create_empty(img_name, NULL,
2015-11-05 13:58:03 +03:00
                                               LVM_READ | LVM_WRITE,
                                               lv->alloc, lv->vg)))
2011-04-07 01:32:20 +04:00
                        return_0;
2011-09-08 20:41:18 +04:00
2011-10-22 20:48:59 +04:00
                if (!set_lv_segment_area_lv(mapseg, i, sub_lv, 0, sub_lv_status))
2011-04-07 01:32:20 +04:00
                        return_0;
2011-09-08 20:41:18 +04:00
2011-10-22 20:48:59 +04:00
                /* Metadata LVs for raid */
2017-11-01 01:16:13 +03:00
                if (segtype_is_raid_with_meta(segtype)) {
2015-11-05 13:58:03 +03:00
                        if (dm_snprintf(img_name, sizeof(img_name), "%s_rmeta_%u",
                                        lv->name, i) < 0)
                                goto_bad;

                        /* FIXME Should use ALLOC_INHERIT here and inherit from parent LV */
                        if (!(sub_lv = lv_create_empty(img_name, NULL,
                                                       LVM_READ | LVM_WRITE,
                                                       lv->alloc, lv->vg)))
2011-09-08 20:41:18 +04:00
                                return_0;
2011-08-03 02:07:20 +04:00
2015-11-05 13:58:03 +03:00
                        if (!set_lv_segment_area_lv(mapseg, i, sub_lv, 0, RAID_META))
2011-09-08 20:41:18 +04:00
                                return_0;
2015-11-05 13:58:03 +03:00
                }
2011-04-07 01:32:20 +04:00
        }
2011-10-22 20:48:59 +04:00
2011-04-07 01:32:20 +04:00
        dm_list_add(&lv->segments, &mapseg->list);

        return 1;
2015-11-05 13:58:03 +03:00
bad:
        log_error("Failed to create sub LV name for LV %s.",
                  display_lvname(lv));
        return 0;
2011-04-07 01:32:20 +04:00
}
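/*
 * Illustration (names inferred from the dm_snprintf formats above; the
 * LV name "lv" is hypothetical): a 2-way raid1 "lv" gains hidden sub
 * LVs lv_rimage_0 and lv_rimage_1 plus metadata LVs lv_rmeta_0 and
 * lv_rmeta_1, while a 2-way mirror gains lv_mimage_0 and lv_mimage_1.
 */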
static int _lv_extend_layered_lv(struct alloc_handle *ah,
                                 struct logical_volume *lv,
                                 uint32_t extents, uint32_t first_area,
2017-03-08 00:05:23 +03:00
                                 uint32_t mirrors, uint32_t stripes, uint32_t stripe_size)
2011-04-07 01:32:20 +04:00
{
2011-08-03 02:07:20 +04:00
        const struct segment_type *segtype;
        struct logical_volume *sub_lv, *meta_lv;
2015-09-23 16:37:52 +03:00
        struct lv_segment *seg = first_seg(lv);
2011-08-03 02:07:20 +04:00
        uint32_t fa, s;
        int clear_metadata = 0;
2016-05-23 18:46:38 +03:00
        uint32_t area_multiple = 1;
2016-11-08 13:54:28 +03:00
        int fail;
2011-08-03 02:07:20 +04:00
2015-09-23 16:37:52 +03:00
        if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
                return_0;
2011-08-03 02:07:20 +04:00
        /*
         * The component devices of a "striped" LV all go in the same
         * LV.  However, RAID has an LV for each device - making the
         * 'stripes' and 'stripe_size' parameters meaningless.
         */
2015-09-23 16:37:52 +03:00
        if (seg_is_raid(seg)) {
2011-08-03 02:07:20 +04:00
                stripes = 1;
                stripe_size = 0;
2016-07-02 00:20:54 +03:00
                if (seg_is_any_raid0(seg))
2016-05-24 16:27:05 +03:00
                        area_multiple = seg->area_count;
2011-08-03 02:07:20 +04:00
        }
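        /*
         * Worked example (illustrative): a raid0 LV with
         * seg->area_count == 3 gets area_multiple == 3, so 300 new
         * logical extents correspond to 300 / 3 == 100 extents per
         * image in the area_len accounting below.
         */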
2007-12-20 21:55:46 +03:00
2011-08-03 02:07:20 +04:00
        for (fa = first_area, s = 0; s < seg->area_count; s++) {
2007-12-20 21:55:46 +03:00
                if (is_temporary_mirror_layer(seg_lv(seg, s))) {
2016-05-23 18:46:38 +03:00
                        if (!_lv_extend_layered_lv(ah, seg_lv(seg, s), extents / area_multiple,
2017-03-08 00:05:23 +03:00
                                                   fa, mirrors, stripes, stripe_size))
2007-12-20 21:55:46 +03:00
                                return_0;
2011-08-03 02:07:20 +04:00
                        fa += lv_mirror_count(seg_lv(seg, s));
2007-12-20 21:55:46 +03:00
                        continue;
                }
2011-04-07 01:32:20 +04:00
                sub_lv = seg_lv(seg, s);
2011-08-03 02:07:20 +04:00
                if (!lv_add_segment(ah, fa, stripes, sub_lv, segtype,
2011-04-07 01:32:20 +04:00
                                    stripe_size, sub_lv->status, 0)) {
                        log_error("Aborting. Failed to extend %s in %s.",
                                  sub_lv->name, lv->name);
2007-12-20 21:55:46 +03:00
                        return 0;
                }
2011-08-03 02:07:20 +04:00
2017-03-08 00:05:23 +03:00
                last_seg(lv)->data_copies = mirrors;
2011-08-03 02:07:20 +04:00
                /* Extend metadata LVs only on initial creation */
2016-05-24 00:55:13 +03:00
                if (seg_is_raid_with_meta(seg) && !lv->le_count) {
2011-08-03 02:07:20 +04:00
                        if (!seg->meta_areas) {
                                log_error("No meta_areas for RAID type");
                                return 0;
                        }

                        meta_lv = seg_metalv(seg, s);
                        if (!lv_add_segment(ah, fa + seg->area_count, 1,
                                            meta_lv, segtype, 0,
                                            meta_lv->status, 0)) {
                                log_error("Failed to extend %s in %s.",
                                          meta_lv->name, lv->name);
                                return 0;
                        }
                        lv_set_visible(meta_lv);
2016-04-29 21:49:21 +03:00
                        /*
                         * Copy any tags from the new LV to the metadata LV so
                         * it can be activated temporarily.
                         */
                        if (!str_list_dup(meta_lv->vg->vgmem, &meta_lv->tags, &lv->tags)) {
                                log_error("Failed to copy tags onto LV %s to clear metadata.", display_lvname(meta_lv));
                                return 0;
                        }
2011-08-03 02:07:20 +04:00
                        clear_metadata = 1;
                }

                fa += stripes;
        }
2017-10-30 19:23:56 +03:00
        seg->len += extents;
        if (seg_is_raid(seg))
                seg->area_len = seg->len;
        else
                seg->area_len += extents / area_multiple;

        if (!_setup_lv_size(lv, lv->le_count + extents))
                return_0;
2011-08-03 02:07:20 +04:00
        if (clear_metadata) {
                /*
                 * We must clear the metadata areas upon creation.
                 */
2016-05-23 18:46:38 +03:00
                /* FIXME VG is not in a fully-consistent state here and should not be committed! */
2011-08-10 20:44:17 +04:00
                if (!vg_write(lv->vg) || !vg_commit(lv->vg))
2011-08-03 02:07:20 +04:00
                        return_0;
2016-11-08 13:54:28 +03:00
                if (test_mode())
                        log_verbose("Test mode: Skipping wiping of metadata areas.");
                else {
                        fail = 0;
                        /* Activate all rmeta devices locally first (more efficient) */
                        for (s = 0; !fail && s < seg->area_count; s++) {
                                meta_lv = seg_metalv(seg, s);
                                if (!activate_lv_local(meta_lv->vg->cmd, meta_lv)) {
                                        log_error("Failed to activate %s for clearing.",
                                                  display_lvname(meta_lv));
                                        fail = 1;
                                }
2012-09-05 23:32:06 +04:00
                        }
2016-11-08 13:54:28 +03:00
                        /* Clear all rmeta devices */
                        for (s = 0; !fail && s < seg->area_count; s++) {
                                meta_lv = seg_metalv(seg, s);
2011-08-03 02:07:20 +04:00
2016-11-08 13:54:28 +03:00
                                log_verbose("Clearing metadata area of %s.",
                                            display_lvname(meta_lv));
                                /*
                                 * Rather than wiping meta_lv->size, we can simply
                                 * wipe '1' to remove the superblock of any previous
                                 * RAID devices.  It is much quicker.
                                 */
                                if (!wipe_lv(meta_lv, (struct wipe_params)
                                             { .do_zero = 1, .zero_sectors = 1 })) {
                                        stack;
                                        fail = 1;
                                }
2011-08-03 02:07:20 +04:00
                        }
2016-11-08 13:54:28 +03:00
                        /* Deactivate all rmeta devices */
                        for (s = 0; s < seg->area_count; s++) {
                                meta_lv = seg_metalv(seg, s);
                                if (!deactivate_lv(meta_lv->vg->cmd, meta_lv)) {
                                        log_error("Failed to deactivate %s after clearing.",
                                                  display_lvname(meta_lv));
                                        fail = 1;
                                }
                                /* Wipe any temporary tags required for activation. */
                                str_list_wipe(&meta_lv->tags);
2011-08-03 02:07:20 +04:00
                        }
2016-04-29 21:49:21 +03:00
2016-11-08 13:54:28 +03:00
                        if (fail)
                                /* Fail, after trying to deactivate all we could */
                                return_0;
2011-08-03 02:07:20 +04:00
                }
2016-11-08 13:54:28 +03:00
                for (s = 0; s < seg->area_count; s++)
                        lv_set_hidden(seg_metalv(seg, s));
2007-12-20 21:55:46 +03:00
        }
2011-08-03 02:07:20 +04:00
2007-12-20 21:55:46 +03:00
        return 1;
}
2001-11-06 14:31:29 +03:00
/*
2005-06-01 20:51:55 +04:00
 * Entry point for single-step LV allocation + extension.
2014-02-22 04:26:01 +04:00
 * 'extents' is the number of logical extents to append to the LV, unless
 * approx_alloc is set, in which case it is an upper limit for the total
 * number of extents to use from the VG.
 *
 * FIXME The approx_alloc raid/stripe conversion should be performed
 * before calling this function.
2001-11-06 14:31:29 +03:00
 */
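A minimal caller sketch (illustrative only: error handling trimmed, and
'striped_segtype', 'extents' and 'allocatable_pvs' assumed to have been
prepared by the caller):

        if (!lv_extend(lv, striped_segtype, 1 /* stripes */,
                       0 /* stripe_size */, 1 /* mirrors */,
                       0 /* region_size */, extents,
                       allocatable_pvs, lv->alloc, 0 /* approx_alloc */))
                return_0;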
2005-06-01 20:51:55 +04:00
int lv_extend(struct logical_volume *lv,
2006-05-10 01:23:51 +04:00
              const struct segment_type *segtype,
2005-06-01 20:51:55 +04:00
              uint32_t stripes, uint32_t stripe_size,
2011-04-07 01:32:20 +04:00
              uint32_t mirrors, uint32_t region_size,
2014-10-26 10:13:59 +03:00
              uint32_t extents,
2014-02-14 07:10:28 +04:00
              struct dm_list *allocatable_pvs, alloc_policy_t alloc,
              int approx_alloc)
2001-11-06 14:31:29 +03:00
{
2005-06-01 20:51:55 +04:00
        int r = 1;
2011-09-08 20:41:18 +04:00
        int log_count = 0;
2005-06-01 20:51:55 +04:00
        struct alloc_handle *ah;
2011-09-08 20:41:18 +04:00
        uint32_t sub_lv_count;
2014-08-28 03:40:09 +04:00
        uint32_t old_extents;
        uint32_t new_extents;   /* Total logical size after extension. */
2017-02-12 19:47:35 +03:00
        uint64_t raid_size;
2011-09-08 20:41:18 +04:00
2014-02-22 04:26:01 +04:00
        log_very_verbose("Adding segment of type %s to LV %s.", segtype->name, lv->name);
2001-11-06 14:31:29 +03:00
2005-06-01 20:51:55 +04:00
        if (segtype_is_virtual(segtype))
2014-10-26 10:13:59 +03:00
                return lv_add_virtual_segment(lv, 0u, extents, segtype);
2004-05-11 20:01:58 +04:00
2016-07-02 00:20:54 +03:00
        if (!lv->le_count) {
                if (segtype_is_pool(segtype))
                        /*
                         * Pool allocations treat the metadata device like a mirror log.
                         */
                        /* FIXME Support striped metadata pool */
                        log_count = 1;
                else if (segtype_is_raid0_meta(segtype))
                        /* Extend raid0 metadata LVs too */
                        log_count = stripes;
2017-11-01 01:16:13 +03:00
                else if (segtype_is_raid_with_meta(segtype))
2016-07-02 00:20:54 +03:00
                        log_count = mirrors * stripes;
        }
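        /*
         * Example of the initial-creation cases above: a pool gets a
         * single metadata LV (log_count == 1), a 3-stripe raid0_meta
         * gets log_count == 3, and a 2-way raid1 (mirrors == 2,
         * stripes == 1) gets log_count == 2 rmeta LVs.
         */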
2012-02-01 06:10:45 +04:00
/* FIXME log_count should be 1 for mirrors */
2011-09-06 04:26:42 +04:00
2017-02-10 00:41:28 +03:00
        if (segtype_is_raid(segtype) && !segtype_is_any_raid0(segtype)) {
2017-02-12 19:47:35 +03:00
                raid_size = ((uint64_t) lv->le_count + extents) * lv->vg->extent_size;
2017-02-10 00:41:28 +03:00
                /*
                 * The MD bitmap is limited to being able to track 2^21 regions.
                 * The region_size must be adjusted to meet that criterion
                 * unless raid0/raid0_meta, which doesn't have a bitmap.
                 */
2017-02-12 19:47:35 +03:00
                region_size = raid_ensure_min_region_size(lv, raid_size, region_size);
2017-02-10 00:41:28 +03:00
                if (first_seg(lv))
                        first_seg(lv)->region_size = region_size;
        }
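        /*
         * Worked example (illustrative): with at most 2^21 trackable
         * regions, a 1 TiB (2^40 byte) raid LV needs
         * region_size >= 2^40 / 2^21 bytes = 512 KiB.
         */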
2011-08-03 02:07:20 +04:00
        if (!(ah = allocate_extents(lv->vg, lv, segtype, stripes, mirrors,
2011-09-08 20:41:18 +04:00
                                    log_count, region_size, extents,
2014-02-14 07:10:28 +04:00
                                    allocatable_pvs, alloc, approx_alloc, NULL)))
2007-11-22 17:54:35 +03:00
                return_0;
2001-11-06 14:31:29 +03:00
2014-08-28 03:40:09 +04:00
        new_extents = ah->new_extents;
2017-11-01 01:16:13 +03:00
        if (segtype_is_raid_with_meta(segtype))
2014-08-28 03:40:09 +04:00
                new_extents -= ah->log_len * ah->area_multiple;
2014-02-14 07:10:28 +04:00
2014-10-26 10:13:59 +03:00
        if (segtype_is_pool(segtype)) {
2013-09-06 12:54:50 +04:00
                if (!(r = create_pool(lv, segtype, ah, stripes, stripe_size)))
2011-10-22 20:48:59 +04:00
                        stack;
2011-10-29 00:23:24 +04:00
        } else if (!segtype_is_mirrored(segtype) && !segtype_is_raid(segtype)) {
                if (!(r = lv_add_segment(ah, 0, ah->area_count, lv, segtype,
                                         stripe_size, 0u, 0)))
                        stack;
        } else {
2011-10-22 20:48:59 +04:00
                /*
                 * For RAID, all the devices are AREA_LV.
                 * However, for 'mirror on stripe' using non-RAID targets,
                 * the mirror legs are AREA_LV while the stripes underneath
                 * are AREA_PV.
                 */
                if (segtype_is_raid(segtype))
                        sub_lv_count = mirrors * stripes + segtype->parity_devs;
                else
                        sub_lv_count = mirrors;
2014-08-28 03:40:09 +04:00
                old_extents = lv->le_count;
2011-04-07 01:32:20 +04:00
                if (!lv->le_count &&
2011-10-29 00:23:24 +04:00
                    !(r = _lv_insert_empty_sublvs(lv, segtype, stripe_size,
                                                  region_size, sub_lv_count))) {
2011-04-07 01:32:20 +04:00
                        log_error("Failed to insert layer for %s", lv->name);
2011-10-29 00:23:24 +04:00
                        goto out;
2011-04-07 01:32:20 +04:00
                }
2001-11-06 14:31:29 +03:00
2014-08-28 03:40:09 +04:00
                if (!(r = _lv_extend_layered_lv(ah, lv, new_extents - lv->le_count, 0,
2017-03-08 00:05:23 +03:00
                                                mirrors, stripes, stripe_size)))
2011-10-21 13:55:50 +04:00
                        goto_out;
Allow 'nosync' extension of mirrors.
This patch allows a mirror to be extended without an initial resync of the
extended portion. It complements the existing '--nosync' option to lvcreate.
This action can be done implicitly if the mirror was created with the '--nosync'
option, or explicitly if the '--nosync' option is used when extending the device.
Here are the operational criteria:
1) A mirror created with '--nosync' should extend with 'nosync' implicitly
[EXAMPLE]# lvs vg; lvextend -L +5G vg/lv ; lvs vg
LV VG Attr LSize Pool Origin Snap% Move Log Copy% Convert
lv vg Mwi-a-m- 5.00g lv_mlog 100.00
Extending 2 mirror images.
Extending logical volume lv to 10.00 GiB
Logical volume lv successfully resized
LV VG Attr LSize Pool Origin Snap% Move Log Copy% Convert
lv vg Mwi-a-m- 10.00g lv_mlog 100.00
2) The 'M' attribute ('M' signifies a mirror created with '--nosync', while 'm'
signifies a mirror created w/o '--nosync') must be preserved when extending a
mirror created with '--nosync'. See #1 for example of 'M' attribute.
3) A mirror created without '--nosync' should extend with 'nosync' only when
'--nosync' is explicitly used when extending.
[EXAMPLE]# lvs vg; lvextend -L +5G vg/lv; lvs vg
LV VG Attr LSize Pool Origin Snap% Move Log Copy% Convert
lv vg mwi-a-m- 20.00m lv_mlog 100.00
Extending 2 mirror images.
Extending logical volume lv to 5.02 GiB
Logical volume lv successfully resized
LV VG Attr LSize Pool Origin Snap% Move Log Copy% Convert
lv vg mwi-a-m- 5.02g lv_mlog 0.39
vs.
[EXAMPLE]# lvs vg; lvextend -L +5G vg/lv --nosync; lvs vg
LV VG Attr LSize Pool Origin Snap% Move Log Copy% Convert
lv vg mwi-a-m- 20.00m lv_mlog 100.00
Extending 2 mirror images.
Extending logical volume lv to 5.02 GiB
Logical volume lv successfully resized
LV VG Attr LSize Pool Origin Snap% Move Log Copy% Convert
lv vg Mwi-a-m- 5.02g lv_mlog 100.00
4) The 'm' attribute must change to 'M' when extending a mirror created without
'--nosync' is extended with the '--nosync' option. (See #3 examples above.)
5) An inactive mirror's sync percent cannot be determined definitively, so it
must not be allowed to skip resync. Instead, the extend should ask the user if
they want to extend while performing a resync.
[EXAMPLE]# lvchange -an vg/lv
[EXAMPLE]# lvextend -L +5G vg/lv
Extending 2 mirror images.
Extending logical volume lv to 10.00 GiB
vg/lv is not active. Unable to get sync percent.
Do full resync of extended portion of vg/lv? [y/n]: y
Logical volume lv successfully resized
6) A mirror that is performing recovery (as opposed to an initial sync) - like
after a failure - is not allowed to extend with either an implicit or
explicit nosync option. [You can simulate this with a 'corelog' mirror because
when it is reactivated, it must be recovered every time.]
[EXAMPLE]# lvcreate -m1 -L 5G -n lv vg --nosync --corelog
WARNING: New mirror won't be synchronised. Don't read what you didn't write!
Logical volume "lv" created
[EXAMPLE]# lvs vg
LV VG Attr LSize Pool Origin Snap% Move Log Copy% Convert
lv vg Mwi-a-m- 5.00g 100.00
[EXAMPLE]# lvchange -an vg/lv; lvchange -ay vg/lv; lvs vg
LV VG Attr LSize Pool Origin Snap% Move Log Copy% Convert
lv vg Mwi-a-m- 5.00g 0.08
[EXAMPLE]# lvextend -L +5G vg/lv
Extending 2 mirror images.
Extending logical volume lv to 10.00 GiB
vg/lv cannot be extended while it is recovering.
7) If 'no' is selected in #5 or if the condition in #6 is hit, it should not
result in the mirror being resized or the 'm/M' attribute being changed.
NOTE: A mirror created with '--nosync' behaves differently from one created
without it when performing an extension. The former cannot be extended when
the mirror is recovering (unless inactive), while the latter can. This is
a reasonable thing to do since recovery of a mirror doesn't take long (at
least in the case of an on-disk log), whereas far more time would be spent
in degraded mode if the extension w/o '--nosync' were allowed. It might be
reasonable to add the ability to force the operation in the future. This
should /not/ force a nosync extension, but rather force a sync'ed extension.
IOW, the user would be saying, "Yes, yes... I know recovery won't take long
and that I'll be adding significantly to the time spent in degraded mode, but
I need the extra space right now!".
2011-10-06 19:32:26 +04:00
                /*
                 * If we are expanding an existing mirror, we can skip the
                 * resync of the extension if the LV is currently in-sync
                 * and the LV has the LV_NOTSYNCED flag set.
                 */
2014-08-28 03:40:09 +04:00
                if (old_extents &&
2011-10-06 19:32:26 +04:00
                    segtype_is_mirrored(segtype) &&
2016-07-14 16:21:01 +03:00
                    (lv_is_not_synced(lv))) {
2014-06-09 14:08:27 +04:00
                        dm_percent_t sync_percent = DM_PERCENT_INVALID;
2011-10-06 19:32:26 +04:00
2013-05-16 19:36:56 +04:00
                        if (!lv_is_active_locally(lv)) {
2015-11-05 14:29:44 +03:00
                                log_error("Unable to read sync percent while LV %s "
                                          "is not locally active.", display_lvname(lv));
2012-08-26 03:15:45 +04:00
                                /* FIXME Support --force */
2011-10-06 19:32:26 +04:00
                                if (yes_no_prompt("Do full resync of extended "
2015-11-05 14:29:44 +03:00
                                                  "portion of %s? [y/n]: ",
                                                  display_lvname(lv)) == 'n') {
2014-05-18 22:07:24 +04:00
                                        r = 0;
                                        goto_out;
                                }
2011-10-06 19:32:26 +04:00
                                goto out;
                        }
2011-10-29 00:23:24 +04:00
                        if (!(r = lv_mirror_percent(lv->vg->cmd, lv, 0,
                                                    &sync_percent, NULL))) {
2015-11-05 14:29:44 +03:00
                                log_error("Failed to get sync percent for %s.",
                                          display_lvname(lv));
2011-10-06 19:32:26 +04:00
goto out ;
2014-06-09 14:08:27 +04:00
} else if ( sync_percent = = DM_PERCENT_100 ) {
2011-10-06 19:32:26 +04:00
log_verbose ( " Skipping initial resync for "
2015-11-05 14:29:44 +03:00
" extended portion of %s " ,
display_lvname ( lv ) ) ;
2011-10-06 19:32:26 +04:00
init_mirror_in_sync ( 1 ) ;
lv - > status | = LV_NOTSYNCED ;
} else {
2015-11-05 14:29:44 +03:00
log_error ( " LV %s cannot be extended while it "
" is recovering. " , display_lvname ( lv ) ) ;
2012-02-09 19:13:42 +04:00
r = 0 ;
2011-10-06 19:32:26 +04:00
goto out ;
}
}
2011-04-07 01:32:20 +04:00
}
2011-10-06 19:32:26 +04:00
out :
2005-06-01 20:51:55 +04:00
alloc_destroy ( ah ) ;
2001-11-06 14:31:29 +03:00
return r ;
}
2007-08-07 00:35:48 +04:00
/*
* Minimal LV renaming function .
* Metadata transaction should be made by caller .
2015-11-09 12:41:17 +03:00
* Assumes new_name is allocated from lv - > vgmem pool .
2007-08-07 00:35:48 +04:00
*/
static int _rename_single_lv ( struct logical_volume * lv , char * new_name )
{
struct volume_group * vg = lv - > vg ;
2016-03-01 17:31:48 +03:00
int historical ;
2007-08-07 00:35:48 +04:00
2016-03-01 17:31:48 +03:00
if ( lv_name_is_used_in_vg ( vg , new_name , & historical ) ) {
log_error ( " %sLogical Volume \" %s \" already exists in "
" volume group \" %s \" " , historical ? " historical " : " " ,
new_name , vg - > name ) ;
2007-08-07 20:57:09 +04:00
return 0 ;
2007-08-07 00:35:48 +04:00
}
2014-09-16 00:33:53 +04:00
if ( lv_is_locked ( lv ) ) {
2007-08-07 00:35:48 +04:00
log_error ( " Cannot rename locked LV %s " , lv - > name ) ;
2007-08-07 20:57:09 +04:00
return 0 ;
2007-08-07 00:35:48 +04:00
}
lv - > name = new_name ;
return 1 ;
}
/*
* Rename sub LV .
2007-08-07 20:57:09 +04:00
* ' lv_name_old ' and ' lv_name_new ' are old and new names of the main LV .
2007-08-07 00:35:48 +04:00
*/
2014-03-27 13:35:07 +04:00
static int _rename_sub_lv ( struct logical_volume * lv ,
2007-08-07 20:57:09 +04:00
const char * lv_name_old , const char * lv_name_new )
2007-08-07 00:35:48 +04:00
{
2011-02-18 17:47:28 +03:00
const char * suffix ;
char * new_name ;
2007-08-07 20:57:09 +04:00
size_t len ;
2007-08-07 00:35:48 +04:00
2007-08-08 22:00:36 +04:00
/*
* A sub LV name starts with lv_name_old + ' _ ' .
* The suffix follows lv_name_old and includes ' _ ' .
*/
len = strlen ( lv_name_old ) ;
if ( strncmp ( lv - > name , lv_name_old , len ) | | lv - > name [ len ] ! = ' _ ' ) {
log_error ( " Cannot rename \" %s \" : name format not recognized "
" for internal LV \" %s \" " ,
lv_name_old , lv - > name ) ;
return 0 ;
}
suffix = lv - > name + len ;
2007-08-07 00:35:48 +04:00
2008-01-17 20:17:09 +03:00
/*
2007-08-07 00:35:48 +04:00
* Compose a new name for sub lv :
* e . g . new name is " lvol1_mlog "
2010-11-09 15:34:40 +03:00
* if the sub LV is " lvol0_mlog " and
* a new name for main LV is " lvol1 "
2007-08-07 00:35:48 +04:00
*/
2007-08-07 20:57:09 +04:00
len = strlen ( lv_name_new ) + strlen ( suffix ) + 1 ;
2014-03-27 13:35:51 +04:00
new_name = dm_pool_alloc ( lv - > vg - > vgmem , len ) ;
2007-08-07 00:35:48 +04:00
if ( ! new_name ) {
log_error ( " Failed to allocate space for new name " ) ;
2007-08-07 20:57:09 +04:00
return 0 ;
2007-08-07 00:35:48 +04:00
}
2011-04-09 23:05:23 +04:00
if ( dm_snprintf ( new_name , len , " %s%s " , lv_name_new , suffix ) < 0 ) {
2007-08-07 00:35:48 +04:00
log_error ( " Failed to create new name " ) ;
2007-08-07 20:57:09 +04:00
return 0 ;
2007-08-07 00:35:48 +04:00
}
2014-03-27 16:49:24 +04:00
if ( ! validate_name ( new_name ) ) {
2014-03-27 13:35:51 +04:00
log_error ( " Cannot rename \" %s \" . New logical volume name \" %s \" is invalid. " ,
lv - > name , new_name ) ;
return 0 ;
}
2007-08-07 00:35:48 +04:00
/* Rename it */
return _rename_single_lv ( lv , new_name ) ;
}
2011-06-30 22:25:18 +04:00
/* Callback for for_each_sub_lv */
2014-03-27 13:35:07 +04:00
static int _rename_cb ( struct logical_volume * lv , void * data )
2007-08-07 00:35:48 +04:00
{
2007-08-07 22:55:38 +04:00
struct lv_names * lv_names = ( struct lv_names * ) data ;
2007-08-07 00:35:48 +04:00
2014-03-27 13:35:07 +04:00
return _rename_sub_lv ( lv , lv_names - > old , lv_names - > new ) ;
2007-08-07 00:35:48 +04:00
}
/*
2011-06-30 22:25:18 +04:00
* Loop down sub LVs and call fn for each .
* fn is responsible for logging necessary information on failure .
2007-08-07 00:35:48 +04:00
*/
2015-01-30 14:27:49 +03:00
static int _for_each_sub_lv ( struct logical_volume * lv , int skip_pools ,
int ( * fn ) ( struct logical_volume * lv , void * data ) ,
void * data )
2007-08-07 00:35:48 +04:00
{
2009-05-28 04:29:14 +04:00
struct logical_volume * org ;
2007-08-07 00:35:48 +04:00
struct lv_segment * seg ;
2007-08-22 18:38:18 +04:00
uint32_t s ;
2007-08-07 00:35:48 +04:00
2011-11-04 05:31:23 +04:00
if ( lv_is_cow ( lv ) & & lv_is_virtual_origin ( org = origin_from_cow ( lv ) ) ) {
2014-03-27 13:35:07 +04:00
if ( ! fn ( org , data ) )
2009-05-28 04:29:14 +04:00
return_0 ;
2015-12-07 15:53:00 +03:00
if ( ! _for_each_sub_lv ( org , skip_pools , fn , data ) )
2011-11-04 05:31:23 +04:00
return_0 ;
}
2009-05-28 04:29:14 +04:00
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( seg , & lv - > segments ) {
2011-11-04 05:31:23 +04:00
if ( seg - > log_lv ) {
2014-03-27 13:35:07 +04:00
if ( ! fn ( seg - > log_lv , data ) )
2011-11-04 05:31:23 +04:00
return_0 ;
2015-12-07 15:53:00 +03:00
if ( ! _for_each_sub_lv ( seg - > log_lv , skip_pools , fn , data ) )
2011-11-04 05:31:23 +04:00
return_0 ;
}
2012-01-27 01:39:32 +04:00
if ( seg - > metadata_lv ) {
2014-03-27 13:35:07 +04:00
if ( ! fn ( seg - > metadata_lv , data ) )
2012-01-27 01:39:32 +04:00
return_0 ;
2015-12-07 15:53:00 +03:00
if ( ! _for_each_sub_lv ( seg - > metadata_lv , skip_pools , fn , data ) )
2012-01-27 01:39:32 +04:00
return_0 ;
}
2015-01-30 14:27:49 +03:00
if ( seg - > pool_lv & & ! skip_pools ) {
if ( ! fn ( seg - > pool_lv , data ) )
return_0 ;
2015-12-07 15:53:00 +03:00
if ( ! _for_each_sub_lv ( seg - > pool_lv , skip_pools , fn , data ) )
2015-01-30 14:27:49 +03:00
return_0 ;
}
2007-12-20 21:55:46 +03:00
for ( s = 0 ; s < seg - > area_count ; s + + ) {
if ( seg_type ( seg , s ) ! = AREA_LV )
continue ;
2014-03-27 13:35:07 +04:00
if ( ! fn ( seg_lv ( seg , s ) , data ) )
2009-05-28 04:29:14 +04:00
return_0 ;
2015-12-07 15:53:00 +03:00
if ( ! _for_each_sub_lv ( seg_lv ( seg , s ) , skip_pools , fn , data ) )
2009-05-28 04:29:14 +04:00
return_0 ;
2007-12-20 21:55:46 +03:00
}
2011-08-11 07:29:51 +04:00
2016-05-24 00:55:13 +03:00
if ( ! seg_is_raid_with_meta ( seg ) )
2011-08-11 07:29:51 +04:00
continue ;
/* RAID has meta_areas */
for ( s = 0 ; s < seg - > area_count ; s + + ) {
2016-05-24 00:55:13 +03:00
if ( ( seg_metatype ( seg , s ) ! = AREA_LV ) | | ! seg_metalv ( seg , s ) )
2011-08-11 07:29:51 +04:00
continue ;
2014-03-27 13:35:07 +04:00
if ( ! fn ( seg_metalv ( seg , s ) , data ) )
2011-08-11 07:29:51 +04:00
return_0 ;
2015-12-07 15:53:00 +03:00
if ( ! _for_each_sub_lv ( seg_metalv ( seg , s ) , skip_pools , fn , data ) )
2011-08-11 07:29:51 +04:00
return_0 ;
}
2007-08-07 00:35:48 +04:00
}
return 1 ;
}
2015-01-30 14:27:49 +03:00
int for_each_sub_lv ( struct logical_volume * lv ,
int ( * fn ) ( struct logical_volume * lv , void * data ) ,
void * data )
{
return _for_each_sub_lv ( lv , 0 , fn , data ) ;
}
int for_each_sub_lv_except_pools ( struct logical_volume * lv ,
int ( * fn ) ( struct logical_volume * lv , void * data ) ,
void * data )
{
return _for_each_sub_lv ( lv , 1 , fn , data ) ;
}
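To illustrate the contract these wrappers expose, here is a standalone model
using hypothetical types (not the lvm2 structures; the real walk iterates
lv->segments): the traversal is pre-order, fn sees each sub LV exactly once
before that sub LV's own sub LVs are visited, and any callback returning 0
aborts the entire walk.
#include <stdio.h>
/* Standalone model of the for_each_sub_lv() contract above.
 * 'struct node' is hypothetical. */
struct node {
	const char *name;
	struct node *child[4];	/* NULL-terminated list of sub LVs */
};
static int _walk(struct node *n,
		 int (*fn)(struct node *n, void *data), void *data)
{
	unsigned i;
	for (i = 0; n->child[i]; i++) {
		if (!fn(n->child[i], data))
			return 0;	/* mirrors return_0: abort whole walk */
		if (!_walk(n->child[i], fn, data))
			return 0;
	}
	return 1;
}
static int _count_cb(struct node *n, void *data)
{
	(*(unsigned *) data)++;
	return 1;	/* non-zero: keep walking */
}
int main(void)
{
	struct node mimage = { "lv_mimage_0", { NULL } };
	struct node mlog = { "lv_mlog", { NULL } };
	struct node lv = { "lv", { &mimage, &mlog, NULL } };
	unsigned count = 0;
	if (_walk(&lv, _count_cb, &count))
		printf("%u sub LVs\n", count);	/* prints "2 sub LVs" */
	return 0;
}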
2007-08-04 01:22:10 +04:00
/*
* Core of LV renaming routine .
* VG must be locked by caller .
*/
2012-09-27 11:48:25 +04:00
int lv_rename_update ( struct cmd_context * cmd , struct logical_volume * lv ,
const char * new_name , int update_mda )
2007-08-04 01:22:10 +04:00
{
struct volume_group * vg = lv - > vg ;
2014-09-09 20:47:27 +04:00
struct lv_names lv_names = { . old = lv - > name } ;
2016-03-04 13:36:24 +03:00
int old_lv_is_historical = lv_is_historical ( lv ) ;
2016-03-01 17:31:48 +03:00
int historical ;
2007-08-07 00:35:48 +04:00
2015-12-09 15:52:47 +03:00
/*
* rename is not allowed on sub LVs except for pools
* ( a thin pool is ' visible ' , but a cache pool may not be )
*/
if ( ! lv_is_pool ( lv ) & &
! lv_is_visible ( lv ) ) {
2007-08-07 20:57:09 +04:00
log_error ( " Cannot rename internal LV \" %s \" . " , lv - > name ) ;
2007-08-07 00:35:48 +04:00
return 0 ;
}
2007-08-04 01:22:10 +04:00
2016-03-01 17:31:48 +03:00
if ( lv_name_is_used_in_vg ( vg , new_name , & historical ) ) {
log_error ( " %sLogical Volume \" %s \" already exists in "
2016-03-04 13:36:24 +03:00
" volume group \" %s \" " , historical ? " Historical " : " " ,
2016-03-01 17:31:48 +03:00
new_name , vg - > name ) ;
2007-08-04 01:22:10 +04:00
return 0 ;
}
2014-09-16 00:33:53 +04:00
if ( lv_is_locked ( lv ) ) {
2007-08-04 01:22:10 +04:00
log_error ( " Cannot rename locked LV %s " , lv - > name ) ;
return 0 ;
}
2012-09-27 11:48:25 +04:00
if ( update_mda & & ! archive ( vg ) )
2014-09-09 00:36:42 +04:00
return_0 ;
2007-08-04 01:22:10 +04:00
2016-03-04 13:36:24 +03:00
if ( old_lv_is_historical ) {
2016-03-04 13:46:29 +03:00
/*
* Historical LVs have neither sub LVs nor any
* devices to reload , so just update metadata .
*/
2016-03-04 13:36:24 +03:00
lv - > this_glv - > historical - > name = lv - > name = new_name ;
if ( update_mda & &
( ! vg_write ( vg ) | | ! vg_commit ( vg ) ) )
return_0 ;
} else {
if ( ! ( lv_names . new = dm_pool_strdup ( cmd - > mem , new_name ) ) ) {
log_error ( " Failed to allocate space for new name. " ) ;
return 0 ;
}
2014-09-09 22:15:51 +04:00
2016-03-04 13:36:24 +03:00
/* rename sub LVs */
if ( ! for_each_sub_lv_except_pools ( lv , _rename_cb , ( void * ) & lv_names ) )
return_0 ;
2007-08-07 00:35:48 +04:00
2016-03-04 13:36:24 +03:00
/* rename main LV */
lv - > name = lv_names . new ;
2007-08-04 01:22:10 +04:00
2016-03-04 13:36:24 +03:00
if ( lv_is_cow ( lv ) )
lv = origin_from_cow ( lv ) ;
2014-09-09 22:15:51 +04:00
2016-03-04 13:36:24 +03:00
if ( update_mda & & ! lv_update_and_reload ( ( struct logical_volume * ) lv_lock_holder ( lv ) ) )
return_0 ;
}
2007-08-04 01:22:10 +04:00
2014-09-09 20:47:27 +04:00
return 1 ;
2007-08-04 01:22:10 +04:00
}
2012-09-27 11:48:25 +04:00
/*
* Core of LV renaming routine .
* VG must be locked by caller .
*/
int lv_rename ( struct cmd_context * cmd , struct logical_volume * lv ,
const char * new_name )
{
return lv_rename_update ( cmd , lv , new_name , 1 ) ;
}
2013-03-14 02:00:29 +04:00
/*
* Core lv resize code
*/
# define SIZE_BUF 128
2014-02-26 06:14:18 +04:00
/* TODO: unify stripe size validation across source code */
2016-01-19 01:24:32 +03:00
static int _validate_stripesize ( const struct volume_group * vg ,
2013-03-14 02:00:29 +04:00
struct lvresize_params * lp )
{
2016-06-17 14:25:41 +03:00
if ( lp - > stripe_size > ( STRIPE_SIZE_LIMIT * 2 ) ) {
2016-01-15 12:35:10 +03:00
log_error ( " Stripe size cannot be larger than %s. " ,
2016-01-19 01:24:32 +03:00
display_size ( vg - > cmd , ( uint64_t ) STRIPE_SIZE_LIMIT ) ) ;
2013-03-14 02:00:29 +04:00
return 0 ;
}
2016-06-17 14:25:41 +03:00
if ( lp - > stripe_size > vg - > extent_size ) {
2014-02-26 06:14:18 +04:00
log_print_unless_silent ( " Reducing stripe size %s to maximum, "
2016-01-15 12:35:10 +03:00
" physical extent size %s. " ,
2016-06-17 14:25:41 +03:00
display_size ( vg - > cmd , lp - > stripe_size ) ,
2016-01-19 01:24:32 +03:00
display_size ( vg - > cmd , vg - > extent_size ) ) ;
2013-03-14 02:00:29 +04:00
lp - > stripe_size = vg - > extent_size ;
2016-06-17 14:25:41 +03:00
}
2013-03-14 02:00:29 +04:00
2016-06-30 19:59:44 +03:00
if ( ! is_power_of_2 ( lp - > stripe_size ) ) {
2016-01-15 12:35:10 +03:00
log_error ( " Stripe size must be power of 2. " ) ;
2013-03-14 02:00:29 +04:00
return 0 ;
}
return 1 ;
}
2016-06-14 16:32:21 +03:00
static int _request_confirmation ( const struct logical_volume * lv ,
2013-03-14 02:00:29 +04:00
const struct lvresize_params * lp )
{
2016-06-14 16:32:21 +03:00
const struct volume_group * vg = lv - > vg ;
2013-03-14 02:00:29 +04:00
struct lvinfo info = { 0 } ;
2016-01-19 01:24:32 +03:00
if ( ! lv_info ( vg - > cmd , lv , 0 , & info , 1 , 0 ) & & driver_version ( NULL , 0 ) ) {
2016-06-14 15:56:17 +03:00
log_error ( " lv_info failed: aborting. " ) ;
2013-03-14 02:00:29 +04:00
return 0 ;
}
if ( lp - > resizefs ) {
if ( ! info . exists ) {
log_error ( " Logical volume %s must be activated "
2016-02-08 15:08:54 +03:00
" before resizing filesystem. " ,
display_lvname ( lv ) ) ;
2013-03-14 02:00:29 +04:00
return 0 ;
}
return 1 ;
}
if ( ! info . exists )
return 1 ;
2016-02-08 15:08:54 +03:00
log_warn ( " WARNING: Reducing active%s logical volume to %s. " ,
2013-03-14 02:00:29 +04:00
info . open_count ? " and open " : " " ,
2016-01-19 01:24:32 +03:00
display_size ( vg - > cmd , ( uint64_t ) lp - > extents * vg - > extent_size ) ) ;
2013-03-14 02:00:29 +04:00
log_warn ( " THIS MAY DESTROY YOUR DATA (filesystem etc.) " ) ;
2016-06-17 14:25:41 +03:00
if ( ! lp - > force ) {
2013-03-14 02:00:29 +04:00
if ( yes_no_prompt ( " Do you really want to reduce %s? [y/n]: " ,
2016-02-08 15:08:54 +03:00
display_lvname ( lv ) ) = = ' n ' ) {
2016-06-14 15:56:17 +03:00
log_error ( " Logical volume %s NOT reduced. " ,
2016-02-08 15:08:54 +03:00
display_lvname ( lv ) ) ;
2013-03-14 02:00:29 +04:00
return 0 ;
}
}
return 1 ;
}
enum fsadm_cmd_e { FSADM_CMD_CHECK , FSADM_CMD_RESIZE } ;
2017-04-08 20:43:20 +03:00
2013-03-14 02:00:29 +04:00
# define FSADM_CMD_MAX_ARGS 6
# define FSADM_CHECK_FAILS_FOR_MOUNTED 3 /* shell exit status code */
/*
2017-04-08 20:43:20 +03:00
* fsadm - - dry - run - - verbose - - force check lv_path
* fsadm - - dry - run - - verbose - - force resize lv_path size
2013-03-14 02:00:29 +04:00
*/
2016-06-15 16:46:03 +03:00
static int _fsadm_cmd ( enum fsadm_cmd_e fcmd ,
struct logical_volume * lv ,
uint32_t extents ,
2017-06-21 15:02:57 +03:00
int yes ,
2016-06-15 16:46:03 +03:00
int force ,
2013-03-14 02:00:29 +04:00
int * status )
{
2016-06-15 16:46:03 +03:00
struct volume_group * vg = lv - > vg ;
struct cmd_context * cmd = vg - > cmd ;
2013-03-14 02:00:29 +04:00
char lv_path [ PATH_MAX ] ;
char size_buf [ SIZE_BUF ] ;
2017-06-21 15:02:57 +03:00
const char * argv [ FSADM_CMD_MAX_ARGS + 4 ] ;
2013-03-14 02:00:29 +04:00
unsigned i = 0 ;
2017-04-08 20:43:20 +03:00
argv [ i + + ] = find_config_tree_str ( cmd , global_fsadm_executable_CFG , NULL ) ;
2013-03-14 02:00:29 +04:00
if ( test_mode ( ) )
argv [ i + + ] = " --dry-run " ;
if ( verbose_level ( ) > = _LOG_NOTICE )
argv [ i + + ] = " --verbose " ;
2017-06-21 15:02:57 +03:00
if ( yes )
argv [ i + + ] = " --yes " ;
2016-06-15 16:46:03 +03:00
if ( force )
2013-03-14 02:00:29 +04:00
argv [ i + + ] = " --force " ;
argv [ i + + ] = ( fcmd = = FSADM_CMD_RESIZE ) ? " resize " : " check " ;
if ( status )
* status = - 1 ;
2014-06-30 14:02:05 +04:00
if ( dm_snprintf ( lv_path , sizeof ( lv_path ) , " %s%s/%s " , cmd - > dev_dir ,
2016-06-15 16:46:03 +03:00
vg - > name , lv - > name ) < 0 ) {
log_error ( " Couldn't create LV path for %s. " , display_lvname ( lv ) ) ;
2013-03-14 02:00:29 +04:00
return 0 ;
}
argv [ i + + ] = lv_path ;
if ( fcmd = = FSADM_CMD_RESIZE ) {
2015-07-06 17:09:17 +03:00
if ( dm_snprintf ( size_buf , sizeof ( size_buf ) , FMTu64 " K " ,
2016-06-15 16:46:03 +03:00
( uint64_t ) extents * ( vg - > extent_size / 2 ) ) < 0 ) {
log_error ( " Couldn't generate new LV size string. " ) ;
2013-03-14 02:00:29 +04:00
return 0 ;
}
argv [ i + + ] = size_buf ;
}
argv [ i ] = NULL ;
return exec_cmd ( cmd , argv , status , 1 ) ;
}
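For instance, with the default dev_dir of /dev/ and 4MiB extents, resizing
vg/lv to 2560 extents would compose roughly the following (the flags vary
with the test/verbose/yes/force settings; this is an illustration, not
captured output):

    fsadm --verbose resize /dev/vg/lv 10485760K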
2016-06-23 00:32:35 +03:00
static uint32_t _adjust_amount ( dm_percent_t percent , int policy_threshold , int policy_amount )
2015-10-25 21:19:39 +03:00
{
if ( ! ( DM_PERCENT_0 < percent & & percent < = DM_PERCENT_100 ) | |
percent < = ( policy_threshold * DM_PERCENT_1 ) )
2016-02-08 15:08:54 +03:00
return 0 ; /* nothing to do */
2015-10-25 21:19:39 +03:00
/*
* Evaluate the minimal amount needed to get below threshold .
* Keep using DM_PERCENT_1 units for better precision .
* Round up to the needed percentage value .
*/
2016-02-08 15:08:54 +03:00
percent = ( percent / policy_threshold + ( DM_PERCENT_1 - 1 ) / 100 ) / ( DM_PERCENT_1 / 100 ) - 100 ;
2015-10-25 21:19:39 +03:00
/* Use it if current policy amount is smaller */
2016-06-24 01:24:26 +03:00
return ( policy_amount < percent ) ? ( uint32_t ) percent : ( uint32_t ) policy_amount ;
2015-10-25 21:19:39 +03:00
}
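A worked example of the rounding above may help. The sketch below is
standalone and assumes libdevmapper's DM_PERCENT_1 scale of 100000 (an
assumption, not taken from this file). A pool 75% full with a 70% threshold
needs at least 8% growth, since 75/1.08 is roughly 69.4%:
#include <stdint.h>
#include <stdio.h>
#define DM_PERCENT_1 100000	/* assumed libdevmapper scaling */
int main(void)
{
	int64_t percent = 75 * (int64_t) DM_PERCENT_1;	/* pool 75% full */
	int policy_threshold = 70, policy_amount = 5;
	int64_t needed;
	/* Same round-up arithmetic as _adjust_amount() above */
	needed = (percent / policy_threshold +
		  (DM_PERCENT_1 - 1) / 100) / (DM_PERCENT_1 / 100) - 100;
	/* 8 > 5, so the configured policy amount is overridden */
	printf("extend by %d%%\n",
	       (policy_amount < needed) ? (int) needed : policy_amount);
	return 0;
}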
2016-06-23 00:32:35 +03:00
static int _lvresize_adjust_policy ( const struct logical_volume * lv ,
uint32_t * amount , uint32_t * meta_amount )
2013-03-14 02:00:29 +04:00
{
2016-01-19 01:24:32 +03:00
struct cmd_context * cmd = lv - > vg - > cmd ;
2014-06-09 14:08:27 +04:00
dm_percent_t percent ;
2016-09-16 22:50:14 +03:00
dm_percent_t min_threshold ;
2013-03-14 02:00:29 +04:00
int policy_threshold , policy_amount ;
2016-06-23 00:32:35 +03:00
* amount = * meta_amount = 0 ;
2013-03-14 02:00:29 +04:00
if ( lv_is_thin_pool ( lv ) ) {
policy_threshold =
find_config_tree_int ( cmd , activation_thin_pool_autoextend_threshold_CFG ,
2015-10-22 11:46:39 +03:00
lv_config_profile ( lv ) ) ;
2013-03-14 02:00:29 +04:00
policy_amount =
find_config_tree_int ( cmd , activation_thin_pool_autoextend_percent_CFG ,
lv_config_profile ( lv ) ) ;
2015-10-22 11:46:39 +03:00
if ( policy_threshold < 50 ) {
2015-10-23 17:38:31 +03:00
log_warn ( " WARNING: Thin pool autoextend threshold %d%% is set below "
" minimum supported 50%%. " , policy_threshold ) ;
2015-10-22 11:46:39 +03:00
policy_threshold = 50 ;
2015-09-14 13:58:21 +03:00
}
2013-03-14 02:00:29 +04:00
} else {
policy_threshold =
2015-10-22 11:46:39 +03:00
find_config_tree_int ( cmd , activation_snapshot_autoextend_threshold_CFG , NULL ) ;
2013-03-14 02:00:29 +04:00
policy_amount =
find_config_tree_int ( cmd , activation_snapshot_autoextend_percent_CFG , NULL ) ;
2015-10-22 11:46:39 +03:00
if ( policy_threshold < 50 ) {
log_warn ( " WARNING: Snapshot autoextend threshold %d%% is set bellow "
" minimal supported value 50%%. " , policy_threshold ) ;
policy_threshold = 50 ;
}
2013-03-14 02:00:29 +04:00
}
2016-06-23 00:32:35 +03:00
if ( policy_threshold > = 100 )
2016-01-15 16:39:58 +03:00
return 1 ; /* nothing to do */
if ( ! policy_amount ) {
2015-10-22 11:46:39 +03:00
log_error ( " Can't extend %s with %s autoextend percent set to 0%%. " ,
2016-12-25 02:29:30 +03:00
display_lvname ( lv ) , lvseg_name ( first_seg ( lv ) ) ) ;
2015-10-22 11:46:39 +03:00
return 0 ;
}
2015-09-14 13:58:21 +03:00
if ( ! lv_is_active_locally ( lv ) ) {
log_error ( " Can't read state of locally inactive LV %s. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
2013-03-14 02:00:29 +04:00
if ( lv_is_thin_pool ( lv ) ) {
if ( ! lv_thin_pool_percent ( lv , 1 , & percent ) )
return_0 ;
2016-06-23 00:32:35 +03:00
2016-09-16 22:50:14 +03:00
/* Resize below the minimal usable value */
min_threshold = pool_metadata_min_threshold ( first_seg ( lv ) ) / DM_PERCENT_1 ;
* meta_amount = _adjust_amount ( percent , ( min_threshold < policy_threshold ) ?
min_threshold : policy_threshold , policy_amount ) ;
2016-06-23 00:32:35 +03:00
2013-03-14 02:00:29 +04:00
if ( ! lv_thin_pool_percent ( lv , 0 , & percent ) )
return_0 ;
} else {
if ( ! lv_snapshot_percent ( lv , & percent ) )
return_0 ;
}
2016-06-23 00:32:35 +03:00
* amount = _adjust_amount ( percent , policy_threshold , policy_amount ) ;
2013-03-14 02:00:29 +04:00
return 1 ;
}
2017-10-18 17:57:46 +03:00
static uint32_t _lvseg_get_stripes ( struct lv_segment * seg , uint32_t * stripesize )
2013-03-14 02:00:29 +04:00
{
uint32_t s ;
struct lv_segment * seg_mirr ;
/* If segment mirrored, check if images are striped */
if ( seg_is_mirrored ( seg ) )
for ( s = 0 ; s < seg - > area_count ; s + + ) {
if ( seg_type ( seg , s ) ! = AREA_LV )
continue ;
seg_mirr = first_seg ( seg_lv ( seg , s ) ) ;
if ( seg_is_striped ( seg_mirr ) ) {
seg = seg_mirr ;
break ;
}
}
if ( seg_is_striped ( seg ) ) {
* stripesize = seg - > stripe_size ;
return seg - > area_count ;
}
2017-02-10 00:41:28 +03:00
if ( seg_is_raid ( seg ) ) {
* stripesize = seg - > stripe_size ;
return _raid_stripes_count ( seg ) ;
}
2013-03-14 02:00:29 +04:00
* stripesize = 0 ;
return 0 ;
}
2016-06-23 00:32:35 +03:00
static int _lvresize_check ( struct logical_volume * lv ,
struct lvresize_params * lp )
2013-03-14 02:00:29 +04:00
{
2013-07-06 06:28:21 +04:00
struct volume_group * vg = lv - > vg ;
2013-03-14 02:00:29 +04:00
if ( lv_is_external_origin ( lv ) ) {
/*
* Since external - origin can be activated read - only ,
* there is no way to use extended areas .
*/
2016-01-15 16:41:02 +03:00
log_error ( " Cannot resize external origin logical volume %s. " ,
display_lvname ( lv ) ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2014-09-16 00:33:53 +04:00
if ( lv_is_raid_image ( lv ) | | lv_is_raid_metadata ( lv ) ) {
2013-03-14 02:00:29 +04:00
log_error ( " Cannot resize a RAID %s directly " ,
2016-12-13 02:09:15 +03:00
lv_is_raid_image ( lv ) ? " image " : " metadata area " ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
if ( lv_is_raid_with_tracking ( lv ) ) {
2016-01-15 16:41:02 +03:00
log_error ( " Cannot resize logical volume %s while it is "
" tracking a split image. " , display_lvname ( lv ) ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2017-03-17 18:46:33 +03:00
if ( lv_is_raid ( lv ) & &
lp - > resize = = LV_REDUCE ) {
unsigned attrs ;
const struct segment_type * segtype = first_seg ( lv ) - > segtype ;
if ( ! segtype - > ops - > target_present | |
! segtype - > ops - > target_present ( lv - > vg - > cmd , NULL , & attrs ) | |
! ( attrs & RAID_FEATURE_SHRINK ) ) {
log_error ( " RAID module does not support shrinking. " ) ;
return 0 ;
}
}
2016-06-17 14:25:41 +03:00
if ( lp - > use_policies & & ! lv_is_cow ( lv ) & & ! lv_is_thin_pool ( lv ) ) {
2013-07-06 06:28:21 +04:00
log_error ( " Policy-based resize is supported only for snapshot and thin pool volumes. " ) ;
return 0 ;
2013-03-14 02:00:29 +04:00
}
2017-10-23 12:20:32 +03:00
if ( lv_is_cache_type ( lv ) | |
( lv_is_thin_pool ( lv ) & & lv_is_cache_type ( seg_lv ( first_seg ( lv ) , 0 ) ) ) ) {
2017-01-05 17:32:25 +03:00
log_error ( " Unable to resize logical volumes of cache type. " ) ;
return 0 ;
}
2016-01-15 16:41:02 +03:00
if ( ! lv_is_visible ( lv ) & &
! lv_is_thin_pool_metadata ( lv ) & &
! lv_is_lockd_sanlock_lv ( lv ) ) {
log_error ( " Can't resize internal logical volume %s. " , display_lvname ( lv ) ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2014-09-16 00:33:53 +04:00
if ( lv_is_locked ( lv ) ) {
2016-01-15 16:41:02 +03:00
log_error ( " Can't resize locked logical volume %s. " , display_lvname ( lv ) ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2014-09-16 00:33:53 +04:00
if ( lv_is_converting ( lv ) ) {
2016-01-15 16:41:02 +03:00
log_error ( " Can't resize logical volume %s while "
" lvconvert in progress. " , display_lvname ( lv ) ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2016-06-17 14:25:41 +03:00
if ( ! lv_is_thin_pool ( lv ) & & lp - > poolmetadata_size ) {
2013-07-06 06:28:21 +04:00
log_error ( " --poolmetadatasize can be used only with thin pools. " ) ;
return 0 ;
}
2016-06-17 14:25:41 +03:00
if ( lp - > stripe_size ) {
if ( ! ( vg - > fid - > fmt - > features & FMT_SEGMENTS ) ) {
log_print_unless_silent ( " Varied stripesize not supported. Ignoring. " ) ;
lp - > stripe_size = lp - > stripes = 0 ;
} else if ( ! _validate_stripesize ( vg , lp ) )
return_0 ;
}
2016-01-15 16:41:02 +03:00
2016-01-20 15:16:53 +03:00
if ( lp - > resizefs & &
( lv_is_thin_pool ( lv ) | |
lv_is_thin_pool_data ( lv ) | |
lv_is_thin_pool_metadata ( lv ) | |
lv_is_pool_metadata_spare ( lv ) | |
lv_is_lockd_sanlock_lv ( lv ) ) ) {
log_print_unless_silent ( " Ignoring --resizefs as volume %s does not have a filesystem. " ,
display_lvname ( lv ) ) ;
lp - > resizefs = 0 ;
}
2016-06-17 14:25:41 +03:00
if ( lp - > stripes & &
! ( vg - > fid - > fmt - > features & FMT_SEGMENTS ) ) {
log_print_unless_silent ( " Varied striping not supported. Ignoring. " ) ;
lp - > stripes = 0 ;
2016-01-15 16:41:02 +03:00
}
2016-06-17 14:25:41 +03:00
if ( lp - > mirrors & &
! ( vg - > fid - > fmt - > features & FMT_SEGMENTS ) ) {
log_print_unless_silent ( " Mirrors not supported. Ignoring. " ) ;
lp - > mirrors = 0 ;
2016-01-15 16:41:02 +03:00
}
2013-07-06 06:28:21 +04:00
return 1 ;
}
2016-01-19 13:44:11 +03:00
static int _lvresize_adjust_size ( struct volume_group * vg ,
uint64_t size , sign_t sign ,
uint32_t * extents )
2013-07-06 06:28:21 +04:00
{
2016-01-19 13:44:11 +03:00
uint32_t extent_size = vg - > extent_size ;
uint32_t adjust ;
2013-03-14 02:00:29 +04:00
/*
* First adjust to an exact multiple of extent size .
2016-01-19 13:44:11 +03:00
* When changing to an absolute size , we round that size up .
2013-03-14 02:00:29 +04:00
* When extending by a relative amount we round that amount up .
* When reducing by a relative amount we remove at most that amount .
*/
2016-01-19 13:44:11 +03:00
if ( ( adjust = ( size % extent_size ) ) ) {
if ( sign ! = SIGN_MINUS ) /* not reducing */
size + = extent_size ;
2013-03-14 02:00:29 +04:00
2016-01-19 13:44:11 +03:00
size - = adjust ;
log_print_unless_silent ( " Rounding size to boundary between physical extents: %s. " ,
display_size ( vg - > cmd , size ) ) ;
2013-03-14 02:00:29 +04:00
}
2016-01-19 13:44:11 +03:00
* extents = size / extent_size ;
2013-07-06 06:28:21 +04:00
return 1 ;
}
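A standalone arithmetic sketch of the rounding rule above (sizes here are in
512-byte sectors, matching vg->extent_size; the 4MiB extent size is just an
example):
#include <stdint.h>
#include <stdio.h>
/* Round a size to whole extents: grow requests round up,
 * shrink requests round down, as _lvresize_adjust_size() does. */
static uint32_t _round_to_extents(uint64_t size, int reducing,
				  uint32_t extent_size)
{
	uint64_t adjust = size % extent_size;
	if (adjust) {
		if (!reducing)
			size += extent_size;
		size -= adjust;
	}
	return (uint32_t) (size / extent_size);
}
int main(void)
{
	/* 4MiB extents (8192 sectors): a 10MiB delta (20480 sectors)
	 * becomes 3 extents when extending, 2 when reducing. */
	printf("%u %u\n", _round_to_extents(20480, 0, 8192),
	       _round_to_extents(20480, 1, 8192));
	return 0;
}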
2014-08-22 04:26:14 +04:00
/*
* If percent options were used , convert them into actual numbers of extents .
*/
2016-06-14 16:32:21 +03:00
static int _lvresize_extents_from_percent ( const struct logical_volume * lv ,
struct lvresize_params * lp ,
2014-08-22 04:26:14 +04:00
struct dm_list * pvh )
2013-07-06 06:28:21 +04:00
{
2016-06-14 16:32:21 +03:00
const struct volume_group * vg = lv - > vg ;
2013-07-06 06:28:21 +04:00
uint32_t pv_extent_count ;
2014-08-22 04:26:14 +04:00
uint32_t old_extents = lp - > extents ;
2013-07-06 06:28:21 +04:00
switch ( lp - > percent ) {
2013-03-14 02:00:29 +04:00
case PERCENT_VG :
2014-08-22 04:26:14 +04:00
lp - > extents = percent_of_extents ( lp - > extents , vg - > extent_count ,
2013-03-14 02:00:29 +04:00
( lp - > sign ! = SIGN_MINUS ) ) ;
break ;
case PERCENT_FREE :
2014-08-22 04:26:14 +04:00
lp - > extents = percent_of_extents ( lp - > extents , vg - > free_count ,
2013-03-14 02:00:29 +04:00
( lp - > sign ! = SIGN_MINUS ) ) ;
break ;
case PERCENT_LV :
2014-08-22 04:26:14 +04:00
lp - > extents = percent_of_extents ( lp - > extents , lv - > le_count ,
2013-03-14 02:00:29 +04:00
( lp - > sign ! = SIGN_MINUS ) ) ;
break ;
case PERCENT_PVS :
2016-06-16 15:39:26 +03:00
if ( pvh ! = & vg - > pvs ) {
2013-03-14 02:00:29 +04:00
pv_extent_count = pv_list_extents_free ( pvh ) ;
2014-08-22 04:26:14 +04:00
lp - > extents = percent_of_extents ( lp - > extents , pv_extent_count ,
2013-03-14 02:00:29 +04:00
( lp - > sign ! = SIGN_MINUS ) ) ;
} else
2014-08-22 04:26:14 +04:00
lp - > extents = percent_of_extents ( lp - > extents , vg - > extent_count ,
2013-03-14 02:00:29 +04:00
( lp - > sign ! = SIGN_MINUS ) ) ;
break ;
case PERCENT_ORIGIN :
if ( ! lv_is_cow ( lv ) ) {
log_error ( " Specified LV does not have an origin LV. " ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2014-08-22 04:26:14 +04:00
lp - > extents = percent_of_extents ( lp - > extents , origin_from_cow ( lv ) - > le_count ,
2013-03-14 02:00:29 +04:00
( lp - > sign ! = SIGN_MINUS ) ) ;
break ;
case PERCENT_NONE :
2014-08-22 04:26:14 +04:00
return 1 ; /* Nothing to do */
2014-02-25 12:36:26 +04:00
default :
log_error ( INTERNAL_ERROR " Unsupported percent type %u. " , lp - > percent ) ;
return 0 ;
2013-03-14 02:00:29 +04:00
}
2014-08-22 04:26:14 +04:00
if ( lp - > percent = = PERCENT_VG | | lp - > percent = = PERCENT_FREE | | lp - > percent = = PERCENT_PVS )
lp - > extents_are_pes = 1 ;
2014-02-25 02:48:23 +04:00
2014-08-22 04:26:14 +04:00
if ( lp - > sign = = SIGN_NONE & & ( lp - > percent = = PERCENT_VG | | lp - > percent = = PERCENT_FREE | | lp - > percent = = PERCENT_PVS ) )
lp - > approx_alloc = 1 ;
2013-03-14 02:00:29 +04:00
2014-08-22 04:26:14 +04:00
if ( lp - > sign = = SIGN_PLUS & & lp - > percent = = PERCENT_FREE )
lp - > approx_alloc = 1 ;
2013-03-14 02:00:29 +04:00
2014-08-22 04:26:14 +04:00
log_verbose ( " Converted % " PRIu32 " %%%s into %s% " PRIu32 " %s extents. " , old_extents , get_percent_string ( lp - > percent ) ,
lp - > approx_alloc ? " at most " : " " , lp - > extents , lp - > extents_are_pes ? " physical " : " logical " ) ;
2013-03-14 02:00:29 +04:00
2014-08-22 04:26:14 +04:00
return 1 ;
}
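The conversion itself is simple multiplication with optional round-up. The
helper below is a hypothetical stand-in for percent_of_extents() (the real
helper lives elsewhere in the tree and its exact rounding should be checked
there); it shows what 'lvextend -l +50%FREE' yields against 999 free extents:
#include <stdint.h>
#include <stdio.h>
/* Hypothetical stand-in for percent_of_extents() */
static uint32_t _percent_of(uint32_t percent, uint32_t count, int roundup)
{
	uint64_t v = (uint64_t) percent * count;
	return (uint32_t) ((v + (roundup ? 99 : 0)) / 100);
}
int main(void)
{
	/* +50%FREE of 999 free extents, rounded up: 500 extents */
	printf("%u\n", _percent_of(50, 999, 1));
	return 0;
}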
2013-07-06 06:28:21 +04:00
2014-08-22 04:26:14 +04:00
static int _add_pes ( struct logical_volume * lv , void * data )
{
uint32_t * pe_total = data ;
struct lv_segment * seg ;
uint32_t s ;
dm_list_iterate_items ( seg , & lv - > segments ) {
for ( s = 0 ; s < seg - > area_count ; s + + ) {
if ( seg_type ( seg , s ) ! = AREA_PV )
continue ;
* pe_total + = seg_pvseg ( seg , s ) - > len ;
2013-03-14 02:00:29 +04:00
}
}
2014-08-22 04:26:14 +04:00
return 1 ;
}
static uint32_t _lv_pe_count ( struct logical_volume * lv )
{
uint32_t pe_total = 0 ;
/* Top-level LV first */
if ( ! _add_pes ( lv , & pe_total ) )
stack ;
/* Any sub-LVs */
if ( ! for_each_sub_lv ( lv , _add_pes , & pe_total ) )
stack ;
return pe_total ;
}
/* FIXME Avoid having variables like lp->extents mean different things at different places */
2016-06-14 16:32:21 +03:00
static int _lvresize_adjust_extents ( struct logical_volume * lv ,
struct lvresize_params * lp ,
struct dm_list * pvh )
2014-08-22 04:26:14 +04:00
{
struct volume_group * vg = lv - > vg ;
2016-06-14 16:32:21 +03:00
struct cmd_context * cmd = vg - > cmd ;
2014-08-22 04:26:14 +04:00
uint32_t logical_extents_used = 0 ;
uint32_t physical_extents_used = 0 ;
uint32_t seg_stripes = 0 , seg_stripesize = 0 ;
uint32_t seg_mirrors = 0 ;
2016-01-19 13:44:11 +03:00
struct lv_segment * seg , * seg_last ;
2014-08-22 04:26:14 +04:00
uint32_t sz , str ;
uint32_t seg_logical_extents ;
uint32_t seg_physical_extents ;
uint32_t area_multiple ;
2016-01-22 18:11:29 +03:00
uint32_t stripes_extents ;
2014-08-22 04:26:14 +04:00
uint32_t size_rest ;
uint32_t existing_logical_extents = lv - > le_count ;
uint32_t existing_physical_extents , saved_existing_physical_extents ;
2016-01-19 13:44:11 +03:00
uint32_t existing_extents ;
2014-08-22 04:26:14 +04:00
uint32_t seg_size = 0 ;
uint32_t new_extents ;
int reducing = 0 ;
2016-01-19 13:44:11 +03:00
seg_last = last_seg ( lv ) ;
2013-03-14 02:00:29 +04:00
/* FIXME Support LVs with mixed segment types */
2016-06-17 14:25:41 +03:00
if ( lp - > segtype & & ( lp - > segtype ! = seg_last - > segtype ) ) {
2016-06-14 15:56:17 +03:00
log_error ( " VolumeType does not match (%s). " , lp - > segtype - > name ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2016-06-17 14:25:41 +03:00
/* Use segment type of last segment */
lp - > segtype = seg_last - > segtype ;
2014-08-22 04:26:14 +04:00
/* For virtual devices, just pretend the physical size matches. */
existing_physical_extents = saved_existing_physical_extents = _lv_pe_count ( lv ) ;
if ( ! existing_physical_extents ) {
existing_physical_extents = lv - > le_count ;
lp - > extents_are_pes = 0 ;
}
2013-03-14 02:00:29 +04:00
2016-01-19 13:44:11 +03:00
existing_extents = ( lp - > extents_are_pes )
? existing_physical_extents : existing_logical_extents ;
2014-08-22 04:26:14 +04:00
/* Initial decision on whether we are extending or reducing */
if ( lp - > sign = = SIGN_MINUS | |
2016-01-19 13:44:11 +03:00
( lp - > sign = = SIGN_NONE & & ( lp - > extents < existing_extents ) ) )
2014-08-22 04:26:14 +04:00
reducing = 1 ;
/* If extending, find properties of last segment */
if ( ! reducing ) {
2016-01-19 13:44:11 +03:00
seg_mirrors = seg_is_mirrored ( seg_last ) ? lv_mirror_count ( lv ) : 0 ;
2013-03-14 02:00:29 +04:00
2016-06-17 14:25:41 +03:00
if ( ! lp - > mirrors & & seg_mirrors ) {
2014-08-22 04:26:14 +04:00
log_print_unless_silent ( " Extending % " PRIu32 " mirror images. " , seg_mirrors ) ;
2013-03-14 02:00:29 +04:00
lp - > mirrors = seg_mirrors ;
2016-06-17 14:25:41 +03:00
} else if ( ( lp - > mirrors | | seg_mirrors ) & & ( lp - > mirrors ! = seg_mirrors ) ) {
2013-03-14 02:00:29 +04:00
log_error ( " Cannot vary number of mirrors in LV yet. " ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2016-01-19 13:44:11 +03:00
if ( seg_is_raid10 ( seg_last ) ) {
2015-11-13 11:49:59 +03:00
if ( ! seg_mirrors ) {
log_error ( INTERNAL_ERROR " Missing mirror segments for %s. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
2014-08-22 04:26:14 +04:00
/* FIXME Warn if command line values are being overridden? */
2016-01-19 13:44:11 +03:00
lp - > stripes = seg_last - > area_count / seg_mirrors ;
lp - > stripe_size = seg_last - > stripe_size ;
2014-08-22 04:26:14 +04:00
} else if ( ! ( lp - > stripes = = 1 | | ( lp - > stripes > 1 & & lp - > stripe_size ) ) ) {
/* If extending, find stripes, stripesize & size of last segment */
/* FIXME Don't assume mirror seg will always be AREA_LV */
/* FIXME We will need to support resize for metadata LV as well,
* and data LV could be any type ( i . e . mirror ) ) */
2016-01-19 13:44:11 +03:00
dm_list_iterate_items ( seg , seg_mirrors ? & seg_lv ( seg_last , 0 ) - > segments : & lv - > segments ) {
2014-08-22 04:26:14 +04:00
/* Allow through "striped" and RAID 4/5/6/10 */
if ( ! seg_is_striped ( seg ) & &
( ! seg_is_raid ( seg ) | | seg_is_mirrored ( seg ) ) & &
2015-11-13 11:49:59 +03:00
! seg_is_raid10 ( seg ) )
2014-08-22 04:26:14 +04:00
continue ;
2016-01-11 16:11:37 +03:00
2014-08-22 04:26:14 +04:00
sz = seg - > stripe_size ;
str = seg - > area_count - lp - > segtype - > parity_devs ;
2016-01-11 16:11:37 +03:00
2014-08-22 04:26:14 +04:00
if ( ( seg_stripesize & & seg_stripesize ! = sz & &
sz & & ! lp - > stripe_size ) | |
( seg_stripes & & seg_stripes ! = str & & ! lp - > stripes ) ) {
log_error ( " Please specify number of "
" stripes (-i) and stripesize (-I) " ) ;
return 0 ;
}
2016-01-11 16:11:37 +03:00
2014-08-22 04:26:14 +04:00
seg_stripesize = sz ;
seg_stripes = str ;
}
2016-01-11 16:11:37 +03:00
2014-08-22 04:26:14 +04:00
if ( ! lp - > stripes )
lp - > stripes = seg_stripes ;
else if ( seg_is_raid ( first_seg ( lv ) ) & &
( lp - > stripes ! = seg_stripes ) ) {
log_error ( " Unable to extend \" %s \" segment type with different number of stripes. " ,
2014-10-20 20:40:39 +04:00
lvseg_name ( first_seg ( lv ) ) ) ;
2013-07-06 06:28:21 +04:00
return 0 ;
2013-03-14 02:00:29 +04:00
}
2016-01-11 16:11:37 +03:00
2014-08-22 04:26:14 +04:00
if ( ! lp - > stripe_size & & lp - > stripes > 1 ) {
if ( seg_stripesize ) {
log_print_unless_silent ( " Using stripesize of last segment %s " ,
display_size ( cmd , ( uint64_t ) seg_stripesize ) ) ;
lp - > stripe_size = seg_stripesize ;
} else {
lp - > stripe_size =
find_config_tree_int ( cmd , metadata_stripesize_CFG , NULL ) * 2 ;
log_print_unless_silent ( " Using default stripesize %s " ,
display_size ( cmd , ( uint64_t ) lp - > stripe_size ) ) ;
}
}
2013-03-14 02:00:29 +04:00
}
2016-01-19 13:44:11 +03:00
if ( lp - > stripes > 1 & & ! lp - > stripe_size ) {
log_error ( " Stripesize for striped segment should not be 0! " ) ;
return 0 ;
}
2014-08-22 04:26:14 +04:00
/* Determine the amount to extend by */
if ( lp - > sign = = SIGN_PLUS )
seg_size = lp - > extents ;
else
2016-01-19 13:44:11 +03:00
seg_size = lp - > extents - existing_extents ;
2013-03-14 02:00:29 +04:00
2014-08-22 04:26:14 +04:00
/* Convert PEs to LEs */
2016-01-19 13:44:11 +03:00
if ( lp - > extents_are_pes & & ! seg_is_striped ( seg_last ) & & ! seg_is_virtual ( seg_last ) ) {
area_multiple = _calc_area_multiple ( seg_last - > segtype , seg_last - > area_count , 0 ) ;
seg_size = seg_size * area_multiple / ( seg_last - > area_count - seg_last - > segtype - > parity_devs ) ;
2014-08-22 04:26:14 +04:00
seg_size = ( seg_size / area_multiple ) * area_multiple ;
2013-03-14 02:00:29 +04:00
}
		if (seg_size >= (MAX_EXTENT_COUNT - existing_logical_extents)) {
			log_error("Unable to extend %s by %u logical extents: exceeds limit (%u).",
				  display_lvname(lv), seg_size, MAX_EXTENT_COUNT);
			return 0;
		}

		lp->extents = existing_logical_extents + seg_size;

		/* Don't allow a cow to grow larger than necessary. */
		if (lv_is_cow(lv)) {
			logical_extents_used = cow_max_extents(origin_from_cow(lv), find_snapshot(lv)->chunk_size);
			if (logical_extents_used < lp->extents) {
				log_print_unless_silent("Reached maximum COW size %s (%" PRIu32 " extents).",
							display_size(vg->cmd, (uint64_t) vg->extent_size * logical_extents_used),
							logical_extents_used);
				lp->extents = logical_extents_used;	// CHANGES lp->extents
				seg_size = lp->extents - existing_logical_extents;	// Recalculate
				if (lp->extents == existing_logical_extents) {
					/* Signal that normal resizing is not required */
					return 1;
				}
			}
		}
	} else {  /* If reducing, find stripes, stripesize & size of last segment */
		if (lp->stripes || lp->stripe_size || lp->mirrors)
			log_print_unless_silent("Ignoring stripes, stripesize and mirrors "
						"arguments when reducing.");

		if (lp->sign == SIGN_MINUS) {
			if (lp->extents >= existing_extents) {
				log_error("Unable to reduce %s below 1 extent.",
					  display_lvname(lv));
				return 0;
			}
			new_extents = existing_extents - lp->extents;
		} else
			new_extents = lp->extents;

		dm_list_iterate_items(seg, &lv->segments) {
			seg_logical_extents = seg->len;
			seg_physical_extents = seg->area_len * seg->area_count;	/* FIXME Also metadata, cow etc. */

			/* Check for underlying stripe sizes */
			seg_stripes = _lvseg_get_stripes(seg, &seg_stripesize);

			if (seg_is_mirrored(seg))
				seg_mirrors = lv_mirror_count(seg->lv);
			else
				seg_mirrors = 0;

			/* Have we reached the final segment of the new LV? */
			if (lp->extents_are_pes) {
				if (new_extents <= physical_extents_used + seg_physical_extents) {
					seg_size = new_extents - physical_extents_used;
					if (seg_mirrors)
						seg_size /= seg_mirrors;
					lp->extents = logical_extents_used + seg_size;
					break;
				}
			} else if (new_extents <= logical_extents_used + seg_logical_extents) {
				seg_size = new_extents - logical_extents_used;
				lp->extents = new_extents;
				break;
			}

			logical_extents_used += seg_logical_extents;
			physical_extents_used += seg_physical_extents;
		}

		lp->stripe_size = seg_stripesize;
		lp->stripes = seg_stripes;
		lp->mirrors = seg_mirrors;
	}
	/* At this point, lp->extents should hold the correct NEW logical size required. */

	if (!lp->extents) {
		log_error("New size of 0 not permitted.");
		return 0;
	}

	if (lp->extents == existing_logical_extents) {
		if (!lp->resizefs) {
			log_error("New size (%d extents) matches existing size (%d extents).",
				  lp->extents, existing_logical_extents);
			return 0;
		}
		lp->resize = LV_EXTEND; /* let's pretend a zero-size extension */
	}
	/* Perform any rounding to produce complete stripes. */
	if (lp->stripes > 1) {
		if (lp->stripe_size < STRIPE_SIZE_MIN) {
			log_error("Invalid stripe size %s.",
				  display_size(cmd, (uint64_t) lp->stripe_size));
			return 0;
		}

		/* Segment size in extents must be divisible by stripes */
		stripes_extents = lp->stripes;
		if (lp->stripe_size > vg->extent_size)
			/* Stripe size is bigger than extent size: needs more extents */
			stripes_extents *= (lp->stripe_size / vg->extent_size);

		size_rest = seg_size % stripes_extents;

		/* Round toward the original size. */
		if (size_rest &&
		    ((lp->extents < existing_logical_extents) ||
		     !lp->percent ||
		     (vg->free_count >= (lp->extents - existing_logical_extents - size_rest +
					 stripes_extents)))) {
			log_print_unless_silent("Rounding size (%d extents) up to stripe "
						"boundary size for segment (%d extents).",
						lp->extents,
						lp->extents - size_rest + stripes_extents);
			lp->extents = lp->extents - size_rest + stripes_extents;
		} else if (size_rest) {
			log_print_unless_silent("Rounding size (%d extents) down to stripe "
						"boundary size for segment (%d extents).",
						lp->extents, lp->extents - size_rest);
			lp->extents = lp->extents - size_rest;
		}
	}
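	/*
	 * Worked example of the rounding above (illustrative): with 3
	 * stripes, a 4MiB extent size and an 8MiB stripe size,
	 * stripes_extents = 3 * (8 / 4) = 6, so a requested size of 20
	 * extents is rounded up to 24 or down to 18, whichever direction
	 * the conditions above select.
	 */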
	/* Final sanity checking */
	if (lp->extents < existing_logical_extents) {
		if (lp->resize == LV_EXTEND) {
			log_error("New size given (%d extents) not larger "
				  "than existing size (%d extents).",
				  lp->extents, existing_logical_extents);
			return 0;
		}
		lp->resize = LV_REDUCE;
	} else if (lp->extents > existing_logical_extents) {
		if (lp->resize == LV_REDUCE) {
			log_error("New size given (%d extents) not less than "
				  "existing size (%d extents).", lp->extents,
				  existing_logical_extents);
			return 0;
		}
		lp->resize = LV_EXTEND;
	} else if ((lp->extents == existing_logical_extents) && !lp->use_policies) {
		if (!lp->resizefs) {
			log_error("New size (%d extents) matches existing size "
				  "(%d extents).", lp->extents, existing_logical_extents);
			return 0;
		}
		lp->resize = LV_EXTEND;
	}

	/*
	 * Has the user specified that they would like the additional
	 * extents of a mirror not to have an initial sync?
	 */
	if ((lp->extents > existing_logical_extents)) {
		if (seg_is_mirrored(first_seg(lv)) && lp->nosync)
			lv->status |= LV_NOTSYNCED;
	}

	log_debug("New size for %s: %" PRIu32 ". Existing logical extents: %" PRIu32 " / physical extents: %" PRIu32 ".",
		  display_lvname(lv), lp->extents, existing_logical_extents, saved_existing_physical_extents);

	return 1;
}
static int _lvresize_check_type(const struct logical_volume *lv,
				const struct lvresize_params *lp)
{
	struct lv_segment *seg;

	if (lv_is_origin(lv)) {
		if (lp->resize == LV_REDUCE) {
			log_error("Snapshot origin volumes cannot be reduced in size yet.");
			return 0;
		}
		if (lv_is_active(lv)) {
			log_error("Snapshot origin volumes can be resized "
				  "only while inactive: try lvchange -an.");
			return 0;
		}
	}

	if (lp->resize == LV_REDUCE) {
		if (lv_is_thin_pool_data(lv)) {
			log_error("Thin pool volumes %s cannot be reduced in size yet.",
				  display_lvname(lv));
			return 0;
		}
		if (lv_is_thin_pool_metadata(lv)) {
			log_error("Thin pool metadata volumes cannot be reduced.");
			return 0;
		}
	} else if (lp->resize == LV_EXTEND) {
		if (lv_is_thin_pool_metadata(lv) &&
		    (!(seg = find_pool_seg(first_seg(lv))) ||
		     !thin_pool_feature_supported(seg->lv, THIN_FEATURE_METADATA_RESIZE))) {
			log_error("Support for online metadata resize of %s not detected.",
				  display_lvname(lv));
			return 0;
		}

		/* Validate that the thin target supports a thin volume bigger than its external origin */
		if (lv_is_thin_volume(lv) && first_seg(lv)->external_lv &&
		    (lv->size > first_seg(lv)->external_lv->size) &&
		    !thin_pool_feature_supported(first_seg(lv)->pool_lv, THIN_FEATURE_EXTERNAL_ORIGIN_EXTEND)) {
			log_error("Thin target does not support an external origin smaller than the thin volume.");
			return 0;
		}
	}

	return 1;
}
static int _lvresize_volume(struct logical_volume *lv,
			    struct lvresize_params *lp,
			    struct dm_list *pvh)
{
	struct volume_group *vg = lv->vg;
	struct cmd_context *cmd = vg->cmd;
	uint32_t old_extents;
	alloc_policy_t alloc = lp->alloc ? : lv->alloc;

	old_extents = lv->le_count;
	log_verbose("%sing logical volume %s to %s%s",
		    (lp->resize == LV_REDUCE) ? "Reduc" : "Extend",
		    display_lvname(lv), lp->approx_alloc ? "up to " : "",
		    display_size(cmd, (uint64_t) lp->extents * vg->extent_size));

	if (lp->resize == LV_REDUCE) {
		if (!lv_reduce(lv, lv->le_count - lp->extents))
			return_0;
	} else if ((lp->extents > lv->le_count) && /* Ensure we extend */
		   !lv_extend(lv, lp->segtype,
			      lp->stripes, lp->stripe_size,
			      lp->mirrors, first_seg(lv)->region_size,
			      lp->extents - lv->le_count,
			      pvh, alloc, lp->approx_alloc))
		return_0;
	/* Check for overprovisioning only when lv_extend() passed;
	 * ATM this check does not fail */
	else if (!pool_check_overprovisioning(lv))
		return_0;

	if (old_extents == lv->le_count)
		log_print_unless_silent("Size of logical volume %s unchanged from %s (%" PRIu32 " extents).",
					display_lvname(lv),
					display_size(cmd, (uint64_t) old_extents * vg->extent_size), old_extents);
	else {
		lp->size_changed = 1;
		log_print_unless_silent("Size of logical volume %s changed from %s (%" PRIu32 " extents) to %s (%" PRIu32 " extents).",
					display_lvname(lv),
					display_size(cmd, (uint64_t) old_extents * vg->extent_size), old_extents,
					display_size(cmd, (uint64_t) lv->le_count * vg->extent_size), lv->le_count);
	}

	return 1;
}
static int _lvresize_prepare(struct logical_volume **lv,
			     struct lvresize_params *lp,
			     struct dm_list *pvh)
{
	struct volume_group *vg = (*lv)->vg;

	if (lv_is_thin_pool(*lv))
		*lv = seg_lv(first_seg(*lv), 0); /* switch to data LV */

	/* Resolve extents from size */
	if (lp->size && !_lvresize_adjust_size(vg, lp->size, lp->sign, &lp->extents))
		return_0;
	else if (lp->extents && !_lvresize_extents_from_percent(*lv, lp, pvh))
		return_0;

	if (!_lvresize_adjust_extents(*lv, lp, pvh))
		return_0;

	if (!_lvresize_check_type(*lv, lp))
		return_0;

	return 1;
}

/* Set aux LV properties; we can't use the ones from the command line */
static struct logical_volume *_lvresize_setup_aux(struct logical_volume *lv,
						  struct lvresize_params *lp)
{
	struct lv_segment *mseg = last_seg(lv);

	lp->alloc = lv->alloc;
	lp->mirrors = seg_is_mirrored(mseg) ? lv_mirror_count(lv) : 0;
	lp->resizefs = 0;
	lp->stripes = lp->mirrors ? mseg->area_count / lp->mirrors : 0;
	lp->stripe_size = mseg->stripe_size;

	return lv;
}
int lv_resize(struct logical_volume *lv,
	      struct lvresize_params *lp,
	      struct dm_list *pvh)
{
	struct volume_group *vg = lv->vg;
	struct cmd_context *cmd = vg->cmd;
	struct logical_volume *lock_lv = (struct logical_volume *) lv_lock_holder(lv);
	struct logical_volume *aux_lv = NULL; /* Note: aux_lv never resizes fs */
	struct lvresize_params aux_lp;
	struct lv_segment *seg = first_seg(lv);
	int activated = 0;
	int ret = 0;
	int status;

	if (!_lvresize_check(lv, lp))
		return_0;

	if (seg->reshape_len) {
		/* Prevent resizing on out-of-sync reshapable raid */
		if (!lv_raid_in_sync(lv)) {
			log_error("Can't resize reshaping LV %s.", display_lvname(lv));
			return 0;
		}
		/* Remove any striped raid reshape space for LV resizing */
		if (!lv_raid_free_reshape_space(lv))
			return_0;
	}

	if (lp->use_policies) {
		lp->extents = 0;
		lp->sign = SIGN_PLUS;
		lp->percent = PERCENT_LV;

		aux_lp = *lp;
		if (!_lvresize_adjust_policy(lv, &lp->extents, &aux_lp.extents))
			return_0;

		if (!lp->extents) {
			if (!aux_lp.extents)
				return 1; /* Nothing to do */
			/* Resize thin-pool metadata as the main LV */
			lv = first_seg(lv)->metadata_lv; /* metadata LV */
			lp->extents = aux_lp.extents;
		} else if (aux_lp.extents) {
			/* Also resize thin-pool metadata */
			aux_lv = _lvresize_setup_aux(first_seg(lv)->metadata_lv, &aux_lp);
		}
	} else if (lp->poolmetadata_size) {
		if (!lp->extents && !lp->size) {
			/* When only --poolmetadatasize is given (and not --size),
			 * switch directly to resizing the metadata LV */
			lv = first_seg(lv)->metadata_lv;
			lp->size = lp->poolmetadata_size;
			lp->sign = lp->poolmetadata_sign;
		} else {
			aux_lp = *lp;
			aux_lv = _lvresize_setup_aux(first_seg(lv)->metadata_lv, &aux_lp);
			aux_lp.size = lp->poolmetadata_size;
			aux_lp.sign = lp->poolmetadata_sign;
		}
	}
	/* Ensure stripe boundary extents! */
	if (!lp->percent && lv_is_raid(lv))
		lp->extents = _round_to_stripe_boundary(lv->vg, lp->extents,
							seg_is_raid1(seg) ? 0 : _raid_stripes_count(seg),
							lp->resize == LV_REDUCE ? 0 : 1);

	if (aux_lv && !_lvresize_prepare(&aux_lv, &aux_lp, pvh))
		return_0;

	/* Should always have lp->size or lp->extents at this point */
	if (!_lvresize_prepare(&lv, lp, pvh))
		return_0;

	if (((lp->resize == LV_REDUCE) ||
	     (aux_lv && aux_lp.resize == LV_REDUCE)) &&
	    (pvh != &vg->pvs))
		log_print_unless_silent("Ignoring PVs on command line when reducing.");

	/* Request confirmation before operations that are often mistakes. */
	/* aux_lv never resizes fs */
	if ((lp->resizefs || (lp->resize == LV_REDUCE)) &&
	    !_request_confirmation(lv, lp))
		return_0;

	if (lp->resizefs) {
		if (!lp->nofsck &&
		    !_fsadm_cmd(FSADM_CMD_CHECK, lv, 0, lp->yes, lp->force, &status)) {
			if (status != FSADM_CHECK_FAILS_FOR_MOUNTED) {
				log_error("Filesystem check failed.");
				return 0;
			}
			/* some filesystems support online resize */
		}

		/* FIXME forks here */
		if ((lp->resize == LV_REDUCE) &&
		    !_fsadm_cmd(FSADM_CMD_RESIZE, lv, lp->extents, lp->yes, lp->force, NULL)) {
			log_error("Filesystem resize failed.");
			return 0;
		}
	}

	if (!lp->extents && (!aux_lv || !aux_lp.extents)) {
		lp->extents = lv->le_count;
		goto out; /* Nothing to do */
	}
	if (lv_is_thin_pool(lock_lv) && /* Lock holder is thin-pool */
	    !lv_is_active(lock_lv)) {
		if (!activation()) {
			log_error("Cannot resize %s without using "
				  "device-mapper kernel driver.",
				  display_lvname(lock_lv));
			return 0;
		}
		/*
		 * An active 'hidden' -tpool can be waiting for resize while the
		 * pool LV itself is inactive.
		 * Here a plain suspend/resume would not work.
		 * So activate the pool LV temporarily (with on-disk metadata),
		 * then use suspend and resume and deactivate the pool LV,
		 * instead of searching for an active thin volume.
		 */
		if (!activate_lv_excl(cmd, lock_lv)) {
			log_error("Failed to activate %s.", display_lvname(lock_lv));
			return 0;
		}

		activated = 1;
	}

	/*
	 * If the LV is locked from activation, this lock call is a no-op.
	 * Otherwise, this acquires a transient lock on the lv (not PERSISTENT).
	 */
	if (!lockd_lv(cmd, lock_lv, "ex", 0))
		return_0;

	if (!archive(vg))
		return_0;

	if (aux_lv) {
		if (!_lvresize_volume(aux_lv, &aux_lp, pvh))
			goto_bad;

		/* store vg on disk(s) */
		if (aux_lp.size_changed && !lv_update_and_reload(lock_lv))
			goto_bad;
	}

	if (!_lvresize_volume(lv, lp, pvh))
		goto_bad;

	/* store vg on disk(s) */
	if (!lp->size_changed)
		goto out; /* No table reload needed */

	if (!lv_update_and_reload(lock_lv))
		goto_bad;

	if (lv_is_cow_covering_origin(lv))
		if (!monitor_dev_for_events(cmd, lv, 0, 0))
			stack;

	if (lv_is_thin_pool(lock_lv)) {
		/* Update lvm pool metadata (drop messages). */
		if (!update_pool_lv(lock_lv, 0))
			goto_bad;

		backup(vg);
	}
out:
	log_print_unless_silent("Logical volume %s successfully resized.",
				display_lvname(lv));

	if (lp->resizefs && (lp->resize == LV_EXTEND) &&
	    !_fsadm_cmd(FSADM_CMD_RESIZE, lv, lp->extents, lp->yes, lp->force, NULL))
		return_0;

	ret = 1;
bad:
	if (activated && !deactivate_lv(cmd, lock_lv)) {
		log_error("Problem deactivating %s.", display_lvname(lock_lv));
		ret = 0;
	}

	return ret;
}
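/*
 * Caller sketch for lv_resize() (illustrative only; assumes default
 * initialisation of the remaining lvresize_params members is acceptable):
 * extend an LV by 1GiB using any PVs in its VG:
 *
 *	struct lvresize_params lp = { 0 };
 *
 *	lp.sign = SIGN_PLUS;
 *	lp.size = UINT64_C(2097152);	// 1GiB in 512-byte sectors
 *	if (!lv_resize(lv, &lp, &lv->vg->pvs))
 *		stack;
 */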
char *generate_lv_name(struct volume_group *vg, const char *format,
		       char *buffer, size_t len)
{
	struct lv_list *lvl;
	struct glv_list *glvl;
	int high = -1, i;

	dm_list_iterate_items(lvl, &vg->lvs) {
		if (sscanf(lvl->lv->name, format, &i) != 1)
			continue;

		if (i > high)
			high = i;
	}

	dm_list_iterate_items(glvl, &vg->historical_lvs) {
		if (sscanf(glvl->glv->historical->name, format, &i) != 1)
			continue;

		if (i > high)
			high = i;
	}

	if (dm_snprintf(buffer, len, format, high + 1) < 0)
		return NULL;

	return buffer;
}
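/*
 * Usage sketch (illustrative): with existing LVs "lvol0" and "lvol2"
 * in the VG,
 *
 *	char buf[NAME_LEN];
 *	const char *name = generate_lv_name(vg, "lvol%d", buf, sizeof(buf));
 *
 * scans live and historical names with sscanf(), finds the highest
 * matching index (2) and returns "lvol3" formatted into buf.
 */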
struct generic_logical_volume *get_or_create_glv(struct dm_pool *mem, struct logical_volume *lv, int *glv_created)
{
	struct generic_logical_volume *glv;

	if (!(glv = lv->this_glv)) {
		if (!(glv = dm_pool_zalloc(mem, sizeof(struct generic_logical_volume)))) {
			log_error("Failed to allocate generic logical volume structure.");
			return NULL;
		}
		glv->live = lv;
		lv->this_glv = glv;
		if (glv_created)
			*glv_created = 1;
	} else if (glv_created)
		*glv_created = 0;

	return glv;
}

struct glv_list *get_or_create_glvl(struct dm_pool *mem, struct logical_volume *lv, int *glv_created)
{
	struct glv_list *glvl;

	if (!(glvl = dm_pool_zalloc(mem, sizeof(struct glv_list)))) {
		log_error("Failed to allocate generic logical volume list item.");
		return NULL;
	}

	if (!(glvl->glv = get_or_create_glv(mem, lv, glv_created))) {
		dm_pool_free(mem, glvl);
		return_NULL;
	}

	return glvl;
}

int add_glv_to_indirect_glvs(struct dm_pool *mem,
			     struct generic_logical_volume *origin_glv,
			     struct generic_logical_volume *glv)
{
	struct glv_list *glvl;

	if (!(glvl = dm_pool_zalloc(mem, sizeof(struct glv_list)))) {
		log_error("Failed to allocate generic volume list item "
			  "for indirect glv %s.", glv->is_historical ? glv->historical->name
								     : glv->live->name);
		return 0;
	}

	glvl->glv = glv;

	if (glv->is_historical)
		glv->historical->indirect_origin = origin_glv;
	else
		first_seg(glv->live)->indirect_origin = origin_glv;

	if (origin_glv) {
		if (origin_glv->is_historical)
			dm_list_add(&origin_glv->historical->indirect_glvs, &glvl->list);
		else
			dm_list_add(&origin_glv->live->indirect_glvs, &glvl->list);
	}

	return 1;
}
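/*
 * Illustrative example (hypothetical names): after snapshot "snap1" of
 * "lvol0" is removed and kept as a historical LV, calling
 *
 *	add_glv_to_indirect_glvs(vg->vgmem, lvol0_glv, snap1_glv);
 *
 * records lvol0_glv as snap1's indirect_origin and adds snap1_glv to
 * lvol0's indirect_glvs list, preserving the origin chain.
 */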
int remove_glv_from_indirect_glvs(struct generic_logical_volume *origin_glv,
				  struct generic_logical_volume *glv)
{
	struct glv_list *glvl, *tglvl;
	struct dm_list *list = origin_glv->is_historical ? &origin_glv->historical->indirect_glvs
							 : &origin_glv->live->indirect_glvs;

	dm_list_iterate_items_safe(glvl, tglvl, list) {
		if (glvl->glv != glv)
			continue;

		dm_list_del(&glvl->list);

		if (glvl->glv->is_historical)
			glvl->glv->historical->indirect_origin = NULL;
		else
			first_seg(glvl->glv->live)->indirect_origin = NULL;

		return 1;
	}

	log_error(INTERNAL_ERROR "%s logical volume %s is not a user of %s.",
		  glv->is_historical ? "Historical" : "Live",
		  glv->is_historical ? glv->historical->name : glv->live->name,
		  origin_glv->is_historical ? origin_glv->historical->name : origin_glv->live->name);
	return 0;
}
struct logical_volume *alloc_lv(struct dm_pool *mem)
{
	struct logical_volume *lv;

	if (!(lv = dm_pool_zalloc(mem, sizeof(*lv)))) {
		log_error("Unable to allocate logical volume structure.");
		return NULL;
	}

	dm_list_init(&lv->snapshot_segs);
	dm_list_init(&lv->segments);
	dm_list_init(&lv->tags);
	dm_list_init(&lv->segs_using_this_lv);
	dm_list_init(&lv->indirect_glvs);

	return lv;
}
/*
 * Create a new empty LV.
 */
struct logical_volume *lv_create_empty(const char *name,
				       union lvid *lvid,
				       uint64_t status,
				       alloc_policy_t alloc,
				       struct volume_group *vg)
{
	struct format_instance *fi = vg->fid;
	struct logical_volume *lv;
	char dname[NAME_LEN];
	int historical;

	if (vg_max_lv_reached(vg))
		stack;

	if (strstr(name, "%d") &&
	    !(name = generate_lv_name(vg, name, dname, sizeof(dname)))) {
		log_error("Failed to generate unique name for the new "
			  "logical volume.");
		return NULL;
	}

	if (lv_name_is_used_in_vg(vg, name, &historical)) {
		log_error("Unable to create LV %s in Volume Group %s: "
			  "name already in use%s.", name, vg->name,
			  historical ? " by historical LV" : "");
		return NULL;
	}

	log_verbose("Creating logical volume %s.", name);

	if (!(lv = alloc_lv(vg->vgmem)))
		return_NULL;

	if (!(lv->name = dm_pool_strdup(vg->vgmem, name)))
		goto_bad;

	lv->status = status;
	lv->alloc = alloc;
	lv->read_ahead = vg->cmd->default_settings.read_ahead;
	lv->major = -1;
	lv->minor = -1;
	lv->size = UINT64_C(0);
	lv->le_count = 0;

	if (lvid)
		lv->lvid = *lvid;

	if (!link_lv_to_vg(vg, lv))
		goto_bad;

	if (!lv_set_creation(lv, NULL, 0))
		goto_bad;

	if (fi->fmt->ops->lv_setup && !fi->fmt->ops->lv_setup(fi, lv))
		goto_bad;

	if (vg->fid->fmt->features & FMT_CONFIG_PROFILE)
		lv->profile = vg->cmd->profile_params->global_metadata_profile;

	return lv;
bad:
	dm_pool_free(vg->vgmem, lv);
	return NULL;
}
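/*
 * Usage sketch (illustrative; the flag combination is an assumption, not
 * a recommendation): create a visible, writable LV shell with an
 * auto-generated name; extents are allocated separately, e.g. by
 * lv_extend():
 *
 *	struct logical_volume *lv;
 *
 *	if (!(lv = lv_create_empty("lvol%d", NULL,
 *				   LVM_READ | LVM_WRITE | VISIBLE_LV,
 *				   ALLOC_INHERIT, vg)))
 *		return_0;
 */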
static int _add_pvs(struct cmd_context *cmd, struct pv_segment *peg,
		    uint32_t s __attribute__((unused)), void *data)
{
	struct seg_pvs *spvs = (struct seg_pvs *) data;
	struct pv_list *pvl;

	/* Don't add again if it's already on list. */
	if (find_pv_in_pv_list(&spvs->pvs, peg->pv))
		return 1;

	if (!(pvl = dm_pool_alloc(cmd->mem, sizeof(*pvl)))) {
		log_error("pv_list allocation failed.");
		return 0;
	}

	pvl->pv = peg->pv;

	dm_list_add(&spvs->pvs, &pvl->list);

	return 1;
}
/*
 * build_parallel_areas_from_lv
 * @lv
 * @use_pvmove_parent_lv
 * @create_single_list
 *
 * For each segment in an LV, create a list of PVs used by the segment.
 * Thus, the returned list is really a list of segments (seg_pvs)
 * containing a list of PVs that are in use by that segment.
 *
 * use_pvmove_parent_lv:	For pvmove we use the *parent* LV so we can
 *				pick up stripes & existing mirrors etc.
 * create_single_list:		Instead of creating a list of segments that
 *				each contain a list of PVs, return a list
 *				containing just one segment (i.e. seg_pvs)
 *				that contains a list of all the PVs used by
 *				the entire LV and all its segments.
 */
struct dm_list *build_parallel_areas_from_lv(struct logical_volume *lv,
					     unsigned use_pvmove_parent_lv,
					     unsigned create_single_list)
{
	struct cmd_context *cmd = lv->vg->cmd;
	struct dm_list *parallel_areas;
	struct seg_pvs *spvs = NULL;
	uint32_t current_le = 0;
	uint32_t raid_multiple;
	struct lv_segment *seg = first_seg(lv);

	if (!(parallel_areas = dm_pool_alloc(cmd->mem, sizeof(*parallel_areas)))) {
		log_error("parallel_areas allocation failed.");
		return NULL;
	}

	dm_list_init(parallel_areas);

	do {
		if (!spvs || !create_single_list) {
			if (!(spvs = dm_pool_zalloc(cmd->mem, sizeof(*spvs)))) {
				log_error("allocation failed.");
				return NULL;
			}

			dm_list_init(&spvs->pvs);
			dm_list_add(parallel_areas, &spvs->list);
		}

		spvs->le = current_le;
		spvs->len = lv->le_count - current_le;

		if (use_pvmove_parent_lv &&
		    !(seg = find_seg_by_le(lv, current_le))) {
			log_error("Failed to find segment for %s extent %" PRIu32 ".",
				  lv->name, current_le);
			return 0;
		}

		/* Find next segment end */
		/* FIXME Unnecessary nesting! */
		if (!_for_each_pv(cmd, use_pvmove_parent_lv ? seg->pvmove_source_seg->lv : lv,
				  use_pvmove_parent_lv ? seg->pvmove_source_seg->le : current_le,
				  use_pvmove_parent_lv ? spvs->len * _calc_area_multiple(seg->pvmove_source_seg->segtype, seg->pvmove_source_seg->area_count, 0) : spvs->len,
				  use_pvmove_parent_lv ? seg->pvmove_source_seg : NULL,
				  &spvs->len,
				  0, 0, -1, 0, _add_pvs, (void *) spvs))
			return_NULL;

		current_le = spvs->le + spvs->len;
		raid_multiple = (seg->segtype->parity_devs) ?
			seg->area_count - seg->segtype->parity_devs : 1;
	} while ((current_le * raid_multiple) < lv->le_count);

	if (create_single_list) {
		spvs->le = 0;
		spvs->len = lv->le_count;
	}

	/*
	 * FIXME: Merge adjacent segments with identical PV lists
	 * (avoids need for contiguous allocation attempts between
	 * successful allocations)
	 */

	return parallel_areas;
}
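/*
 * Shape of the result (illustrative): for a two-segment LV the returned
 * list holds two seg_pvs entries, e.g.
 *
 *	{ le = 0,   len = 100, pvs = { pv1, pv2 } }
 *	{ le = 100, len = 50,  pvs = { pv3 } }
 *
 * With create_single_list set, a single entry covers le 0..le_count
 * with the union of all PVs used anywhere in the LV.
 */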
void lv_set_visible(struct logical_volume *lv)
{
	if (lv_is_visible(lv))
		return;

	lv->status |= VISIBLE_LV;

	log_debug_metadata("LV %s in VG %s is now visible.", lv->name, lv->vg->name);
}

void lv_set_hidden(struct logical_volume *lv)
{
	if (!lv_is_visible(lv))
		return;

	lv->status &= ~VISIBLE_LV;

	log_debug_metadata("LV %s in VG %s is now hidden.", lv->name, lv->vg->name);
}
int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
		     force_t force, int suppress_remove_message)
{
	struct volume_group *vg;
	struct logical_volume *format1_origin = NULL;
	int format1_reload_required = 0;
	int visible, historical;
	struct logical_volume *pool_lv = NULL;
	struct logical_volume *lock_lv = lv;
	struct lv_segment *cache_seg = NULL;
	int ask_discard;
	struct lv_list *lvl;
	struct seg_list *sl;
	struct lv_segment *seg = first_seg(lv);
	int is_last_pool = lv_is_pool(lv);

	vg = lv->vg;

	if (!vg_check_status(vg, LVM_WRITE))
		return_0;

	if (lv_is_origin(lv)) {
		log_error("Can't remove logical volume %s under snapshot.",
			  display_lvname(lv));
		return 0;
	}

	if (lv_is_external_origin(lv)) {
		log_error("Can't remove external origin logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	if (lv_is_mirror_image(lv)) {
		log_error("Can't remove logical volume %s used by a mirror.",
			  display_lvname(lv));
		return 0;
	}

	if (lv_is_mirror_log(lv)) {
		log_error("Can't remove logical volume %s used as mirror log.",
			  display_lvname(lv));
		return 0;
	}

	if (lv_is_raid_metadata(lv) || lv_is_raid_image(lv)) {
		log_error("Can't remove logical volume %s used as RAID device.",
			  display_lvname(lv));
		return 0;
	}

	if (lv_is_thin_pool_data(lv) || lv_is_thin_pool_metadata(lv) ||
	    lv_is_cache_pool_data(lv) || lv_is_cache_pool_metadata(lv)) {
		log_error("Can't remove logical volume %s used by a pool.",
			  display_lvname(lv));
		return 0;
	}

	if (lv_is_thin_volume(lv)) {
		if (!(pool_lv = first_seg(lv)->pool_lv)) {
			log_error(INTERNAL_ERROR "Thin LV %s without pool.",
				  display_lvname(lv));
			return 0;
		}
		lock_lv = pool_lv;
	}

	if (lv_is_locked(lv)) {
		log_error("Can't remove locked logical volume %s.", display_lvname(lv));
		return 0;
	}

	if (!lockd_lv(cmd, lock_lv, "ex", LDLV_PERSISTENT))
		return_0;
	/* FIXME Ensure not referred to by other existing LVs */
	ask_discard = find_config_tree_bool(cmd, devices_issue_discards_CFG, NULL);

	if (!lv_is_cache_pool(lv) && /* cache pool cannot be active */
	    lv_is_active(lv)) {
		if (!lv_check_not_in_use(lv, 1))
			return_0;

		if ((force == PROMPT) &&
		    !lv_is_pending_delete(lv) &&
		    lv_is_visible(lv) &&
		    lv_is_active(lv)) {
			if (yes_no_prompt("Do you really want to remove%s active "
					  "%slogical volume %s? [y/n]: ",
					  ask_discard ? " and DISCARD" : "",
					  vg_is_clustered(vg) ? " clustered" : "",
					  display_lvname(lv)) == 'n') {
				log_error("Logical volume %s not removed.", display_lvname(lv));
				return 0;
			}

			ask_discard = 0;
		}
	}

	if (!lv_is_historical(lv) && (force == PROMPT) && ask_discard &&
	    yes_no_prompt("Do you really want to remove and DISCARD "
			  "logical volume %s? [y/n]: ",
			  display_lvname(lv)) == 'n') {
		log_error("Logical volume %s not removed.", display_lvname(lv));
		return 0;
	}

	if (lv_is_cache(lv) && !lv_is_pending_delete(lv)) {
		if (!lv_remove_single(cmd, first_seg(lv)->pool_lv, force,
				      suppress_remove_message)) {
			if (force < DONT_PROMPT_OVERRIDE) {
				log_error("Failed to uncache %s.", display_lvname(lv));
				return 0;
			}
			/* Proceed with -ff */
			log_print_unless_silent("Ignoring uncache failure of %s.",
						display_lvname(lv));
		}
		is_last_pool = 1;
	}

	/* Special case removing a striped raid LV with allocated reshape space */
	if (seg && seg->reshape_len) {
		if (!(seg->segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)))
			return_0;
		lv->le_count = seg->len = seg->area_len = seg_lv(seg, 0)->le_count * seg->area_count;
	}

	/* Used cache pool, COW or historical LV cannot be activated */
	if ((!lv_is_cache_pool(lv) || dm_list_empty(&lv->segs_using_this_lv)) &&
	    !lv_is_cow(lv) && !lv_is_historical(lv) &&
	    !deactivate_lv(cmd, lv)) {
		/* FIXME Review and fix the snapshot error paths! */
		log_error("Unable to deactivate logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	if (!archive(vg))
		return 0;

	/* Clear thin pool stacked messages */
	if (pool_lv && !pool_has_message(first_seg(pool_lv), lv, 0) &&
	    !update_pool_lv(pool_lv, 1)) {
		if (force < DONT_PROMPT_OVERRIDE) {
			log_error("Failed to update pool %s.", display_lvname(pool_lv));
			return 0;
		}
		log_print_unless_silent("Ignoring update failure of pool %s.",
					display_lvname(pool_lv));
		pool_lv = NULL; /* Do not retry */
	}

	/* When referenced by an LV with the pending-delete flag, remove that deleted LV first */
	dm_list_iterate_items(sl, &lv->segs_using_this_lv)
		if (lv_is_pending_delete(sl->seg->lv) && !lv_remove(sl->seg->lv)) {
			log_error("Error releasing logical volume %s with pending delete.",
				  display_lvname(sl->seg->lv));
			return 0;
		}
	if (lv_is_cow(lv)) {
		/* Old format1 code */
		if (!(lv->vg->fid->fmt->features & FMT_MDAS))
			format1_origin = origin_from_cow(lv);

		log_verbose("Removing snapshot volume %s.", display_lvname(lv));
		/* vg_remove_snapshot() will preload origin/former snapshots */
		if (!vg_remove_snapshot(lv))
			return_0;

		if (!deactivate_lv(cmd, lv)) {
			/* FIXME Review and fix the snapshot error paths! */
			log_error("Unable to deactivate logical volume %s.",
				  display_lvname(lv));
			return 0;
		}
	}

	if (lv_is_cache_pool(lv)) {
		/* Cache pool removal drops the cache layer.
		 * If the cache pool is not linked, we can simply remove it. */
		if (!dm_list_empty(&lv->segs_using_this_lv)) {
			if (!(cache_seg = get_only_segment_using_this_lv(lv)))
				return_0;
			/* TODO: polling */
			if (!lv_cache_remove(cache_seg->lv))
				return_0;
		}
	}

	visible = lv_is_visible(lv);
	historical = lv_is_historical(lv);

	log_verbose("Releasing %slogical volume \"%s\".",
		    historical ? "historical " : "",
		    historical ? lv->this_glv->historical->name : lv->name);

	if (!lv_remove(lv)) {
		log_error("Error releasing %slogical volume \"%s\".",
			  historical ? "historical " : "",
			  historical ? lv->this_glv->historical->name : lv->name);
		return 0;
	}

	if (is_last_pool && vg->pool_metadata_spare_lv) {
		/* When the last pool is removed, also remove the spare */
		dm_list_iterate_items(lvl, &vg->lvs)
			if (lv_is_pool_metadata(lvl->lv)) {
				is_last_pool = 0;
				break;
			}

		if (is_last_pool) {
			/* This is a purely internal LV, no question */
			if (!deactivate_lv(cmd, vg->pool_metadata_spare_lv)) {
				log_error("Unable to deactivate spare logical volume %s.",
					  display_lvname(vg->pool_metadata_spare_lv));
				return 0;
			}
			if (!lv_remove(vg->pool_metadata_spare_lv))
				return_0;
		}
	}

	/*
	 * Old format1 code: If no snapshots are left, reload without -real.
	 */
	if (format1_origin && !lv_is_origin(format1_origin)) {
		log_warn("WARNING: Support for snapshots with old LVM1-style metadata is deprecated.");
		log_warn("WARNING: Please use lvconvert to update to lvm2 metadata at your convenience.");
		format1_reload_required = 1;
	}

	/* store it on disks */
	if (!vg_write(vg) || !vg_commit(vg))
		return_0;

	/* format1 */
	if (format1_reload_required) {
		if (!suspend_lv(cmd, format1_origin))
			log_error("Failed to refresh %s without snapshot.", format1_origin->name);

		if (!resume_lv(cmd, format1_origin)) {
			log_error("Failed to resume %s.", format1_origin->name);
			return 0;
		}
	}

	/* Release unneeded blocks in thin pool */
	/* TODO: defer when multiple LVs are released at once */
	if (pool_lv && !update_pool_lv(pool_lv, 1)) {
		if (force < DONT_PROMPT_OVERRIDE) {
			log_error("Failed to update pool %s.", display_lvname(pool_lv));
			return 0;
		}
		log_print_unless_silent("Ignoring update failure of pool %s.",
					display_lvname(pool_lv));
	}

	backup(vg);

	lockd_lv(cmd, lock_lv, "un", LDLV_PERSISTENT);
	lockd_free_lv(cmd, vg, lv->name, &lv->lvid.id[1], lv->lock_args);

	if (!suppress_remove_message && (visible || historical))
		log_print_unless_silent("%sogical volume \"%s\" successfully removed.",
					historical ? "Historical l" : "L",
					historical ? lv->this_glv->historical->name : lv->name);

	return 1;
}
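/*
 * Note on the force levels used above (as applied in this function):
 * PROMPT asks before removing active volumes or discarding data, while
 * DONT_PROMPT_OVERRIDE (-ff) additionally downgrades uncache and pool
 * update failures from hard errors to warnings.
 */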
static int _lv_remove_segs_using_this_lv(struct cmd_context *cmd, struct logical_volume *lv,
					 const force_t force, unsigned level,
					 const char *lv_type)
{
	struct seg_list *sl;

	if ((force == PROMPT) &&
	    yes_no_prompt("Removing %s \"%s\" will remove %u dependent volume(s). "
			  "Proceed? [y/n]: ", lv_type, lv->name,
			  dm_list_size(&lv->segs_using_this_lv)) == 'n') {
		log_error("Logical volume \"%s\" not removed.", lv->name);
		return 0;
	}
	/*
	 * Not using the _safe iterator here - since we may delete the whole subtree
	 * (similar to process_each_lv_in_vg())
	 * the code is roughly equivalent to this:
	 *
	 * while (!dm_list_empty(&lv->segs_using_this_lv))
	 *	dm_list_iterate_items(sl, &lv->segs_using_this_lv)
	 *		break;
	 */
	dm_list_iterate_items(sl, &lv->segs_using_this_lv)
		if (!lv_remove_with_dependencies(cmd, sl->seg->lv,
						 force, level + 1))
			return_0;

	return 1;
}
/*
 * Remove an LV with its dependencies - LV leaf nodes should be removed first.
 */
int lv_remove_with_dependencies(struct cmd_context *cmd, struct logical_volume *lv,
				const force_t force, unsigned level)
{
	dm_percent_t snap_percent;
	struct dm_list *snh, *snht;
	struct lvinfo info;
	struct lv_list *lvl;
	struct logical_volume *origin;

	if (lv_is_cow(lv)) {
		/*
		 * A merging snapshot cannot be removed directly unless
		 * it has been invalidated or failed merge removal is requested.
		 */
		if (lv_is_merging_cow(lv) && !level) {
			if (lv_info(lv->vg->cmd, lv, 0, &info, 1, 0) &&
			    info.exists && info.live_table) {
				if (!lv_snapshot_percent(lv, &snap_percent)) {
					log_error("Failed to obtain merging snapshot progress "
						  "percentage for logical volume %s.",
						  display_lvname(lv));
					return 0;
				}

				if ((snap_percent != DM_PERCENT_INVALID) &&
				    (snap_percent != LVM_PERCENT_MERGE_FAILED)) {
					log_error("Can't remove merging snapshot logical volume %s.",
						  display_lvname(lv));
					return 0;
				}

				if ((snap_percent == LVM_PERCENT_MERGE_FAILED) &&
				    (force == PROMPT) &&
				    yes_no_prompt("Removing snapshot %s that failed to merge "
						  "may leave origin %s inconsistent. Proceed? [y/n]: ",
						  display_lvname(lv),
						  display_lvname(origin_from_cow(lv))) == 'n')
					goto no_remove;
			}
		} else if (!level && lv_is_virtual_origin(origin = origin_from_cow(lv)))
			/* If this is a sparse device, remove its origin too. */
			/* Stacking is not supported */
			lv = origin;
	}
	if (lv_is_origin(lv)) {
		/* Remove snapshot LVs first */
		if ((force == PROMPT) &&
		    /* Active snapshot already needs to confirm each active LV */
		    (yes_no_prompt("Do you really want to remove%s "
				   "%sorigin logical volume %s with %u snapshot(s)? [y/n]: ",
				   lv_is_active(lv) ? " active" : "",
				   vg_is_clustered(lv->vg) ? " clustered" : "",
				   display_lvname(lv),
				   lv->origin_count) == 'n'))
			goto no_remove;

		if (!deactivate_lv(cmd, lv)) {
			stack;
			goto no_remove;
		}

		log_verbose("Removing origin logical volume %s with %u snapshot(s).",
			    display_lvname(lv), lv->origin_count);

		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!lv_remove_with_dependencies(cmd, dm_list_struct_base(snh, struct lv_segment,
										  origin_list)->cow,
							 force, level + 1))
				return_0;
	}

	if (lv_is_merging_origin(lv)) {
		if (!deactivate_lv(cmd, lv)) {
			log_error("Unable to fully deactivate merging origin %s.",
				  display_lvname(lv));
			return 0;
		}
		if (!lv_remove_with_dependencies(cmd, find_snapshot(lv)->lv,
						 force, level + 1)) {
			log_error("Unable to remove merging origin %s.",
				  display_lvname(lv));
			return 0;
		}
	}

	if (!level && lv_is_merging_thin_snapshot(lv)) {
		/* Merged snapshot LV is no longer available for the user */
		log_error("Unable to remove %s, volume is merged to %s.",
			  display_lvname(lv), display_lvname(first_seg(lv)->merge_lv));
		return 0;
	}

	if (lv_is_external_origin(lv) &&
	    !_lv_remove_segs_using_this_lv(cmd, lv, force, level, "external origin"))
		return_0;

	if (lv_is_used_thin_pool(lv) &&
	    !_lv_remove_segs_using_this_lv(cmd, lv, force, level, "pool"))
		return_0;

	if (lv_is_cache_pool(lv) && !lv_is_used_cache_pool(lv)) {
		if (!deactivate_lv(cmd, first_seg(lv)->metadata_lv) ||
		    !deactivate_lv(cmd, seg_lv(first_seg(lv), 0))) {
			log_error("Unable to fully deactivate unused cache-pool %s.",
				  display_lvname(lv));
			return 0;
		}
	}

	if (lv_is_pool_metadata_spare(lv) &&
	    (force == PROMPT)) {
		dm_list_iterate_items(lvl, &lv->vg->lvs)
			if (lv_is_pool_metadata(lvl->lv)) {
				if (yes_no_prompt("Removal of pool metadata spare logical volume "
						  "%s disables automatic recovery attempts "
						  "after damage to a thin or cache pool. "
						  "Proceed? [y/n]: ", display_lvname(lv)) == 'n')
					goto no_remove;
				break;
			}
	}

	return lv_remove_single(cmd, lv, force, 0);

no_remove:
	log_error("Logical volume %s not removed.", display_lvname(lv));

	return 0;
}
static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
{
	struct volume_group *vg = lv->vg;
	int do_backup = 0, r = 0;
	const struct logical_volume *lock_lv = lv_lock_holder(lv);

	log_very_verbose("Updating logical volume %s on disk(s)%s.",
			 display_lvname(lock_lv), origin_only ? " (origin only)" : "");

	if (!vg_write(vg))
		return_0;

	if (origin_only && (lock_lv != lv)) {
		log_debug_activation("Dropping origin_only for %s as lock holds %s.",
				     display_lvname(lv), display_lvname(lock_lv));
		origin_only = 0;
	}

	if (!(origin_only ? suspend_lv_origin(vg->cmd, lock_lv) : suspend_lv(vg->cmd, lock_lv))) {
		log_error("Failed to lock logical volume %s.",
			  display_lvname(lock_lv));
		vg_revert(vg);
	} else if (!(r = vg_commit(vg)))
		stack; /* !vg_commit() has implicit vg_revert() */
	else
		do_backup = 1;

	log_very_verbose("Updating logical volume %s in kernel.",
			 display_lvname(lock_lv));

	if (!(origin_only ? resume_lv_origin(vg->cmd, lock_lv) : resume_lv(vg->cmd, lock_lv))) {
		log_error("Problem reactivating logical volume %s.",
			  display_lvname(lock_lv));
		r = 0;
	}

	if (do_backup && !critical_section())
		backup(vg);

	return r;
}
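/*
 * The sequence above is the usual metadata transaction: vg_write()
 * stages the new metadata, suspend quiesces the device (with
 * vg_revert() on failure), vg_commit() makes the metadata live, and
 * resume loads the new kernel table; backup() runs last, and only
 * outside a critical section.
 */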
int lv_update_and_reload(struct logical_volume *lv)
{
	return _lv_update_and_reload(lv, 0);
}

int lv_update_and_reload_origin(struct logical_volume *lv)
{
	return _lv_update_and_reload(lv, 1);
}
2007-12-20 18:42:55 +03:00
/*
 * insert_layer_for_segments_on_pv() inserts a layer segment for a segment area.
 * However, layer modification could split the underlying layer segment.
 * This function splits the parent area so as to keep the 1:1 relationship
 * between the parent area and the underlying layer segment.
 * Since the layer LV might have other layers below, build_parallel_areas()
 * is used to find the lowest-level segment boundaries.
 */
static int _split_parent_area ( struct lv_segment * seg , uint32_t s ,
2008-11-04 01:14:30 +03:00
struct dm_list * layer_seg_pvs )
2007-12-20 18:42:55 +03:00
{
uint32_t parent_area_len , parent_le , layer_le ;
uint32_t area_multiple ;
struct seg_pvs * spvs ;
if ( seg_is_striped ( seg ) )
area_multiple = seg - > area_count ;
else
area_multiple = 1 ;
parent_area_len = seg - > area_len ;
parent_le = seg - > le ;
layer_le = seg_le ( seg , s ) ;
while ( parent_area_len > 0 ) {
/* Find the layer segment pointed at */
if ( ! ( spvs = _find_seg_pvs_by_le ( layer_seg_pvs , layer_le ) ) ) {
2016-06-23 14:04:37 +03:00
log_error ( " layer segment for %s: " FMTu32 " not found. " ,
display_lvname ( seg - > lv ) , parent_le ) ;
2007-12-20 18:42:55 +03:00
return 0 ;
}
if ( spvs - > le ! = layer_le ) {
log_error ( " Incompatible layer boundary: "
2016-06-23 14:04:37 +03:00
" %s: " FMTu32 " [ " FMTu32 " ] on %s: " FMTu32 " . " ,
display_lvname ( seg - > lv ) , parent_le , s ,
display_lvname ( seg_lv ( seg , s ) ) , layer_le ) ;
2007-12-20 18:42:55 +03:00
return 0 ;
}
if ( spvs - > len < parent_area_len ) {
parent_le + = spvs - > len * area_multiple ;
if ( ! lv_split_segment ( seg - > lv , parent_le ) )
return_0 ;
}
parent_area_len - = spvs - > len ;
layer_le + = spvs - > len ;
}
return 1 ;
}
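/*
 * Editor's worked example (figures assumed for illustration): for a striped
 * parent segment with area_count = 3, area_multiple = 3, so a layer chunk of
 * spvs->len = 4 LEs advances parent_le by 4 * 3 = 12 LEs before the parent
 * segment is split. For non-striped segments area_multiple = 1 and parent
 * and layer extents advance in lockstep.
 */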
/*
 * Split the parent LV segments if the layer LV beneath it has been split.
 */
int split_parent_segments_for_layer ( struct cmd_context * cmd ,
struct logical_volume * layer_lv )
{
struct lv_list * lvl ;
struct logical_volume * parent_lv ;
struct lv_segment * seg ;
uint32_t s ;
2008-11-04 01:14:30 +03:00
struct dm_list * parallel_areas ;
2007-12-20 18:42:55 +03:00
2014-06-26 06:20:41 +04:00
if ( ! ( parallel_areas = build_parallel_areas_from_lv ( layer_lv , 0 , 0 ) ) )
2007-12-20 18:42:55 +03:00
return_0 ;
/* Loop through all LVs except itself */
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( lvl , & layer_lv - > vg - > lvs ) {
2007-12-20 18:42:55 +03:00
parent_lv = lvl - > lv ;
if ( parent_lv = = layer_lv )
continue ;
/* Find all segments that point at the layer LV */
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( seg , & parent_lv - > segments ) {
2007-12-20 18:42:55 +03:00
for ( s = 0 ; s < seg - > area_count ; s + + ) {
if ( seg_type ( seg , s ) ! = AREA_LV | |
seg_lv ( seg , s ) ! = layer_lv )
continue ;
if ( ! _split_parent_area ( seg , s , parallel_areas ) )
return_0 ;
}
}
}
return 1 ;
}
/* Remove a layer from the LV */
int remove_layers_for_segments ( struct cmd_context * cmd ,
struct logical_volume * lv ,
struct logical_volume * layer_lv ,
2009-11-25 01:55:55 +03:00
uint64_t status_mask , struct dm_list * lvs_changed )
2007-12-20 18:42:55 +03:00
{
struct lv_segment * seg , * lseg ;
uint32_t s ;
int lv_changed = 0 ;
struct lv_list * lvl ;
2008-01-19 01:00:46 +03:00
log_very_verbose ( " Removing layer %s for segments of %s " ,
layer_lv - > name , lv - > name ) ;
2007-12-20 18:42:55 +03:00
/* Find all segments that point at the temporary mirror */
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( seg , & lv - > segments ) {
2007-12-20 18:42:55 +03:00
for ( s = 0 ; s < seg - > area_count ; s + + ) {
if ( seg_type ( seg , s ) ! = AREA_LV | |
seg_lv ( seg , s ) ! = layer_lv )
continue ;
/* Find the layer segment pointed at */
if ( ! ( lseg = find_seg_by_le ( layer_lv , seg_le ( seg , s ) ) ) ) {
log_error ( " Layer segment found: %s:% " PRIu32 ,
layer_lv - > name , seg_le ( seg , s ) ) ;
return 0 ;
}
/* Check the segment params are compatible */
if ( ! seg_is_striped ( lseg ) | | lseg - > area_count ! = 1 ) {
log_error ( " Layer is not linear: %s:% " PRIu32 ,
layer_lv - > name , lseg - > le ) ;
return 0 ;
}
2014-06-20 14:41:20 +04:00
if ( ( lseg - > status & status_mask ) ! = status_mask ) {
2007-12-20 18:42:55 +03:00
log_error ( " Layer status does not match: "
2009-11-25 01:55:55 +03:00
" %s:% " PRIu32 " status: 0x% " PRIx64 " /0x% " PRIx64 ,
2007-12-20 18:42:55 +03:00
layer_lv - > name , lseg - > le ,
lseg - > status , status_mask ) ;
return 0 ;
}
if ( lseg - > le ! = seg_le ( seg , s ) | |
lseg - > area_len ! = seg - > area_len ) {
log_error ( " Layer boundary mismatch: "
" %s:% " PRIu32 " -% " PRIu32 " on "
" %s:% " PRIu32 " / "
2015-07-06 17:09:17 +03:00
FMTu32 " - " FMTu32 " / " ,
2007-12-20 18:42:55 +03:00
lv - > name , seg - > le , seg - > area_len ,
layer_lv - > name , seg_le ( seg , s ) ,
lseg - > le , lseg - > area_len ) ;
return 0 ;
}
if ( ! move_lv_segment_area ( seg , s , lseg , 0 ) )
return_0 ;
/* Replace mirror with error segment */
if ( ! ( lseg - > segtype =
2015-09-22 21:04:12 +03:00
get_segtype_from_string ( lv - > vg - > cmd , SEG_TYPE_NAME_ERROR ) ) ) {
2007-12-20 18:42:55 +03:00
log_error ( " Missing error segtype " ) ;
return 0 ;
}
lseg - > area_count = 0 ;
/* First time, add LV to list of LVs affected */
if ( ! lv_changed & & lvs_changed ) {
if ( ! ( lvl = dm_pool_alloc ( cmd - > mem , sizeof ( * lvl ) ) ) ) {
log_error ( " lv_list alloc failed " ) ;
return 0 ;
}
lvl - > lv = lv ;
2008-11-04 01:14:30 +03:00
dm_list_add ( lvs_changed , & lvl - > list ) ;
2007-12-20 18:42:55 +03:00
lv_changed = 1 ;
}
}
}
if ( lv_changed & & ! lv_merge_segments ( lv ) )
stack ;
return 1 ;
}
/* Remove a layer */
int remove_layers_for_segments_all ( struct cmd_context * cmd ,
struct logical_volume * layer_lv ,
2009-11-25 01:55:55 +03:00
uint64_t status_mask ,
2008-11-04 01:14:30 +03:00
struct dm_list * lvs_changed )
2007-12-20 18:42:55 +03:00
{
struct lv_list * lvl ;
struct logical_volume * lv1 ;
/* Loop through all LVs except the temporary mirror */
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( lvl , & layer_lv - > vg - > lvs ) {
2007-12-20 18:42:55 +03:00
lv1 = lvl - > lv ;
if ( lv1 = = layer_lv )
continue ;
if ( ! remove_layers_for_segments ( cmd , lv1 , layer_lv ,
status_mask , lvs_changed ) )
2008-01-17 20:17:09 +03:00
return_0 ;
2007-12-20 18:42:55 +03:00
}
if ( ! lv_empty ( layer_lv ) )
return_0 ;
return 1 ;
}
2011-10-22 20:42:10 +04:00
int move_lv_segments ( struct logical_volume * lv_to ,
struct logical_volume * lv_from ,
uint64_t set_status , uint64_t reset_status )
2007-12-20 18:42:55 +03:00
{
2015-09-10 16:08:29 +03:00
const uint64_t MOVE_BITS = ( RAID | MIRROR | THIN_VOLUME ) ;
2007-12-20 18:42:55 +03:00
struct lv_segment * seg ;
2011-10-22 20:42:10 +04:00
dm_list_iterate_items ( seg , & lv_to - > segments )
2007-12-21 02:12:27 +03:00
if ( seg - > origin ) {
2011-10-22 20:42:10 +04:00
log_error ( " Can't move snapshot segment. " ) ;
2007-12-21 02:12:27 +03:00
return 0 ;
}
2011-10-22 20:42:10 +04:00
dm_list_init ( & lv_to - > segments ) ;
dm_list_splice ( & lv_to - > segments , & lv_from - > segments ) ;
2007-12-20 18:42:55 +03:00
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( seg , & lv_to - > segments ) {
2007-12-20 18:42:55 +03:00
seg - > lv = lv_to ;
seg - > status & = ~ reset_status ;
seg - > status | = set_status ;
}
2015-09-10 16:08:29 +03:00
/*
 * Move LV status bits for selected types with their segments,
 * i.e. when inserting a layer into a cache LV, we move raid segments
 * to a new place, thus the 'raid' LV property now belongs to this LV.
 *
 * Bits should match those which appear after reading from disk.
 */
lv_to - > status | = lv_from - > status & MOVE_BITS ;
lv_from - > status & = ~ MOVE_BITS ;
2007-12-20 18:42:55 +03:00
lv_to - > le_count = lv_from - > le_count ;
lv_to - > size = lv_from - > size ;
lv_from - > le_count = 0 ;
lv_from - > size = 0 ;
2007-12-21 02:12:27 +03:00
return 1 ;
2007-12-20 18:42:55 +03:00
}
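/*
 * Editor's sketch of typical use (hypothetical variables): when inserting or
 * removing a layer, the whole segment list moves in one call:
 *
 *	if (!move_lv_segments(layer_lv, lv_where, 0, 0))
 *		return_NULL;
 *	// layer_lv now owns the segments, le_count and size;
 *	// lv_where is empty and must be given a new mapping.
 */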
/* Remove a layer from the LV */
2007-12-20 21:55:46 +03:00
int remove_layer_from_lv ( struct logical_volume * lv ,
struct logical_volume * layer_lv )
2007-12-20 18:42:55 +03:00
{
2014-11-09 21:48:39 +03:00
static const char _suffixes [ ] [ 8 ] = { " _tdata " , " _cdata " , " _corig " } ;
struct logical_volume * parent_lv ;
2008-01-16 22:00:59 +03:00
struct lv_segment * parent_seg ;
2007-12-20 21:55:46 +03:00
struct segment_type * segtype ;
2014-11-09 21:48:39 +03:00
struct lv_names lv_names ;
2016-02-23 14:18:48 +03:00
unsigned r ;
2007-12-20 21:55:46 +03:00
2008-01-19 01:00:46 +03:00
log_very_verbose ( " Removing layer %s for %s " , layer_lv - > name , lv - > name ) ;
2008-01-16 22:00:59 +03:00
if ( ! ( parent_seg = get_only_segment_using_this_lv ( layer_lv ) ) ) {
2007-12-20 21:55:46 +03:00
log_error ( " Failed to find layer %s in %s " ,
2014-04-01 22:11:50 +04:00
layer_lv - > name , lv - > name ) ;
2007-12-20 21:55:46 +03:00
return 0 ;
}
2014-11-09 21:48:39 +03:00
parent_lv = parent_seg - > lv ;
if ( parent_lv ! = lv ) {
2014-04-01 22:11:50 +04:00
log_error ( INTERNAL_ERROR " Wrong layer %s in %s " ,
layer_lv - > name , lv - > name ) ;
return 0 ;
}
2007-12-20 18:42:55 +03:00
/*
* Before removal , the layer should be cleaned up ,
* i . e . additional segments and areas should have been removed .
*/
2017-01-06 14:41:38 +03:00
/* FIXME:
 * These are all INTERNAL_ERROR, but ATM there is
 * some internal API problem and this code is wrongly
 * executed with certain mirror manipulations.
 * So we need to fix mirror code first, then switch...
 */
if ( dm_list_size ( & parent_lv - > segments ) ! = 1 ) {
log_error ( " Invalid %d segments in %s, expected only 1. " ,
dm_list_size ( & parent_lv - > segments ) ,
display_lvname ( parent_lv ) ) ;
return 0 ;
}
if ( parent_seg - > area_count ! = 1 ) {
log_error ( " Invalid %d area count(s) in %s, expected only 1. " ,
parent_seg - > area_count , display_lvname ( parent_lv ) ) ;
return 0 ;
}
if ( seg_type ( parent_seg , 0 ) ! = AREA_LV ) {
log_error ( " Invalid seg_type %d in %s, expected LV. " ,
seg_type ( parent_seg , 0 ) , display_lvname ( parent_lv ) ) ;
return 0 ;
}
if ( layer_lv ! = seg_lv ( parent_seg , 0 ) ) {
log_error ( " Layer doesn't match segment in %s. " ,
display_lvname ( parent_lv ) ) ;
return 0 ;
}
if ( parent_lv - > le_count ! = layer_lv - > le_count ) {
log_error ( " Inconsistent extent count (%u != %u) of layer %s. " ,
parent_lv - > le_count , layer_lv - > le_count ,
display_lvname ( parent_lv ) ) ;
2017-01-05 17:49:07 +03:00
return 0 ;
}
2007-12-20 21:55:46 +03:00
2014-11-09 21:48:39 +03:00
if ( ! lv_empty ( parent_lv ) )
2008-02-22 16:22:21 +03:00
return_0 ;
2014-11-09 21:48:39 +03:00
if ( ! move_lv_segments ( parent_lv , layer_lv , 0 , 0 ) )
2007-12-21 02:12:27 +03:00
return_0 ;
2007-12-20 18:42:55 +03:00
2007-12-20 21:55:46 +03:00
/* Replace the empty layer with error segment */
2015-09-22 21:04:12 +03:00
if ( ! ( segtype = get_segtype_from_string ( lv - > vg - > cmd , SEG_TYPE_NAME_ERROR ) ) )
2014-10-18 13:01:29 +04:00
return_0 ;
2014-11-09 21:48:39 +03:00
if ( ! lv_add_virtual_segment ( layer_lv , 0 , parent_lv - > le_count , segtype ) )
2007-12-20 21:55:46 +03:00
return_0 ;
2007-12-20 18:42:55 +03:00
2014-11-09 21:48:39 +03:00
/*
 * Recursively rename sub LVs
 *   currently supported only for thin data layer
 *   FIXME: without strcmp it breaks mirrors....
 */
2014-11-11 00:32:43 +03:00
if ( ! strstr ( layer_lv - > name , " _mimage " ) )
for ( r = 0 ; r < DM_ARRAY_SIZE ( _suffixes ) ; + + r )
if ( strstr ( layer_lv - > name , _suffixes [ r ] ) ! = NULL ) {
lv_names . old = layer_lv - > name ;
lv_names . new = parent_lv - > name ;
if ( ! for_each_sub_lv ( parent_lv , _rename_cb , ( void * ) & lv_names ) )
return_0 ;
break ;
}
2014-11-09 21:48:39 +03:00
2007-12-20 18:42:55 +03:00
return 1 ;
}
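/*
 * Editor's illustration (names are examples only) of the collapse performed
 * by remove_layer_from_lv():
 *
 *	before:	lv -> single AREA_LV segment -> lv_tdata -> PVs
 *	after:	lv -> PVs			(segments moved up)
 *		lv_tdata -> virtual error segment, ready for removal
 *
 * followed by the _suffixes[] pass renaming sub LVs back into the
 * parent's namespace.
 */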
/*
 * Create and insert a linear LV "above" lv_where.
 * After the insertion, a new LV named lv_where->name + suffix is created
 * and all segments of lv_where are moved to the new LV.
 * lv_where will have a single segment which maps linearly to the new LV.
 */
struct logical_volume * insert_layer_for_lv ( struct cmd_context * cmd ,
struct logical_volume * lv_where ,
2009-11-25 01:55:55 +03:00
uint64_t status ,
2007-12-20 18:42:55 +03:00
const char * layer_suffix )
{
2014-10-18 13:01:29 +04:00
static const char _suffixes [ ] [ 8 ] = { " _tdata " , " _cdata " , " _corig " } ;
2011-06-23 18:00:58 +04:00
int r ;
2014-10-24 16:22:13 +04:00
char name [ NAME_LEN ] ;
2014-05-29 11:41:03 +04:00
struct dm_str_list * sl ;
2012-05-05 06:08:46 +04:00
struct logical_volume * layer_lv ;
2007-12-20 18:42:55 +03:00
struct segment_type * segtype ;
struct lv_segment * mapseg ;
2012-09-06 17:21:18 +04:00
struct lv_names lv_names ;
2016-02-23 14:18:48 +03:00
unsigned exclusive = 0 , i ;
2007-12-20 18:42:55 +03:00
/* create an empty layer LV */
2014-10-24 16:22:13 +04:00
if ( dm_snprintf ( name , sizeof ( name ) , " %s%s " , lv_where - > name , layer_suffix ) < 0 ) {
log_error ( " Layered name is too long. Please use shorter LV name. " ) ;
2007-12-20 18:42:55 +03:00
return NULL ;
}
2014-10-23 16:27:05 +04:00
if ( ! ( layer_lv = lv_create_empty ( name , NULL ,
/* Preserve read-only flag */
LVM_READ | ( lv_where - > status & LVM_WRITE ) ,
2009-05-14 01:28:31 +04:00
ALLOC_INHERIT , lv_where - > vg ) ) ) {
2007-12-20 18:42:55 +03:00
log_error ( " Creation of layer LV failed " ) ;
return NULL ;
}
2011-06-23 18:00:58 +04:00
if ( lv_is_active_exclusive_locally ( lv_where ) )
exclusive = 1 ;
2016-11-23 12:39:59 +03:00
if ( lv_is_active ( lv_where ) & & strstr ( name , MIRROR_SYNC_LAYER ) ) {
2008-10-23 15:21:04 +04:00
log_very_verbose ( " Creating transient LV %s for mirror conversion in VG %s. " , name , lv_where - > vg - > name ) ;
2008-10-17 14:57:15 +04:00
2015-09-22 21:04:12 +03:00
segtype = get_segtype_from_string ( cmd , SEG_TYPE_NAME_ERROR ) ;
2008-10-17 14:57:15 +04:00
2014-10-26 10:13:59 +03:00
if ( ! lv_add_virtual_segment ( layer_lv , 0 , lv_where - > le_count , segtype ) ) {
2008-10-23 15:21:04 +04:00
log_error ( " Creation of transient LV %s for mirror conversion in VG %s failed. " , name , lv_where - > vg - > name ) ;
2008-10-17 14:57:15 +04:00
return NULL ;
}
2012-05-05 06:08:46 +04:00
/* Temporary tags for activation of the transient LV */
dm_list_iterate_items ( sl , & lv_where - > tags )
if ( ! str_list_add ( cmd - > mem , & layer_lv - > tags , sl - > str ) ) {
log_error ( " Aborting. Unable to tag "
" transient mirror layer. " ) ;
return NULL ;
}
2008-10-17 14:57:15 +04:00
if ( ! vg_write ( lv_where - > vg ) ) {
2008-10-23 15:21:04 +04:00
log_error ( " Failed to write intermediate VG %s metadata for mirror conversion. " , lv_where - > vg - > name ) ;
2008-10-17 14:57:15 +04:00
return NULL ;
}
if ( ! vg_commit ( lv_where - > vg ) ) {
2008-10-23 15:21:04 +04:00
log_error ( " Failed to commit intermediate VG %s metadata for mirror conversion. " , lv_where - > vg - > name ) ;
2008-10-17 14:57:15 +04:00
return NULL ;
}
2011-06-23 18:00:58 +04:00
if ( exclusive )
r = activate_lv_excl ( cmd , layer_lv ) ;
else
r = activate_lv ( cmd , layer_lv ) ;
if ( ! r ) {
log_error ( " Failed to resume transient LV "
" %s for mirror conversion in VG %s. " ,
name , lv_where - > vg - > name ) ;
2008-10-17 14:57:15 +04:00
return NULL ;
}
2012-05-05 06:08:46 +04:00
/* Remove the temporary tags */
dm_list_iterate_items ( sl , & lv_where - > tags )
str_list_del ( & layer_lv - > tags , sl - > str ) ;
2008-10-17 14:57:15 +04:00
}
2007-12-20 18:42:55 +03:00
log_very_verbose ( " Inserting layer %s for %s " ,
layer_lv - > name , lv_where - > name ) ;
2011-10-22 20:42:10 +04:00
if ( ! move_lv_segments ( layer_lv , lv_where , 0 , 0 ) )
2007-12-21 02:12:27 +03:00
return_NULL ;
2007-12-20 18:42:55 +03:00
2015-09-22 21:04:12 +03:00
if ( ! ( segtype = get_segtype_from_string ( cmd , SEG_TYPE_NAME_STRIPED ) ) )
2008-10-17 14:57:15 +04:00
return_NULL ;
2007-12-20 18:42:55 +03:00
/* allocate a new linear segment */
2017-02-24 02:50:00 +03:00
if ( ! ( mapseg = alloc_lv_segment ( segtype , lv_where , 0 , layer_lv - > le_count , 0 ,
status , 0 , NULL , 1 , layer_lv - > le_count , 0 ,
2010-04-08 04:28:57 +04:00
0 , 0 , 0 , NULL ) ) )
2007-12-20 18:42:55 +03:00
return_NULL ;
/* map the new segment to the original underlying area */
2008-01-16 22:00:59 +03:00
if ( ! set_lv_segment_area_lv ( mapseg , 0 , layer_lv , 0 , 0 ) )
return_NULL ;
2007-12-20 18:42:55 +03:00
/* add the new segment to the layer LV */
2008-11-04 01:14:30 +03:00
dm_list_add ( & lv_where - > segments , & mapseg - > list ) ;
2007-12-20 18:42:55 +03:00
lv_where - > le_count = layer_lv - > le_count ;
2012-03-05 19:05:24 +04:00
lv_where - > size = ( uint64_t ) lv_where - > le_count * lv_where - > vg - > extent_size ;
2007-12-20 18:42:55 +03:00
2012-09-06 17:21:18 +04:00
/*
 * Recursively rename sub LVs
 *   currently supported only for thin data layer
 *   FIXME: without strcmp it breaks mirrors....
 */
2016-02-23 14:18:48 +03:00
for ( i = 0 ; i < DM_ARRAY_SIZE ( _suffixes ) ; + + i )
if ( strcmp ( layer_suffix , _suffixes [ i ] ) = = 0 ) {
2014-06-18 16:58:09 +04:00
lv_names . old = lv_where - > name ;
lv_names . new = layer_lv - > name ;
if ( ! for_each_sub_lv ( layer_lv , _rename_cb , ( void * ) & lv_names ) )
2014-10-18 13:01:29 +04:00
return_NULL ;
2014-06-18 16:58:09 +04:00
break ;
}
2012-09-06 17:21:18 +04:00
2007-12-20 18:42:55 +03:00
return layer_lv ;
}
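/*
 * Editor's illustration (hypothetical names): inserting a layer with
 * layer_suffix "_tdata" under an existing LV "lv0" yields
 *
 *	lv0	  -> one striped (linear) segment -> lv0_tdata
 *	lv0_tdata -> the original segments of lv0 on the PVs
 *
 * i.e. lv_where keeps its name and gains a single AREA_LV mapping, while
 * the new layer LV takes over the old mapping.
 */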
/*
* Extend and insert a linear layer LV beneath the source segment area .
*/
static int _extend_layer_lv_for_segment ( struct logical_volume * layer_lv ,
struct lv_segment * seg , uint32_t s ,
2009-11-25 01:55:55 +03:00
uint64_t status )
2007-12-20 18:42:55 +03:00
{
struct lv_segment * mapseg ;
struct segment_type * segtype ;
struct physical_volume * src_pv = seg_pv ( seg , s ) ;
uint32_t src_pe = seg_pe ( seg , s ) ;
if ( seg_type ( seg , s ) ! = AREA_PV & & seg_type ( seg , s ) ! = AREA_LV )
return_0 ;
2015-09-22 21:04:12 +03:00
if ( ! ( segtype = get_segtype_from_string ( layer_lv - > vg - > cmd , SEG_TYPE_NAME_STRIPED ) ) )
2007-12-20 18:42:55 +03:00
return_0 ;
/* FIXME Incomplete message? Needs more context */
log_very_verbose ( " Inserting %s:% " PRIu32 " -% " PRIu32 " of %s/%s " ,
pv_dev_name ( src_pv ) ,
src_pe , src_pe + seg - > area_len - 1 ,
seg - > lv - > vg - > name , seg - > lv - > name ) ;
/* allocate a new segment */
2011-10-23 20:02:01 +04:00
if ( ! ( mapseg = alloc_lv_segment ( segtype , layer_lv , layer_lv - > le_count ,
2017-02-24 02:50:00 +03:00
seg - > area_len , 0 , status , 0 ,
NULL , 1 , seg - > area_len , 0 , 0 , 0 , 0 , seg ) ) )
2007-12-20 18:42:55 +03:00
return_0 ;
/* map the new segment to the original underlying area */
if ( ! move_lv_segment_area ( mapseg , 0 , seg , s ) )
return_0 ;
/* add the new segment to the layer LV */
2008-11-04 01:14:30 +03:00
dm_list_add ( & layer_lv - > segments , & mapseg - > list ) ;
2007-12-20 18:42:55 +03:00
layer_lv - > le_count + = seg - > area_len ;
2012-03-05 19:05:24 +04:00
layer_lv - > size + = ( uint64_t ) seg - > area_len * layer_lv - > vg - > extent_size ;
2007-12-20 18:42:55 +03:00
/* map the original area to the new segment */
2008-01-16 22:00:59 +03:00
if ( ! set_lv_segment_area_lv ( seg , s , layer_lv , mapseg - > le , 0 ) )
return_0 ;
2007-12-20 18:42:55 +03:00
return 1 ;
}
/*
 * Match the segment area to PEs in the pvl
 * (the segment area boundary should be aligned to PE ranges by
 *  _align_segment_boundary_to_pe_range() so that there is no partial overlap.)
 */
static int _match_seg_area_to_pe_range ( struct lv_segment * seg , uint32_t s ,
struct pv_list * pvl )
{
struct pe_range * per ;
uint32_t pe_start , per_end ;
if ( ! pvl )
return 1 ;
if ( seg_type ( seg , s ) ! = AREA_PV | | seg_dev ( seg , s ) ! = pvl - > pv - > dev )
return 0 ;
pe_start = seg_pe ( seg , s ) ;
/* Do these PEs match to any of the PEs in pvl? */
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( per , pvl - > pe_ranges ) {
2007-12-20 18:42:55 +03:00
per_end = per - > start + per - > count - 1 ;
if ( ( pe_start < per - > start ) | | ( pe_start > per_end ) )
continue ;
/* FIXME Missing context in this message - add LV/seg details */
2013-01-08 02:30:29 +04:00
log_debug_alloc ( " Matched PE range %s:% " PRIu32 " -% " PRIu32 " against "
" %s % " PRIu32 " len % " PRIu32 , dev_name ( pvl - > pv - > dev ) ,
per - > start , per_end , dev_name ( seg_dev ( seg , s ) ) ,
seg_pe ( seg , s ) , seg - > area_len ) ;
2007-12-20 18:42:55 +03:00
return 1 ;
}
return 0 ;
}
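/*
 * Editor's worked example (numbers assumed): with a pe_range of
 * per->start = 100, per->count = 100 (PEs 100-199) on the matching device,
 * an area with pe_start = 150 matches (100 <= 150 <= 199) and returns 1,
 * while pe_start = 250 falls outside every range and returns 0. Only the
 * area start needs testing because the boundaries were pre-aligned.
 */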
/*
* For each segment in lv_where that uses a PV in pvl directly ,
* split the segment if it spans more than one underlying PV .
*/
static int _align_segment_boundary_to_pe_range ( struct logical_volume * lv_where ,
struct pv_list * pvl )
{
struct lv_segment * seg ;
struct pe_range * per ;
uint32_t pe_start , pe_end , per_end , stripe_multiplier , s ;
if ( ! pvl )
return 1 ;
/* Split LV segments to match PE ranges */
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( seg , & lv_where - > segments ) {
2007-12-20 18:42:55 +03:00
for ( s = 0 ; s < seg - > area_count ; s + + ) {
if ( seg_type ( seg , s ) ! = AREA_PV | |
seg_dev ( seg , s ) ! = pvl - > pv - > dev )
continue ;
/* Do these PEs match with the condition? */
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( per , pvl - > pe_ranges ) {
2007-12-20 18:42:55 +03:00
pe_start = seg_pe ( seg , s ) ;
pe_end = pe_start + seg - > area_len - 1 ;
per_end = per - > start + per - > count - 1 ;
/* No overlap? */
if ( ( pe_end < per - > start ) | |
( pe_start > per_end ) )
continue ;
if ( seg_is_striped ( seg ) )
stripe_multiplier = seg - > area_count ;
else
stripe_multiplier = 1 ;
if ( ( per - > start ! = pe_start & &
per - > start > pe_start ) & &
! lv_split_segment ( lv_where , seg - > le +
( per - > start - pe_start ) *
stripe_multiplier ) )
return_0 ;
if ( ( per_end ! = pe_end & &
per_end < pe_end ) & &
! lv_split_segment ( lv_where , seg - > le +
( per_end - pe_start + 1 ) *
stripe_multiplier ) )
return_0 ;
}
}
}
return 1 ;
}
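/*
 * Editor's worked example (numbers assumed): a linear segment mapping
 * LEs 0-99 to PEs 0-99 of the PV, limited to the PE range 20-59, is split
 * at le = 0 + (20 - 0) * 1 = 20 and at le = (59 - 0 + 1) * 1 = 60, giving
 * segments 0-19 / 20-59 / 60-99 so the middle one can be layered on its
 * own. Striped segments scale the LE offsets by stripe_multiplier.
 */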
/*
* Scan lv_where for segments on a PV in pvl , and for each one found
* append a linear segment to lv_layer and insert it between the two .
*
* If pvl is empty , a layer is placed under the whole of lv_where .
* If the layer is inserted , lv_where is added to lvs_changed .
*/
int insert_layer_for_segments_on_pv ( struct cmd_context * cmd ,
struct logical_volume * lv_where ,
struct logical_volume * layer_lv ,
2009-11-25 01:55:55 +03:00
uint64_t status ,
2007-12-20 18:42:55 +03:00
struct pv_list * pvl ,
2008-11-04 01:14:30 +03:00
struct dm_list * lvs_changed )
2007-12-20 18:42:55 +03:00
{
struct lv_segment * seg ;
struct lv_list * lvl ;
int lv_used = 0 ;
uint32_t s ;
2017-11-09 13:41:16 +03:00
struct logical_volume * holder = ( struct logical_volume * ) lv_lock_holder ( lv_where ) ;
2007-12-20 18:42:55 +03:00
2008-01-19 01:00:46 +03:00
log_very_verbose ( " Inserting layer %s for segments of %s on %s " ,
layer_lv - > name , lv_where - > name ,
pvl ? pv_dev_name ( pvl - > pv ) : " any " ) ;
2017-11-01 02:51:39 +03:00
/* Temporarily hide layer_lv from the vg->lvs list
 * so that lv_split_segment() passes vg_validate(),
 * since at this point layer_lv has an empty segment list */
if ( ! ( lvl = find_lv_in_vg ( lv_where - > vg , layer_lv - > name ) ) )
return_0 ;
dm_list_del ( & lvl - > list ) ;
2007-12-20 18:42:55 +03:00
if ( ! _align_segment_boundary_to_pe_range ( lv_where , pvl ) )
return_0 ;
2017-11-01 02:51:39 +03:00
/* Put layer_lv back into vg->lvs */
dm_list_add ( & lv_where - > vg - > lvs , & lvl - > list ) ;
2007-12-20 18:42:55 +03:00
/* Work through all segments on the supplied PV */
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( seg , & lv_where - > segments ) {
2007-12-20 18:42:55 +03:00
for ( s = 0 ; s < seg - > area_count ; s + + ) {
if ( ! _match_seg_area_to_pe_range ( seg , s , pvl ) )
continue ;
/* First time, add LV to list of LVs affected */
if ( ! lv_used & & lvs_changed ) {
2017-11-09 13:41:16 +03:00
/* First check if LV is listed already */
dm_list_iterate_items ( lvl , lvs_changed )
if ( lvl - > lv = = holder ) {
lv_used = 1 ;
break ;
}
if ( ! lv_used ) {
if ( ! ( lvl = dm_pool_alloc ( cmd - > mem , sizeof ( * lvl ) ) ) ) {
log_error ( " lv_list alloc failed. " ) ;
return 0 ;
}
lvl - > lv = holder ;
dm_list_add ( lvs_changed , & lvl - > list ) ;
lv_used = 1 ;
2007-12-20 18:42:55 +03:00
}
}
if ( ! _extend_layer_lv_for_segment ( layer_lv , seg , s ,
status ) ) {
log_error ( " Failed to insert segment in layer "
" LV %s under %s:% " PRIu32 " -% " PRIu32 ,
layer_lv - > name , lv_where - > name ,
seg - > le , seg - > le + seg - > len ) ;
return 0 ;
}
}
}
return 1 ;
}
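/*
 * Editor's sketch (hypothetical caller, pvmove-style): after creating an
 * empty layer LV, segments of lv sitting on the PVs in pvl are rerouted
 * through it:
 *
 *	if (!insert_layer_for_segments_on_pv(cmd, lv, pvmove_lv,
 *					     PVMOVE, pvl, lvs_changed))
 *		return_0;
 *	// Matching AREA_PV slots in lv now point at pvmove_lv, which has
 *	// grown one linear segment per area it absorbed.
 */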
2007-12-21 01:37:42 +03:00
/*
 * Initialize the start of the LV and/or wipe known signatures,
 * according to the wipe_params supplied.
 */
2013-11-28 14:22:24 +04:00
int wipe_lv ( struct logical_volume * lv , struct wipe_params wp )
2007-12-21 01:37:42 +03:00
{
struct device * dev ;
2013-11-28 14:22:24 +04:00
char name [ PATH_MAX ] ;
2013-11-06 19:16:34 +04:00
uint64_t zero_sectors ;
2007-12-21 01:37:42 +03:00
2013-11-28 14:22:24 +04:00
if ( ! wp . do_zero & & ! wp . do_wipe_signatures )
2013-11-06 19:16:34 +04:00
/* nothing to do */
return 1 ;
2015-06-30 20:54:38 +03:00
/* Wait until devices are available */
if ( ! sync_local_dev_names ( lv - > vg - > cmd ) ) {
log_error ( " Failed to sync local devices before wiping LV %s. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
2014-09-19 13:06:31 +04:00
2013-11-28 14:22:24 +04:00
if ( ! lv_is_active_locally ( lv ) ) {
2013-11-01 13:28:42 +04:00
log_error ( " Volume \" %s/%s \" is not active locally. " ,
2013-11-28 14:22:24 +04:00
lv - > vg - > name , lv - > name ) ;
2013-11-01 13:28:42 +04:00
return 0 ;
}
2007-12-21 01:37:42 +03:00
/*
* FIXME :
* < clausen > also , more than 4 k
* < clausen > say , reiserfs puts it ' s superblock 32 k in , IIRC
* < ejt_ > k , I ' ll drop a fixme to that effect
* ( I know the device is at least 4 k , but not 32 k )
*/
2013-11-28 14:22:24 +04:00
if ( dm_snprintf ( name , sizeof ( name ) , " %s%s/%s " , lv - > vg - > cmd - > dev_dir ,
lv - > vg - > name , lv - > name ) < 0 ) {
log_error ( " Name too long - device not cleared (%s) " , lv - > name ) ;
2007-12-21 01:37:42 +03:00
return 0 ;
}
if ( ! ( dev = dev_cache_get ( name , NULL ) ) ) {
log_error ( " %s: not found: device not cleared " , name ) ;
return 0 ;
}
if ( ! dev_open_quiet ( dev ) )
2008-01-17 20:17:09 +03:00
return_0 ;
2007-12-21 01:37:42 +03:00
2013-11-28 14:22:24 +04:00
if ( wp . do_wipe_signatures ) {
2013-11-06 19:16:34 +04:00
log_verbose ( " Wiping known signatures on logical volume \" %s/%s \" " ,
2013-11-28 14:22:24 +04:00
lv - > vg - > name , lv - > name ) ;
2014-02-10 16:28:13 +04:00
if ( ! wipe_known_signatures ( lv - > vg - > cmd , dev , name , 0 ,
TYPE_DM_SNAPSHOT_COW ,
2015-02-17 11:46:34 +03:00
wp . yes , wp . force , NULL ) )
2013-11-06 19:16:34 +04:00
stack ;
}
2008-12-19 18:26:01 +03:00
2013-11-28 14:22:24 +04:00
if ( wp . do_zero ) {
zero_sectors = wp . zero_sectors ? : UINT64_C ( 4096 ) > > SECTOR_SHIFT ;
2008-12-19 18:26:01 +03:00
2013-11-28 14:22:24 +04:00
if ( zero_sectors > lv - > size )
zero_sectors = lv - > size ;
2013-11-06 19:16:34 +04:00
2014-02-11 16:30:41 +04:00
log_verbose ( " Initializing %s of logical volume \" %s/%s \" with value %d. " ,
display_size ( lv - > vg - > cmd , zero_sectors ) ,
lv - > vg - > name , lv - > name , wp . zero_value ) ;
2013-11-06 19:16:34 +04:00
2013-11-28 14:22:24 +04:00
if ( ! dev_set ( dev , UINT64_C ( 0 ) , ( size_t ) zero_sectors < < SECTOR_SHIFT , wp . zero_value ) )
2013-11-06 19:16:34 +04:00
stack ;
}
2010-12-01 15:56:39 +03:00
2007-12-21 01:37:42 +03:00
dev_flush ( dev ) ;
2010-12-01 15:56:39 +03:00
if ( ! dev_close_immediate ( dev ) )
2011-09-06 22:49:31 +04:00
stack ;
2007-12-21 01:37:42 +03:00
2013-11-28 14:22:24 +04:00
lv - > status & = ~ LV_NOSCAN ;
2013-10-08 15:27:21 +04:00
2007-12-21 01:37:42 +03:00
return 1 ;
}
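/*
 * Editor's usage sketch (field values are examples): wipe_params is passed
 * by value, so a designated initializer keeps call sites short:
 *
 *	struct wipe_params wp = {
 *		.do_zero = 1,
 *		.zero_sectors = 8,	// 4KiB; 0 picks the default above
 *		.zero_value = 0,
 *		.do_wipe_signatures = 1,
 *		.yes = 0,
 *		.force = PROMPT,
 *	};
 *
 *	if (!wipe_lv(lv, wp))
 *		return_0;
 */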
2009-07-26 06:33:35 +04:00
static struct logical_volume * _create_virtual_origin ( struct cmd_context * cmd ,
struct volume_group * vg ,
const char * lv_name ,
uint32_t permission ,
uint64_t voriginextents )
{
const struct segment_type * segtype ;
2014-10-24 16:22:13 +04:00
char vorigin_name [ NAME_LEN ] ;
2009-07-26 06:33:35 +04:00
struct logical_volume * lv ;
2015-09-22 21:04:12 +03:00
if ( ! ( segtype = get_segtype_from_string ( cmd , SEG_TYPE_NAME_ZERO ) ) ) {
2009-07-26 06:33:35 +04:00
log_error ( " Zero segment type for virtual origin not found " ) ;
return NULL ;
}
2014-10-24 16:22:13 +04:00
if ( dm_snprintf ( vorigin_name , sizeof ( vorigin_name ) , " %s_vorigin " , lv_name ) < 0 ) {
log_error ( " Virtual origin name is too long. " ) ;
2009-07-26 06:33:35 +04:00
return NULL ;
}
if ( ! ( lv = lv_create_empty ( vorigin_name , NULL , permission ,
ALLOC_INHERIT , vg ) ) )
return_NULL ;
2011-04-07 01:32:20 +04:00
if ( ! lv_extend ( lv , segtype , 1 , 0 , 1 , 0 , voriginextents ,
2014-10-26 10:13:59 +03:00
NULL , ALLOC_INHERIT , 0 ) )
2009-07-26 06:33:35 +04:00
return_NULL ;
return lv ;
}
2013-07-10 16:06:50 +04:00
/*
* Automatically set ACTIVATION_SKIP flag for the LV supplied - this
* is default behaviour . If override_default is set , then override
* the default behaviour and add / clear the flag based on ' add_skip ' arg
* supplied instead .
*/
void lv_set_activation_skip ( struct logical_volume * lv , int override_default ,
int add_skip )
{
2013-07-12 11:27:17 +04:00
int skip = 0 ;
2013-07-10 16:06:50 +04:00
/* override default behaviour */
if ( override_default )
skip = add_skip ;
/* default behaviour */
2013-07-12 11:27:17 +04:00
else if ( lv - > vg - > cmd - > auto_set_activation_skip ) {
/* skip activation for thin snapshots by default */
if ( lv_is_thin_volume ( lv ) & & first_seg ( lv ) - > origin )
skip = 1 ;
}
2013-07-10 16:06:50 +04:00
if ( skip )
lv - > status | = LV_ACTIVATION_SKIP ;
else
lv - > status & = ~ LV_ACTIVATION_SKIP ;
}
/*
* Get indication whether the LV should be skipped during activation
* based on the ACTIVATION_SKIP flag ( deactivation is never skipped ! ) .
* If ' override_lv_skip_flag ' is set , then override it based on the value
* of the ' skip ' arg supplied instead .
*/
int lv_activation_skip ( struct logical_volume * lv , activation_change_t activate ,
2014-02-19 14:02:16 +04:00
int override_lv_skip_flag )
2013-07-10 16:06:50 +04:00
{
2014-02-18 23:49:32 +04:00
if ( ! ( lv - > status & LV_ACTIVATION_SKIP ) | |
! is_change_activating ( activate ) | | /* Do not skip deactivation */
2014-02-19 14:02:16 +04:00
override_lv_skip_flag )
2013-07-10 16:06:50 +04:00
return 0 ;
2014-06-30 13:02:45 +04:00
log_verbose ( " ACTIVATION_SKIP flag set for LV %s/%s, skipping activation. " ,
2014-02-18 23:49:32 +04:00
lv - > vg - > name , lv - > name ) ;
return 1 ;
2013-07-10 16:06:50 +04:00
}
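/*
 * Editor's sketch of how the two helpers combine (mirroring the real call
 * sequence used later in this file):
 *
 *	lv_set_activation_skip(lv, lp->activation_skip & ACTIVATION_SKIP_SET,
 *			       lp->activation_skip & ACTIVATION_SKIP_SET_ENABLED);
 *	if (lv_activation_skip(lv, lp->activate,
 *			       lp->activation_skip & ACTIVATION_SKIP_IGNORE))
 *		lp->activate = CHANGE_AN;	// honour the skip flag
 */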
2014-10-22 15:29:25 +04:00
static int _should_wipe_lv ( struct lvcreate_params * lp ,
struct logical_volume * lv , int warn )
{
/* Unzeroable segment */
2016-12-25 01:46:47 +03:00
if ( seg_cannot_be_zeroed ( first_seg ( lv ) ) )
2014-10-22 15:29:25 +04:00
return 0 ;
2014-01-20 15:38:21 +04:00
2014-10-22 15:29:25 +04:00
/* Thin snapshots need not be zeroed */
/* Thin volumes in a pool that zeroes new blocks need no zeroing or wiping */
if ( lv_is_thin_volume ( lv ) & &
( first_seg ( lv ) - > origin | |
first_seg ( first_seg ( lv ) - > pool_lv ) - > zero_new_blocks ) )
return 0 ;
2014-01-20 15:38:21 +04:00
2014-10-22 15:29:25 +04:00
/* Cannot zero read-only volume */
if ( ( lv - > status & LVM_WRITE ) & &
( lp - > zero | | lp - > wipe_signatures ) )
return 1 ;
if ( warn & & ( ! lp - > zero | | ! ( lv - > status & LVM_WRITE ) ) )
log_warn ( " WARNING: Logical volume %s not zeroed. " ,
display_lvname ( lv ) ) ;
if ( warn & & ( ! lp - > wipe_signatures | | ! ( lv - > status & LVM_WRITE ) ) )
log_verbose ( " Signature wiping on logical volume %s not requested. " ,
display_lvname ( lv ) ) ;
2014-01-20 15:38:21 +04:00
return 0 ;
}
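/*
 * Editor's summary of _should_wipe_lv() in table form:
 *
 *	segtype cannot be zeroed ........................ 0 (never wipe)
 *	thin snapshot, or pool zeroes new blocks ........ 0
 *	LVM_WRITE && (zero || wipe_signatures) .......... 1 (wipe)
 *	otherwise ....................................... 0 (warn if requested)
 */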
2014-10-21 13:23:33 +04:00
/* Check if VG metadata supports needed features */
static int _vg_check_features ( struct volume_group * vg ,
struct lvcreate_params * lp )
{
uint32_t features = vg - > fid - > fmt - > features ;
if ( vg_max_lv_reached ( vg ) ) {
log_error ( " Maximum number of logical volumes (%u) reached "
" in volume group %s " , vg - > max_lv , vg - > name ) ;
return 0 ;
}
if ( ! ( features & FMT_SEGMENTS ) & &
( seg_is_cache ( lp ) | |
seg_is_cache_pool ( lp ) | |
seg_is_mirrored ( lp ) | |
seg_is_raid ( lp ) | |
seg_is_thin ( lp ) ) ) {
log_error ( " Metadata does not support %s segments. " ,
lp - > segtype - > name ) ;
return 0 ;
}
if ( ! ( features & FMT_TAGS ) & & ! dm_list_empty ( & lp - > tags ) ) {
log_error ( " Volume group %s does not support tags. " , vg - > name ) ;
return 0 ;
}
if ( ( features & FMT_RESTRICTED_READAHEAD ) & &
lp - > read_ahead ! = DM_READ_AHEAD_AUTO & &
lp - > read_ahead ! = DM_READ_AHEAD_NONE & &
( lp - > read_ahead < 2 | | lp - > read_ahead > 120 ) ) {
log_error ( " Metadata only supports readahead values between 2 and 120. " ) ;
return 0 ;
}
/* Need to check the vg's format to verify this - the cmd format isn't setup properly yet */
if ( ! ( features & FMT_UNLIMITED_STRIPESIZE ) & &
( lp - > stripes > 1 ) & & ( lp - > stripe_size > STRIPE_SIZE_MAX ) ) {
log_error ( " Stripe size may not exceed %s. " ,
display_size ( vg - > cmd , ( uint64_t ) STRIPE_SIZE_MAX ) ) ;
return 0 ;
}
return 1 ;
}
2011-09-06 04:26:42 +04:00
/* Thin notes:
* If lp - > thin OR lp - > activate is AY * , activate the pool if not already active .
* If lp - > thin , create thin LV within the pool - as a snapshot if lp - > snapshot .
* If lp - > activate is AY * , activate it .
2017-07-08 20:15:49 +03:00
* If lp - > activate is AN * and the pool was originally not active , deactivate it .
2011-09-06 04:26:42 +04:00
*/
2013-09-11 01:33:22 +04:00
static struct logical_volume * _lv_create_an_lv ( struct volume_group * vg ,
struct lvcreate_params * lp ,
2011-09-06 04:26:42 +04:00
const char * new_lv_name )
2009-07-26 06:33:35 +04:00
{
struct cmd_context * cmd = vg - > cmd ;
2015-09-24 19:50:53 +03:00
uint32_t size ;
2014-10-18 13:01:34 +04:00
uint64_t status = lp - > permission | VISIBLE_LV ;
2014-10-24 17:26:41 +04:00
const struct segment_type * create_segtype = lp - > segtype ;
2014-10-07 12:43:47 +04:00
struct logical_volume * lv , * origin_lv = NULL ;
2014-10-26 10:13:59 +03:00
struct logical_volume * pool_lv = NULL ;
2014-10-04 01:51:54 +04:00
struct logical_volume * tmp_lv ;
2014-10-26 10:13:59 +03:00
struct lv_segment * seg , * pool_seg ;
2014-11-04 17:08:15 +03:00
int thin_pool_was_active = - 1 ; /* not scanned, inactive, active */
2016-03-01 17:31:48 +03:00
int historical ;
2009-07-26 06:33:35 +04:00
2016-03-01 17:31:48 +03:00
if ( new_lv_name & & lv_name_is_used_in_vg ( vg , new_lv_name , & historical ) ) {
log_error ( " %sLogical Volume \" %s \" already exists in "
" volume group \" %s \" " , historical ? " historical " : " " ,
new_lv_name , vg - > name ) ;
2011-09-06 04:26:42 +04:00
return NULL ;
2009-07-26 06:33:35 +04:00
}
2014-10-21 13:23:33 +04:00
if ( ! _vg_check_features ( vg , lp ) )
return_NULL ;
2009-07-26 06:33:35 +04:00
2014-10-21 14:12:45 +04:00
if ( ! activation ( ) ) {
if ( seg_is_cache ( lp ) | |
seg_is_mirror ( lp ) | |
2016-05-23 18:46:38 +03:00
( seg_is_raid ( lp ) & & ! seg_is_raid0 ( lp ) ) | |
2014-10-21 14:12:45 +04:00
seg_is_thin ( lp ) | |
lp - > snapshot ) {
/*
* FIXME : For thin pool add some code to allow delayed
* initialization of empty thin pool volume .
* i . e . using some LV flag , fake message , . . .
* and testing for metadata pool header signature ?
*/
log_error ( " Can't create %s without using "
" device-mapper kernel driver. " ,
lp - > segtype - > name ) ;
return NULL ;
}
/* Does LV need to be zeroed? */
if ( lp - > zero & & ! seg_is_thin ( lp ) ) {
log_error ( " Can't wipe start of new LV without using "
" device-mapper kernel driver. " ) ;
return NULL ;
}
}
2009-07-26 06:33:35 +04:00
if ( lp - > stripe_size > vg - > extent_size ) {
2014-10-18 13:01:29 +04:00
if ( seg_is_raid ( lp ) & & ( vg - > extent_size < STRIPE_SIZE_MIN ) ) {
2014-08-16 06:15:34 +04:00
/*
* FIXME : RAID will simply fail to load the table if
* this is the case , but we should probably
* honor the stripe minimum for regular stripe
* volumes as well . Avoiding doing that now
* only to minimize the change .
*/
2014-08-26 19:34:14 +04:00
log_error ( " The extent size in volume group %s is too "
" small to support striped RAID volumes. " ,
2014-08-16 06:15:34 +04:00
vg - > name ) ;
return NULL ;
}
2013-06-20 16:08:25 +04:00
log_print_unless_silent ( " Reducing requested stripe size %s to maximum, "
" physical extent size %s. " ,
display_size ( cmd , ( uint64_t ) lp - > stripe_size ) ,
display_size ( cmd , ( uint64_t ) vg - > extent_size ) ) ;
2009-07-26 06:33:35 +04:00
lp - > stripe_size = vg - > extent_size ;
}
2015-09-24 19:50:53 +03:00
lp - > extents = _round_to_stripe_boundary ( vg , lp - > extents , lp - > stripes , 1 ) ;
2009-07-26 06:33:35 +04:00
2014-10-24 17:26:41 +04:00
if ( ! lp - > extents & & ! seg_is_thin_volume ( lp ) ) {
log_error ( INTERNAL_ERROR " Unable to create new logical volume with no extents. " ) ;
2016-09-16 15:42:37 +03:00
return NULL ;
2014-10-24 17:26:41 +04:00
}
2014-10-26 18:17:14 +03:00
if ( ( seg_is_pool ( lp ) | | seg_is_cache ( lp ) ) & &
2014-10-24 17:26:41 +04:00
( ( uint64_t ) lp - > extents * vg - > extent_size < lp - > chunk_size ) ) {
log_error ( " Unable to create %s smaller than 1 chunk. " ,
lp - > segtype - > name ) ;
return NULL ;
}
if ( ( lp - > alloc ! = ALLOC_ANYWHERE ) & & ( lp - > stripes > dm_list_size ( lp - > pvh ) ) ) {
log_error ( " Number of stripes (%u) must not exceed "
" number of physical volumes (%d) " , lp - > stripes ,
dm_list_size ( lp - > pvh ) ) ;
return NULL ;
}
if ( seg_is_pool ( lp ) )
status | = LVM_WRITE ; /* Pool is always writable */
2015-07-21 12:18:42 +03:00
else if ( seg_is_cache ( lp ) | | seg_is_thin_volume ( lp ) ) {
2014-10-26 20:17:40 +03:00
/* Resolve pool volume */
if ( ! lp - > pool_name ) {
/* Should be already checked */
log_error ( INTERNAL_ERROR " Cannot create %s volume without %s pool. " ,
lp - > segtype - > name , lp - > segtype - > name ) ;
return NULL ;
}
2014-10-24 17:26:41 +04:00
2014-10-26 20:17:40 +03:00
if ( ! ( pool_lv = find_lv ( vg , lp - > pool_name ) ) ) {
log_error ( " Couldn't find volume %s in Volume group %s. " ,
lp - > pool_name , vg - > name ) ;
2014-10-24 17:26:41 +04:00
return NULL ;
}
2014-10-26 20:17:40 +03:00
if ( lv_is_locked ( pool_lv ) ) {
log_error ( " Cannot use locked pool volume %s. " ,
display_lvname ( pool_lv ) ) ;
return NULL ;
}
2014-10-26 20:17:59 +03:00
2017-03-09 19:15:56 +03:00
if ( seg_is_thin_volume ( lp ) ) {
/* Validate volume size so it aligns on a chunk boundary for small extents */
size = first_seg ( pool_lv ) - > chunk_size ;
if ( size > vg - > extent_size ) {
/* Align extents on chunk boundary size */
size = ( ( uint64_t ) vg - > extent_size * lp - > extents + size - 1 ) /
size * size / vg - > extent_size ;
if ( size ! = lp - > extents ) {
log_print_unless_silent ( " Rounding size (%d extents) up to chunk boundary "
" size (%d extents). " , lp - > extents , size ) ;
lp - > extents = size ;
}
2014-10-26 20:17:59 +03:00
}
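/*
 * Editor's worked example (sizes assumed): extent_size = 128 sectors
 * (64KiB), chunk_size = 1024 sectors (512KiB). A request of
 * lp->extents = 21 is 21 * 128 = 2688 sectors; rounding up to a chunk
 * multiple gives (2688 + 1023) / 1024 * 1024 = 3072 sectors, i.e.
 * 3072 / 128 = 24 extents at the next chunk boundary.
 */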
2014-11-02 21:41:46 +03:00
2015-10-14 02:00:35 +03:00
thin_pool_was_active = lv_is_active ( pool_lv ) ;
2015-10-07 16:10:03 +03:00
if ( lv_is_new_thin_pool ( pool_lv ) ) {
if ( ! check_new_thin_pool ( pool_lv ) )
return_NULL ;
/* New pool is now inactive */
2015-10-14 02:00:35 +03:00
} else {
if ( ! activate_lv_excl_local ( cmd , pool_lv ) ) {
log_error ( " Aborting. Failed to locally activate thin pool %s. " ,
display_lvname ( pool_lv ) ) ;
2017-02-28 14:31:36 +03:00
return NULL ;
2015-10-14 02:00:35 +03:00
}
if ( ! pool_below_threshold ( first_seg ( pool_lv ) ) ) {
log_error ( " Cannot create new thin volume, free space in "
" thin pool %s reached threshold. " ,
display_lvname ( pool_lv ) ) ;
return NULL ;
}
2015-10-07 16:10:03 +03:00
}
2014-11-04 17:08:15 +03:00
}
2014-11-02 21:41:46 +03:00
if ( seg_is_cache ( lp ) & &
! wipe_cache_pool ( pool_lv ) )
return_NULL ;
2014-10-26 20:17:40 +03:00
}
/* Resolve origin volume */
if ( lp - > origin_name & &
! ( origin_lv = find_lv ( vg , lp - > origin_name ) ) ) {
log_error ( " Origin volume %s not found in Volume group %s. " ,
lp - > origin_name , vg - > name ) ;
return NULL ;
}
if ( origin_lv & & seg_is_cache_pool ( lp ) ) {
/* Converting existing origin and creating cache pool */
2014-10-22 23:01:03 +04:00
if ( ! validate_lv_cache_create_origin ( origin_lv ) )
2014-10-24 17:26:41 +04:00
return_NULL ;
2014-10-26 20:17:40 +03:00
if ( origin_lv - > size < lp - > chunk_size ) {
log_error ( " Caching of origin cache volume smaller then chunk size is unsupported. " ) ;
return NULL ;
}
2014-10-24 17:26:41 +04:00
/* Validate cache origin is exclusively active */
if ( vg_is_clustered ( origin_lv - > vg ) & &
locking_is_clustered ( ) & &
locking_supports_remote_queries ( ) & &
lv_is_active ( origin_lv ) & &
! lv_is_active_exclusive ( origin_lv ) ) {
log_error ( " Cannot cache not exclusively active origin volume %s. " ,
display_lvname ( origin_lv ) ) ;
return NULL ;
}
2015-08-11 16:11:45 +03:00
} else if ( seg_is_cache ( lp ) ) {
if ( ! pool_lv ) {
log_error ( INTERNAL_ERROR " Pool LV for cache is missing. " ) ;
return NULL ;
}
2014-10-24 17:26:41 +04:00
if ( ! lv_is_cache_pool ( pool_lv ) ) {
log_error ( " Logical volume %s is not a cache pool. " ,
display_lvname ( pool_lv ) ) ;
2014-02-05 02:50:16 +04:00
return NULL ;
}
2014-10-24 17:26:41 +04:00
/* Create cache origin for cache pool */
2014-11-11 17:13:00 +03:00
/* FIXME Eventually support raid/mirrors with -m */
2015-09-22 21:04:12 +03:00
if ( ! ( create_segtype = get_segtype_from_string ( vg - > cmd , SEG_TYPE_NAME_STRIPED ) ) )
2014-02-05 02:50:16 +04:00
return_NULL ;
2016-05-23 18:46:38 +03:00
} else if ( seg_is_mirrored ( lp ) | | ( seg_is_raid ( lp ) & & ! seg_is_any_raid0 ( lp ) ) ) {
2015-01-06 11:59:04 +03:00
if ( is_change_activating ( lp - > activate ) & & ( lp - > activate ! = CHANGE_AEY ) & &
vg_is_clustered ( vg ) & & seg_is_mirrored ( lp ) & & ! seg_is_raid ( lp ) & &
2015-01-05 18:45:30 +03:00
! cluster_mirror_is_available ( vg - > cmd ) ) {
log_error ( " Shared cluster mirrors are not available. " ) ;
return NULL ;
}
2014-11-11 17:13:00 +03:00
/* FIXME This will not pass cluster lock! */
2014-10-24 17:26:41 +04:00
init_mirror_in_sync ( lp - > nosync ) ;
if ( lp - > nosync ) {
log_warn ( " WARNING: New %s won't be synchronised. "
" Don't read what you didn't write! " ,
lp - > segtype - > name ) ;
status | = LV_NOTSYNCED ;
2014-05-07 13:14:22 +04:00
}
2014-10-24 17:26:41 +04:00
2017-10-09 15:34:10 +03:00
if ( seg_is_raid ( lp ) ) {
/* Validate raid target constraint */
if ( lp - > region_size > ( uint64_t ) vg - > extent_size * lp - > extents ) {
log_error ( " Cannot create RAID LV with region size larger than LV size. " ) ;
return NULL ;
}
} else
lp - > region_size = adjusted_mirror_region_size ( vg - > cmd ,
vg - > extent_size ,
lp - > extents ,
lp - > region_size , 0 ,
vg_is_clustered ( vg ) ) ;
2014-10-26 20:17:40 +03:00
} else if ( pool_lv & & seg_is_thin_volume ( lp ) ) {
2014-10-24 17:26:41 +04:00
if ( ! lv_is_thin_pool ( pool_lv ) ) {
log_error ( " Logical volume %s is not a thin pool. " ,
display_lvname ( pool_lv ) ) ;
2011-09-06 04:26:42 +04:00
return NULL ;
2009-07-26 06:33:35 +04:00
}
2014-10-26 20:17:40 +03:00
if ( origin_lv ) {
2014-10-24 17:26:41 +04:00
if ( lv_is_locked ( origin_lv ) ) {
log_error ( " Snapshots of locked devices are not supported. " ) ;
return NULL ;
}
2014-10-31 13:33:19 +03:00
lp - > virtual_extents = origin_lv - > le_count ;
2014-10-26 20:17:40 +03:00
/*
* Check if using ' external origin ' or the ' normal ' snapshot
* within the same thin pool
*/
if ( first_seg ( origin_lv ) - > pool_lv ! = pool_lv ) {
if ( ! pool_supports_external_origin ( first_seg ( pool_lv ) , origin_lv ) )
return_NULL ;
if ( origin_lv - > status & LVM_WRITE ) {
log_error ( " Cannot use writable LV as the external origin. " ) ;
2014-11-11 17:13:00 +03:00
return NULL ; /* FIXME conversion for inactive */
2014-10-26 20:17:40 +03:00
}
if ( lv_is_active ( origin_lv ) & & ! lv_is_external_origin ( origin_lv ) ) {
log_error ( " Cannot use active LV for the external origin. " ) ;
2014-11-11 17:13:00 +03:00
return NULL ; /* We can't be sure device is read-only */
2014-10-26 20:17:40 +03:00
}
}
2014-10-24 17:26:41 +04:00
}
} else if ( lp - > snapshot ) {
2014-10-31 13:33:19 +03:00
if ( ! lp - > virtual_extents ) {
2014-10-26 20:17:40 +03:00
if ( ! origin_lv ) {
2009-07-26 06:33:35 +04:00
log_error ( " Couldn't find origin volume '%s'. " ,
2014-10-07 12:43:47 +04:00
lp - > origin_name ) ;
2011-09-06 04:26:42 +04:00
return NULL ;
2009-07-26 06:33:35 +04:00
}
2014-10-07 12:43:47 +04:00
if ( lv_is_virtual_origin ( origin_lv ) ) {
2009-07-26 06:33:35 +04:00
log_error ( " Can't share virtual origins. "
" Use --virtualsize. " ) ;
2011-09-06 04:26:42 +04:00
return NULL ;
2009-07-26 06:33:35 +04:00
}
2014-11-05 01:21:07 +03:00
2017-10-27 17:48:57 +03:00
if ( ! validate_snapshot_origin ( origin_lv ) )
return_NULL ;
2009-07-26 06:33:35 +04:00
}
2011-11-10 16:42:36 +04:00
2014-10-24 17:26:41 +04:00
if ( ! cow_has_min_chunks ( vg , lp - > extents , lp - > chunk_size ) )
return_NULL ;
2010-10-13 17:52:53 +04:00
2014-10-24 17:26:41 +04:00
/* The snapshot segment gets created later */
2015-09-22 21:04:12 +03:00
if ( ! ( create_segtype = get_segtype_from_string ( cmd , SEG_TYPE_NAME_STRIPED ) ) )
2014-10-24 17:26:41 +04:00
return_NULL ;
2009-07-26 06:33:35 +04:00
2014-10-24 17:26:41 +04:00
/* Must zero cow */
status | = LVM_WRITE ;
lp - > zero = 1 ;
lp - > wipe_signatures = 0 ;
2009-07-26 06:33:35 +04:00
}
2016-04-27 16:02:54 +03:00
if ( ! segtype_is_virtual ( create_segtype ) & & ! lp - > approx_alloc & &
2014-10-26 18:18:53 +03:00
( vg - > free_count < lp - > extents ) ) {
log_error ( " Volume group \" %s \" has insufficient free space "
" (%u extents): %u required. " ,
vg - > name , vg - > free_count , lp - > extents ) ;
return NULL ;
}
2013-07-17 16:52:21 +04:00
if ( ! archive ( vg ) )
return_NULL ;
2016-04-27 16:02:54 +03:00
if ( pool_lv & & segtype_is_thin_volume ( create_segtype ) ) {
2011-11-03 18:56:20 +04:00
/* Ensure all stacked messages are submitted */
2014-10-04 01:51:54 +04:00
if ( ( pool_is_active ( pool_lv ) | | is_change_activating ( lp - > activate ) ) & &
! update_pool_lv ( pool_lv , 1 ) )
2014-02-18 23:52:17 +04:00
return_NULL ;
2009-07-26 06:33:35 +04:00
}
2011-09-06 04:26:42 +04:00
if ( ! ( lv = lv_create_empty ( new_lv_name ? : " lvol%d " , NULL ,
2009-07-26 06:33:35 +04:00
status , lp - > alloc , vg ) ) )
2011-09-06 04:26:42 +04:00
return_NULL ;
2009-07-26 06:33:35 +04:00
2009-12-03 04:47:33 +03:00
if ( lp - > read_ahead ! = lv - > read_ahead ) {
2009-07-26 06:33:35 +04:00
lv - > read_ahead = lp - > read_ahead ;
2014-10-18 13:01:29 +04:00
log_debug_metadata ( " Setting read ahead sectors %u. " , lv - > read_ahead ) ;
2009-07-26 06:33:35 +04:00
}
2016-04-27 16:02:54 +03:00
if ( ! segtype_is_pool ( create_segtype ) & & lp - > minor > = 0 ) {
2009-07-26 06:33:35 +04:00
lv - > major = lp - > major ;
lv - > minor = lp - > minor ;
lv - > status | = FIXED_MINOR ;
2014-10-18 13:01:29 +04:00
log_debug_metadata ( " Setting device number to (%d, %d). " ,
lv - > major , lv - > minor ) ;
2009-07-26 06:33:35 +04:00
}
2015-03-05 23:00:44 +03:00
/*
* The specific LV may not use a lock . lockd_init_lv ( ) sets
* lv - > lock_args to NULL if this LV does not use its own lock .
*/
if ( ! lockd_init_lv ( vg - > cmd , vg , lv , lp ) )
return_NULL ;
2011-10-22 20:46:34 +04:00
dm_list_splice ( & lv - > tags , & lp - > tags ) ;
2009-07-26 06:33:35 +04:00
2014-10-24 17:26:41 +04:00
if ( ! lv_extend ( lv , create_segtype ,
2011-08-03 02:07:20 +04:00
lp - > stripes , lp - > stripe_size ,
2011-11-05 02:43:10 +04:00
lp - > mirrors ,
2016-04-27 16:02:54 +03:00
segtype_is_pool ( create_segtype ) ? lp - > pool_metadata_extents : lp - > region_size ,
segtype_is_thin_volume ( create_segtype ) ? lp - > virtual_extents : lp - > extents ,
2017-10-30 16:35:31 +03:00
lp - > pvh , lp - > alloc , lp - > approx_alloc ) ) {
unlink_lv_from_vg ( lv ) ; /* Keep VG consistent and remove LV without any segment */
2011-09-06 04:26:42 +04:00
return_NULL ;
2017-10-30 16:35:31 +03:00
}
2011-09-06 04:26:42 +04:00
2017-10-12 18:43:24 +03:00
/* rhbz1269533: allow for 100%FREE allocation to work with "mirror" and a disk log */
if ( segtype_is_mirror ( create_segtype ) & &
lp - > log_count & &
! vg - > free_count & &
lv - > le_count > 1 )
lv_reduce ( lv , 1 ) ;
2014-09-19 03:09:36 +04:00
/* Unlock memory if possible */
memlock_unlock ( vg - > cmd ) ;
2016-05-05 22:34:21 +03:00
if ( lv_is_cache_pool ( lv ) ) {
if ( ! cache_set_params ( first_seg ( lv ) ,
2017-03-09 17:54:30 +03:00
lp - > chunk_size ,
2017-02-26 22:18:37 +03:00
lp - > cache_metadata_format ,
2016-05-05 22:34:21 +03:00
lp - > cache_mode ,
lp - > policy_name ,
2017-03-09 17:54:30 +03:00
lp - > policy_settings ) ) {
2014-10-24 17:21:48 +04:00
stack ;
goto revert_new_lv ;
}
2016-05-23 18:46:38 +03:00
} else if ( lv_is_raid ( lv ) & & ! seg_is_any_raid0 ( first_seg ( lv ) ) ) {
2014-10-18 13:01:34 +04:00
first_seg ( lv ) - > min_recovery_rate = lp - > min_recovery_rate ;
first_seg ( lv ) - > max_recovery_rate = lp - > max_recovery_rate ;
2016-04-27 16:02:54 +03:00
} else if ( lv_is_thin_pool ( lv ) ) {
2014-07-23 00:20:18 +04:00
first_seg ( lv ) - > chunk_size = lp - > chunk_size ;
2017-03-03 22:46:13 +03:00
first_seg ( lv ) - > zero_new_blocks = lp - > zero_new_blocks ;
2012-08-09 14:20:47 +04:00
first_seg ( lv ) - > discards = lp - > discards ;
2017-03-09 18:24:28 +03:00
if ( ! recalculate_pool_chunk_size_with_dev_hints ( lv , lp - > thin_chunk_size_calc_policy ) ) {
2014-10-24 17:21:48 +04:00
stack ;
goto revert_new_lv ;
}
2015-01-13 17:23:03 +03:00
if ( lp - > error_when_full )
lv - > status | = LV_ERROR_WHEN_FULL ;
2016-04-27 16:02:54 +03:00
} else if ( pool_lv & & lv_is_virtual ( lv ) ) { /* going to be a thin volume */
2014-10-26 10:13:59 +03:00
seg = first_seg ( lv ) ;
pool_seg = first_seg ( pool_lv ) ;
if ( ! ( seg - > device_id = get_free_pool_device_id ( pool_seg ) ) )
2013-07-17 17:26:41 +04:00
return_NULL ;
2014-10-26 10:13:59 +03:00
seg - > transaction_id = pool_seg - > transaction_id ;
if ( origin_lv & & lv_is_thin_volume ( origin_lv ) & &
( first_seg ( origin_lv ) - > pool_lv = = pool_lv ) ) {
/* For thin snapshot pool must match */
2016-03-01 17:21:36 +03:00
if ( ! attach_pool_lv ( seg , pool_lv , origin_lv , NULL , NULL ) )
2014-10-26 10:13:59 +03:00
return_NULL ;
/* Use the same external origin */
if ( ! attach_thin_external_origin ( seg , first_seg ( origin_lv ) - > external_lv ) )
return_NULL ;
} else {
2016-03-01 17:21:36 +03:00
if ( ! attach_pool_lv ( seg , pool_lv , NULL , NULL , NULL ) )
2014-10-26 10:13:59 +03:00
return_NULL ;
/* If there is an external origin... */
if ( ! attach_thin_external_origin ( seg , origin_lv ) )
2013-07-17 17:26:41 +04:00
return_NULL ;
2013-04-02 16:53:58 +04:00
}
2011-10-03 22:43:39 +04:00
2014-10-26 10:13:59 +03:00
if ( ! attach_pool_message ( pool_seg , DM_THIN_MESSAGE_CREATE_THIN , lv , 0 , 0 ) )
2013-07-17 17:26:41 +04:00
return_NULL ;
2011-10-03 22:43:39 +04:00
}
2009-07-26 06:33:35 +04:00
2015-07-03 16:31:31 +03:00
if ( ! pool_check_overprovisioning ( lv ) )
return_NULL ;
2012-02-01 06:10:45 +04:00
/* FIXME Log allocation and attachment should have happened inside lv_extend. */
2011-08-03 02:07:20 +04:00
if ( lp - > log_count & &
! seg_is_raid ( first_seg ( lv ) ) & & seg_is_mirrored ( first_seg ( lv ) ) ) {
2011-04-07 01:32:20 +04:00
if ( ! add_mirror_log ( cmd , lv , lp - > log_count ,
first_seg ( lv ) - > region_size ,
lp - > pvh , lp - > alloc ) ) {
2009-07-26 06:33:35 +04:00
stack ;
goto revert_new_lv ;
}
}
2013-07-10 16:06:50 +04:00
lv_set_activation_skip ( lv , lp - > activation_skip & ACTIVATION_SKIP_SET ,
lp - > activation_skip & ACTIVATION_SKIP_SET_ENABLED ) ;
2012-06-28 12:15:07 +04:00
/*
* Check for autoactivation .
* If the LV passes the auto activation filter , activate
* it just as if CHANGE_AY was used , CHANGE_AN otherwise .
*/
if ( lp - > activate = = CHANGE_AAY )
2014-10-18 13:01:29 +04:00
lp - > activate = lv_passes_auto_activation_filter ( cmd , lv )
? CHANGE_ALY : CHANGE_ALN ;
2012-06-28 12:15:07 +04:00
2014-02-19 14:02:16 +04:00
if ( lv_activation_skip ( lv , lp - > activate , lp - > activation_skip & ACTIVATION_SKIP_IGNORE ) )
2013-07-10 16:06:50 +04:00
lp - > activate = CHANGE_AN ;
2013-07-18 18:25:16 +04:00
/* store vg on disk(s) */
2014-11-05 22:27:51 +03:00
if ( ! vg_write ( vg ) | | ! vg_commit ( vg ) )
/* Pool created metadata LV, but better avoid recover when vg_write/commit fails */
2013-07-18 18:25:16 +04:00
return_NULL ;
backup ( vg ) ;
if ( test_mode ( ) ) {
2013-11-06 19:16:34 +04:00
log_verbose ( " Test mode: Skipping activation, zeroing and signature wiping. " ) ;
2013-07-18 18:25:16 +04:00
goto out ;
2014-04-01 19:53:18 +04:00
}
2013-11-06 19:16:34 +04:00
/* Do not scan this LV until properly zeroed/wiped. */
2014-10-22 15:29:25 +04:00
if ( _should_wipe_lv ( lp , lv , 0 ) )
2013-10-08 15:27:21 +04:00
lv - > status | = LV_NOSCAN ;
2013-10-23 16:06:39 +04:00
if ( lp - > temporary )
lv - > status | = LV_TEMPORARY ;
2014-10-24 17:26:41 +04:00
if ( seg_is_cache ( lp ) ) {
2014-11-11 17:13:00 +03:00
/* FIXME Support remote exclusive activation? */
2014-11-06 22:36:53 +03:00
/* Not yet a 'cache' LV - it is a striped volume for wiping */
2014-10-24 17:26:41 +04:00
if ( is_change_activating ( lp - > activate ) & &
! activate_lv_excl_local ( cmd , lv ) ) {
log_error ( " Aborting. Failed to activate LV %s locally exclusively. " ,
display_lvname ( lv ) ) ;
goto revert_new_lv ;
}
} else if ( lv_is_cache_pool ( lv ) ) {
/* Cache pool cannot be activated and zeroed */
log_very_verbose ( " Cache pool is prepared. " ) ;
} else if ( lv_is_thin_volume ( lv ) ) {
2012-01-25 13:02:35 +04:00
/* For snapshot, suspend active thin origin first */
2014-10-07 12:43:47 +04:00
if ( origin_lv & & lv_is_active ( origin_lv ) & & lv_is_thin_volume ( origin_lv ) ) {
if ( ! suspend_lv_origin ( cmd , origin_lv ) ) {
2012-02-08 17:05:38 +04:00
log_error ( " Failed to suspend thin snapshot origin %s/%s. " ,
2014-10-07 12:43:47 +04:00
origin_lv - > vg - > name , origin_lv - > name ) ;
2011-11-07 15:03:47 +04:00
goto revert_new_lv ;
2012-01-25 13:14:25 +04:00
}
2014-10-07 12:43:47 +04:00
if ( ! resume_lv_origin ( cmd , origin_lv ) ) { /* deptree updates thin-pool */
2012-02-08 17:05:38 +04:00
log_error ( " Failed to resume thin snapshot origin %s/%s. " ,
2014-10-07 12:43:47 +04:00
origin_lv - > vg - > name , origin_lv - > name ) ;
2011-11-07 15:03:47 +04:00
goto revert_new_lv ;
}
2012-01-25 13:14:25 +04:00
/* At this point remove pool messages, snapshot is active */
2014-11-04 17:08:15 +03:00
if ( ! update_pool_lv ( pool_lv , 0 ) ) {
2011-11-07 15:03:47 +04:00
stack ;
2013-07-17 17:26:41 +04:00
goto revert_new_lv ;
2011-11-07 15:03:47 +04:00
}
}
thin: move pool messaging from resume to suspend
The existing messaging interface for thin-pool has a few 'weak' points:
* Messages were posted with each 'resume' operation, thus not allowing
activation of a thin-pool in its existing state.
* The accelerated (skipped) suspend step has not worked in a cluster,
since clvmd resumes only nodes which are suspended (have the proper lock
state).
* Resume may fail, and the code is not really designed to 'fail' in this
phase (the generic rule here is that resume DOES NOT fail unless something
serious is wrong, and the lvm2 tool usually doesn't handle a recovery path
in this case.)
* A full thin-pool suspend happened when taking a thin-volume snapshot.
With this patch, the new method relocates message passing into the suspend
state.
This has a few drawbacks with the current API, but overall it performs
better and gives more possibilities to deal with errors.
The patch introduces new logic for 'origin-only' suspend of a thin-pool,
which also applies to a thin volume when taking a snapshot.
When the suspend_origin_only operation is invoked on a pool with
queued messages, only those messages are posted to the thin-pool; the
actual suspend of the thin pool and its data and metadata volumes is
skipped.
This makes taking a snapshot of a thin volume a lighter operation and
avoids blocking other unrelated active thin volumes.
Failure now happens in the 'suspend' state, where a 'fail' is more expected
and is better handled through error paths.
Activation of the thin-pool now sends no messages and leaves it up to the
tool to decide later how to finish an unfinished double-commit transaction.
A problem which needs some API improvement relates to the lvm2 tree
construction. For the suspend tree we do not add a target table line
into the tree; only a device is inserted into the tree.
The current mechanism to attach messages for a thin-pool requires libdm
to know about the thin-pool target, so lvm2 currently assumes the node
really is a thin-pool and fills in the table line for this node (which
should be ensured by the PRELOAD phase, but it's a misuse of internal API);
we would possibly need to be able to attach a message to 'any' node.
Another thing to notice: the current messaging interface in the thin-pool
target requires suspending the thin volume origin first and then sending
a create message, but this has no 'nice' solution on the lvm2
side, and IMHO we should introduce something like a 'create_after_resume'
message.
The patch also changes the moment when the lvm2 transaction id is increased.
Now it happens only after a successful finish of the kernel transaction id
change. This change was needed to properly handle activation of a pool
which is in the middle of an unfinished transaction, and it also corrects
usage of the thin-pool by external apps like Docker.
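/*
 * A minimal sketch of the resulting snapshot flow (illustrative only;
 * error handling omitted, and the helpers shown are the same ones used
 * in the code below, not a new API):
 *
 *	// taking a snapshot of an active thin volume
 *	suspend_lv_origin(cmd, origin_lv);	// origin-only suspend: queued
 *						// thin-pool messages are posted,
 *						// the full pool/data/metadata
 *						// suspend is skipped
 *	resume_lv_origin(cmd, origin_lv);	// deptree updates the thin-pool
 *	update_pool_lv(pool_lv, 0);		// remove queued pool messages
 *						// once the snapshot is active
 */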
2015-07-01 14:31:37 +03:00
		if (!dm_list_empty(&first_seg(pool_lv)->thin_messages)) {
2013-07-18 18:44:39 +04:00
			/* Send message so that table preload knows new thin */
2014-11-04 17:08:15 +03:00
			if (!lv_is_active(pool_lv)) {
				/* Avoid multiple thin-pool activations in this case */
				if (thin_pool_was_active < 0)
					thin_pool_was_active = 0;
				if (!activate_lv_excl(cmd, pool_lv)) {
2014-11-19 20:57:40 +03:00
					log_error("Failed to activate thin pool %s.",
						  display_lvname(pool_lv));
2014-11-04 17:08:15 +03:00
					goto revert_new_lv;
				}
				if (!lv_is_active(pool_lv)) {
					log_error("Cannot activate thin pool %s, perhaps skipped in lvm.conf volume_list?",
						  display_lvname(pool_lv));
					return NULL;
				}
			}
			/* Keep thin pool active until thin volume is activated */
2015-07-01 14:31:37 +03:00
			if (!update_pool_lv(pool_lv, 1)) {
2012-01-25 13:15:44 +04:00
				stack;
2013-07-18 18:44:39 +04:00
				goto revert_new_lv;
2012-01-25 13:15:44 +04:00
			}
2015-07-01 14:31:37 +03:00
}
		backup(vg);
2014-10-03 20:39:54 +04:00
2015-07-01 14:31:37 +03:00
		if (!lv_active_change(cmd, lv, lp->activate, 0)) {
			log_error("Failed to activate thin %s.", lv->name);
			goto deactivate_and_revert_new_lv;
		}
2014-11-04 17:08:15 +03:00
2015-07-01 14:31:37 +03:00
		/* Restore inactive state if needed */
		if (!thin_pool_was_active &&
		    !deactivate_lv(cmd, pool_lv)) {
			log_error("Failed to deactivate thin pool %s.",
				  display_lvname(pool_lv));
			return NULL;
2011-10-20 14:35:14 +04:00
		}
	} else if (lp->snapshot) {
2014-11-05 17:15:07 +03:00
		lv->status |= LV_TEMPORARY;
2013-07-18 18:20:48 +04:00
		if (!activate_lv_local(cmd, lv)) {
2009-07-26 06:33:35 +04:00
			log_error("Aborting. Failed to activate snapshot "
				  "exception store.");
			goto revert_new_lv;
		}
2014-11-05 17:15:07 +03:00
		lv->status &= ~LV_TEMPORARY;
2014-11-05 17:14:58 +03:00
	} else if (!lv_active_change(cmd, lv, lp->activate, 0)) {
2009-07-26 06:33:35 +04:00
		log_error("Failed to activate new LV.");
2014-10-24 17:26:41 +04:00
		goto deactivate_and_revert_new_lv;
2009-07-26 06:33:35 +04:00
	}
2016-07-27 14:36:25 +03:00
	if (_should_wipe_lv(lp, lv, !lp->suppress_zero_warn)) {
2013-11-28 14:22:24 +04:00
		if (!wipe_lv(lv, (struct wipe_params)
			     {
				     .do_zero = lp->zero,
				     .do_wipe_signatures = lp->wipe_signatures,
				     .yes = lp->yes,
				     .force = lp->force
			     })) {
2014-10-18 13:01:29 +04:00
			log_error("Aborting. Failed to wipe %s.", lp->snapshot
				  ? "snapshot exception store" : "start of new LV");
2013-11-06 19:16:34 +04:00
			goto deactivate_and_revert_new_lv;
		}
2009-07-26 06:33:35 +04:00
	}
2014-10-24 17:26:41 +04:00
	if (seg_is_cache(lp) || (origin_lv && lv_is_cache_pool(lv))) {
		/* Finish cache conversion magic */
		if (origin_lv) {
			/* Convert origin to cached LV */
			if (!(tmp_lv = lv_cache_create(lv, origin_lv))) {
2014-11-11 17:13:00 +03:00
				/* FIXME Do a better revert */
2014-10-24 17:26:41 +04:00
				log_error("Aborting. Leaving cache pool %s and uncached origin volume %s.",
					  display_lvname(lv), display_lvname(origin_lv));
				return NULL;
			}
		} else {
			if (!(tmp_lv = lv_cache_create(pool_lv, lv))) {
				/* 'lv' still holds the newly created LV */
				stack;
				goto deactivate_and_revert_new_lv;
			}
		}
		lv = tmp_lv;
2014-11-27 22:20:48 +03:00
2016-05-05 22:34:21 +03:00
		if (!cache_set_params(first_seg(lv),
2017-03-09 17:54:30 +03:00
				      lp->chunk_size,
2017-02-26 22:18:37 +03:00
				      lp->cache_metadata_format,
2016-05-05 22:34:21 +03:00
				      lp->cache_mode,
				      lp->policy_name,
2017-03-09 17:54:30 +03:00
				      lp->policy_settings))
2015-08-11 15:01:12 +03:00
			return_NULL; /* revert? */
2014-10-24 17:26:41 +04:00
		if (!lv_update_and_reload(lv)) {
2014-11-11 17:13:00 +03:00
			/* FIXME Do a better revert */
2014-10-24 17:26:41 +04:00
			log_error("Aborting. Manual intervention required.");
			return NULL; /* FIXME: revert */
		}
	} else if (lp->snapshot) {
2014-11-05 17:15:07 +03:00
		/* Deactivate zeroed COW, avoid any race usage */
		if (!deactivate_lv(cmd, lv)) {
			log_error("Aborting. Couldn't deactivate snapshot COW area %s.",
				  display_lvname(lv));
			goto deactivate_and_revert_new_lv; /* Let's retry on error path */
		}
2015-06-24 16:12:43 +03:00
		/* Get in sync with deactivation before reusing the LV as a snapshot */
2015-06-30 20:54:38 +03:00
		if (!sync_local_dev_names(lv->vg->cmd)) {
			log_error("Failed to sync local devices before creating snapshot using %s.",
				  display_lvname(lv));
			goto revert_new_lv;
		}
2015-06-24 16:12:43 +03:00
2014-11-05 17:15:07 +03:00
		/* Create a zero origin volume for the sparse snapshot */
		if (lp->virtual_extents &&
		    !(origin_lv = _create_virtual_origin(cmd, vg, lv->name,
							 lp->permission,
							 lp->virtual_extents)))
			goto revert_new_lv;
2009-07-26 06:33:35 +04:00
		/* Reset permission after zeroing */
		if (!(lp->permission & LVM_WRITE))
			lv->status &= ~LVM_WRITE;
2013-04-21 12:37:52 +04:00
		/*
		 * The COW LV is activated via implicit activation of the origin LV.
		 * Only the snapshot origin holds the LV lock in a cluster.
		 */
2014-10-07 12:43:47 +04:00
		if (!vg_add_snapshot(origin_lv, lv, NULL,
				     origin_lv->le_count, lp->chunk_size)) {
2009-07-26 06:33:35 +04:00
			log_error("Couldn't create snapshot.");
			goto deactivate_and_revert_new_lv;
		}
2014-11-05 17:15:07 +03:00
		if (lp->virtual_extents) {
			/* Store vg on disk(s) */
			if (!vg_write(vg) || !vg_commit(vg))
				return_NULL; /* Metadata update fails, deep troubles */
			backup(vg);
			/*
2014-11-11 17:13:00 +03:00
			 * FIXME We do not actually need snapshot-origin as an active device,
			 * as the virtual origin is already a 'hidden' private device without
			 * vg/lv links. As such it is not supposed to be used by any user.
			 * Also it would save one dm table entry, but it needs quite a few
			 * changes in the libdm/lvm2 code base to support it.
2014-11-05 17:15:07 +03:00
			 */
2014-11-11 17:13:00 +03:00
2014-11-05 17:15:07 +03:00
			/* Activate the sparse snapshot once it is a complete LV */
			if (!lv_active_change(cmd, origin_lv, lp->activate, 1)) {
				log_error("Failed to activate sparse volume %s.",
					  display_lvname(origin_lv));
				return NULL;
			}
		} else if (!lv_update_and_reload(origin_lv)) {
			log_error("Aborting. Manual intervention required.");
			return NULL; /* FIXME: revert */
		}
2009-07-26 06:33:35 +04:00
}
2011-01-24 17:19:05 +03:00
out:
2011-09-06 04:26:42 +04:00
	return lv;
2009-07-26 06:33:35 +04:00
deactivate_and_revert_new_lv:
	if (!deactivate_lv(cmd, lv)) {
2014-11-05 17:15:07 +03:00
		log_error("Unable to deactivate failed new LV %s. "
			  "Manual intervention required.", display_lvname(lv));
2011-09-06 04:26:42 +04:00
		return NULL;
2009-07-26 06:33:35 +04:00
	}
revert_new_lv:
2017-07-07 22:42:25 +03:00
	lockd_lv(cmd, lv, "un", LDLV_PERSISTENT);
	lockd_free_lv(vg->cmd, vg, lp->lv_name, &lv->lvid.id[1], lv->lock_args);
2015-03-05 23:00:44 +03:00
2009-07-26 06:33:35 +04:00
	/* FIXME Better to revert to backup of metadata? */
	if (!lv_remove(lv) || !vg_write(vg) || !vg_commit(vg))
		log_error("Manual intervention may be required to remove "
			  "abandoned LV(s) before retrying.");
	else
		backup(vg);
2011-09-06 04:26:42 +04:00
	return NULL;
2009-07-26 06:33:35 +04:00
}
2012-11-13 13:49:32 +04:00
struct logical_volume *lv_create_single(struct volume_group *vg,
					struct lvcreate_params *lp)
2011-09-06 04:26:42 +04:00
{
2014-10-24 17:26:41 +04:00
	const struct segment_type *segtype;
2011-09-06 04:26:42 +04:00
	struct logical_volume *lv;
2014-10-04 01:51:54 +04:00
	/* Create pool first if necessary */
2014-10-24 17:26:41 +04:00
	if (lp->create_pool && !seg_is_pool(lp)) {
		segtype = lp->segtype;
		if (seg_is_thin_volume(lp)) {
2015-09-22 21:04:12 +03:00
			if (!(lp->segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_THIN_POOL)))
2014-10-07 17:41:54 +04:00
				return_NULL;
2014-10-07 12:43:47 +04:00
			if (!(lv = _lv_create_an_lv(vg, lp, lp->pool_name)))
2014-10-04 01:51:54 +04:00
				return_NULL;
2014-10-24 17:26:41 +04:00
		} else if (seg_is_cache(lp)) {
			if (!lp->origin_name) {
				/* Until we have --pooldatasize we are lost */
				log_error(INTERNAL_ERROR "Unsupported creation of cache and cache pool volume.");
				return NULL;
			}
			/* origin_name is defined -> create a cache LV with a new cache pool */
2015-09-22 21:04:12 +03:00
			if (!(lp->segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_CACHE_POOL)))
2014-10-07 17:41:54 +04:00
				return_NULL;
2014-10-07 12:43:47 +04:00
			if (!(lv = _lv_create_an_lv(vg, lp, lp->pool_name)))
2014-10-04 01:51:54 +04:00
				return_NULL;
2016-05-06 00:12:30 +03:00
			if (!lv_is_cache(lv)) {
				log_error(INTERNAL_ERROR "Logical volume %s is not cache.",
					  display_lvname(lv));
				return NULL;
2014-10-04 01:51:54 +04:00
			}
2016-05-06 00:12:30 +03:00
			/* Conversion via lvcreate */
			log_print_unless_silent("Logical volume %s is now cached.",
						display_lvname(lv));
			return lv;
2014-10-24 17:26:41 +04:00
		} else {
			log_error(INTERNAL_ERROR "Creation of pool for unsupported segment type %s.",
				  lp->segtype->name);
2014-10-04 01:51:54 +04:00
			return NULL;
		}
2014-10-24 17:26:41 +04:00
		lp->pool_name = lv->name;
		lp->segtype = segtype;
2011-09-06 04:26:42 +04:00
	}
	if (!(lv = _lv_create_an_lv(vg, lp, lp->lv_name)))
2014-10-04 01:51:54 +04:00
		return_NULL;
2011-09-06 04:26:42 +04:00
2014-10-20 23:53:48 +04:00
	if (lp->temporary)
		log_verbose("Temporary logical volume \"%s\" created.", lv->name);
	else
		log_print_unless_silent("Logical volume \"%s\" created.", lv->name);
2011-09-06 04:26:42 +04:00
2012-11-13 13:49:32 +04:00
	return lv;
2011-09-06 04:26:42 +04:00
}
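/*
 * A minimal usage sketch for lv_create_single() (illustrative only; real
 * callers in the lvcreate tool fill in many more lvcreate_params fields,
 * and the field subset shown here is an assumption):
 *
 *	struct lvcreate_params lp = { 0 };
 *
 *	lp.lv_name = "lvol0";
 *	lp.pool_name = "pool0";
 *	lp.create_pool = 1;	// pool LV is created first, then the thin LV
 *	lp.segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_THIN);
 *	// sizes, activation mode, zeroing etc. omitted here
 *
 *	if (!(lv = lv_create_single(vg, &lp)))
 *		return_NULL;
 */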