/*
 * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib.h"
#include "str_list.h"
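
/*
 * Create a new, empty string list allocated from the given pool.
 * Returns NULL (with ENOMEM logged) on allocation failure.
 */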
struct dm_list *str_list_create(struct dm_pool *mem)
{
	struct dm_list *sl;

	if (!(sl = dm_pool_alloc(mem, sizeof(struct dm_list)))) {
		log_errno(ENOMEM, "str_list allocation failed");
		return NULL;
	}

	dm_list_init(sl);

	return sl;
}
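
/*
 * Link str into sll without checking whether it is already present.
 * Only the pointer is stored; the string itself is not copied.
 * With as_first set the node is prepended, otherwise it is appended.
 */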
static int _str_list_add_no_dup_check(struct dm_pool *mem, struct dm_list *sll,
				      const char *str, int as_first)
{
	struct dm_str_list *sln;

	if (!str)
		return_0;

	if (!(sln = dm_pool_alloc(mem, sizeof(*sln))))
		return_0;

	sln->str = str;

	if (as_first)
		dm_list_add_h(sll, &sln->list);
	else
		dm_list_add(sll, &sln->list);

	return 1;
}
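
/* Public wrappers: append or prepend without the duplicate check. */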
int str_list_add_no_dup_check(struct dm_pool *mem, struct dm_list *sll,
			      const char *str)
{
	return _str_list_add_no_dup_check(mem, sll, str, 0);
}

int str_list_add_h_no_dup_check(struct dm_pool *mem, struct dm_list *sll,
				const char *str)
{
	return _str_list_add_no_dup_check(mem, sll, str, 1);
}
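
/*
 * Add str to sll unless an equal string is already on the list.
 * Note that only the pointer is stored, so the caller must ensure
 * str remains valid for the lifetime of the list (str_list_dup()
 * makes pool copies instead).
 */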
int str_list_add(struct dm_pool *mem, struct dm_list *sll, const char *str)
{
	if (!str)
		return_0;

	/* Already in list? */
	if (str_list_match_item(sll, str))
		return 1;

	return str_list_add_no_dup_check(mem, sll, str);
}
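
/*
 * Usage sketch (illustrative only: 'mem' stands for a struct dm_pool
 * the caller already owns, and the tag names are invented):
 *
 *	struct dm_list *tags;
 *
 *	if (!(tags = str_list_create(mem)))
 *		return 0;
 *
 *	if (!str_list_add(mem, tags, "database") ||
 *	    !str_list_add(mem, tags, "backup"))
 *		return 0;
 *
 *	if (str_list_match_item(tags, "backup"))
 *		log_debug("tag found");
 */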

/* Add contents of sll2 to sll. */
int str_list_add_list(struct dm_pool *mem, struct dm_list *sll,
		      struct dm_list *sll2)
{
	struct dm_str_list *sl;

	if (!sll2)
		return_0;

	dm_list_iterate_items(sl, sll2)
		if (!str_list_add(mem, sll, sl->str))
			return_0;

	return 1;
}
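
/*
 * Unlink every node whose string compares equal to str.
 * The node memory itself stays in the pool it was allocated from.
 */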
void str_list_del(struct dm_list *sll, const char *str)
{
	struct dm_list *slh, *slht;

	dm_list_iterate_safe(slh, slht, sll)
		if (!strcmp(str, dm_list_item(slh, struct dm_str_list)->str))
			dm_list_del(slh);
}
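
/*
 * Duplicate sllold into sllnew, copying each string into pool mem.
 * sllnew is (re)initialised first.
 */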
int str_list_dup(struct dm_pool *mem, struct dm_list *sllnew,
		 const struct dm_list *sllold)
{
	struct dm_str_list *sl;

	dm_list_init(sllnew);

	dm_list_iterate_items(sl, sllold) {
		if (!str_list_add(mem, sllnew, dm_pool_strdup(mem, sl->str)))
			return_0;
	}

	return 1;
}

/*
 * Is item on list?
 */
int str_list_match_item(const struct dm_list *sll, const char *str)
{
	struct dm_str_list *sl;

	dm_list_iterate_items(sl, sll)
		if (!strcmp(str, sl->str))
			return 1;

	return 0;
}

/*
 * Is at least one item on both lists?
 * If tag_matched is non-NULL, it is set to the tag that matched.
 */
int str_list_match_list(const struct dm_list *sll, const struct dm_list *sll2,
			const char **tag_matched)
{
	struct dm_str_list *sl;

	dm_list_iterate_items(sl, sll)
		if (str_list_match_item(sll2, sl->str)) {
			if (tag_matched)
				*tag_matched = sl->str;
			return 1;
		}

	return 0;
}
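
/*
 * Usage sketch (illustrative: 'host_tags' and 'vg_tags' are assumed
 * lists built with str_list_add()):
 *
 *	const char *tag = NULL;
 *
 *	if (str_list_match_list(host_tags, vg_tags, &tag))
 *		log_debug("first common tag: %s", tag);
 */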

/*
 * Do both lists contain the same set of items?
 * (The size-plus-membership check below is only a valid set-equality
 * test when neither list holds duplicates, as str_list_add() ensures.)
 */
int str_list_lists_equal(const struct dm_list *sll, const struct dm_list *sll2)
{
	struct dm_str_list *sl;

	if (dm_list_size(sll) != dm_list_size(sll2))
		return 0;

	dm_list_iterate_items(sl, sll)
		if (!str_list_match_item(sll2, sl->str))
			return 0;

	return 1;
}
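
/*
 * Join all items of list into one pool-allocated string, separating
 * consecutive items with delim. Returns NULL on allocation failure.
 */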
char *str_list_to_str(struct dm_pool *mem, const struct dm_list *list,
		      const char *delim)
{
	size_t delim_len = strlen(delim);
	unsigned list_size = dm_list_size(list);
	struct dm_str_list *sl;
	char *str, *p;
	size_t len = 0;
	unsigned i = 0;

	dm_list_iterate_items(sl, list)
		len += strlen(sl->str);

	if (list_size > 1)
		len += ((list_size - 1) * delim_len);

	str = dm_pool_alloc(mem, len + 1);
	if (!str) {
		log_error("str_list_to_str: string allocation failed.");
		return NULL;
	}
	str[len] = '\0';

	p = str;
	dm_list_iterate_items(sl, list) {
		len = strlen(sl->str);
		memcpy(p, sl->str, len);
		p += len;

		if (++i != list_size) {
			memcpy(p, delim, delim_len);
			p += delim_len;
		}
	}

	return str;
}
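
/*
 * Split str on delim into a new string list allocated from mem.
 * With ignore_multiple_delim set, a run of consecutive delimiters
 * is treated as a single one. Returns NULL on failure.
 */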
struct dm_list *str_to_str_list(struct dm_pool *mem, const char *str,
				const char *delim, int ignore_multiple_delim)
{
	size_t delim_len = strlen(delim);
	struct dm_list *list;
	const char *p1, *p2, *next;
	char *str_item;
	size_t len;

	if (!(list = str_list_create(mem))) {
		log_error("str_to_str_list: string list allocation failed.");
		return NULL;
	}

	p1 = p2 = str;
	while (*p1) {
		if (!(p2 = strstr(p1, delim)))
			next = p2 = str + strlen(str);
		else
			next = p2 + delim_len;

		len = p2 - p1;
		str_item = dm_pool_alloc(mem, len + 1);
		if (!str_item) {
			log_error("str_to_str_list: string list item allocation failed.");
			goto bad;
		}
		memcpy(str_item, p1, len);
		str_item[len] = '\0';

		if (!str_list_add_no_dup_check(mem, list, str_item))
			goto_bad;

		if (ignore_multiple_delim) {
			while (!strncmp(next, delim, delim_len))
				next += delim_len;
		}

		p1 = next;
	}

	return list;

bad:
	dm_pool_free(mem, list);

	return NULL;
}
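
/*
 * Round-trip sketch (illustrative: 'mem' is an assumed struct dm_pool):
 *
 *	struct dm_list *fields;
 *	char *joined;
 *
 *	if (!(fields = str_to_str_list(mem, "a,b,,c", ",", 1)))
 *		return 0;
 *
 * fields now holds "a", "b" and "c" - the empty field between the
 * consecutive delimiters is collapsed. Joining it back:
 *
 *	if (!(joined = str_list_to_str(mem, fields, ",")))
 *		return 0;
 *
 * joined is now "a,b,c".
 */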