/*
 * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
# include "lib.h"
# include "metadata.h"
# include "report.h"
# include "toolcontext.h"
# include "lvm-string.h"
# include "display.h"
# include "activate.h"
2004-09-16 22:40:56 +04:00
# include "segtype.h"
2007-01-16 21:06:12 +03:00
# include "lvmcache.h"
2013-09-18 04:09:15 +04:00
# include "device-types.h"
2014-05-29 11:41:36 +04:00
# include "str_list.h"
2002-12-12 23:55:49 +03:00
2010-01-07 17:37:11 +03:00
# include <stddef.h> /* offsetof() */
2014-07-02 11:45:53 +04:00
struct lv_with_info {
struct logical_volume * lv ;
struct lvinfo * info ;
} ;
2007-01-16 21:06:12 +03:00
/*
 * Bundle of every object a report row can be generated from.
 * Only the members relevant to the current report type are set.
 */
struct lvm_report_object {
	struct volume_group *vg;
	struct lv_with_info *lvi;
	struct physical_volume *pv;
	struct lv_segment *seg;
	struct pv_segment *pvseg;
	struct label *label;
};
2014-07-08 14:40:45 +04:00
/*
 * Enum for field_num index to use in per-field reserved value definition.
 * Each field is represented by enum value with name "field_<id>" where <id>
 * is the field_id of the field as registered in columns.h.
 */
#define FIELD(type, strct, sorttype, head, field_name, width, func, id, desc, writeable) field_ ## id,
enum {
#include "columns.h"
};
#undef FIELD
2013-06-15 04:24:16 +04:00
/* Numeric values backing binary (0/1) report fields. */
static const uint64_t _zero64 = UINT64_C(0);
static const uint64_t _one64 = UINT64_C(1);

/*
 * Canonical strings for binary and undefined field values.
 * Note: the previous declarations used "const char const", duplicating
 * the qualifier; the intended form is a const array of const char.
 */
static const char _str_zero[] = "0";
static const char _str_one[] = "1";
static const char _str_no[] = "no";
static const char _str_yes[] = "yes";
static const char _str_unknown[] = "unknown";
2007-11-09 19:51:54 +03:00
/*
 * 32 bit signed is casted to 64 bit unsigned in dm_report_field internally!
 * So when stored in the struct, the _reserved_number_undef_32 is actually
 * equal to _reserved_number_undef_64.
 */
static const int32_t _reserved_number_undef_32 = INT32_C(-1);
2014-07-08 14:40:45 +04:00
/*
 * Reserved values and their assigned names.
 * The first name is the one that is also used for reporting.
 * All names listed are synonyms recognized in selection criteria.
 * For binary-based values we map all reserved names listed onto value 1, blank onto value 0.
 *
 * TYPE_RESERVED_VALUE(type, reserved_value_id, description, value, reserved name, ...)
 * FIELD_RESERVED_VALUE(field_id, reserved_value_id, description, value, reserved name, ...)
 * FIELD_RESERVED_BINARY_VALUE(field_id, reserved_value_id, description, reserved name for 1, ...)
 *
 * Note: FIELD_RESERVED_BINARY_VALUE creates:
 *         - 'reserved_value_id_y' (for 1)
 *         - 'reserved_value_id_n' (for 0)
 */
#define RESERVED(id) _reserved_ ## id
#define FIRST_NAME(id) _reserved_ ## id ## _names[0]

/* NUM is the storage type used by values.h for numeric reserved values. */
#define NUM uint64_t

#define TYPE_RESERVED_VALUE(type, id, desc, value, ...) \
	static const char *_reserved_ ## id ## _names[] = { __VA_ARGS__, NULL}; \
	static const type _reserved_ ## id = value;

#define FIELD_RESERVED_VALUE(field_id, id, desc, value, ...) \
	static const char *_reserved_ ## id ## _names[] = { __VA_ARGS__, NULL}; \
	static const struct dm_report_field_reserved_value _reserved_ ## id = {field_ ## field_id, &value};

#define FIELD_RESERVED_BINARY_VALUE(field_id, id, desc, ...) \
	FIELD_RESERVED_VALUE(field_id, id ## _y, desc, _one64, __VA_ARGS__, _str_yes) \
	FIELD_RESERVED_VALUE(field_id, id ## _n, desc, _zero64, __VA_ARGS__, _str_no)

/* First pass over values.h: emit the variable definitions. */
#include "values.h"

#undef NUM
#undef TYPE_RESERVED_VALUE
#undef FIELD_RESERVED_VALUE
#undef FIELD_RESERVED_BINARY_VALUE
/*
* Create array of reserved values to be registered with reporting code via
* dm_report_init_with_selection function that initializes report with
* selection criteria . Selection code then recognizes these reserved values
* when parsing selection criteria .
2014-07-10 15:37:26 +04:00
*/
2014-07-10 13:54:37 +04:00
# define NUM DM_REPORT_FIELD_TYPE_NUMBER
2014-07-10 15:37:26 +04:00
# define TYPE_RESERVED_VALUE(type, id, desc, value, ...) {type, &_reserved_ ## id, _reserved_ ## id ## _names, desc},
2014-07-10 13:54:37 +04:00
2014-07-10 15:37:26 +04:00
# define FIELD_RESERVED_VALUE(field_id, id, desc, value, ...) {DM_REPORT_FIELD_TYPE_NONE, &_reserved_ ## id, _reserved_ ## id ## _names, desc},
2014-07-10 13:54:37 +04:00
2014-07-10 15:37:26 +04:00
# define FIELD_RESERVED_BINARY_VALUE(field_id, id, desc, ...) \
FIELD_RESERVED_VALUE ( field_id , id # # _y , desc , _one64 , __VA_ARGS__ ) \
FIELD_RESERVED_VALUE ( field_id , id # # _n , desc , _zero64 , __VA_ARGS__ )
2014-07-08 14:40:45 +04:00
report: select: add support for reserved value recognition in report selection string - add struct dm_report_reserved_value
Make dm_report_init_with_selection to accept an argument with an
array of reserved values where each element contains a triple:
{dm report field type, reserved value, array of strings representing this value}
When the selection is parsed, we always check whether a string
representation of some reserved value is not hit and if it is,
we use the reserved value assigned for this string instead of
trying to parse it as a value of certain field type.
This makes it possible to define selections like:
... --select lv_major=undefined (or -1 or unknown or undef or whatever string representations are registered for this reserved value in the future)
... --select lv_read_ahead=auto
... --select vg_mda_copies=unmanaged
With this, each time the field value of certain type is hit
and when we compare it with the selection, we use the proper
value for comparison.
For now, register these reserved values that are used at the moment
(also more descriptive names are used for the values):
const uint64_t _reserved_number_undef_64 = UINT64_MAX;
const uint64_t _reserved_number_unmanaged_64 = UINT64_MAX - 1;
const uint64_t _reserved_size_auto_64 = UINT64_MAX;
{
{DM_REPORT_FIELD_TYPE_NUMBER, _reserved_number_undef_64, {"-1", "undefined", "undef", "unknown", NULL}},
{DM_REPORT_FIELD_TYPE_NUMBER, _reserved_number_unmanaged_64, {"unmanaged", NULL}},
{DM_REPORT_FIELD_TYPE_SIZE, _reserved_size_auto_64, {"auto", NULL}},
NULL
}
Same reserved value of different field types do not collide.
All arrays are null-terminated.
The list of reserved values is automatically displayed within
selection help output:
Selection operands
------------------
...
Reserved values
---------------
-1, undefined, undef, unknown - Reserved value for undefined numeric value. [number]
unmanaged - Reserved value for unmanaged number of metadata copies in VG. [number]
auto - Reserved value for size that is automatically calculated. [size]
Selection operators
-------------------
...
2014-05-30 17:02:21 +04:00
static const struct dm_report_reserved_value _report_reserved_values [ ] = {
2014-07-10 15:37:26 +04:00
# include "values.h"
report: select: add support for reserved value recognition in report selection string - add struct dm_report_reserved_value
Make dm_report_init_with_selection to accept an argument with an
array of reserved values where each element contains a triple:
{dm report field type, reserved value, array of strings representing this value}
When the selection is parsed, we always check whether a string
representation of some reserved value is not hit and if it is,
we use the reserved value assigned for this string instead of
trying to parse it as a value of certain field type.
This makes it possible to define selections like:
... --select lv_major=undefined (or -1 or unknown or undef or whatever string representations are registered for this reserved value in the future)
... --select lv_read_ahead=auto
... --select vg_mda_copies=unmanaged
With this, each time the field value of certain type is hit
and when we compare it with the selection, we use the proper
value for comparison.
For now, register these reserved values that are used at the moment
(also more descriptive names are used for the values):
const uint64_t _reserved_number_undef_64 = UINT64_MAX;
const uint64_t _reserved_number_unmanaged_64 = UINT64_MAX - 1;
const uint64_t _reserved_size_auto_64 = UINT64_MAX;
{
{DM_REPORT_FIELD_TYPE_NUMBER, _reserved_number_undef_64, {"-1", "undefined", "undef", "unknown", NULL}},
{DM_REPORT_FIELD_TYPE_NUMBER, _reserved_number_unmanaged_64, {"unmanaged", NULL}},
{DM_REPORT_FIELD_TYPE_SIZE, _reserved_size_auto_64, {"auto", NULL}},
NULL
}
Same reserved value of different field types do not collide.
All arrays are null-terminated.
The list of reserved values is automatically displayed within
selection help output:
Selection operands
------------------
...
Reserved values
---------------
-1, undefined, undef, unknown - Reserved value for undefined numeric value. [number]
unmanaged - Reserved value for unmanaged number of metadata copies in VG. [number]
auto - Reserved value for size that is automatically calculated. [size]
Selection operators
-------------------
...
2014-05-30 17:02:21 +04:00
{ 0 , NULL , NULL }
} ;
2014-07-10 13:54:37 +04:00
# undef NUM
2014-07-10 11:22:45 +04:00
# undef TYPE_RESERVED_VALUE_REG
# undef FIELD_RESERVED_VALUE_REG
# undef FIELD_RESERVED_BINARY_VALUE_REG
2013-09-23 12:02:01 +04:00
static int _field_set_value ( struct dm_report_field * field , const void * data , const void * sort )
{
dm_report_field_set_value ( field , data , sort ) ;
return 1 ;
}
2014-07-10 18:18:45 +04:00
static int _field_set_string_list ( struct dm_report * rh , struct dm_report_field * field ,
2014-08-25 12:05:27 +04:00
const struct dm_list * list , void * private , int sorted )
2014-07-10 18:18:45 +04:00
{
struct cmd_context * cmd = ( struct cmd_context * ) private ;
2014-08-25 12:05:27 +04:00
return sorted ? dm_report_field_string_list ( rh , field , list , cmd - > report_list_item_separator )
: dm_report_field_string_list_unsorted ( rh , field , list , cmd - > report_list_item_separator ) ;
2014-07-10 18:18:45 +04:00
}
2002-12-12 23:55:49 +03:00
/*
* Data - munging functions to prepare each data type for display and sorting
*/
2014-07-08 14:40:45 +04:00
/*
* Display either " 0 " / " 1 " or " " / " word " based on bin_value ,
* cmd - > report_binary_values_as_numeric selects the mode to use .
*/
static int _binary_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
struct dm_report_field * field , int bin_value , const char * word ,
void * private )
{
const struct cmd_context * cmd = ( const struct cmd_context * ) private ;
if ( cmd - > report_binary_values_as_numeric )
/* "0"/"1" */
return _field_set_value ( field , bin_value ? _str_one : _str_zero , bin_value ? & _one64 : & _zero64 ) ;
else
/* blank/"word" */
return _field_set_value ( field , bin_value ? word : " " , bin_value ? & _one64 : & _zero64 ) ;
}
static int _binary_undef_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
struct dm_report_field * field , void * private )
{
const struct cmd_context * cmd = ( const struct cmd_context * ) private ;
if ( cmd - > report_binary_values_as_numeric )
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , FIRST_NAME ( number_undef_64 ) , & RESERVED ( number_undef_64 ) ) ;
2014-07-08 14:40:45 +04:00
else
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , _str_unknown , & RESERVED ( number_undef_64 ) ) ;
2014-07-08 14:40:45 +04:00
}
2010-07-09 19:34:40 +04:00
/* Report a string field; 'data' points to a (const char *) value. */
static int _string_disp(struct dm_report *rh, struct dm_pool *mem __attribute__((unused)),
			struct dm_report_field *field,
			const void *data, void *private __attribute__((unused)))
{
	return dm_report_field_string(rh, field, (const char * const *) data);
}
2013-09-18 04:09:15 +04:00
static int _chars_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
struct dm_report_field * field ,
const void * data , void * private __attribute__ ( ( unused ) ) )
{
return dm_report_field_string ( rh , field , ( const char * const * ) & data ) ;
}
2010-07-09 19:34:40 +04:00
/* Report the device name for a (struct device *) field. */
static int _dev_name_disp(struct dm_report *rh, struct dm_pool *mem __attribute__((unused)),
			  struct dm_report_field *field,
			  const void *data, void *private __attribute__((unused)))
{
	const struct device * const *dev = (const struct device * const *) data;
	const char *name = dev_name(*dev);

	return dm_report_field_string(rh, field, &name);
}
2011-04-12 16:24:29 +04:00
static int _devices_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private __attribute__ ( ( unused ) ) )
2004-05-05 14:58:44 +04:00
{
2011-04-12 16:24:29 +04:00
char * str ;
2004-05-05 14:58:44 +04:00
2013-09-23 12:17:50 +04:00
if ( ! ( str = lvseg_devices ( mem , ( const struct lv_segment * ) data ) ) )
return_0 ;
2004-05-05 14:58:44 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , str , NULL ) ;
2004-05-05 14:58:44 +04:00
}
2006-10-03 21:55:20 +04:00
2010-07-09 19:34:40 +04:00
static int _peranges_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) , struct dm_pool * mem ,
2007-12-15 00:53:02 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2007-12-15 00:53:02 +03:00
{
2011-04-12 16:24:29 +04:00
char * str ;
2013-09-23 12:17:50 +04:00
if ( ! ( str = lvseg_seg_pe_ranges ( mem , ( const struct lv_segment * ) data ) ) )
return_0 ;
2011-04-12 16:24:29 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , str , NULL ) ;
2007-12-15 00:53:02 +03:00
}
2014-05-29 11:41:36 +04:00
/* Report a tag list (data is a struct dm_list of tags), sorted. */
static int _tags_disp(struct dm_report *rh, struct dm_pool *mem,
		      struct dm_report_field *field,
		      const void *data, void *private)
{
	return _field_set_string_list(rh, field, (const struct dm_list *) data, private, 1);
}
2007-01-16 21:06:12 +03:00
static int _modules_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
2006-10-03 21:55:20 +04:00
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-05-29 11:41:36 +04:00
struct dm_list * modules ;
if ( ! ( modules = str_list_create ( mem ) ) ) {
log_error ( " modules str_list allocation failed " ) ;
return 0 ;
}
2006-10-03 21:55:20 +04:00
2014-05-29 11:41:36 +04:00
if ( ! ( list_lv_modules ( mem , lv , modules ) ) )
2013-09-23 12:17:50 +04:00
return_0 ;
2006-10-03 21:55:20 +04:00
2014-08-25 12:05:27 +04:00
return _field_set_string_list ( rh , field , modules , private , 1 ) ;
2006-10-03 21:55:20 +04:00
}
2013-07-02 16:34:52 +04:00
static int _lvprofile_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
if ( lv - > profile )
return dm_report_field_string ( rh , field , & lv - > profile - > name ) ;
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2013-07-02 16:34:52 +04:00
}
2007-01-16 21:06:12 +03:00
static int _vgfmt_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
2002-12-12 23:55:49 +03:00
{
2002-12-20 02:25:55 +03:00
const struct volume_group * vg = ( const struct volume_group * ) data ;
2002-12-12 23:55:49 +03:00
2013-09-23 12:17:50 +04:00
if ( vg - > fid )
return _string_disp ( rh , mem , field , & vg - > fid - > fmt - > name , private ) ;
2002-12-12 23:55:49 +03:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2002-12-12 23:55:49 +03:00
}
2007-01-16 21:06:12 +03:00
static int _pvfmt_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
2002-12-12 23:55:49 +03:00
{
2013-07-29 18:00:40 +04:00
const struct label * l =
( const struct label * ) data ;
2002-12-12 23:55:49 +03:00
2013-08-28 16:07:26 +04:00
if ( ! l - > labeller | | ! l - > labeller - > fmt ) {
2013-07-29 18:00:40 +04:00
dm_report_field_set_value ( field , " " , NULL ) ;
return 1 ;
}
2002-12-12 23:55:49 +03:00
2013-07-29 18:00:40 +04:00
return _string_disp ( rh , mem , field , & l - > labeller - > fmt - > name , private ) ;
2002-12-12 23:55:49 +03:00
}
2010-07-09 19:34:40 +04:00
static int _lvkmaj_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2004-07-04 02:07:52 +04:00
{
2014-07-02 16:31:39 +04:00
const struct lv_with_info * lvi = ( const struct lv_with_info * ) data ;
2004-07-04 02:07:52 +04:00
2014-07-02 16:31:39 +04:00
if ( lvi - > info & & lvi - > info - > exists & & lvi - > info - > major > = 0 )
return dm_report_field_int ( rh , field , & lvi - > info - > major ) ;
2004-07-04 02:07:52 +04:00
2014-07-10 13:54:37 +04:00
return dm_report_field_int32 ( rh , field , & RESERVED ( number_undef_32 ) ) ;
2004-07-04 02:07:52 +04:00
}
2010-07-09 19:34:40 +04:00
static int _lvkmin_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2004-07-04 02:07:52 +04:00
{
2014-07-02 16:31:39 +04:00
const struct lv_with_info * lvi = ( const struct lv_with_info * ) data ;
2004-07-04 02:07:52 +04:00
2014-07-02 16:31:39 +04:00
if ( lvi - > info & & lvi - > info - > exists & & lvi - > info - > minor > = 0 )
return dm_report_field_int ( rh , field , & lvi - > info - > minor ) ;
2004-07-04 02:07:52 +04:00
2014-07-10 13:54:37 +04:00
return dm_report_field_int32 ( rh , field , & RESERVED ( number_undef_32 ) ) ;
2004-07-04 02:07:52 +04:00
}
2010-07-09 19:34:40 +04:00
static int _lvstatus_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) , struct dm_pool * mem ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2002-12-12 23:55:49 +03:00
{
2002-12-20 02:25:55 +03:00
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
char * repstr ;
2002-12-12 23:55:49 +03:00
2010-09-30 17:52:55 +04:00
if ( ! ( repstr = lv_attr_dup ( mem , lv ) ) )
2013-09-23 12:17:50 +04:00
return_0 ;
2002-12-12 23:55:49 +03:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , NULL ) ;
2002-12-12 23:55:49 +03:00
}
2010-07-09 19:34:40 +04:00
static int _pvstatus_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) , struct dm_pool * mem ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2002-12-12 23:55:49 +03:00
{
2010-09-30 17:52:55 +04:00
const struct physical_volume * pv =
( const struct physical_volume * ) data ;
2002-12-20 02:25:55 +03:00
char * repstr ;
2002-12-12 23:55:49 +03:00
2010-09-30 17:52:55 +04:00
if ( ! ( repstr = pv_attr_dup ( mem , pv ) ) )
2013-09-23 12:17:50 +04:00
return_0 ;
2002-12-12 23:55:49 +03:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , NULL ) ;
2002-12-12 23:55:49 +03:00
}
2010-07-09 19:34:40 +04:00
static int _vgstatus_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) , struct dm_pool * mem ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2002-12-12 23:55:49 +03:00
{
2004-05-19 02:12:53 +04:00
const struct volume_group * vg = ( const struct volume_group * ) data ;
2002-12-20 02:25:55 +03:00
char * repstr ;
2002-12-12 23:55:49 +03:00
2010-09-30 17:52:55 +04:00
if ( ! ( repstr = vg_attr_dup ( mem , vg ) ) )
2013-09-23 12:17:50 +04:00
return_0 ;
2005-08-16 03:34:11 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , NULL ) ;
2002-12-12 23:55:49 +03:00
}
2010-07-09 19:34:40 +04:00
static int _segtype_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) ,
struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2002-12-12 23:55:49 +03:00
{
2002-12-20 02:25:55 +03:00
const struct lv_segment * seg = ( const struct lv_segment * ) data ;
2010-11-17 23:08:14 +03:00
char * name ;
2011-03-05 15:14:00 +03:00
if ( ! ( name = lvseg_segtype_dup ( mem , seg ) ) ) {
log_error ( " Failed to get segtype. " ) ;
return 0 ;
}
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , name , NULL ) ;
2002-12-12 23:55:49 +03:00
}
2010-07-09 19:34:40 +04:00
static int _loglv_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2005-06-01 20:51:55 +04:00
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2010-10-12 20:12:50 +04:00
const char * name ;
2005-06-01 20:51:55 +04:00
2010-10-12 20:12:50 +04:00
if ( ( name = lv_mirror_log_dup ( mem , lv ) ) )
return dm_report_field_string ( rh , field , & name ) ;
2005-06-01 20:51:55 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2005-06-01 20:51:55 +04:00
}
2007-01-16 21:06:12 +03:00
/*
 * Report the LV name.  Hidden (non-visible) LVs are displayed as "[name]"
 * while still sorting by the plain name.
 */
static int _lvname_disp(struct dm_report *rh, struct dm_pool *mem,
			struct dm_report_field *field,
			const void *data, void *private __attribute__((unused)))
{
	const struct logical_volume *lv = (const struct logical_volume *) data;
	char *repstr, *lvname;
	size_t len;

	if (lv_is_visible(lv))
		return dm_report_field_string(rh, field, &lv->name);

	/* Room for the two brackets and the terminating NUL. */
	len = strlen(lv->name) + 3;
	if (!(repstr = dm_pool_zalloc(mem, len))) {
		log_error("dm_pool_alloc failed");
		return 0;
	}

	if (dm_snprintf(repstr, len, "[%s]", lv->name) < 0) {
		log_error("lvname snprintf failed");
		return 0;
	}

	/* Plain name duplicated for use as the sort key. */
	if (!(lvname = dm_pool_strdup(mem, lv->name))) {
		log_error("dm_pool_strdup failed");
		return 0;
	}

	return _field_set_value(field, repstr, lvname);
}
2014-07-02 20:24:05 +04:00
/* Report the LV name qualified with its VG name ("vg/lv"). */
static int _lvfullname_disp(struct dm_report *rh, struct dm_pool *mem,
			    struct dm_report_field *field,
			    const void *data, void *private __attribute__((unused)))
{
	const struct logical_volume *lv = (const struct logical_volume *) data;
	char *repstr = lv_fullname_dup(mem, lv);

	if (!repstr)
		return_0;

	return _field_set_value(field, repstr, NULL);
}
2014-07-04 02:49:34 +04:00
/* Report the name of the parent LV, if this LV has one. */
static int _lvparent_disp(struct dm_report *rh, struct dm_pool *mem,
			  struct dm_report_field *field,
			  const void *data, void *private __attribute__((unused)))
{
	const struct logical_volume *lv = (const struct logical_volume *) data;
	char *repstr = lv_parent_dup(mem, lv);

	if (!repstr)
		return_0;

	return _field_set_value(field, repstr, NULL);
}
2012-01-19 19:34:32 +04:00
static int _datalv_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
struct dm_report_field * field ,
const void * data , void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-02-05 19:44:37 +04:00
const struct lv_segment * seg = ( lv_is_thin_pool ( lv ) | | lv_is_cache_pool ( lv ) ) ? first_seg ( lv ) : NULL ;
2012-01-19 19:34:32 +04:00
2013-06-15 00:02:12 +04:00
if ( seg )
return _lvname_disp ( rh , mem , field , seg_lv ( seg , 0 ) , private ) ;
2012-01-19 19:34:32 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2012-01-19 19:34:32 +04:00
}
static int _metadatalv_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
struct dm_report_field * field ,
const void * data , void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-02-05 19:44:37 +04:00
const struct lv_segment * seg = ( lv_is_thin_pool ( lv ) | | lv_is_cache_pool ( lv ) ) ? first_seg ( lv ) : NULL ;
2012-01-19 19:34:32 +04:00
2013-06-15 00:02:12 +04:00
if ( seg )
return _lvname_disp ( rh , mem , field , seg - > metadata_lv , private ) ;
2012-01-19 19:34:32 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2012-01-19 19:34:32 +04:00
}
static int _poollv_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
struct dm_report_field * field ,
const void * data , void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-02-05 19:44:37 +04:00
struct lv_segment * seg = ( lv_is_thin_volume ( lv ) | | lv_is_cache ( lv ) ) ?
first_seg ( lv ) : NULL ;
2012-01-19 19:34:32 +04:00
2013-09-23 11:44:53 +04:00
if ( seg )
return _lvname_disp ( rh , mem , field , seg - > pool_lv , private ) ;
2012-01-19 19:34:32 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2012-01-19 19:34:32 +04:00
}
2010-06-23 16:32:08 +04:00
static int _lvpath_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2010-06-23 16:32:08 +04:00
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
char * repstr ;
2010-10-12 20:11:34 +04:00
if ( ! ( repstr = lv_path_dup ( mem , lv ) ) )
2013-09-23 12:17:50 +04:00
return_0 ;
2014-07-02 20:24:05 +04:00
return _field_set_value ( field , repstr , NULL ) ;
}
static int _lvdmpath_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
char * repstr ;
if ( ! ( repstr = lv_dmpath_dup ( mem , lv ) ) )
return_0 ;
2010-06-23 16:32:08 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , NULL ) ;
2010-06-23 16:32:08 +04:00
}
2009-04-25 05:17:59 +04:00
static int _origin_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-02-05 19:44:37 +04:00
const struct lv_segment * seg = first_seg ( lv ) ;
2009-04-25 05:17:59 +04:00
if ( lv_is_cow ( lv ) )
return _lvname_disp ( rh , mem , field , origin_from_cow ( lv ) , private ) ;
2014-02-05 19:44:37 +04:00
if ( lv_is_cache ( lv ) )
return _lvname_disp ( rh , mem , field , seg_lv ( seg , 0 ) , private ) ;
2011-11-07 15:03:47 +04:00
if ( lv_is_thin_volume ( lv ) & & first_seg ( lv ) - > origin )
return _lvname_disp ( rh , mem , field , first_seg ( lv ) - > origin , private ) ;
2013-01-15 18:16:16 +04:00
if ( lv_is_thin_volume ( lv ) & & first_seg ( lv ) - > external_lv )
return _lvname_disp ( rh , mem , field , first_seg ( lv ) - > external_lv , private ) ;
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2009-04-25 05:17:59 +04:00
}
2010-07-09 19:34:40 +04:00
static int _movepv_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2003-05-06 16:06:02 +04:00
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
const char * name ;
2013-09-23 12:17:50 +04:00
if ( ( name = lv_move_pv_dup ( mem , lv ) ) )
2007-01-22 18:07:21 +03:00
return dm_report_field_string ( rh , field , & name ) ;
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2003-05-06 16:06:02 +04:00
}
2010-07-09 19:34:40 +04:00
static int _convertlv_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2008-01-10 21:35:51 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2008-01-10 21:35:51 +03:00
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2013-09-23 12:17:50 +04:00
const char * name ;
2008-01-10 21:35:51 +03:00
2013-09-23 12:17:50 +04:00
if ( ( name = lv_convert_lv_dup ( mem , lv ) ) )
2008-01-10 21:35:51 +03:00
return dm_report_field_string ( rh , field , & name ) ;
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2008-01-10 21:35:51 +03:00
}
2010-07-09 19:34:40 +04:00
static int _size32_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) , struct dm_pool * mem ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
const void * data , void * private )
2002-12-12 23:55:49 +03:00
{
2002-12-20 02:25:55 +03:00
const uint32_t size = * ( const uint32_t * ) data ;
2007-01-16 21:06:12 +03:00
const char * disp , * repstr ;
2002-12-20 02:25:55 +03:00
uint64_t * sortval ;
2002-12-12 23:55:49 +03:00
2008-01-30 16:19:47 +03:00
if ( ! * ( disp = display_size_units ( private , ( uint64_t ) size ) ) )
return_0 ;
2002-12-12 23:55:49 +03:00
2007-01-16 21:06:12 +03:00
if ( ! ( repstr = dm_pool_strdup ( mem , disp ) ) ) {
2005-10-17 03:03:59 +04:00
log_error ( " dm_pool_strdup failed " ) ;
2002-12-12 23:55:49 +03:00
return 0 ;
}
2007-01-16 21:06:12 +03:00
if ( ! ( sortval = dm_pool_alloc ( mem , sizeof ( uint64_t ) ) ) ) {
2005-10-17 03:03:59 +04:00
log_error ( " dm_pool_alloc failed " ) ;
2002-12-12 23:55:49 +03:00
return 0 ;
}
2011-08-04 18:30:51 +04:00
* sortval = ( uint64_t ) size ;
2007-01-16 21:06:12 +03:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , sortval ) ;
2002-12-12 23:55:49 +03:00
}
2010-07-09 19:34:40 +04:00
static int _size64_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) ,
2007-08-22 18:38:18 +04:00
struct dm_pool * mem ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
const void * data , void * private )
2002-12-12 23:55:49 +03:00
{
2002-12-20 02:25:55 +03:00
const uint64_t size = * ( const uint64_t * ) data ;
2007-01-16 21:06:12 +03:00
const char * disp , * repstr ;
2002-12-20 02:25:55 +03:00
uint64_t * sortval ;
2002-12-12 23:55:49 +03:00
2008-01-30 16:19:47 +03:00
if ( ! * ( disp = display_size_units ( private , size ) ) )
return_0 ;
2002-12-12 23:55:49 +03:00
2007-01-16 21:06:12 +03:00
if ( ! ( repstr = dm_pool_strdup ( mem , disp ) ) ) {
2005-10-17 03:03:59 +04:00
log_error ( " dm_pool_strdup failed " ) ;
2002-12-12 23:55:49 +03:00
return 0 ;
}
2007-01-16 21:06:12 +03:00
if ( ! ( sortval = dm_pool_alloc ( mem , sizeof ( uint64_t ) ) ) ) {
2005-10-17 03:03:59 +04:00
log_error ( " dm_pool_alloc failed " ) ;
2002-12-12 23:55:49 +03:00
return 0 ;
}
2002-12-20 02:25:55 +03:00
* sortval = size ;
2002-12-12 23:55:49 +03:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , sortval ) ;
2002-12-12 23:55:49 +03:00
}
2012-01-19 19:34:32 +04:00
/* Thin wrapper: emit a uint32 field via libdevmapper's report helper. */
static int _uint32_disp(struct dm_report *rh, struct dm_pool *mem __attribute__((unused)),
			struct dm_report_field *field,
			const void *data, void *private __attribute__((unused)))
{
	return dm_report_field_uint32(rh, field, data);
}
2013-09-18 04:09:15 +04:00
/* Widen an int8 value to int32 so it can use the standard int32 field helper. */
static int _int8_disp(struct dm_report *rh, struct dm_pool *mem __attribute__((unused)),
		      struct dm_report_field *field,
		      const void *data, void *private __attribute__((unused)))
{
	const int32_t widened = *(const int8_t *) data;

	return dm_report_field_int32(rh, field, &widened);
}
2012-01-19 19:34:32 +04:00
/* Thin wrapper: emit an int32 field via libdevmapper's report helper. */
static int _int32_disp(struct dm_report *rh, struct dm_pool *mem __attribute__((unused)),
		       struct dm_report_field *field,
		       const void *data, void *private __attribute__((unused)))
{
	return dm_report_field_int32(rh, field, data);
}
2007-11-09 19:51:54 +03:00
static int _lvreadahead_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2007-11-09 19:51:54 +03:00
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2013-09-23 12:17:50 +04:00
if ( lv - > read_ahead = = DM_READ_AHEAD_AUTO )
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " auto " , & RESERVED ( number_undef_64 ) ) ;
2007-11-09 19:51:54 +03:00
2007-11-14 03:08:25 +03:00
return _size32_disp ( rh , mem , field , & lv - > read_ahead , private ) ;
2007-11-09 19:51:54 +03:00
}
static int _lvkreadahead_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data ,
2007-11-14 03:08:25 +03:00
void * private )
2007-11-09 19:51:54 +03:00
{
2014-07-02 16:31:39 +04:00
const struct lv_with_info * lvi = ( const struct lv_with_info * ) data ;
2007-11-12 23:51:54 +03:00
2014-07-02 16:31:39 +04:00
if ( ! lvi - > info | | ! lvi - > info - > exists )
2014-07-10 13:54:37 +04:00
return dm_report_field_int32 ( rh , field , & RESERVED ( number_undef_32 ) ) ;
2007-11-12 23:51:54 +03:00
2014-07-02 16:31:39 +04:00
return _size32_disp ( rh , mem , field , & lvi - > info - > read_ahead , private ) ;
2007-11-09 19:51:54 +03:00
}
2007-01-16 21:06:12 +03:00
/* Report the VG's total size. */
static int _vgsize_disp(struct dm_report *rh, struct dm_pool *mem,
			struct dm_report_field *field,
			const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	uint64_t total_size = vg_size(vg);

	return _size64_disp(rh, mem, field, &total_size, private);
}
2013-04-25 14:07:57 +04:00
static int _segmonitor_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
char * str ;
if ( ! ( str = lvseg_monitor_dup ( mem , ( const struct lv_segment * ) data ) ) )
return_0 ;
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , str , NULL ) ;
2013-04-25 14:07:57 +04:00
}
2007-01-16 21:06:12 +03:00
/* Report the segment's start offset within the LV, as a size. */
static int _segstart_disp(struct dm_report *rh, struct dm_pool *mem,
			  struct dm_report_field *field,
			  const void *data, void *private)
{
	const struct lv_segment *seg = (const struct lv_segment *) data;
	uint64_t seg_start = lvseg_start(seg);

	return _size64_disp(rh, mem, field, &seg_start, private);
}
2007-12-20 19:49:37 +03:00
static int _segstartpe_disp ( struct dm_report * rh ,
2010-07-09 19:34:40 +04:00
struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2007-12-20 19:49:37 +03:00
struct dm_report_field * field ,
const void * data ,
2010-07-09 19:34:40 +04:00
void * private __attribute__ ( ( unused ) ) )
2007-12-15 00:53:02 +03:00
{
const struct lv_segment * seg = ( const struct lv_segment * ) data ;
return dm_report_field_uint32 ( rh , field , & seg - > le ) ;
}
2007-01-16 21:06:12 +03:00
/* Report the segment's size. */
static int _segsize_disp(struct dm_report *rh, struct dm_pool *mem,
			 struct dm_report_field *field,
			 const void *data, void *private)
{
	const struct lv_segment *seg = (const struct lv_segment *) data;
	uint64_t seg_size = lvseg_size(seg);

	return _size64_disp(rh, mem, field, &seg_size, private);
}
2013-09-24 00:50:14 +04:00
static int _segsizepe_disp ( struct dm_report * rh ,
struct dm_pool * mem __attribute__ ( ( unused ) ) ,
struct dm_report_field * field ,
const void * data ,
void * private __attribute__ ( ( unused ) ) )
{
const struct lv_segment * seg = ( const struct lv_segment * ) data ;
return dm_report_field_uint32 ( rh , field , & seg - > len ) ;
}
2007-01-16 21:06:12 +03:00
/* Report the segment's chunk size (snapshot/pool chunking), as a size. */
static int _chunksize_disp(struct dm_report *rh, struct dm_pool *mem,
			   struct dm_report_field *field,
			   const void *data, void *private)
{
	const struct lv_segment *seg = (const struct lv_segment *) data;
	uint64_t chunk_size = lvseg_chunksize(seg);

	return _size64_disp(rh, mem, field, &chunk_size, private);
}
2012-01-19 19:34:32 +04:00
static int _transactionid_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_segment * seg = ( const struct lv_segment * ) data ;
2013-09-23 12:18:10 +04:00
if ( seg_is_thin_pool ( seg ) )
return dm_report_field_uint64 ( rh , field , & seg - > transaction_id ) ;
2012-01-19 19:34:32 +04:00
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
2012-01-19 19:34:32 +04:00
}
2013-11-11 13:05:45 +04:00
static int _thinid_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_segment * seg = ( const struct lv_segment * ) data ;
if ( seg_is_thin_volume ( seg ) )
return dm_report_field_uint32 ( rh , field , & seg - > device_id ) ;
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
2013-11-11 13:05:45 +04:00
}
2012-08-08 00:24:41 +04:00
static int _discards_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
2012-07-09 18:48:28 +04:00
{
const struct lv_segment * seg = ( const struct lv_segment * ) data ;
2012-08-08 00:24:41 +04:00
const char * discards_str ;
2012-07-09 18:48:28 +04:00
if ( seg_is_thin_volume ( seg ) )
seg = first_seg ( seg - > pool_lv ) ;
if ( seg_is_thin_pool ( seg ) ) {
2012-08-08 00:24:41 +04:00
discards_str = get_pool_discards_name ( seg - > discards ) ;
return dm_report_field_string ( rh , field , & discards_str ) ;
2012-08-07 21:48:34 +04:00
}
2012-07-09 18:48:28 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2012-07-09 18:48:28 +04:00
}
2012-01-19 19:34:32 +04:00
2009-04-25 05:17:59 +04:00
static int _originsize_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2013-09-23 11:44:53 +04:00
uint64_t size = lv_origin_size ( lv ) ;
2009-04-25 05:17:59 +04:00
2013-09-23 11:44:53 +04:00
if ( size )
return _size64_disp ( rh , mem , field , & size , private ) ;
2005-09-23 21:06:01 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , & _zero64 ) ;
2005-09-23 21:06:01 +04:00
}
2007-01-16 21:06:12 +03:00
/* Report the amount of allocated space on the PV. */
static int _pvused_disp(struct dm_report *rh, struct dm_pool *mem,
			struct dm_report_field *field,
			const void *data, void *private)
{
	const struct physical_volume *pv =
		(const struct physical_volume *) data;
	uint64_t used_size = pv_used(pv);

	return _size64_disp(rh, mem, field, &used_size, private);
}
2007-01-16 21:06:12 +03:00
/* Report the amount of unallocated space on the PV. */
static int _pvfree_disp(struct dm_report *rh, struct dm_pool *mem,
			struct dm_report_field *field,
			const void *data, void *private)
{
	const struct physical_volume *pv =
		(const struct physical_volume *) data;
	uint64_t free_size = pv_free(pv);

	return _size64_disp(rh, mem, field, &free_size, private);
}
2007-01-16 21:06:12 +03:00
/* Report the PV's usable size. */
static int _pvsize_disp(struct dm_report *rh, struct dm_pool *mem,
			struct dm_report_field *field,
			const void *data, void *private)
{
	const struct physical_volume *pv =
		(const struct physical_volume *) data;
	uint64_t pv_size = pv_size_field(pv);

	return _size64_disp(rh, mem, field, &pv_size, private);
}
2007-01-16 21:06:12 +03:00
static int _devsize_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
2004-08-11 17:15:05 +04:00
{
2014-01-14 07:17:27 +04:00
const struct device * dev = * ( const struct device * const * ) data ;
2013-11-22 16:21:52 +04:00
uint64_t size ;
2014-01-14 07:17:27 +04:00
if ( ! dev | | ! dev - > dev | | ! dev_get_size ( dev , & size ) )
size = _zero64 ;
2013-11-22 16:21:52 +04:00
2007-01-16 21:06:12 +03:00
return _size64_disp ( rh , mem , field , & size , private ) ;
2004-08-11 17:15:05 +04:00
}
2007-01-16 21:06:12 +03:00
/* Report the amount of unallocated space in the VG. */
static int _vgfree_disp(struct dm_report *rh, struct dm_pool *mem,
			struct dm_report_field *field,
			const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	uint64_t free_size = vg_free(vg);

	return _size64_disp(rh, mem, field, &free_size, private);
}
2010-07-09 19:34:40 +04:00
static int _uuid_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) , struct dm_pool * mem ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2002-12-12 23:55:49 +03:00
{
2013-09-23 12:17:50 +04:00
char * repstr ;
2002-12-20 02:25:55 +03:00
2011-02-18 17:47:28 +03:00
if ( ! ( repstr = id_format_and_copy ( mem , data ) ) )
2008-01-30 16:19:47 +03:00
return_0 ;
2002-12-12 23:55:49 +03:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , NULL ) ;
2002-12-12 23:55:49 +03:00
}
2013-07-29 21:15:31 +04:00
static int _pvuuid_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private __attribute__ ( ( unused ) ) )
{
const struct label * label = ( const struct label * ) data ;
2013-11-22 16:23:31 +04:00
const char * repstr = " " ;
2013-07-29 21:15:31 +04:00
2013-11-22 16:23:31 +04:00
if ( label - > dev & &
! ( repstr = id_format_and_copy ( mem , ( const struct id * ) label - > dev - > pvid ) ) )
return_0 ;
2013-07-29 21:15:31 +04:00
2013-11-22 16:23:31 +04:00
return _field_set_value ( field , repstr , NULL ) ;
2013-07-29 21:15:31 +04:00
}
2007-07-09 19:40:43 +04:00
/* Report the total number of metadata areas on the PV. */
static int _pvmdas_disp(struct dm_report *rh, struct dm_pool *mem,
			struct dm_report_field *field,
			const void *data, void *private)
{
	const struct physical_volume *pv =
		(const struct physical_volume *) data;
	uint32_t mda_count = pv_mda_count(pv);

	return _uint32_disp(rh, mem, field, &mda_count, private);
}
Define new functions and vgs/pvs fields related to mda ignore.
Define a new pvs field, pv_mda_used_count, and a new vgs field,
vg_mda_used_count to match the existing pv_mda_count and vg_mda_count.
These new fields count the number of mdas that have the 'ignored' bit
clear (they are in use on the PV / VG). Also define various supporting
functions to implement the counting as well as setting the ignored
flag and determining if an mda is ignored. These high level functions
call into the lower level location independent mda ignore functions
defined by earlier patches.
Note that counting ignored mdas in a vg requires traversing both lists
and checking for the ignored bit on the mda. The count of 'ignored'
mdas then is defined by having the bit set, not by which list the mda
is on. The list does determine whether LVM actually does read/write to
the mda, though we must count the bits in order to return accurate numbers
for the various counts. Also, pv_mda_set_ignored must search both vg
lists for ignored mda. If the state changes and needs to be committed
to disk, the ignored mda will be on the non-ignored list.
Note also in pv_mda_set_ignored(), we must properly manage the mda lists.
If we change the ignored state of an mda, we must change any mdas on
vg->fid->metadata_areas that correspond to this pv. Also, we may
need to allocate a copy of the mda, as is done when fid->metadata_areas
is populated from _vg_read(), if we are un-ignoring an ignored mda.
Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
2010-06-29 00:33:44 +04:00
/* Report the number of metadata areas on the PV that are in use
 * (i.e. not flagged as ignored). */
static int _pvmdasused_disp(struct dm_report *rh, struct dm_pool *mem,
			    struct dm_report_field *field,
			    const void *data, void *private)
{
	const struct physical_volume *pv =
		(const struct physical_volume *) data;
	uint32_t used_count = pv_mda_used_count(pv);

	return _uint32_disp(rh, mem, field, &used_count, private);
}
2007-07-09 19:40:43 +04:00
/* Report the total number of metadata areas in the VG. */
static int _vgmdas_disp(struct dm_report *rh, struct dm_pool *mem,
			struct dm_report_field *field,
			const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	uint32_t mda_count = vg_mda_count(vg);

	return _uint32_disp(rh, mem, field, &mda_count, private);
}
Define new functions and vgs/pvs fields related to mda ignore.
Define a new pvs field, pv_mda_used_count, and a new vgs field,
vg_mda_used_count to match the existing pv_mda_count and vg_mda_count.
These new fields count the number of mdas that have the 'ignored' bit
clear (they are in use on the PV / VG). Also define various supporting
functions to implement the counting as well as setting the ignored
flag and determining if an mda is ignored. These high level functions
call into the lower level location independent mda ignore functions
defined by earlier patches.
Note that counting ignored mdas in a vg requires traversing both lists
and checking for the ignored bit on the mda. The count of 'ignored'
mdas then is defined by having the bit set, not by which list the mda
is on. The list does determine whether LVM actually does read/write to
the mda, though we must count the bits in order to return accurate numbers
for the various counts. Also, pv_mda_set_ignored must search both vg
lists for ignored mda. If the state changes and needs to be committed
to disk, the ignored mda will be on the non-ignored list.
Note also in pv_mda_set_ignored(), we must properly manage the mda lists.
If we change the ignored state of an mda, we must change any mdas on
vg->fid->metadata_areas that correspond to this pv. Also, we may
need to allocate a copy of the mda, as is done when fid->metadata_areas
is populated from _vg_read(), if we are un-ignoring an ignored mda.
Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
2010-06-29 00:33:44 +04:00
/* Report the number of metadata areas in the VG that are in use
 * (i.e. not flagged as ignored). */
static int _vgmdasused_disp(struct dm_report *rh, struct dm_pool *mem,
			    struct dm_report_field *field,
			    const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	uint32_t used_count = vg_mda_used_count(vg);

	return _uint32_disp(rh, mem, field, &used_count, private);
}
2010-06-29 00:37:23 +04:00
static int _vgmdacopies_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct volume_group * vg = ( const struct volume_group * ) data ;
2013-09-23 11:44:53 +04:00
uint32_t count = vg_mda_copies ( vg ) ;
2010-06-29 00:37:23 +04:00
2013-09-23 12:17:50 +04:00
if ( count = = VGMETADATACOPIES_UNMANAGED )
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " unmanaged " , & RESERVED ( number_undef_64 ) ) ;
Allow 'all' and 'unmanaged' values for --vgmetadatacopies.
Allowing an 'all' and 'unmanaged' value is more intuitive, and
provides a simple way for users to get back to original LVM behavior
of metadata written to all PVs in the volume group.
If the user requests "--vgmetadatacopies unmanaged", this instructs
LVM not to manage the ignore bits to achieve a specific number of
metadata copies in the volume group. The user is free to use
"pvchange --metadataignore" to control the mdas on a per-PV basis.
If the user requests "--vgmetadatacopies all", this instructs LVM
to do 2 things: 1) clear all ignore bits, and 2) set the "unmanaged"
policy going forward.
Internally, we use the special MAX_UINT32 value to indicate 'all'.
This 'just' works since it's the largest value possible for the
field and so all 'ignore' bits on all mdas in the VG will get
cleared inside _vg_metadata_balance(). However, after we've
called the _vg_metadata_balance function, we check for the special
'all' value, and if set, we write the "unmanaged" value into the
metadata. As such, the 'all' value is never written to disk.
Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
2010-06-29 00:40:01 +04:00
2010-06-29 00:37:23 +04:00
return _uint32_disp ( rh , mem , field , & count , private ) ;
}
2013-07-02 16:34:52 +04:00
static int _vgprofile_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct volume_group * vg = ( const struct volume_group * ) data ;
if ( vg - > profile )
return dm_report_field_string ( rh , field , & vg - > profile - > name ) ;
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2013-07-02 16:34:52 +04:00
}
2007-11-05 20:17:55 +03:00
static int _pvmdafree_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
2013-07-29 21:14:10 +04:00
const struct label * label = ( const struct label * ) data ;
uint64_t freespace = lvmcache_info_mda_free ( label - > info ) ;
2007-11-05 20:17:55 +03:00
return _size64_disp ( rh , mem , field , & freespace , private ) ;
}
2009-01-10 01:44:33 +03:00
static int _pvmdasize_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
2013-11-18 00:04:07 +04:00
const struct label * label = ( const struct label * ) data ;
uint64_t min_mda_size = lvmcache_smallest_mda_size ( label - > info ) ;
2009-01-10 01:44:33 +03:00
return _size64_disp ( rh , mem , field , & min_mda_size , private ) ;
}
/* Report the smallest metadata area size in the VG. */
static int _vgmdasize_disp(struct dm_report *rh, struct dm_pool *mem,
			   struct dm_report_field *field,
			   const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	uint64_t smallest_mda = vg_mda_size(vg);

	return _size64_disp(rh, mem, field, &smallest_mda, private);
}
2007-11-05 20:17:55 +03:00
/* Report the free metadata space in the VG. */
static int _vgmdafree_disp(struct dm_report *rh, struct dm_pool *mem,
			   struct dm_report_field *field,
			   const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	uint64_t mda_free = vg_mda_free(vg);

	return _size64_disp(rh, mem, field, &mda_free, private);
}
2008-04-10 21:19:02 +04:00
/* Report the number of visible LVs in the VG. */
static int _lvcount_disp(struct dm_report *rh, struct dm_pool *mem,
			 struct dm_report_field *field,
			 const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	uint32_t visible_lvs = vg_visible_lvs(vg);

	return _uint32_disp(rh, mem, field, &visible_lvs, private);
}
2007-01-16 21:06:12 +03:00
static int _lvsegcount_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
2002-12-12 23:55:49 +03:00
{
2002-12-20 02:25:55 +03:00
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2013-09-23 11:44:53 +04:00
uint32_t count = dm_list_size ( & lv - > segments ) ;
2002-12-12 23:55:49 +03:00
2007-01-16 21:06:12 +03:00
return _uint32_disp ( rh , mem , field , & count , private ) ;
2002-12-12 23:55:49 +03:00
}
2009-05-12 23:12:09 +04:00
/* Report the number of snapshots in the VG. */
static int _snapcount_disp(struct dm_report *rh, struct dm_pool *mem,
			   struct dm_report_field *field,
			   const void *data, void *private)
{
	const struct volume_group *vg = (const struct volume_group *) data;
	uint32_t snap_count = snapshot_count(vg);

	return _uint32_disp(rh, mem, field, &snap_count, private);
}
2014-06-09 14:08:27 +04:00
static int _snpercent_disp ( struct dm_report * rh , struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2002-12-12 23:55:49 +03:00
{
2002-12-20 02:25:55 +03:00
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-06-09 14:08:27 +04:00
dm_percent_t snap_percent ;
2002-12-12 23:55:49 +03:00
2013-09-23 13:03:02 +04:00
if ( ( lv_is_cow ( lv ) | | lv_is_merging_origin ( lv ) ) & &
lv_snapshot_percent ( lv , & snap_percent ) ) {
2014-06-09 14:08:27 +04:00
if ( ( snap_percent ! = DM_PERCENT_INVALID ) & &
( snap_percent ! = LVM_PERCENT_MERGE_FAILED ) )
return dm_report_field_percent ( rh , field , & snap_percent ) ;
2002-12-12 23:55:49 +03:00
2014-06-09 14:08:27 +04:00
if ( ! lv_is_merging_origin ( lv ) ) {
snap_percent = DM_PERCENT_100 ;
return dm_report_field_percent ( rh , field , & snap_percent ) ;
}
2003-01-21 21:50:50 +03:00
2013-09-23 13:03:02 +04:00
/*
* on activate merge that hasn ' t started yet would
* otherwise display incorrect snap % in origin
*/
2002-12-12 23:55:49 +03:00
}
2014-06-09 14:08:27 +04:00
snap_percent = DM_PERCENT_INVALID ;
return dm_report_field_percent ( rh , field , & snap_percent ) ;
2002-12-12 23:55:49 +03:00
}
2014-06-09 14:08:27 +04:00
static int _copypercent_disp ( struct dm_report * rh ,
struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2007-01-16 21:06:12 +03:00
struct dm_report_field * field ,
2010-07-09 19:34:40 +04:00
const void * data , void * private __attribute__ ( ( unused ) ) )
2003-05-06 16:06:02 +04:00
{
2011-02-18 17:47:28 +03:00
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-06-09 14:08:27 +04:00
dm_percent_t percent = DM_PERCENT_INVALID ;
2003-05-06 16:06:02 +04:00
2013-09-23 13:03:02 +04:00
if ( ( ( lv_is_raid ( lv ) & & lv_raid_percent ( lv , & percent ) ) | |
2003-05-06 16:06:02 +04:00
2013-09-23 13:03:02 +04:00
( ( lv - > status & ( PVMOVE | MIRRORED ) ) & &
lv_mirror_percent ( lv - > vg - > cmd , lv , 0 , & percent , NULL ) ) ) & &
2014-06-09 14:08:27 +04:00
( percent ! = DM_PERCENT_INVALID ) ) {
2013-09-23 13:03:02 +04:00
percent = copy_percent ( lv ) ;
2014-06-09 14:08:27 +04:00
return dm_report_field_percent ( rh , field , & percent ) ;
2003-05-06 16:06:02 +04:00
}
2014-06-09 14:08:27 +04:00
return dm_report_field_percent ( rh , field , & percent ) ;
2003-05-06 16:06:02 +04:00
}
2013-07-19 04:30:02 +04:00
static int _raidsyncaction_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) ,
2013-04-12 00:33:59 +04:00
struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data ,
void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
char * sync_action ;
2013-09-23 12:17:50 +04:00
if ( lv_is_raid ( lv ) & & lv_raid_sync_action ( lv , & sync_action ) )
return _string_disp ( rh , mem , field , & sync_action , private ) ;
2013-04-12 00:33:59 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , " " , NULL ) ;
2013-04-12 00:33:59 +04:00
}
2013-07-19 04:30:02 +04:00
static int _raidmismatchcount_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) ,
2013-04-12 00:33:59 +04:00
struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data ,
void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
uint64_t mismatch_count ;
2013-09-23 12:18:10 +04:00
if ( lv_is_raid ( lv ) & & lv_raid_mismatch_count ( lv , & mismatch_count ) )
return dm_report_field_uint64 ( rh , field , & mismatch_count ) ;
2013-04-12 00:33:59 +04:00
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
RAID: Add writemostly/writebehind support for RAID1
'lvchange' is used to alter a RAID 1 logical volume's write-mostly and
write-behind characteristics. The '--writemostly' parameter takes a
PV as an argument with an optional trailing character to specify whether
to set ('y'), unset ('n'), or toggle ('t') the value. If no trailing
character is given, it will set the flag.
Synopsis:
lvchange [--writemostly <PV>:{t|y|n}] [--writebehind <count>] vg/lv
Example:
lvchange --writemostly /dev/sdb1:y --writebehind 512 vg/raid1_lv
The last character in the 'lv_attr' field is used to show whether a device
has the WriteMostly flag set. It is signified with a 'w'. If the device
has failed, the 'p'artial flag has priority.
Example ("nosync" raid1 with mismatch_cnt and writemostly):
[~]# lvs -a --segment vg
LV VG Attr #Str Type SSize
raid1 vg Rwi---r-m 2 raid1 500.00m
[raid1_rimage_0] vg Iwi---r-- 1 linear 500.00m
[raid1_rimage_1] vg Iwi---r-w 1 linear 500.00m
[raid1_rmeta_0] vg ewi---r-- 1 linear 4.00m
[raid1_rmeta_1] vg ewi---r-- 1 linear 4.00m
Example (raid1 with mismatch_cnt, writemostly - but failed drive):
[~]# lvs -a --segment vg
LV VG Attr #Str Type SSize
raid1 vg rwi---r-p 2 raid1 500.00m
[raid1_rimage_0] vg Iwi---r-- 1 linear 500.00m
[raid1_rimage_1] vg Iwi---r-p 1 linear 500.00m
[raid1_rmeta_0] vg ewi---r-- 1 linear 4.00m
[raid1_rmeta_1] vg ewi---r-p 1 linear 4.00m
A new reportable field has been added for writebehind as well. If
write-behind has not been set or the LV is not RAID1, the field will
be blank.
Example (writebehind is set):
[~]# lvs -a -o name,attr,writebehind vg
LV Attr WBehind
lv rwi-a-r-- 512
[lv_rimage_0] iwi-aor-w
[lv_rimage_1] iwi-aor--
[lv_rmeta_0] ewi-aor--
[lv_rmeta_1] ewi-aor--
Example (writebehind is not set):
[~]# lvs -a -o name,attr,writebehind vg
LV Attr WBehind
lv rwi-a-r--
[lv_rimage_0] iwi-aor-w
[lv_rimage_1] iwi-aor--
[lv_rmeta_0] ewi-aor--
[lv_rmeta_1] ewi-aor--
2013-04-15 22:59:46 +04:00
}
2013-07-19 04:30:02 +04:00
static int _raidwritebehind_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) ,
RAID: Add writemostly/writebehind support for RAID1
'lvchange' is used to alter a RAID 1 logical volume's write-mostly and
write-behind characteristics. The '--writemostly' parameter takes a
PV as an argument with an optional trailing character to specify whether
to set ('y'), unset ('n'), or toggle ('t') the value. If no trailing
character is given, it will set the flag.
Synopsis:
lvchange [--writemostly <PV>:{t|y|n}] [--writebehind <count>] vg/lv
Example:
lvchange --writemostly /dev/sdb1:y --writebehind 512 vg/raid1_lv
The last character in the 'lv_attr' field is used to show whether a device
has the WriteMostly flag set. It is signified with a 'w'. If the device
has failed, the 'p'artial flag has priority.
Example ("nosync" raid1 with mismatch_cnt and writemostly):
[~]# lvs -a --segment vg
LV VG Attr #Str Type SSize
raid1 vg Rwi---r-m 2 raid1 500.00m
[raid1_rimage_0] vg Iwi---r-- 1 linear 500.00m
[raid1_rimage_1] vg Iwi---r-w 1 linear 500.00m
[raid1_rmeta_0] vg ewi---r-- 1 linear 4.00m
[raid1_rmeta_1] vg ewi---r-- 1 linear 4.00m
Example (raid1 with mismatch_cnt, writemostly - but failed drive):
[~]# lvs -a --segment vg
LV VG Attr #Str Type SSize
raid1 vg rwi---r-p 2 raid1 500.00m
[raid1_rimage_0] vg Iwi---r-- 1 linear 500.00m
[raid1_rimage_1] vg Iwi---r-p 1 linear 500.00m
[raid1_rmeta_0] vg ewi---r-- 1 linear 4.00m
[raid1_rmeta_1] vg ewi---r-p 1 linear 4.00m
A new reportable field has been added for writebehind as well. If
write-behind has not been set or the LV is not RAID1, the field will
be blank.
Example (writebehind is set):
[~]# lvs -a -o name,attr,writebehind vg
LV Attr WBehind
lv rwi-a-r-- 512
[lv_rimage_0] iwi-aor-w
[lv_rimage_1] iwi-aor--
[lv_rmeta_0] ewi-aor--
[lv_rmeta_1] ewi-aor--
Example (writebehind is not set):
[~]# lvs -a -o name,attr,writebehind vg
LV Attr WBehind
lv rwi-a-r--
[lv_rimage_0] iwi-aor-w
[lv_rimage_1] iwi-aor--
[lv_rmeta_0] ewi-aor--
[lv_rmeta_1] ewi-aor--
2013-04-15 22:59:46 +04:00
struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data ,
void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2013-09-23 12:18:10 +04:00
if ( lv_is_raid_type ( lv ) & & first_seg ( lv ) - > writebehind )
return dm_report_field_uint32 ( rh , field , & first_seg ( lv ) - > writebehind ) ;
RAID: Add writemostly/writebehind support for RAID1
'lvchange' is used to alter a RAID 1 logical volume's write-mostly and
write-behind characteristics. The '--writemostly' parameter takes a
PV as an argument with an optional trailing character to specify whether
to set ('y'), unset ('n'), or toggle ('t') the value. If no trailing
character is given, it will set the flag.
Synopsis:
lvchange [--writemostly <PV>:{t|y|n}] [--writebehind <count>] vg/lv
Example:
lvchange --writemostly /dev/sdb1:y --writebehind 512 vg/raid1_lv
The last character in the 'lv_attr' field is used to show whether a device
has the WriteMostly flag set. It is signified with a 'w'. If the device
has failed, the 'p'artial flag has priority.
Example ("nosync" raid1 with mismatch_cnt and writemostly):
[~]# lvs -a --segment vg
LV VG Attr #Str Type SSize
raid1 vg Rwi---r-m 2 raid1 500.00m
[raid1_rimage_0] vg Iwi---r-- 1 linear 500.00m
[raid1_rimage_1] vg Iwi---r-w 1 linear 500.00m
[raid1_rmeta_0] vg ewi---r-- 1 linear 4.00m
[raid1_rmeta_1] vg ewi---r-- 1 linear 4.00m
Example (raid1 with mismatch_cnt, writemostly - but failed drive):
[~]# lvs -a --segment vg
LV VG Attr #Str Type SSize
raid1 vg rwi---r-p 2 raid1 500.00m
[raid1_rimage_0] vg Iwi---r-- 1 linear 500.00m
[raid1_rimage_1] vg Iwi---r-p 1 linear 500.00m
[raid1_rmeta_0] vg ewi---r-- 1 linear 4.00m
[raid1_rmeta_1] vg ewi---r-p 1 linear 4.00m
A new reportable field has been added for writebehind as well. If
write-behind has not been set or the LV is not RAID1, the field will
be blank.
Example (writebehind is set):
[~]# lvs -a -o name,attr,writebehind vg
LV Attr WBehind
lv rwi-a-r-- 512
[lv_rimage_0] iwi-aor-w
[lv_rimage_1] iwi-aor--
[lv_rmeta_0] ewi-aor--
[lv_rmeta_1] ewi-aor--
Example (writebehind is not set):
[~]# lvs -a -o name,attr,writebehind vg
LV Attr WBehind
lv rwi-a-r--
[lv_rimage_0] iwi-aor-w
[lv_rimage_1] iwi-aor--
[lv_rmeta_0] ewi-aor--
[lv_rmeta_1] ewi-aor--
2013-04-15 22:59:46 +04:00
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
2013-04-12 00:33:59 +04:00
}
2013-07-19 04:30:02 +04:00
static int _raidminrecoveryrate_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) ,
2013-05-31 20:25:52 +04:00
struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data ,
void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2013-09-23 12:18:10 +04:00
if ( lv_is_raid_type ( lv ) & & first_seg ( lv ) - > min_recovery_rate )
return dm_report_field_uint32 ( rh , field ,
& first_seg ( lv ) - > min_recovery_rate ) ;
2013-05-31 20:25:52 +04:00
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
2013-05-31 20:25:52 +04:00
}
2013-07-19 04:30:02 +04:00
static int _raidmaxrecoveryrate_disp ( struct dm_report * rh __attribute__ ( ( unused ) ) ,
2013-05-31 20:25:52 +04:00
struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data ,
void * private __attribute__ ( ( unused ) ) )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2013-09-23 12:18:10 +04:00
if ( lv_is_raid_type ( lv ) & & first_seg ( lv ) - > max_recovery_rate )
return dm_report_field_uint32 ( rh , field ,
& first_seg ( lv ) - > max_recovery_rate ) ;
2013-05-31 20:25:52 +04:00
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
2013-05-31 20:25:52 +04:00
}
2013-09-23 13:01:46 +04:00
/* Called only with lv_is_thin_pool/volume */
2014-06-09 14:08:27 +04:00
static int _dtpercent_disp ( int metadata , struct dm_report * rh ,
2012-01-19 19:34:32 +04:00
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-06-09 14:08:27 +04:00
dm_percent_t percent = DM_PERCENT_INVALID ;
2012-01-19 19:34:32 +04:00
2013-09-23 13:03:02 +04:00
/* Suppress data percent if not using driver */
/* cannot use lv_is_active_locally - need to check for layer -tpool */
if ( ! lv_info ( lv - > vg - > cmd , lv , 1 , NULL , 0 , 0 ) )
2014-06-09 14:08:27 +04:00
return dm_report_field_percent ( rh , field , & percent ) ;
2012-01-19 19:34:32 +04:00
if ( lv_is_thin_pool ( lv ) ) {
if ( ! lv_thin_pool_percent ( lv , metadata , & percent ) )
return_0 ;
} else { /* thin_volume */
if ( ! lv_thin_percent ( lv , 0 , & percent ) )
return_0 ;
}
2014-06-09 14:08:27 +04:00
return dm_report_field_percent ( rh , field , & percent ) ;
2012-01-19 19:34:32 +04:00
}
static int _datapercent_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
2014-06-09 14:08:27 +04:00
dm_percent_t percent = DM_PERCENT_INVALID ;
2012-01-19 19:34:32 +04:00
if ( lv_is_cow ( lv ) )
return _snpercent_disp ( rh , mem , field , data , private ) ;
if ( lv_is_thin_pool ( lv ) | | lv_is_thin_volume ( lv ) )
2014-06-09 14:08:27 +04:00
return _dtpercent_disp ( 0 , rh , field , data , private ) ;
2012-01-19 19:34:32 +04:00
2014-06-09 14:08:27 +04:00
return dm_report_field_percent ( rh , field , & percent ) ;
2012-01-19 19:34:32 +04:00
}
2014-06-09 14:08:27 +04:00
static int _metadatapercent_disp ( struct dm_report * rh ,
struct dm_pool * mem __attribute__ ( ( unused ) ) ,
2012-01-19 19:34:32 +04:00
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
if ( lv_is_thin_pool ( lv ) )
2014-06-09 14:08:27 +04:00
return _dtpercent_disp ( 1 , rh , field , data , private ) ;
2012-01-19 19:34:32 +04:00
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
2012-01-19 19:34:32 +04:00
}
static int _lvmetadatasize_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
uint64_t size ;
2014-02-05 19:44:37 +04:00
if ( lv_is_thin_pool ( lv ) | | lv_is_cache_pool ( lv ) ) {
2013-09-23 12:18:10 +04:00
size = lv_metadata_size ( lv ) ;
return _size64_disp ( rh , mem , field , & size , private ) ;
2012-01-19 19:34:32 +04:00
}
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
2012-01-19 19:34:32 +04:00
}
static int _thincount_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_segment * seg = ( const struct lv_segment * ) data ;
uint32_t count ;
2013-09-23 12:18:10 +04:00
if ( seg_is_thin_pool ( seg ) ) {
count = dm_list_size ( & seg - > lv - > segs_using_this_lv ) ;
return _uint32_disp ( rh , mem , field , & count , private ) ;
2012-01-19 19:34:32 +04:00
}
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , " " , & RESERVED ( number_undef_64 ) ) ;
2012-01-19 19:34:32 +04:00
}
static int _lvtime_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
char * repstr ;
uint64_t * sortval ;
2013-09-23 11:59:37 +04:00
if ( ! ( repstr = lv_time_dup ( mem , lv ) ) | |
! ( sortval = dm_pool_alloc ( mem , sizeof ( uint64_t ) ) ) ) {
log_error ( " Failed to allocate buffer for time. " ) ;
2012-01-19 19:34:32 +04:00
return 0 ;
}
* sortval = lv - > timestamp ;
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , sortval ) ;
2012-01-19 19:34:32 +04:00
}
static int _lvhost_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
char * repstr ;
2013-09-23 11:59:37 +04:00
if ( ! ( repstr = lv_host_dup ( mem , lv ) ) ) {
log_error ( " Failed to allocate buffer for host. " ) ;
return 0 ;
}
2012-01-19 19:34:32 +04:00
2013-09-23 12:17:50 +04:00
return _field_set_value ( field , repstr , NULL ) ;
2012-01-19 19:34:32 +04:00
}
2014-07-02 13:09:14 +04:00
/* PV/VG/LV Attributes */
static int _pvallocatable_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int allocatable = ( ( ( const struct physical_volume * ) data ) - > status & ALLOCATABLE_PV ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , allocatable , FIRST_NAME ( pv_allocatable_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _pvexported_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int exported = ( ( ( const struct physical_volume * ) data ) - > status & EXPORTED_VG ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , exported , FIRST_NAME ( pv_exported_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _pvmissing_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int missing = ( ( ( const struct physical_volume * ) data ) - > status & MISSING_PV ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , missing , FIRST_NAME ( pv_missing_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _vgpermissions_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
2014-07-08 14:40:45 +04:00
const char * perms = ( ( const struct volume_group * ) data ) - > status & LVM_WRITE ? FIRST_NAME ( vg_permissions_rw )
: FIRST_NAME ( vg_permissions_r ) ;
2014-07-02 13:09:14 +04:00
return _string_disp ( rh , mem , field , & perms , private ) ;
}
static int _vgextendable_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int extendable = ( vg_is_resizeable ( ( const struct volume_group * ) data ) ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , extendable , FIRST_NAME ( vg_extendable_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _vgexported_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int exported = ( vg_is_exported ( ( const struct volume_group * ) data ) ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , exported , FIRST_NAME ( vg_exported_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _vgpartial_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int partial = ( vg_missing_pv_count ( ( const struct volume_group * ) data ) ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , partial , FIRST_NAME ( vg_partial_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _vgallocationpolicy_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const char * alloc_policy = get_alloc_string ( ( ( const struct volume_group * ) data ) - > alloc ) ? : _str_unknown ;
return _string_disp ( rh , mem , field , & alloc_policy , private ) ;
}
static int _vgclustered_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int clustered = ( vg_is_clustered ( ( const struct volume_group * ) data ) ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , clustered , FIRST_NAME ( vg_clustered_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
static int _lvlayout_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
struct dm_list * lv_layout ;
2014-08-25 11:07:03 +04:00
struct dm_list * lv_role ;
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
2014-08-25 11:07:03 +04:00
if ( ! lv_layout_and_role ( mem , lv , & lv_layout , & lv_role ) ) {
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
log_error ( " Failed to display layout for LV %s/%s. " , lv - > vg - > name , lv - > name ) ;
return 0 ;
}
2014-08-25 12:05:27 +04:00
return _field_set_string_list ( rh , field , lv_layout , private , 0 ) ;
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
}
2014-08-25 11:07:03 +04:00
static int _lvrole_disp ( struct dm_report * rh , struct dm_pool * mem ,
2014-07-02 13:09:14 +04:00
struct dm_report_field * field ,
const void * data , void * private )
{
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
struct dm_list * lv_layout ;
2014-08-25 11:07:03 +04:00
struct dm_list * lv_role ;
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
2014-08-25 11:07:03 +04:00
if ( ! lv_layout_and_role ( mem , lv , & lv_layout , & lv_role ) ) {
log_error ( " Failed to display role for LV %s/%s. " , lv - > vg - > name , lv - > name ) ;
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
return 0 ;
}
2014-08-25 12:05:27 +04:00
return _field_set_string_list ( rh , field , lv_role , private , 0 ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvinitialimagesync_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
int initial_image_sync ;
if ( lv_is_raid ( lv ) | | lv_is_mirrored ( lv ) )
initial_image_sync = ( lv - > status & LV_NOTSYNCED ) = = 0 ;
else
initial_image_sync = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , initial_image_sync , FIRST_NAME ( lv_initial_image_sync_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvimagesynced_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
int image_synced ;
if ( lv_is_raid_image ( lv ) )
image_synced = ! lv_is_visible ( lv ) & & lv_raid_image_in_sync ( lv ) ;
else if ( lv_is_mirror_image ( lv ) )
image_synced = lv_mirror_image_in_sync ( lv ) ;
else
image_synced = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , image_synced , FIRST_NAME ( lv_image_synced_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvmerging_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
int merging ;
if ( lv_is_origin ( lv ) | | lv_is_external_origin ( lv ) )
merging = lv_is_merging_origin ( lv ) ;
else if ( lv_is_cow ( lv ) )
merging = lv_is_merging_cow ( lv ) ;
else if ( lv_is_thin_volume ( lv ) )
merging = lv_is_merging_thin_snapshot ( lv ) ;
else
merging = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , merging , FIRST_NAME ( lv_merging_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvconverting_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int converting = ( ( ( const struct logical_volume * ) data ) - > status & CONVERTING ) ! = 0 ;
return _binary_disp ( rh , mem , field , converting , " converting " , private ) ;
}
static int _lvpermissions_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_with_info * lvi = ( const struct lv_with_info * ) data ;
const char * perms = " " ;
if ( ! ( lvi - > lv - > status & PVMOVE ) ) {
if ( lvi - > lv - > status & LVM_WRITE ) {
2014-07-11 12:18:59 +04:00
if ( ! lvi - > info - > exists )
perms = _str_unknown ;
else if ( lvi - > info - > read_only )
2014-07-08 14:40:45 +04:00
perms = FIRST_NAME ( lv_permissions_r_override ) ;
2014-07-02 13:09:14 +04:00
else
2014-07-08 14:40:45 +04:00
perms = FIRST_NAME ( lv_permissions_rw ) ;
2014-07-02 13:09:14 +04:00
} else if ( lvi - > lv - > status & LVM_READ )
2014-07-08 14:40:45 +04:00
perms = FIRST_NAME ( lv_permissions_r ) ;
2014-07-02 13:09:14 +04:00
else
perms = _str_unknown ;
}
return _string_disp ( rh , mem , field , & perms , private ) ;
}
static int _lvallocationpolicy_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const char * alloc_policy = get_alloc_string ( ( ( const struct logical_volume * ) data ) - > alloc ) ? : _str_unknown ;
return _string_disp ( rh , mem , field , & alloc_policy , private ) ;
}
static int _lvallocationlocked_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int alloc_locked = ( ( ( const struct logical_volume * ) data ) - > status & LOCKED ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , alloc_locked , FIRST_NAME ( lv_allocation_locked_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvfixedminor_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int fixed_minor = ( ( ( const struct logical_volume * ) data ) - > status & FIXED_MINOR ) ! = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , fixed_minor , FIRST_NAME ( lv_fixed_minor_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
2014-07-09 16:37:01 +04:00
static int _lvactive_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
char * repstr ;
if ( ! ( repstr = lv_active_dup ( mem , ( const struct logical_volume * ) data ) ) ) {
log_error ( " Failed to allocate buffer for active. " ) ;
return 0 ;
}
return _field_set_value ( field , repstr , NULL ) ;
}
2014-07-09 16:28:50 +04:00
static int _lvactivelocally_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
int active_locally ;
2014-07-11 13:15:06 +04:00
if ( ! activation ( ) )
return _binary_undef_disp ( rh , mem , field , private ) ;
2014-07-09 16:28:50 +04:00
if ( vg_is_clustered ( lv - > vg ) ) {
lv = lv_lock_holder ( lv ) ;
active_locally = lv_is_active_locally ( lv ) ;
} else
active_locally = lv_is_active ( lv ) ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , active_locally , FIRST_NAME ( lv_active_locally_y ) , private ) ;
2014-07-09 16:28:50 +04:00
}
static int _lvactiveremotely_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
int active_remotely ;
2014-07-11 13:15:06 +04:00
if ( ! activation ( ) )
return _binary_undef_disp ( rh , mem , field , private ) ;
2014-07-09 16:28:50 +04:00
if ( vg_is_clustered ( lv - > vg ) ) {
lv = lv_lock_holder ( lv ) ;
2014-07-11 13:56:50 +04:00
/* FIXME: It seems we have no way to get this info correctly
* with current interface - we ' d need to check number
* of responses from the cluster :
* - if number of nodes that responded = = 1
* - and LV is active on local node
* . . then we may say that LV is * not * active remotely .
*
* Otherwise ( ( responses > 1 & & LV active locally ) | |
* ( responses = = 1 & & LV not active locally ) ) , it ' s
* active remotely .
*
* We have this info , but hidden underneath the
* locking interface ( locking_type . query_resource fn ) .
*
* For now , let ' s use ' unknown ' for remote status if
* the LV is found active locally until we find a way to
* smuggle the proper information out of the interface .
*/
if ( lv_is_active_locally ( lv ) )
return _binary_undef_disp ( rh , mem , field , private ) ;
else
active_remotely = lv_is_active_but_not_locally ( lv ) ;
2014-07-09 16:28:50 +04:00
} else
active_remotely = 0 ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , active_remotely , FIRST_NAME ( lv_active_remotely_y ) , private ) ;
2014-07-09 16:28:50 +04:00
}
static int _lvactiveexclusively_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
int active_exclusively ;
2014-07-11 13:15:06 +04:00
if ( ! activation ( ) )
return _binary_undef_disp ( rh , mem , field , private ) ;
2014-07-09 16:28:50 +04:00
if ( vg_is_clustered ( lv - > vg ) ) {
lv = lv_lock_holder ( lv ) ;
active_exclusively = lv_is_active_exclusive ( lv ) ;
} else
active_exclusively = lv_is_active ( lv ) ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , active_exclusively , FIRST_NAME ( lv_active_exclusively_y ) , private ) ;
2014-07-09 16:28:50 +04:00
}
2014-07-02 13:09:14 +04:00
static int _lvmergefailed_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
dm_percent_t snap_percent ;
int merge_failed ;
if ( ! lv_is_cow ( lv ) | | ! lv_snapshot_percent ( lv , & snap_percent ) )
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , _str_unknown , & RESERVED ( number_undef_64 ) ) ;
2014-07-02 13:09:14 +04:00
merge_failed = snap_percent = = LVM_PERCENT_MERGE_FAILED ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , merge_failed , FIRST_NAME ( lv_merge_failed_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvsnapshotinvalid_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
dm_percent_t snap_percent ;
int snap_invalid ;
if ( ! lv_is_cow ( lv ) )
2014-07-10 13:54:37 +04:00
return _field_set_value ( field , _str_unknown , & RESERVED ( number_undef_64 ) ) ;
2014-07-02 13:09:14 +04:00
snap_invalid = ! lv_snapshot_percent ( lv , & snap_percent ) | | snap_percent = = DM_PERCENT_INVALID ;
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , snap_invalid , FIRST_NAME ( lv_snapshot_invalid_y ) , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvsuspended_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_with_info * lvi = ( const struct lv_with_info * ) data ;
if ( lvi - > info - > exists )
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , lvi - > info - > suspended , FIRST_NAME ( lv_suspended_y ) , private ) ;
2014-07-02 13:09:14 +04:00
2014-07-08 14:15:14 +04:00
return _binary_undef_disp ( rh , mem , field , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvlivetable_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_with_info * lvi = ( const struct lv_with_info * ) data ;
if ( lvi - > info - > exists )
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , lvi - > info - > live_table , FIRST_NAME ( lv_live_table_y ) , private ) ;
2014-07-02 13:09:14 +04:00
2014-07-08 14:15:14 +04:00
return _binary_undef_disp ( rh , mem , field , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvinactivetable_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_with_info * lvi = ( const struct lv_with_info * ) data ;
if ( lvi - > info - > exists )
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , lvi - > info - > inactive_table , FIRST_NAME ( lv_inactive_table_y ) , private ) ;
2014-07-02 13:09:14 +04:00
2014-07-08 14:15:14 +04:00
return _binary_undef_disp ( rh , mem , field , private ) ;
2014-07-02 13:09:14 +04:00
}
static int _lvdeviceopen_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_with_info * lvi = ( const struct lv_with_info * ) data ;
if ( lvi - > info - > exists )
2014-07-09 17:10:43 +04:00
return _binary_disp ( rh , mem , field , lvi - > info - > open_count , FIRST_NAME ( lv_device_open_y ) , private ) ;
2014-07-02 13:09:14 +04:00
2014-07-08 14:15:14 +04:00
return _binary_undef_disp ( rh , mem , field , private ) ;
2014-07-02 13:09:14 +04:00
}
2014-07-09 16:37:01 +04:00
static int _thinzero_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct lv_segment * seg = ( const struct lv_segment * ) data ;
if ( seg_is_thin_pool ( seg ) )
2014-07-10 17:23:56 +04:00
return _binary_disp ( rh , mem , field , seg - > zero_new_blocks , FIRST_NAME ( zero_y ) , private ) ;
2014-07-09 16:37:01 +04:00
2014-07-10 17:23:56 +04:00
return _binary_undef_disp ( rh , mem , field , private ) ;
2014-07-09 16:37:01 +04:00
}
2014-07-02 13:09:14 +04:00
static int _lvhealthstatus_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
const struct logical_volume * lv = ( const struct logical_volume * ) data ;
const char * health = " " ;
uint64_t n ;
if ( lv - > status & PARTIAL_LV )
health = " partial " ;
else if ( lv_is_raid_type ( lv ) ) {
if ( ! activation ( ) )
health = " unknown " ;
else if ( ! lv_raid_healthy ( lv ) )
health = " refresh needed " ;
else if ( lv_is_raid ( lv ) ) {
if ( lv_raid_mismatch_count ( lv , & n ) & & n )
health = " mismatches exist " ;
} else if ( lv - > status & LV_WRITEMOSTLY )
health = " writemostly " ;
}
return _string_disp ( rh , mem , field , & health , private ) ;
}
static int _lvskipactivation_disp ( struct dm_report * rh , struct dm_pool * mem ,
struct dm_report_field * field ,
const void * data , void * private )
{
int skip_activation = ( ( ( const struct logical_volume * ) data ) - > status & LV_ACTIVATION_SKIP ) ! = 0 ;
return _binary_disp ( rh , mem , field , skip_activation , " skip activation " , private ) ;
}
2007-01-16 21:06:12 +03:00
/* Report object types */
2002-12-12 23:55:49 +03:00
2007-01-16 21:06:12 +03:00
/* necessary for displaying something for PVs not belonging to VG */
2009-01-10 20:09:40 +03:00
static struct format_instance _dummy_fid = {
2010-06-29 00:32:44 +04:00
. metadata_areas_in_use = { & ( _dummy_fid . metadata_areas_in_use ) , & ( _dummy_fid . metadata_areas_in_use ) } ,
2010-06-29 00:33:22 +04:00
. metadata_areas_ignored = { & ( _dummy_fid . metadata_areas_ignored ) , & ( _dummy_fid . metadata_areas_ignored ) } ,
2009-01-10 20:09:40 +03:00
} ;
2007-01-16 21:06:12 +03:00
static struct volume_group _dummy_vg = {
2009-01-10 20:09:40 +03:00
. fid = & _dummy_fid ,
2011-02-18 17:47:28 +03:00
. name = " " ,
2009-01-10 20:09:40 +03:00
. system_id = ( char * ) " " ,
. pvs = { & ( _dummy_vg . pvs ) , & ( _dummy_vg . pvs ) } ,
. lvs = { & ( _dummy_vg . lvs ) , & ( _dummy_vg . lvs ) } ,
. tags = { & ( _dummy_vg . tags ) , & ( _dummy_vg . tags ) } ,
2002-12-12 23:55:49 +03:00
} ;
2007-01-16 21:06:12 +03:00
static void * _obj_get_vg ( void * obj )
2006-10-02 20:46:27 +04:00
{
2007-01-16 21:06:12 +03:00
struct volume_group * vg = ( ( struct lvm_report_object * ) obj ) - > vg ;
2006-10-02 20:46:27 +04:00
2007-01-16 21:06:12 +03:00
return vg ? vg : & _dummy_vg ;
2006-10-02 20:46:27 +04:00
}
2007-01-16 21:06:12 +03:00
static void * _obj_get_lv ( void * obj )
2002-12-12 23:55:49 +03:00
{
2014-07-02 11:45:53 +04:00
return ( ( struct lvm_report_object * ) obj ) - > lvi - > lv ;
}
static void * _obj_get_lv_with_info ( void * obj )
{
return ( ( struct lvm_report_object * ) obj ) - > lvi ;
2002-12-12 23:55:49 +03:00
}
2007-01-16 21:06:12 +03:00
static void * _obj_get_pv ( void * obj )
2002-12-12 23:55:49 +03:00
{
2007-01-16 21:06:12 +03:00
return ( ( struct lvm_report_object * ) obj ) - > pv ;
2002-12-12 23:55:49 +03:00
}
2013-07-29 21:07:11 +04:00
static void * _obj_get_label ( void * obj )
{
return ( ( struct lvm_report_object * ) obj ) - > label ;
}
2007-01-16 21:06:12 +03:00
static void * _obj_get_seg ( void * obj )
2002-12-12 23:55:49 +03:00
{
2007-01-16 21:06:12 +03:00
return ( ( struct lvm_report_object * ) obj ) - > seg ;
2002-12-12 23:55:49 +03:00
}
2007-01-16 21:06:12 +03:00
static void * _obj_get_pvseg ( void * obj )
2002-12-12 23:55:49 +03:00
{
2007-01-16 21:06:12 +03:00
return ( ( struct lvm_report_object * ) obj ) - > pvseg ;
2002-12-12 23:55:49 +03:00
}
2013-09-18 04:09:15 +04:00
static void * _obj_get_devtypes ( void * obj )
{
return obj ;
}
2007-01-16 21:06:12 +03:00
static const struct dm_report_object_type _report_types [ ] = {
{ VGS , " Volume Group " , " vg_ " , _obj_get_vg } ,
{ LVS , " Logical Volume " , " lv_ " , _obj_get_lv } ,
2014-07-02 11:45:53 +04:00
{ LVSINFO , " Logical Volume Device " , " lv_ " , _obj_get_lv_with_info } ,
2007-01-16 21:06:12 +03:00
{ PVS , " Physical Volume " , " pv_ " , _obj_get_pv } ,
2013-07-29 21:07:11 +04:00
{ LABEL , " Physical Volume Label " , " pv_ " , _obj_get_label } ,
2007-01-16 21:06:12 +03:00
{ SEGS , " Logical Volume Segment " , " seg_ " , _obj_get_seg } ,
{ PVSEGS , " Physical Volume Segment " , " pvseg_ " , _obj_get_pvseg } ,
{ 0 , " " , " " , NULL } ,
} ;
2002-12-12 23:55:49 +03:00
2013-09-18 04:09:15 +04:00
static const struct dm_report_object_type _devtypes_report_types [ ] = {
{ DEVTYPES , " Device Types " , " devtype_ " , _obj_get_devtypes } ,
{ 0 , " " , " " , NULL } ,
} ;
2007-01-16 21:06:12 +03:00
/*
* Import column definitions
*/
2002-12-12 23:55:49 +03:00
2007-01-18 20:48:29 +03:00
# define STR DM_REPORT_FIELD_TYPE_STRING
# define NUM DM_REPORT_FIELD_TYPE_NUMBER
2014-07-02 13:09:14 +04:00
# define BIN DM_REPORT_FIELD_TYPE_NUMBER
2014-05-29 11:37:22 +04:00
# define SIZ DM_REPORT_FIELD_TYPE_SIZE
2014-06-09 18:23:45 +04:00
# define PCT DM_REPORT_FIELD_TYPE_PERCENT
2014-05-29 11:41:36 +04:00
# define STR_LIST DM_REPORT_FIELD_TYPE_STRING_LIST
2010-08-20 16:44:03 +04:00
# define FIELD(type, strct, sorttype, head, field, width, func, id, desc, writeable) \
2010-01-07 17:37:11 +03:00
{ type , sorttype , offsetof ( type_ # # strct , field ) , width , \
2010-08-20 16:44:17 +04:00
# id, head, &_ ## func ## _disp, desc},
2010-01-07 17:37:11 +03:00
typedef struct physical_volume type_pv ;
typedef struct logical_volume type_lv ;
typedef struct volume_group type_vg ;
typedef struct lv_segment type_seg ;
typedef struct pv_segment type_pvseg ;
2013-07-29 21:07:11 +04:00
typedef struct label type_label ;
2002-12-12 23:55:49 +03:00
2013-09-18 04:09:15 +04:00
typedef dev_known_type_t type_devtype ;
2010-01-07 17:37:11 +03:00
static const struct dm_report_field_type _fields [ ] = {
2007-01-16 21:06:12 +03:00
# include "columns.h"
2007-01-30 02:01:18 +03:00
{ 0 , 0 , 0 , 0 , " " , " " , NULL , NULL } ,
2007-01-16 21:06:12 +03:00
} ;
2002-12-12 23:55:49 +03:00
2013-09-18 04:09:15 +04:00
static const struct dm_report_field_type _devtypes_fields [ ] = {
# include "columns-devtypes.h"
{ 0 , 0 , 0 , 0 , " " , " " , NULL , NULL } ,
} ;
2007-01-16 21:06:12 +03:00
# undef STR
# undef NUM
2014-07-02 13:09:14 +04:00
# undef BIN
2014-05-29 11:37:22 +04:00
# undef SIZ
2014-05-29 11:41:36 +04:00
# undef STR_LIST
2007-01-16 21:06:12 +03:00
# undef FIELD
void * report_init ( struct cmd_context * cmd , const char * format , const char * keys ,
2007-07-10 22:20:00 +04:00
report_type_t * report_type , const char * separator ,
2008-06-25 01:21:04 +04:00
int aligned , int buffered , int headings , int field_prefixes ,
2014-05-29 11:38:59 +04:00
int quoted , int columns_as_rows , const char * selection )
2007-01-16 21:06:12 +03:00
{
uint32_t report_flags = 0 ;
2013-09-18 04:09:15 +04:00
int devtypes_report = * report_type & DEVTYPES ? 1 : 0 ;
2008-04-20 04:15:08 +04:00
void * rh ;
2002-12-12 23:55:49 +03:00
if ( aligned )
2007-01-16 21:06:12 +03:00
report_flags | = DM_REPORT_OUTPUT_ALIGNED ;
2002-12-12 23:55:49 +03:00
if ( buffered )
2007-01-16 21:06:12 +03:00
report_flags | = DM_REPORT_OUTPUT_BUFFERED ;
2002-12-12 23:55:49 +03:00
if ( headings )
2007-01-16 21:06:12 +03:00
report_flags | = DM_REPORT_OUTPUT_HEADINGS ;
2002-12-12 23:55:49 +03:00
2008-06-06 23:28:35 +04:00
if ( field_prefixes )
report_flags | = DM_REPORT_OUTPUT_FIELD_NAME_PREFIX ;
2008-06-25 01:21:04 +04:00
if ( ! quoted )
report_flags | = DM_REPORT_OUTPUT_FIELD_UNQUOTED ;
2008-06-25 02:48:53 +04:00
if ( columns_as_rows )
report_flags | = DM_REPORT_OUTPUT_COLUMNS_AS_ROWS ;
2014-05-29 11:38:59 +04:00
rh = dm_report_init_with_selection ( report_type ,
devtypes_report ? _devtypes_report_types : _report_types ,
report: select: add support for reserved value recognition in report selection string - add struct dm_report_reserved_value
Make dm_report_init_with_selection to accept an argument with an
array of reserved values where each element contains a triple:
{dm report field type, reserved value, array of strings representing this value}
When the selection is parsed, we always check whether a string
representation of some reserved value is not hit and if it is,
we use the reserved value assigned for this string instead of
trying to parse it as a value of certain field type.
This makes it possible to define selections like:
... --select lv_major=undefined (or -1 or unknown or undef or whatever string representations are registered for this reserved value in the future)
... --select lv_read_ahead=auto
... --select vg_mda_copies=unmanaged
With this, each time the field value of certain type is hit
and when we compare it with the selection, we use the proper
value for comparison.
For now, register these reserved values that are used at the moment
(also more descriptive names are used for the values):
const uint64_t _reserved_number_undef_64 = UINT64_MAX;
const uint64_t _reserved_number_unmanaged_64 = UINT64_MAX - 1;
const uint64_t _reserved_size_auto_64 = UINT64_MAX;
{
{DM_REPORT_FIELD_TYPE_NUMBER, _reserved_number_undef_64, {"-1", "undefined", "undef", "unknown", NULL}},
{DM_REPORT_FIELD_TYPE_NUMBER, _reserved_number_unmanaged_64, {"unmanaged", NULL}},
{DM_REPORT_FIELD_TYPE_SIZE, _reserved_size_auto_64, {"auto", NULL}},
NULL
}
Same reserved value of different field types do not collide.
All arrays are null-terminated.
The list of reserved values is automatically displayed within
selection help output:
Selection operands
------------------
...
Reserved values
---------------
-1, undefined, undef, unknown - Reserved value for undefined numeric value. [number]
unmanaged - Reserved value for unmanaged number of metadata copies in VG. [number]
auto - Reserved value for size that is automatically calculated. [size]
Selection operators
-------------------
...
2014-05-30 17:02:21 +04:00
devtypes_report ? _devtypes_fields : _fields ,
format , separator , report_flags , keys ,
selection , _report_reserved_values , cmd ) ;
2008-04-20 04:15:08 +04:00
2008-12-15 16:30:45 +03:00
if ( rh & & field_prefixes )
2008-06-06 23:28:35 +04:00
dm_report_set_output_field_name_prefix ( rh , " lvm2_ " ) ;
2008-04-20 04:15:08 +04:00
return rh ;
2002-12-12 23:55:49 +03:00
}
/*
* Create a row of data for an object
*/
/*
 * Build one lvm_report_object row from whichever of vg/lv/pv/seg/pvseg/
 * lvinfo/label the caller supplies (unused members stay NULL) and hand it
 * to dm_report_object() for field rendering.
 *
 * Returns the result of dm_report_object() (non-zero on success).
 */
int report_object(void *handle, struct volume_group *vg,
		  struct logical_volume *lv, struct physical_volume *pv,
		  struct lv_segment *seg, struct pv_segment *pvseg,
		  struct lvinfo *lvinfo, struct label *label)
{
	/* Stack-local fallback label/device used only when no real label
	 * can be found below; dummy_label points at dummy_device. */
	struct device dummy_device = { .dev = 0 };
	struct label dummy_label = { .dev = &dummy_device };
	/* LV and its (optional) activation info travel together as one member. */
	struct lv_with_info lvi = { .lv = lv, .info = lvinfo };
	struct lvm_report_object obj = {
		.vg = vg,
		.lvi = &lvi,
		.pv = pv,
		.seg = seg,
		.pvseg = pvseg,
		/* Prefer explicit label; else look it up from the PV (GNU ?:
		 * keeps the non-NULL label without re-evaluating it). */
		.label = label ? : (pv ? pv_label(pv) : NULL)
	};

	/* FIXME workaround for pv_label going through cache; remove once struct
	 * physical_volume gains a proper "label" pointer */
	if (!obj.label) {
		if (pv) {
			if (pv->fmt)
				dummy_label.labeller = pv->fmt->labeller;
			if (pv->dev)
				dummy_label.dev = pv->dev;
			else
				/* No device: stash the PV's id into the dummy
				 * device so pvid-based fields still report. */
				memcpy(dummy_device.pvid, &pv->id, ID_LEN);
		}
		obj.label = &dummy_label;
	}

	/* The two format fields might as well match. */
	if (!vg && pv)
		_dummy_fid.fmt = pv->fmt;

	return dm_report_object(handle, &obj);
}
2013-09-18 04:09:15 +04:00
/*
 * Emit a single device-type table entry as one report row.
 * Returns the result of dm_report_object() (non-zero on success).
 */
static int _report_devtype_single(void *handle, const dev_known_type_t *devtype)
{
	/* dm_report_object takes a non-const object pointer. */
	void *row = (void *) devtype;

	return dm_report_object(handle, row);
}
/*
 * Report every entry of the built-in known-device-type table.
 * The table is terminated by an entry whose name is the empty string.
 * Returns 1 on success, 0 as soon as any row fails to report.
 */
int report_devtypes(void *handle)
{
	int i;

	for (i = 0; _dev_known_types[i].name[0]; i++)
		if (!_report_devtype_single(handle, &_dev_known_types[i]))
			return 0;

	return 1;
}