lvm2/lib/report/properties.c

/*
 * Copyright (C) 2010-2013 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "properties.h"
#include "activate.h"
#include "metadata.h"
#define GET_VG_NUM_PROPERTY_FN(NAME, VALUE) \
        GET_NUM_PROPERTY_FN(NAME, VALUE, volume_group, vg)
#define GET_PV_NUM_PROPERTY_FN(NAME, VALUE) \
        GET_NUM_PROPERTY_FN(NAME, VALUE, physical_volume, pv)
#define GET_LV_NUM_PROPERTY_FN(NAME, VALUE) \
        GET_NUM_PROPERTY_FN(NAME, VALUE, logical_volume, lv)
#define GET_LVSEG_NUM_PROPERTY_FN(NAME, VALUE) \
        GET_NUM_PROPERTY_FN(NAME, VALUE, lv_segment, lvseg)
#define GET_PVSEG_NUM_PROPERTY_FN(NAME, VALUE) \
        GET_NUM_PROPERTY_FN(NAME, VALUE, pv_segment, pvseg)

#define SET_VG_NUM_PROPERTY_FN(NAME, SETFN) \
        SET_NUM_PROPERTY_FN(NAME, SETFN, volume_group, vg)
#define SET_PV_NUM_PROPERTY_FN(NAME, SETFN) \
        SET_NUM_PROPERTY_FN(NAME, SETFN, physical_volume, pv)
#define SET_LV_NUM_PROPERTY_FN(NAME, SETFN) \
        SET_NUM_PROPERTY_FN(NAME, SETFN, logical_volume, lv)

#define GET_VG_STR_PROPERTY_FN(NAME, VALUE) \
        GET_STR_PROPERTY_FN(NAME, VALUE, volume_group, vg)
#define GET_PV_STR_PROPERTY_FN(NAME, VALUE) \
        GET_STR_PROPERTY_FN(NAME, VALUE, physical_volume, pv)
#define GET_LV_STR_PROPERTY_FN(NAME, VALUE) \
        GET_STR_PROPERTY_FN(NAME, VALUE, logical_volume, lv)
#define GET_LVSEG_STR_PROPERTY_FN(NAME, VALUE) \
        GET_STR_PROPERTY_FN(NAME, VALUE, lv_segment, lvseg)
#define GET_PVSEG_STR_PROPERTY_FN(NAME, VALUE) \
        GET_STR_PROPERTY_FN(NAME, VALUE, pv_segment, pvseg)
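
/*
 * Helpers that compute values which are reported but not stored
 * directly in the metadata structures.
 */

/* Mirror/RAID copy (sync) percentage; DM_PERCENT_INVALID if it cannot be read. */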
static dm_percent_t _copy_percent(const struct logical_volume *lv)
{
        dm_percent_t percent;

        if (!lv_mirror_percent(lv->vg->cmd, lv, 0, &percent, NULL))
                percent = DM_PERCENT_INVALID;

        return percent;
}
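
/* Discrepancies found by the last RAID scrub (check/repair); 0 if unavailable. */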
static uint64_t _raidmismatchcount(const struct logical_volume *lv)
{
        uint64_t cnt;

        if (!lv_raid_mismatch_count(lv, &cnt))
                return 0;

        return cnt;
}
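
/* Current RAID sync action (idle, resync, recover, check, repair); NULL on error. */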
static char *_raidsyncaction(const struct logical_volume *lv)
{
        char *action;

        if (!lv_raid_sync_action(lv, &action))
                return 0;

        return action;
}
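
/* RAID1 write-behind and the recovery rate limits live in the first LV segment. */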
static uint32_t _raidwritebehind(const struct logical_volume *lv)
{
        return first_seg(lv)->writebehind;
}

static uint32_t _raidminrecoveryrate(const struct logical_volume *lv)
{
        return first_seg(lv)->min_recovery_rate;
}

static uint32_t _raidmaxrecoveryrate(const struct logical_volume *lv)
{
        return first_seg(lv)->max_recovery_rate;
}
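
/* Snapshot (COW) usage; DM_PERCENT_INVALID if not a COW or status is unavailable. */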
static dm_percent_t _snap_percent(const struct logical_volume *lv)
{
        dm_percent_t percent;

        if (!lv_is_cow(lv) || !lv_snapshot_percent(lv, &percent))
                percent = DM_PERCENT_INVALID;

        return percent;
}
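
/* Value for the data_percent field, derived from snapshot, cache, thin or thin-pool status depending on the LV type. */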
static dm_percent_t _data_percent(const struct logical_volume *lv)
{
        dm_percent_t percent;
        struct lv_status_cache *status;

        if (lv_is_cow(lv))
                return _snap_percent(lv);

        if (lv_is_cache(lv) || lv_is_cache_pool(lv)) {
                if (!lv_cache_status(lv, &status)) {
                        stack;
                        return DM_PERCENT_INVALID;
                }
                percent = status->dirty_usage;
                dm_pool_destroy(status->mem);
                return percent;
        }

        if (lv_is_thin_volume(lv))
                return lv_thin_percent(lv, 0, &percent) ? percent : DM_PERCENT_INVALID;

        return lv_thin_pool_percent(lv, 0, &percent) ? percent : DM_PERCENT_INVALID;
}
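
/* Value for the metadata_percent field; only cache and thin pool LVs report one. */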
static dm_percent_t _metadata_percent(const struct logical_volume *lv)
{
        dm_percent_t percent;
        struct lv_status_cache *status;

        if (lv_is_cache(lv) || lv_is_cache_pool(lv)) {
                if (!lv_cache_status(lv, &status)) {
                        stack;
                        return DM_PERCENT_INVALID;
                }
                percent = status->dirty_usage;
                dm_pool_destroy(status->mem);
                return percent;
        }

        if (lv_is_thin_pool(lv))
                return lv_thin_pool_percent(lv, 1, &percent) ? percent : DM_PERCENT_INVALID;

        return DM_PERCENT_INVALID;
}

/* PV */
GET_PV_STR_PROPERTY_FN(pv_fmt, pv_fmt_dup(pv))
#define _pv_fmt_set prop_not_implemented_set
GET_PV_STR_PROPERTY_FN(pv_uuid, pv_uuid_dup(pv))
#define _pv_uuid_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(dev_size, SECTOR_SIZE * pv_dev_size(pv))
#define _dev_size_set prop_not_implemented_set
GET_PV_STR_PROPERTY_FN(pv_name, pv_name_dup(pv))
#define _pv_name_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_mda_free, SECTOR_SIZE * pv_mda_free(pv))
#define _pv_mda_free_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_mda_size, SECTOR_SIZE * pv_mda_size(pv))
#define _pv_mda_size_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pe_start, SECTOR_SIZE * pv->pe_start)
#define _pe_start_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_size, SECTOR_SIZE * pv_size_field(pv))
#define _pv_size_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_free, SECTOR_SIZE * pv_free(pv))
#define _pv_free_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_used, SECTOR_SIZE * pv_used(pv))
#define _pv_used_set prop_not_implemented_set
GET_PV_STR_PROPERTY_FN(pv_attr, pv_attr_dup(pv->vg->vgmem, pv))
#define _pv_attr_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_pe_count, pv->pe_count)
#define _pv_pe_count_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_pe_alloc_count, pv->pe_alloc_count)
#define _pv_pe_alloc_count_set prop_not_implemented_set
GET_PV_STR_PROPERTY_FN(pv_tags, pv_tags_dup(pv))
#define _pv_tags_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_mda_count, pv_mda_count(pv))
#define _pv_mda_count_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_mda_used_count, pv_mda_used_count(pv))
#define _pv_mda_used_count_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_ba_start, SECTOR_SIZE * pv->ba_start)
#define _pv_ba_start_set prop_not_implemented_set
GET_PV_NUM_PROPERTY_FN(pv_ba_size, SECTOR_SIZE * pv->ba_size)
#define _pv_ba_size_set prop_not_implemented_set
#define _pv_allocatable_set prop_not_implemented_set
#define _pv_allocatable_get prop_not_implemented_get
#define _pv_exported_set prop_not_implemented_set
#define _pv_exported_get prop_not_implemented_get
#define _pv_missing_set prop_not_implemented_set
#define _pv_missing_get prop_not_implemented_get
#define _vg_permissions_set prop_not_implemented_set
#define _vg_permissions_get prop_not_implemented_get
#define _vg_extendable_set prop_not_implemented_set
#define _vg_extendable_get prop_not_implemented_get
#define _vg_exported_set prop_not_implemented_set
#define _vg_exported_get prop_not_implemented_get
#define _vg_partial_set prop_not_implemented_set
#define _vg_partial_get prop_not_implemented_get
#define _vg_allocation_policy_set prop_not_implemented_set
#define _vg_allocation_policy_get prop_not_implemented_get
#define _vg_clustered_set prop_not_implemented_set
#define _vg_clustered_get prop_not_implemented_get
#define _lv_layout_set prop_not_implemented_set
#define _lv_layout_get prop_not_implemented_get
#define _lv_role_set prop_not_implemented_set
#define _lv_role_get prop_not_implemented_get
#define _lv_initial_image_sync_set prop_not_implemented_set
#define _lv_initial_image_sync_get prop_not_implemented_get
#define _lv_image_synced_set prop_not_implemented_set
#define _lv_image_synced_get prop_not_implemented_get
#define _lv_merging_set prop_not_implemented_set
#define _lv_merging_get prop_not_implemented_get
#define _lv_converting_set prop_not_implemented_set
#define _lv_converting_get prop_not_implemented_get
#define _lv_permissions_set prop_not_implemented_set
#define _lv_permissions_get prop_not_implemented_get
#define _lv_allocation_policy_set prop_not_implemented_set
#define _lv_allocation_policy_get prop_not_implemented_get
#define _lv_allocation_locked_set prop_not_implemented_set
#define _lv_allocation_locked_get prop_not_implemented_get
#define _lv_active_locally_set prop_not_implemented_set
#define _lv_active_locally_get prop_not_implemented_get
#define _lv_active_remotely_set prop_not_implemented_set
#define _lv_active_remotely_get prop_not_implemented_get
#define _lv_active_exclusively_set prop_not_implemented_set
#define _lv_active_exclusively_get prop_not_implemented_get
#define _lv_fixed_minor_set prop_not_implemented_set
#define _lv_fixed_minor_get prop_not_implemented_get
#define _lv_merge_failed_set prop_not_implemented_set
#define _lv_merge_failed_get prop_not_implemented_get
#define _lv_snapshot_invalid_set prop_not_implemented_set
#define _lv_snapshot_invalid_get prop_not_implemented_get
#define _lv_suspended_set prop_not_implemented_set
#define _lv_suspended_get prop_not_implemented_get
#define _lv_live_table_set prop_not_implemented_set
#define _lv_live_table_get prop_not_implemented_get
#define _lv_inactive_table_set prop_not_implemented_set
#define _lv_inactive_table_get prop_not_implemented_get
#define _lv_device_open_set prop_not_implemented_set
#define _lv_device_open_get prop_not_implemented_get
#define _lv_health_status_set prop_not_implemented_set
#define _lv_health_status_get prop_not_implemented_get
#define _lv_skip_activation_set prop_not_implemented_set
#define _lv_skip_activation_get prop_not_implemented_get
#define _cache_total_blocks_set prop_not_implemented_set
#define _cache_total_blocks_get prop_not_implemented_get
#define _cache_used_blocks_set prop_not_implemented_set
#define _cache_used_blocks_get prop_not_implemented_get
#define _cache_dirty_blocks_set prop_not_implemented_set
#define _cache_dirty_blocks_get prop_not_implemented_get
#define _cache_read_hits_set prop_not_implemented_set
#define _cache_read_hits_get prop_not_implemented_get
#define _cache_read_misses_set prop_not_implemented_set
#define _cache_read_misses_get prop_not_implemented_get
#define _cache_write_hits_set prop_not_implemented_set
#define _cache_write_hits_get prop_not_implemented_get
#define _cache_write_misses_set prop_not_implemented_set
#define _cache_write_misses_get prop_not_implemented_get
/* LV */
GET_LV_STR_PROPERTY_FN(lv_uuid, lv_uuid_dup(lv))
#define _lv_uuid_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_name, lv_name_dup(lv->vg->vgmem, lv))
#define _lv_name_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_full_name, lv_fullname_dup(lv->vg->vgmem, lv))
#define _lv_full_name_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_path, lv_path_dup(lv->vg->vgmem, lv))
#define _lv_path_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_dm_path, lv_dmpath_dup(lv->vg->vgmem, lv))
#define _lv_dm_path_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_parent, lv_parent_dup(lv->vg->vgmem, lv))
#define _lv_parent_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_attr, lv_attr_dup(lv->vg->vgmem, lv))
#define _lv_attr_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(lv_major, lv->major)
#define _lv_major_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(lv_minor, lv->minor)
#define _lv_when_full_get prop_not_implemented_get
#define _lv_when_full_set prop_not_implemented_set
#define _lv_minor_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(lv_read_ahead, lv->read_ahead * SECTOR_SIZE)
#define _lv_read_ahead_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(lv_kernel_major, lv_kernel_major(lv))
#define _lv_kernel_major_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(lv_kernel_minor, lv_kernel_minor(lv))
#define _lv_kernel_minor_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(lv_kernel_read_ahead, lv_kernel_read_ahead(lv) * SECTOR_SIZE)
#define _lv_kernel_read_ahead_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(lv_size, lv->size * SECTOR_SIZE)
#define _lv_size_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(seg_count, dm_list_size(&lv->segments))
#define _seg_count_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(origin, lv_origin_dup(lv->vg->vgmem, lv))
#define _origin_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(origin_size, (SECTOR_SIZE * lv_origin_size(lv)))
#define _origin_size_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(snap_percent, _snap_percent(lv))
#define _snap_percent_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(copy_percent, _copy_percent(lv))
#define _copy_percent_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(sync_percent, _copy_percent(lv))
#define _sync_percent_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(raid_mismatch_count, _raidmismatchcount(lv))
#define _raid_mismatch_count_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(raid_write_behind, _raidwritebehind(lv))
#define _raid_write_behind_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(raid_min_recovery_rate, _raidminrecoveryrate(lv))
#define _raid_min_recovery_rate_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(raid_max_recovery_rate, _raidmaxrecoveryrate(lv))
#define _raid_max_recovery_rate_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(raid_sync_action, _raidsyncaction(lv))
#define _raid_sync_action_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(move_pv, lv_move_pv_dup(lv->vg->vgmem, lv))
#define _move_pv_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(convert_lv, lv_convert_lv_dup(lv->vg->vgmem, lv))
#define _convert_lv_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_tags, lv_tags_dup(lv))
#define _lv_tags_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(mirror_log, lv_mirror_log_dup(lv->vg->vgmem, lv))
#define _mirror_log_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_modules, lv_modules_dup(lv->vg->vgmem, lv))
#define _lv_modules_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(data_lv, lv_data_lv_dup(lv->vg->vgmem, lv))
#define _data_lv_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(metadata_lv, lv_metadata_lv_dup(lv->vg->vgmem, lv))
#define _metadata_lv_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(pool_lv, lv_pool_lv_dup(lv->vg->vgmem, lv))
#define _pool_lv_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(data_percent, _data_percent(lv))
#define _data_percent_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(metadata_percent, _metadata_percent(lv))
#define _metadata_percent_set prop_not_implemented_set
GET_LV_NUM_PROPERTY_FN(lv_metadata_size, lv_metadata_size(lv) * SECTOR_SIZE)
#define _lv_metadata_size_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_time, lv_time_dup(lv->vg->vgmem, lv))
#define _lv_time_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_host, lv_host_dup(lv->vg->vgmem, lv))
#define _lv_host_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_active, lv_active_dup(lv->vg->vgmem, lv))
#define _lv_active_set prop_not_implemented_set
GET_LV_STR_PROPERTY_FN(lv_profile, lv_profile_dup(lv->vg->vgmem, lv))
#define _lv_profile_set prop_not_implemented_set
/* VG */
GET_VG_STR_PROPERTY_FN(vg_fmt, vg_fmt_dup(vg))
#define _vg_fmt_set prop_not_implemented_set
GET_VG_STR_PROPERTY_FN(vg_uuid, vg_uuid_dup(vg))
#define _vg_uuid_set prop_not_implemented_set
GET_VG_STR_PROPERTY_FN(vg_name, vg_name_dup(vg))
#define _vg_name_set prop_not_implemented_set
GET_VG_STR_PROPERTY_FN(vg_attr, vg_attr_dup(vg->vgmem, vg))
#define _vg_attr_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_size, (SECTOR_SIZE * vg_size(vg)))
#define _vg_size_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_free, (SECTOR_SIZE * vg_free(vg)))
#define _vg_free_set prop_not_implemented_set
GET_VG_STR_PROPERTY_FN(vg_sysid, vg_system_id_dup(vg))
#define _vg_sysid_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_extent_size, (SECTOR_SIZE * vg->extent_size))
#define _vg_extent_size_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_extent_count, vg->extent_count)
#define _vg_extent_count_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_free_count, vg->free_count)
#define _vg_free_count_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(max_lv, vg->max_lv)
#define _max_lv_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(max_pv, vg->max_pv)
#define _max_pv_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(pv_count, vg->pv_count)
#define _pv_count_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(lv_count, (vg_visible_lvs(vg)))
#define _lv_count_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(snap_count, (snapshot_count(vg)))
#define _snap_count_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_seqno, vg->seqno)
#define _vg_seqno_set prop_not_implemented_set
GET_VG_STR_PROPERTY_FN(vg_tags, vg_tags_dup(vg))
#define _vg_tags_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_mda_count, (vg_mda_count(vg)))
#define _vg_mda_count_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_mda_used_count, (vg_mda_used_count(vg)))
#define _vg_mda_used_count_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_mda_free, (SECTOR_SIZE * vg_mda_free(vg)))
#define _vg_mda_free_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_mda_size, (SECTOR_SIZE * vg_mda_size(vg)))
#define _vg_mda_size_set prop_not_implemented_set
GET_VG_NUM_PROPERTY_FN(vg_mda_copies, (vg_mda_copies(vg)))
SET_VG_NUM_PROPERTY_FN(vg_mda_copies, vg_set_mda_copies)
GET_VG_STR_PROPERTY_FN(vg_profile, vg_profile_dup(vg))
#define _vg_profile_set prop_not_implemented_set
/* LVSEG */
GET_LVSEG_STR_PROPERTY_FN(segtype, lvseg_segtype_dup(lvseg->lv->vg->vgmem, lvseg))
#define _segtype_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(stripes, lvseg->area_count)
#define _stripes_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(stripesize, (SECTOR_SIZE * lvseg->stripe_size))
#define _stripesize_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(stripe_size, (SECTOR_SIZE * lvseg->stripe_size))
#define _stripe_size_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(regionsize, (SECTOR_SIZE * lvseg->region_size))
#define _regionsize_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(region_size, (SECTOR_SIZE * lvseg->region_size))
#define _region_size_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(chunksize, (SECTOR_SIZE * lvseg_chunksize(lvseg)))
#define _chunksize_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(chunk_size, (SECTOR_SIZE * lvseg_chunksize(lvseg)))
#define _chunk_size_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(thin_count, dm_list_size(&lvseg->lv->segs_using_this_lv))
#define _thin_count_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(zero, lvseg->zero_new_blocks)
#define _zero_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(transaction_id, lvseg->transaction_id)
#define _transaction_id_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(thin_id, lvseg->device_id)
#define _thin_id_set prop_not_implemented_set
GET_LVSEG_STR_PROPERTY_FN(discards, lvseg_discards_dup(lvseg->lv->vg->vgmem, lvseg))
#define _discards_set prop_not_implemented_set
GET_LVSEG_STR_PROPERTY_FN(cachemode, lvseg_cachemode_dup(lvseg->lv->vg->vgmem, lvseg))
#define _cachemode_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(seg_start, (SECTOR_SIZE * lvseg_start(lvseg)))
#define _seg_start_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(seg_start_pe, lvseg->le)
#define _seg_start_pe_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(seg_size, (SECTOR_SIZE * lvseg_size(lvseg)))
#define _seg_size_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(seg_size_pe, lvseg->len)
#define _seg_size_pe_set prop_not_implemented_set
GET_LVSEG_STR_PROPERTY_FN(seg_tags, lvseg_tags_dup(lvseg))
#define _seg_tags_set prop_not_implemented_set
GET_LVSEG_STR_PROPERTY_FN(seg_pe_ranges,
                          lvseg_seg_pe_ranges(lvseg->lv->vg->vgmem, lvseg))
#define _seg_pe_ranges_set prop_not_implemented_set
GET_LVSEG_STR_PROPERTY_FN(devices, lvseg_devices(lvseg->lv->vg->vgmem, lvseg))
#define _devices_set prop_not_implemented_set
GET_LVSEG_STR_PROPERTY_FN(seg_monitor, lvseg_monitor_dup(lvseg->lv->vg->vgmem, lvseg))
#define _seg_monitor_set prop_not_implemented_set
#define _cache_policy_get prop_not_implemented_get
#define _cache_policy_set prop_not_implemented_set
#define _cache_settings_get prop_not_implemented_get
#define _cache_settings_set prop_not_implemented_set
/* PVSEG */
GET_PVSEG_NUM_PROPERTY_FN(pvseg_start, pvseg->pe)
#define _pvseg_start_set prop_not_implemented_set
GET_PVSEG_NUM_PROPERTY_FN(pvseg_size, (SECTOR_SIZE * pvseg->len))
#define _pvseg_size_set prop_not_implemented_set
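
/*
 * The property table: columns.h expands each field definition into an
 * lvm_property_type entry; the zero-filled entry terminates the array.
 */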
struct lvm_property_type _properties[] = {
#include "columns.h"
{ 0, "", 0, 0, 0, { .integer = 0 }, prop_not_implemented_get, prop_not_implemented_set },
};
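
/* Field type macros used by the columns.h expansion are no longer needed. */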
#undef STR
#undef NUM
#undef BIN
#undef SIZ
#undef PCT
#undef STR_LIST
#undef FIELD
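
/*
 * Public entry points: look up or set the named property in _properties,
 * restricted to the report object type(s) given.
 */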
int lvseg_get_property(const struct lv_segment *lvseg,
                       struct lvm_property_type *prop)
{
        return prop_get_property(_properties, lvseg, prop, SEGS);
}

int lv_get_property(const struct logical_volume *lv,
                    struct lvm_property_type *prop)
{
        return prop_get_property(_properties, lv, prop, LVS);
}

int vg_get_property(const struct volume_group *vg,
                    struct lvm_property_type *prop)
{
        return prop_get_property(_properties, vg, prop, VGS);
}

int pvseg_get_property(const struct pv_segment *pvseg,
                       struct lvm_property_type *prop)
{
        return prop_get_property(_properties, pvseg, prop, PVSEGS);
}

int pv_get_property(const struct physical_volume *pv,
                    struct lvm_property_type *prop)
{
        return prop_get_property(_properties, pv, prop, PVS | LABEL);
}

int lv_set_property(struct logical_volume *lv,
                    struct lvm_property_type *prop)
{
        return prop_set_property(_properties, lv, prop, LVS);
}

int vg_set_property(struct volume_group *vg,
                    struct lvm_property_type *prop)
{
        return prop_set_property(_properties, vg, prop, VGS);
}

int pv_set_property(struct physical_volume *pv,
                    struct lvm_property_type *prop)
{
        return prop_set_property(_properties, pv, prop, PVS | LABEL);
}