/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "lib.h"
#include "metadata.h"
#include "display.h"
#include "activate.h"
#include "toolcontext.h"
#include "segtype.h"
#include "str_list.h"
#include "lvmlockd.h"

#include <time.h>
#include <sys/utsname.h>

/* Cached uname(2) data, filled in lazily; _utsinit records whether it is valid. */
static struct utsname _utsname;
static int _utsinit = 0;
static char * _format_pvsegs ( struct dm_pool * mem , const struct lv_segment * seg ,
int range_format )
{
unsigned int s ;
const char * name = NULL ;
uint32_t extent = 0 ;
char extent_str [ 32 ] ;
if ( ! dm_pool_begin_object ( mem , 256 ) ) {
log_error ( " dm_pool_begin_object failed " ) ;
return NULL ;
}
for ( s = 0 ; s < seg - > area_count ; s + + ) {
switch ( seg_type ( seg , s ) ) {
case AREA_LV :
name = seg_lv ( seg , s ) - > name ;
extent = seg_le ( seg , s ) ;
break ;
case AREA_PV :
name = dev_name ( seg_dev ( seg , s ) ) ;
extent = seg_pe ( seg , s ) ;
break ;
case AREA_UNASSIGNED :
name = " unassigned " ;
extent = 0 ;
2012-02-27 15:13:48 +04:00
break ;
2012-02-13 15:25:56 +04:00
default :
log_error ( INTERNAL_ERROR " Unknown area segtype. " ) ;
return NULL ;
2011-04-12 16:24:29 +04:00
}
if ( ! dm_pool_grow_object ( mem , name , strlen ( name ) ) ) {
log_error ( " dm_pool_grow_object failed " ) ;
return NULL ;
}
if ( dm_snprintf ( extent_str , sizeof ( extent_str ) ,
" %s% " PRIu32 " %s " ,
range_format ? " : " : " ( " , extent ,
range_format ? " - " : " ) " ) < 0 ) {
log_error ( " Extent number dm_snprintf failed " ) ;
return NULL ;
}
if ( ! dm_pool_grow_object ( mem , extent_str , strlen ( extent_str ) ) ) {
log_error ( " dm_pool_grow_object failed " ) ;
return NULL ;
}
if ( range_format ) {
if ( dm_snprintf ( extent_str , sizeof ( extent_str ) ,
2015-07-06 17:09:17 +03:00
FMTu32 , extent + seg - > area_len - 1 ) < 0 ) {
2011-04-12 16:24:29 +04:00
log_error ( " Extent number dm_snprintf failed " ) ;
return NULL ;
}
if ( ! dm_pool_grow_object ( mem , extent_str , strlen ( extent_str ) ) ) {
log_error ( " dm_pool_grow_object failed " ) ;
return NULL ;
}
}
if ( ( s ! = seg - > area_count - 1 ) & &
! dm_pool_grow_object ( mem , range_format ? " " : " , " , 1 ) ) {
log_error ( " dm_pool_grow_object failed " ) ;
return NULL ;
}
}
if ( ! dm_pool_grow_object ( mem , " \0 " , 1 ) ) {
log_error ( " dm_pool_grow_object failed " ) ;
return NULL ;
}
return dm_pool_end_object ( mem ) ;
}
/* Report the devices of a segment: "dev1(pe),dev2(pe),..." */
char *lvseg_devices(struct dm_pool *mem, const struct lv_segment *seg)
{
	return _format_pvsegs(mem, seg, 0);
}
/* Report the PE ranges of a segment: "dev1:start-end dev2:start-end ..." */
char *lvseg_seg_pe_ranges(struct dm_pool *mem, const struct lv_segment *seg)
{
	return _format_pvsegs(mem, seg, 1);
}
char * lvseg_tags_dup ( const struct lv_segment * seg )
{
return tags_format_and_copy ( seg - > lv - > vg - > vgmem , & seg - > tags ) ;
}
2011-03-05 15:14:00 +03:00
/* Duplicate the (possibly dynamic) segment type name into @mem. */
char *lvseg_segtype_dup(struct dm_pool *mem, const struct lv_segment *seg)
{
	const char *segtype_name = lvseg_name(seg);

	return dm_pool_strdup(mem, segtype_name);
}
char * lvseg_discards_dup ( struct dm_pool * mem , const struct lv_segment * seg )
{
return dm_pool_strdup ( mem , get_pool_discards_name ( seg - > discards ) ) ;
}
2014-10-02 01:06:01 +04:00
/* Duplicate the cache mode name of a cache(-pool) segment; NULL on error. */
char *lvseg_cachemode_dup(struct dm_pool *mem, const struct lv_segment *seg)
{
	const char *cachemode = get_cache_pool_cachemode_name(seg);

	if (!cachemode)
		return_NULL;

	return dm_pool_strdup(mem, cachemode);
}
#ifdef DMEVENTD
#include "libdevmapper-event.h"
#endif

/*
 * Report the dmeventd monitoring state of a segment as a string:
 * "" (not applicable), "not monitored", "pending" or "monitored".
 * Without DMEVENTD support this always reports "".
 */
char *lvseg_monitor_dup(struct dm_pool *mem, const struct lv_segment *seg)
{
	const char *s = "";

#ifdef DMEVENTD
	struct lvinfo info;
	int pending = 0, monitored;
	struct lv_segment *segm = (struct lv_segment *) seg;

	/* For a snapshot COW (not merging), monitoring lives on the snapshot LV. */
	if (lv_is_cow(seg->lv) && !lv_is_merging_cow(seg->lv))
		segm = first_seg(seg->lv->snapshot->lv);

	if ((dmeventd_monitor_mode() != 1) ||
	    !segm->segtype->ops ||
	    !segm->segtype->ops->target_monitored)
		/* Nothing to do, monitoring not supported */;
	else if (lv_is_cow_covering_origin(seg->lv))
		/* Nothing to do, snapshot already covers origin */;
	else if (!seg_monitored(segm) || (segm->status & PVMOVE))
		s = "not monitored";
	else if (lv_info(seg->lv->vg->cmd, seg->lv, 1, &info, 0, 0) && info.exists) {
		/* Active: ask the target whether it is being monitored. */
		monitored = segm->segtype->ops->target_monitored(segm, &pending);
		if (pending)
			s = "pending";
		else
			s = monitored ? "monitored" : "not monitored";
	}
#endif

	return dm_pool_strdup(mem, s);
}
uint64_t lvseg_chunksize ( const struct lv_segment * seg )
{
uint64_t size ;
if ( lv_is_cow ( seg - > lv ) )
2013-07-03 00:26:03 +04:00
size = ( uint64_t ) find_snapshot ( seg - > lv ) - > chunk_size ;
2014-05-23 16:24:28 +04:00
else if ( seg_is_pool ( seg ) )
2012-01-24 04:55:03 +04:00
size = ( uint64_t ) seg - > chunk_size ;
2014-11-10 23:29:32 +03:00
else if ( seg_is_cache ( seg ) )
return lvseg_chunksize ( first_seg ( seg - > pool_lv ) ) ;
2010-11-17 23:08:14 +03:00
else
size = UINT64_C ( 0 ) ;
2012-01-19 19:34:32 +04:00
2010-11-17 23:08:14 +03:00
return size ;
}
2014-10-20 20:40:39 +04:00
const char * lvseg_name ( const struct lv_segment * seg )
{
/* Support even segtypes without 'ops' */
if ( seg - > segtype - > ops & &
seg - > segtype - > ops - > name )
return seg - > segtype - > ops - > name ( seg ) ;
return seg - > segtype - > name ;
}
2010-11-17 23:08:14 +03:00
uint64_t lvseg_start ( const struct lv_segment * seg )
{
return ( uint64_t ) seg - > le * seg - > lv - > vg - > extent_size ;
}
uint64_t lvseg_size ( const struct lv_segment * seg )
{
return ( uint64_t ) seg - > len * seg - > lv - > vg - > extent_size ;
}
2010-10-21 18:49:31 +04:00
uint32_t lv_kernel_read_ahead ( const struct logical_volume * lv )
{
struct lvinfo info ;
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , & info , 0 , 1 ) | | ! info . exists )
return UINT32_MAX ;
return info . read_ahead ;
}
2010-10-21 18:49:20 +04:00
char * lv_origin_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
if ( lv_is_cow ( lv ) )
return lv_name_dup ( mem , origin_from_cow ( lv ) ) ;
2012-10-12 14:02:15 +04:00
2014-02-05 19:44:37 +04:00
if ( lv_is_cache ( lv ) & & first_seg ( lv ) - > origin )
return lv_name_dup ( mem , first_seg ( lv ) - > origin ) ;
2012-10-12 14:02:15 +04:00
if ( lv_is_thin_volume ( lv ) & & first_seg ( lv ) - > origin )
return lv_name_dup ( mem , first_seg ( lv ) - > origin ) ;
2013-01-15 18:16:16 +04:00
if ( lv_is_thin_volume ( lv ) & & first_seg ( lv ) - > external_lv )
return lv_name_dup ( mem , first_seg ( lv ) - > external_lv ) ;
2010-10-21 18:49:20 +04:00
return NULL ;
}
2010-10-21 18:49:10 +04:00
char * lv_name_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
return dm_pool_strdup ( mem , lv - > name ) ;
}
2014-07-02 20:24:05 +04:00
char * lv_fullname_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
char lvfullname [ NAME_LEN * 2 + 2 ] ;
if ( dm_snprintf ( lvfullname , sizeof ( lvfullname ) , " %s/%s " , lv - > vg - > name , lv - > name ) < 0 ) {
log_error ( " lvfullname snprintf failed " ) ;
return NULL ;
}
return dm_pool_strdup ( mem , lvfullname ) ;
}
2014-07-04 04:13:51 +04:00
struct logical_volume * lv_parent ( const struct logical_volume * lv )
2014-07-04 02:49:34 +04:00
{
2014-07-04 04:13:51 +04:00
struct logical_volume * parent_lv = NULL ;
2014-07-04 02:49:34 +04:00
if ( lv_is_visible ( lv ) )
;
else if ( lv_is_mirror_image ( lv ) | | lv_is_mirror_log ( lv ) )
2014-07-04 04:13:51 +04:00
parent_lv = get_only_segment_using_this_lv ( lv ) - > lv ;
2014-07-04 02:49:34 +04:00
else if ( lv_is_raid_image ( lv ) | | lv_is_raid_metadata ( lv ) )
2014-07-04 04:13:51 +04:00
parent_lv = get_only_segment_using_this_lv ( lv ) - > lv ;
2014-07-04 02:49:34 +04:00
else if ( lv_is_cache_pool_data ( lv ) | | lv_is_cache_pool_metadata ( lv ) )
2014-07-04 04:13:51 +04:00
parent_lv = get_only_segment_using_this_lv ( lv ) - > lv ;
2014-07-04 02:49:34 +04:00
else if ( lv_is_thin_pool_data ( lv ) | | lv_is_thin_pool_metadata ( lv ) )
2014-07-04 04:13:51 +04:00
parent_lv = get_only_segment_using_this_lv ( lv ) - > lv ;
return parent_lv ;
}
char * lv_parent_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
struct logical_volume * parent_lv = lv_parent ( lv ) ;
2014-07-04 02:49:34 +04:00
2014-07-04 04:13:51 +04:00
return dm_pool_strdup ( mem , parent_lv ? parent_lv - > name : " " ) ;
2014-07-04 02:49:34 +04:00
}
2010-10-12 20:13:06 +04:00
/* Duplicate the list of kernel modules the LV needs as one string. */
char *lv_modules_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
	struct dm_list *modules;

	if (!(modules = str_list_create(mem))) {
		log_error("modules str_list allocation failed");
		return NULL;
	}

	if (!list_lv_modules(mem, lv, modules))
		return_NULL;

	/* Module names share the tag formatting ("a,b,c"). */
	return tags_format_and_copy(mem, modules);
}
char * lv_mirror_log_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
struct lv_segment * seg ;
2012-01-20 14:56:30 +04:00
dm_list_iterate_items ( seg , & lv - > segments )
if ( seg_is_mirrored ( seg ) & & seg - > log_lv )
return dm_pool_strdup ( mem , seg - > log_lv - > name ) ;
2010-10-12 20:12:50 +04:00
return NULL ;
}
2010-10-12 20:11:34 +04:00
2011-09-09 04:54:49 +04:00
char * lv_pool_lv_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
struct lv_segment * seg ;
2012-01-20 14:56:30 +04:00
dm_list_iterate_items ( seg , & lv - > segments )
2014-02-05 19:44:37 +04:00
if ( seg - > pool_lv & &
( seg_is_thin_volume ( seg ) | | seg_is_cache ( seg ) ) )
2012-01-20 14:56:30 +04:00
return dm_pool_strdup ( mem , seg - > pool_lv - > name ) ;
2011-09-09 04:54:49 +04:00
return NULL ;
}
2012-01-19 19:34:32 +04:00
char * lv_data_lv_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
2014-02-05 19:44:37 +04:00
struct lv_segment * seg = ( lv_is_thin_pool ( lv ) | | lv_is_cache_pool ( lv ) ) ?
first_seg ( lv ) : NULL ;
2013-06-15 00:02:12 +04:00
return seg ? dm_pool_strdup ( mem , seg_lv ( seg , 0 ) - > name ) : NULL ;
2012-01-19 19:34:32 +04:00
}
char * lv_metadata_lv_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
2014-02-05 19:44:37 +04:00
struct lv_segment * seg = ( lv_is_thin_pool ( lv ) | | lv_is_cache_pool ( lv ) ) ?
first_seg ( lv ) : NULL ;
2013-06-15 00:02:12 +04:00
return seg ? dm_pool_strdup ( mem , seg - > metadata_lv - > name ) : NULL ;
2012-01-19 19:34:32 +04:00
}
2013-02-01 14:09:34 +04:00
/* Device-mapper layer suffix for the LV ("tpool"/"real"), or NULL. */
const char *lv_layer(const struct logical_volume *lv)
{
	if (lv_is_thin_pool(lv))
		return "tpool";

	if (lv_is_origin(lv) || lv_is_external_origin(lv))
		return "real";

	return NULL;
}
int lv_kernel_minor ( const struct logical_volume * lv )
{
struct lvinfo info ;
if ( lv_info ( lv - > vg - > cmd , lv , 0 , & info , 0 , 0 ) & & info . exists )
return info . minor ;
return - 1 ;
}
int lv_kernel_major ( const struct logical_volume * lv )
{
struct lvinfo info ;
if ( lv_info ( lv - > vg - > cmd , lv , 0 , & info , 0 , 0 ) & & info . exists )
return info . major ;
return - 1 ;
}
2010-10-12 20:12:18 +04:00
char * lv_convert_lv_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
struct lv_segment * seg ;
2014-09-16 00:33:53 +04:00
if ( lv_is_converting ( lv ) | | lv_is_mirrored ( lv ) ) {
2010-10-12 20:12:18 +04:00
seg = first_seg ( lv ) ;
/* Temporary mirror is always area_num == 0 */
if ( seg_type ( seg , 0 ) = = AREA_LV & &
is_temporary_mirror_layer ( seg_lv ( seg , 0 ) ) )
return dm_pool_strdup ( mem , seg_lv ( seg , 0 ) - > name ) ;
}
return NULL ;
}
2010-10-12 20:12:02 +04:00
char * lv_move_pv_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
2014-06-19 19:57:08 +04:00
struct logical_volume * mimage0_lv ;
2010-10-12 20:12:02 +04:00
struct lv_segment * seg ;
2014-06-19 19:57:08 +04:00
const struct device * dev ;
dm_list_iterate_items ( seg , & lv - > segments ) {
if ( seg - > status & PVMOVE ) {
if ( seg_type ( seg , 0 ) = = AREA_LV ) { /* atomic pvmove */
mimage0_lv = seg_lv ( seg , 0 ) ;
2014-09-16 03:13:46 +04:00
if ( ! lv_is_mirror_image ( mimage0_lv ) ) {
2014-06-19 19:57:08 +04:00
log_error ( INTERNAL_ERROR
" Bad pvmove structure " ) ;
return NULL ;
}
dev = seg_dev ( first_seg ( mimage0_lv ) , 0 ) ;
} else /* Segment pvmove */
dev = seg_dev ( seg , 0 ) ;
return dm_pool_strdup ( mem , dev_name ( dev ) ) ;
}
}
2013-06-15 00:02:12 +04:00
2010-10-12 20:12:02 +04:00
return NULL ;
}
2010-10-12 20:11:48 +04:00
uint64_t lv_origin_size ( const struct logical_volume * lv )
{
2013-06-15 00:02:12 +04:00
struct lv_segment * seg ;
2010-10-12 20:11:48 +04:00
if ( lv_is_cow ( lv ) )
2013-07-03 00:26:03 +04:00
return ( uint64_t ) find_snapshot ( lv ) - > len * lv - > vg - > extent_size ;
2013-06-15 00:02:12 +04:00
if ( lv_is_thin_volume ( lv ) & & ( seg = first_seg ( lv ) ) & &
seg - > external_lv )
return seg - > external_lv - > size ;
2010-10-12 20:11:48 +04:00
if ( lv_is_origin ( lv ) )
return lv - > size ;
2013-06-15 00:02:12 +04:00
2010-10-12 20:11:48 +04:00
return 0 ;
}
2012-01-19 19:34:32 +04:00
uint64_t lv_metadata_size ( const struct logical_volume * lv )
{
2014-02-05 19:44:37 +04:00
struct lv_segment * seg = ( lv_is_thin_pool ( lv ) | | lv_is_cache_pool ( lv ) ) ?
first_seg ( lv ) : NULL ;
2013-06-15 00:02:12 +04:00
return seg ? seg - > metadata_lv - > size : 0 ;
2012-01-19 19:34:32 +04:00
}
2010-10-12 20:11:34 +04:00
char * lv_path_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
char * repstr ;
size_t len ;
2014-07-02 17:57:00 +04:00
/* Only for visible devices that get a link from /dev/vg */
if ( ! * lv - > vg - > name | | ! lv_is_visible ( lv ) | | lv_is_thin_pool ( lv ) )
2011-03-09 15:44:42 +03:00
return dm_pool_strdup ( mem , " " ) ;
2010-10-12 20:11:34 +04:00
len = strlen ( lv - > vg - > cmd - > dev_dir ) + strlen ( lv - > vg - > name ) +
strlen ( lv - > name ) + 2 ;
if ( ! ( repstr = dm_pool_zalloc ( mem , len ) ) ) {
log_error ( " dm_pool_alloc failed " ) ;
2014-07-02 20:24:05 +04:00
return NULL ;
2010-10-12 20:11:34 +04:00
}
if ( dm_snprintf ( repstr , len , " %s%s/%s " ,
lv - > vg - > cmd - > dev_dir , lv - > vg - > name , lv - > name ) < 0 ) {
log_error ( " lvpath snprintf failed " ) ;
2014-07-02 20:24:05 +04:00
return NULL ;
}
return repstr ;
}
/*
 * Duplicate the device-mapper path "/dev/mapper/vg-lv" of the LV; an
 * empty string for nameless VGs, NULL on error.
 */
char *lv_dmpath_dup(struct dm_pool *mem, const struct logical_volume *lv)
{
	char *name;
	char *repstr;
	size_t len;

	if (!*lv->vg->name)
		return dm_pool_strdup(mem, "");

	/* Builds the hyphen-escaped "vg-lv" dm name. */
	if (!(name = dm_build_dm_name(mem, lv->vg->name, lv->name, NULL))) {
		log_error("dm_build_dm_name failed");
		return NULL;
	}

	len = strlen(dm_dir()) + strlen(name) + 2;

	if (!(repstr = dm_pool_zalloc(mem, len))) {
		log_error("dm_pool_alloc failed");
		return NULL;
	}

	if (dm_snprintf(repstr, len, "%s/%s", dm_dir(), name) < 0) {
		log_error("lv_dmpath snprintf failed");
		return NULL;
	}

	return repstr;
}
char * lv_uuid_dup ( const struct logical_volume * lv )
{
return id_format_and_copy ( lv - > vg - > vgmem , & lv - > lvid . id [ 1 ] ) ;
}
2010-09-30 18:08:19 +04:00
char * lv_tags_dup ( const struct logical_volume * lv )
{
return tags_format_and_copy ( lv - > vg - > vgmem , & lv - > tags ) ;
}
2010-09-30 17:16:55 +04:00
uint64_t lv_size ( const struct logical_volume * lv )
{
return lv - > size ;
}
2010-09-30 17:52:55 +04:00
2014-07-01 11:56:03 +04:00
int lv_mirror_image_in_sync ( const struct logical_volume * lv )
2010-09-30 17:52:55 +04:00
{
2014-06-09 14:08:27 +04:00
dm_percent_t percent ;
2014-02-22 04:26:01 +04:00
struct lv_segment * seg = first_seg ( lv ) ;
struct lv_segment * mirror_seg ;
2010-09-30 17:52:55 +04:00
2014-02-25 12:34:02 +04:00
if ( ! ( lv - > status & MIRROR_IMAGE ) | | ! seg | |
! ( mirror_seg = find_mirror_seg ( seg ) ) ) {
log_error ( INTERNAL_ERROR " Cannot find mirror segment. " ) ;
return 0 ;
}
2010-09-30 17:52:55 +04:00
if ( ! lv_mirror_percent ( lv - > vg - > cmd , mirror_seg - > lv , 0 , & percent ,
2010-11-30 14:53:31 +03:00
NULL ) )
2010-09-30 17:52:55 +04:00
return_0 ;
2014-06-09 14:08:27 +04:00
return ( percent = = DM_PERCENT_100 ) ? 1 : 0 ;
2010-09-30 17:52:55 +04:00
}
2014-07-01 11:56:03 +04:00
int lv_raid_image_in_sync ( const struct logical_volume * lv )
2011-09-23 19:17:54 +04:00
{
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
unsigned s ;
2014-06-09 14:08:27 +04:00
dm_percent_t percent ;
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
char * raid_health ;
2014-02-22 04:26:01 +04:00
struct lv_segment * seg , * raid_seg = NULL ;
2011-09-23 19:17:54 +04:00
2013-05-16 19:36:56 +04:00
/*
* If the LV is not active locally ,
* it doesn ' t make sense to check status
*/
if ( ! lv_is_active_locally ( lv ) )
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
return 0 ; /* Assume not in-sync */
2014-09-16 00:33:53 +04:00
if ( ! lv_is_raid_image ( lv ) ) {
2011-09-23 19:17:54 +04:00
log_error ( INTERNAL_ERROR " %s is not a RAID image " , lv - > name ) ;
return 0 ;
}
2014-02-22 04:26:01 +04:00
if ( ( seg = first_seg ( lv ) ) )
raid_seg = get_only_segment_using_this_lv ( seg - > lv ) ;
2011-09-23 19:17:54 +04:00
if ( ! raid_seg ) {
log_error ( " Failed to find RAID segment for %s " , lv - > name ) ;
return 0 ;
}
if ( ! seg_is_raid ( raid_seg ) ) {
log_error ( " %s on %s is not a RAID segment " ,
raid_seg - > lv - > name , lv - > name ) ;
return 0 ;
}
if ( ! lv_raid_percent ( raid_seg - > lv , & percent ) )
return_0 ;
2014-06-09 14:08:27 +04:00
if ( percent = = DM_PERCENT_100 )
2011-09-23 19:17:54 +04:00
return 1 ;
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
/* Find out which sub-LV this is. */
for ( s = 0 ; s < raid_seg - > area_count ; s + + )
if ( seg_lv ( raid_seg , s ) = = lv )
break ;
if ( s = = raid_seg - > area_count ) {
log_error ( INTERNAL_ERROR
" sub-LV %s was not found in raid segment " ,
lv - > name ) ;
return 0 ;
}
if ( ! lv_raid_dev_health ( raid_seg - > lv , & raid_health ) )
return_0 ;
if ( raid_health [ s ] = = ' A ' )
return 1 ;
2011-09-23 19:17:54 +04:00
return 0 ;
}
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
/*
 * lv_raid_healthy
 * @ lv : A RAID_IMAGE , RAID_META , or RAID logical volume .
 *
 * Returns : 1 if healthy , 0 if device is not healthy
 */
2014-07-01 11:56:03 +04:00
int lv_raid_healthy ( const struct logical_volume * lv )
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
{
unsigned s ;
char * raid_health ;
2014-02-22 04:26:01 +04:00
struct lv_segment * seg , * raid_seg = NULL ;
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
2013-05-16 19:36:56 +04:00
/*
* If the LV is not active locally ,
* it doesn ' t make sense to check status
*/
if ( ! lv_is_active_locally ( lv ) )
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
return 1 ; /* assume healthy */
if ( ! lv_is_raid_type ( lv ) ) {
log_error ( INTERNAL_ERROR " %s is not of RAID type " , lv - > name ) ;
return 0 ;
}
2014-09-16 00:33:53 +04:00
if ( lv_is_raid ( lv ) )
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
raid_seg = first_seg ( lv ) ;
2014-02-22 04:26:01 +04:00
else if ( ( seg = first_seg ( lv ) ) )
raid_seg = get_only_segment_using_this_lv ( seg - > lv ) ;
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
if ( ! raid_seg ) {
log_error ( " Failed to find RAID segment for %s " , lv - > name ) ;
return 0 ;
}
if ( ! seg_is_raid ( raid_seg ) ) {
log_error ( " %s on %s is not a RAID segment " ,
raid_seg - > lv - > name , lv - > name ) ;
return 0 ;
}
if ( ! lv_raid_dev_health ( raid_seg - > lv , & raid_health ) )
return_0 ;
2014-09-16 00:33:53 +04:00
if ( lv_is_raid ( lv ) ) {
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
if ( strchr ( raid_health , ' D ' ) )
return 0 ;
else
return 1 ;
}
/* Find out which sub-LV this is. */
for ( s = 0 ; s < raid_seg - > area_count ; s + + )
2014-09-16 00:33:53 +04:00
if ( ( lv_is_raid_image ( lv ) & & ( seg_lv ( raid_seg , s ) = = lv ) ) | |
( lv_is_raid_metadata ( lv ) & & ( seg_metalv ( raid_seg , s ) = = lv ) ) )
RAID: Improve 'lvs' attribute reporting of RAID LVs and sub-LVs
There are currently a few issues with the reporting done on RAID LVs and
sub-LVs. The most concerning is that 'lvs' does not always report the
correct failure status of individual RAID sub-LVs (devices). This can
occur when a device fails and is restored after the failure has been
detected by the kernel. In this case, 'lvs' would report all devices are
fine because it can read the labels on each device just fine.
Example:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
However, 'dmsetup status' on the device tells us a different story:
[root@bp-01 lvm2]# dmsetup status vg-lv
0 1024000 raid raid1 2 DA 1024000/1024000
In this case, we must also be sure to check the RAID LVs kernel status
in order to get the proper information. Here is an example of the correct
output that is displayed after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-p 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-p /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-p /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
The other case where 'lvs' gives incomplete or improper output is when a
device is replaced or added to a RAID LV. It should display that the RAID
LV is in the process of sync'ing and that the new device is the only one
that is not-in-sync - as indicated by a leading 'I' in the Attr column.
(Remember that 'i' indicates an (i)mage that is in-sync and 'I' indicates
an (I)mage that is not in sync.) Here's an example of the old incorrect
behaviour:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg Iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0) ** Note that all the images currently are marked as 'I' even though it is
only the last device that has been added that should be marked.
Here is an example of the correct output after this patch is applied:
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 100.00 lv_rimage_0(0),lv_rimage_1(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[root@bp-01 lvm2]# lvconvert -m +1 vg/lv; lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg rwi-a-r-- 0.00 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg iwi-aor-- /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-- /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
** Note only the last image is marked with an 'I'. This is correct and we can
tell that it isn't the whole array that is sync'ing, but just the new
device.
It also works under snapshots...
[root@bp-01 lvm2]# lvs -a -o name,vg_name,attr,copy_percent,devices vg
LV VG Attr Cpy%Sync Devices
lv vg owi-a-r-p 33.47 lv_rimage_0(0),lv_rimage_1(0),lv_rimage_2(0)
[lv_rimage_0] vg iwi-aor-- /dev/sda1(1)
[lv_rimage_1] vg Iwi-aor-p /dev/sdb1(1)
[lv_rimage_2] vg Iwi-aor-- /dev/sdc1(1)
[lv_rmeta_0] vg ewi-aor-- /dev/sda1(0)
[lv_rmeta_1] vg ewi-aor-p /dev/sdb1(0)
[lv_rmeta_2] vg ewi-aor-- /dev/sdc1(0)
snap vg swi-a-s-- /dev/sda1(51201)
2013-02-01 21:33:54 +04:00
break ;
if ( s = = raid_seg - > area_count ) {
log_error ( INTERNAL_ERROR
" sub-LV %s was not found in raid segment " ,
lv - > name ) ;
return 0 ;
}
if ( raid_health [ s ] = = ' D ' )
return 0 ;
return 1 ;
}
2015-01-20 15:14:16 +03:00
char * lv_attr_dup_with_info_and_seg_status ( struct dm_pool * mem , const struct lv_with_info_and_seg_status * lvdm )
2010-09-30 17:52:55 +04:00
{
2014-06-09 14:08:27 +04:00
dm_percent_t snap_percent ;
2015-01-20 15:14:16 +03:00
const struct logical_volume * lv = lvdm - > lv ;
2011-09-09 05:15:18 +04:00
struct lv_segment * seg ;
2010-09-30 17:52:55 +04:00
char * repstr ;
2013-07-11 16:05:05 +04:00
if ( ! ( repstr = dm_pool_zalloc ( mem , 11 ) ) ) {
2010-09-30 17:52:55 +04:00
log_error ( " dm_pool_alloc failed " ) ;
return 0 ;
}
/* Blank if this is a "free space" LV. */
if ( ! * lv - > name )
goto out ;
2014-09-16 00:33:53 +04:00
if ( lv_is_pvmove ( lv ) )
2010-09-30 17:52:55 +04:00
repstr [ 0 ] = ' p ' ;
else if ( lv - > status & CONVERTING )
repstr [ 0 ] = ' c ' ;
2011-09-09 00:55:39 +04:00
/* Origin takes precedence over mirror and thin volume */
2013-06-05 15:45:31 +04:00
else if ( lv_is_origin ( lv ) | | lv_is_external_origin ( lv ) )
2010-09-30 18:07:19 +04:00
repstr [ 0 ] = ( lv_is_merging_origin ( lv ) ) ? ' O ' : ' o ' ;
2014-07-08 00:26:56 +04:00
else if ( lv_is_pool_metadata ( lv ) | |
lv_is_pool_metadata_spare ( lv ) | |
lv_is_raid_metadata ( lv ) )
2014-02-05 19:44:37 +04:00
repstr [ 0 ] = ' e ' ;
else if ( lv_is_cache_type ( lv ) )
repstr [ 0 ] = ' C ' ;
2014-09-16 00:33:53 +04:00
else if ( lv_is_raid ( lv ) )
2011-09-23 19:17:54 +04:00
repstr [ 0 ] = ( lv - > status & LV_NOTSYNCED ) ? ' R ' : ' r ' ;
2014-09-16 03:13:46 +04:00
else if ( lv_is_mirror ( lv ) )
2011-03-29 16:51:57 +04:00
repstr [ 0 ] = ( lv - > status & LV_NOTSYNCED ) ? ' M ' : ' m ' ;
2011-09-09 00:55:39 +04:00
else if ( lv_is_thin_volume ( lv ) )
2013-11-29 18:54:51 +04:00
repstr [ 0 ] = lv_is_merging_origin ( lv ) ?
' O ' : ( lv_is_merging_thin_snapshot ( lv ) ? ' S ' : ' V ' ) ;
2014-09-16 00:33:53 +04:00
else if ( lv_is_virtual ( lv ) )
2011-09-09 00:55:39 +04:00
repstr [ 0 ] = ' v ' ;
else if ( lv_is_thin_pool ( lv ) )
repstr [ 0 ] = ' t ' ;
else if ( lv_is_thin_pool_data ( lv ) )
repstr [ 0 ] = ' T ' ;
2014-09-16 00:33:53 +04:00
else if ( lv_is_mirror_image ( lv ) )
2014-07-01 11:56:03 +04:00
repstr [ 0 ] = ( lv_mirror_image_in_sync ( lv ) ) ? ' i ' : ' I ' ;
2014-09-16 00:33:53 +04:00
else if ( lv_is_raid_image ( lv ) )
2013-10-14 19:48:44 +04:00
/*
* Visible RAID_IMAGES are sub - LVs that have been exposed for
* top - level use by being split from the RAID array with
* ' - - splitmirrors 1 - - trackchanges ' . They always report ' I ' .
*/
2014-07-01 11:56:03 +04:00
repstr [ 0 ] = ( ! lv_is_visible ( lv ) & & lv_raid_image_in_sync ( lv ) ) ?
2013-10-14 19:48:44 +04:00
' i ' : ' I ' ;
2014-09-16 00:33:53 +04:00
else if ( lv_is_mirror_log ( lv ) )
2010-09-30 17:52:55 +04:00
repstr [ 0 ] = ' l ' ;
2013-06-11 15:45:36 +04:00
else if ( lv_is_cow ( lv ) )
2010-09-30 18:07:19 +04:00
repstr [ 0 ] = ( lv_is_merging_cow ( lv ) ) ? ' S ' : ' s ' ;
2014-08-15 15:21:29 +04:00
else if ( lv_is_cache_origin ( lv ) )
repstr [ 0 ] = ' o ' ;
2013-06-11 15:45:36 +04:00
else
2010-09-30 17:52:55 +04:00
repstr [ 0 ] = ' - ' ;
2014-09-16 00:33:53 +04:00
if ( lv_is_pvmove ( lv ) )
2010-09-30 17:52:55 +04:00
repstr [ 1 ] = ' - ' ;
else if ( lv - > status & LVM_WRITE )
repstr [ 1 ] = ' w ' ;
else if ( lv - > status & LVM_READ )
repstr [ 1 ] = ' r ' ;
else
repstr [ 1 ] = ' - ' ;
repstr [ 2 ] = alloc_policy_char ( lv - > alloc ) ;
2014-09-16 00:33:53 +04:00
if ( lv_is_locked ( lv ) )
2010-09-30 17:52:55 +04:00
repstr [ 2 ] = toupper ( repstr [ 2 ] ) ;
2010-09-30 18:07:19 +04:00
repstr [ 3 ] = ( lv - > status & FIXED_MINOR ) ? ' m ' : ' - ' ;
2010-09-30 17:52:55 +04:00
2015-01-20 15:14:16 +03:00
if ( ! activation ( ) | | ! lvdm - > info_ok ) {
2014-04-18 05:23:39 +04:00
repstr [ 4 ] = ' X ' ; /* Unknown */
repstr [ 5 ] = ' X ' ; /* Unknown */
2015-01-20 15:14:16 +03:00
} else if ( lvdm - > info . exists ) {
if ( lvdm - > info . suspended )
2010-09-30 17:52:55 +04:00
repstr [ 4 ] = ' s ' ; /* Suspended */
2015-01-20 15:14:16 +03:00
else if ( lvdm - > info . live_table )
2010-09-30 17:52:55 +04:00
repstr [ 4 ] = ' a ' ; /* Active */
2015-01-20 15:14:16 +03:00
else if ( lvdm - > info . inactive_table )
2010-09-30 17:52:55 +04:00
repstr [ 4 ] = ' i ' ; /* Inactive with table */
else
repstr [ 4 ] = ' d ' ; /* Inactive without table */
/* Snapshot dropped? */
2015-01-20 15:14:16 +03:00
if ( lvdm - > info . live_table & & lv_is_cow ( lv ) ) {
2012-01-21 02:03:03 +04:00
if ( ! lv_snapshot_percent ( lv , & snap_percent ) | |
2014-06-09 14:08:27 +04:00
snap_percent = = DM_PERCENT_INVALID ) {
2015-01-20 15:14:16 +03:00
if ( lvdm - > info . suspended )
2012-01-21 02:03:03 +04:00
repstr [ 4 ] = ' S ' ; /* Susp Inv snapshot */
else
repstr [ 4 ] = ' I ' ; /* Invalid snapshot */
}
2014-06-09 14:08:27 +04:00
else if ( snap_percent = = LVM_PERCENT_MERGE_FAILED ) {
2015-01-20 15:14:16 +03:00
if ( lvdm - > info . suspended )
2012-01-21 02:03:03 +04:00
repstr [ 4 ] = ' M ' ; /* Susp snapshot merge failed */
else
repstr [ 4 ] = ' m ' ; /* snapshot merge failed */
}
2010-09-30 17:52:55 +04:00
}
2012-01-12 20:58:43 +04:00
/*
* ' R ' indicates read - only activation of a device that
* does not have metadata flagging it as read - only .
*/
2015-01-20 15:14:16 +03:00
if ( repstr [ 1 ] ! = ' r ' & & lvdm - > info . read_only )
2012-01-12 20:58:43 +04:00
repstr [ 1 ] = ' R ' ;
2015-01-20 15:14:16 +03:00
repstr [ 5 ] = ( lvdm - > info . open_count ) ? ' o ' : ' - ' ;
2010-09-30 17:52:55 +04:00
} else {
repstr [ 4 ] = ' - ' ;
repstr [ 5 ] = ' - ' ;
}
2011-09-09 00:55:39 +04:00
2013-06-05 15:44:10 +04:00
if ( lv_is_thin_pool ( lv ) | | lv_is_thin_volume ( lv ) )
2011-09-09 00:55:39 +04:00
repstr [ 6 ] = ' t ' ;
2014-08-15 15:21:29 +04:00
else if ( lv_is_cache_pool ( lv ) | | lv_is_cache ( lv ) | | lv_is_cache_origin ( lv ) )
2014-02-05 19:44:37 +04:00
repstr [ 6 ] = ' C ' ;
2011-09-09 00:55:39 +04:00
else if ( lv_is_raid_type ( lv ) )
repstr [ 6 ] = ' r ' ;
2014-09-16 00:33:53 +04:00
else if ( lv_is_mirror_type ( lv ) | | lv_is_pvmove ( lv ) )
2012-08-25 00:34:19 +04:00
repstr [ 6 ] = ' m ' ;
2011-09-09 00:55:39 +04:00
else if ( lv_is_cow ( lv ) | | lv_is_origin ( lv ) )
repstr [ 6 ] = ' s ' ;
else if ( lv_has_unknown_segments ( lv ) )
repstr [ 6 ] = ' u ' ;
else if ( lv_is_virtual ( lv ) )
repstr [ 6 ] = ' v ' ;
else
repstr [ 6 ] = ' - ' ;
2011-09-14 14:03:15 +04:00
if ( ( ( lv_is_thin_volume ( lv ) & & ( seg = first_seg ( lv ) ) & & seg - > pool_lv & & ( seg = first_seg ( seg - > pool_lv ) ) ) | |
( lv_is_thin_pool ( lv ) & & ( seg = first_seg ( lv ) ) ) ) & &
2011-09-09 05:15:18 +04:00
seg - > zero_new_blocks )
repstr [ 7 ] = ' z ' ;
else
repstr [ 7 ] = ' - ' ;
2013-04-12 00:33:59 +04:00
repstr [ 8 ] = ' - ' ;
if ( lv - > status & PARTIAL_LV )
2012-09-19 15:49:40 +04:00
repstr [ 8 ] = ' p ' ;
2013-04-12 00:33:59 +04:00
else if ( lv_is_raid_type ( lv ) ) {
uint64_t n ;
2014-04-18 05:23:39 +04:00
if ( ! activation ( ) )
repstr [ 8 ] = ' X ' ; /* Unknown */
2014-07-01 11:56:03 +04:00
else if ( ! lv_raid_healthy ( lv ) )
2013-04-12 00:33:59 +04:00
repstr [ 8 ] = ' r ' ; /* RAID needs 'r'efresh */
2014-09-16 00:33:53 +04:00
else if ( lv_is_raid ( lv ) ) {
RAID: Add writemostly/writebehind support for RAID1
'lvchange' is used to alter a RAID 1 logical volume's write-mostly and
write-behind characteristics. The '--writemostly' parameter takes a
PV as an argument with an optional trailing character to specify whether
to set ('y'), unset ('n'), or toggle ('t') the value. If no trailing
character is given, it will set the flag.
Synopsis:
lvchange [--writemostly <PV>:{t|y|n}] [--writebehind <count>] vg/lv
Example:
lvchange --writemostly /dev/sdb1:y --writebehind 512 vg/raid1_lv
The last character in the 'lv_attr' field is used to show whether a device
has the WriteMostly flag set. It is signified with a 'w'. If the device
has failed, the 'p'artial flag has priority.
Example ("nosync" raid1 with mismatch_cnt and writemostly):
[~]# lvs -a --segment vg
LV VG Attr #Str Type SSize
raid1 vg Rwi---r-m 2 raid1 500.00m
[raid1_rimage_0] vg Iwi---r-- 1 linear 500.00m
[raid1_rimage_1] vg Iwi---r-w 1 linear 500.00m
[raid1_rmeta_0] vg ewi---r-- 1 linear 4.00m
[raid1_rmeta_1] vg ewi---r-- 1 linear 4.00m
Example (raid1 with mismatch_cnt, writemostly - but failed drive):
[~]# lvs -a --segment vg
LV VG Attr #Str Type SSize
raid1 vg rwi---r-p 2 raid1 500.00m
[raid1_rimage_0] vg Iwi---r-- 1 linear 500.00m
[raid1_rimage_1] vg Iwi---r-p 1 linear 500.00m
[raid1_rmeta_0] vg ewi---r-- 1 linear 4.00m
[raid1_rmeta_1] vg ewi---r-p 1 linear 4.00m
A new reportable field has been added for writebehind as well. If
write-behind has not been set or the LV is not RAID1, the field will
be blank.
Example (writebehind is set):
[~]# lvs -a -o name,attr,writebehind vg
LV Attr WBehind
lv rwi-a-r-- 512
[lv_rimage_0] iwi-aor-w
[lv_rimage_1] iwi-aor--
[lv_rmeta_0] ewi-aor--
[lv_rmeta_1] ewi-aor--
Example (writebehind is not set):
[~]# lvs -a -o name,attr,writebehind vg
LV Attr WBehind
lv rwi-a-r--
[lv_rimage_0] iwi-aor-w
[lv_rimage_1] iwi-aor--
[lv_rmeta_0] ewi-aor--
[lv_rmeta_1] ewi-aor--
2013-04-15 22:59:46 +04:00
if ( lv_raid_mismatch_count ( lv , & n ) & & n )
repstr [ 8 ] = ' m ' ; /* RAID has 'm'ismatches */
} else if ( lv - > status & LV_WRITEMOSTLY )
repstr [ 8 ] = ' w ' ; /* sub-LV has 'w'ritemostly */
2015-01-20 15:14:16 +03:00
} else if ( lv_is_thin_pool ( lv ) & &
( lvdm - > seg_status . type ! = SEG_STATUS_NONE ) ) {
if ( lvdm - > seg_status . type = = SEG_STATUS_UNKNOWN )
2015-01-13 17:23:03 +03:00
repstr [ 8 ] = ' X ' ; /* Unknown */
2015-01-20 15:14:16 +03:00
else if ( lvdm - > seg_status . thin_pool - > fail )
2015-01-13 17:23:03 +03:00
repstr [ 8 ] = ' F ' ;
2015-01-20 15:14:16 +03:00
else if ( lvdm - > seg_status . thin_pool - > out_of_data_space )
2015-01-13 17:23:03 +03:00
repstr [ 8 ] = ' D ' ;
2015-01-20 15:14:16 +03:00
else if ( lvdm - > seg_status . thin_pool - > read_only )
2015-01-13 17:23:03 +03:00
repstr [ 8 ] = ' M ' ;
2013-04-12 00:33:59 +04:00
}
2012-09-19 15:49:40 +04:00
2013-07-11 16:05:05 +04:00
if ( lv - > status & LV_ACTIVATION_SKIP )
repstr [ 9 ] = ' k ' ;
else
repstr [ 9 ] = ' - ' ;
2010-09-30 17:52:55 +04:00
out :
return repstr ;
}
2012-01-19 19:31:45 +04:00
2015-01-20 15:14:16 +03:00
/* backward compatible internal API for lvm2api, TODO improve it */
char * lv_attr_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
2015-01-20 18:24:45 +03:00
char * ret = NULL ;
2015-01-20 15:14:16 +03:00
struct lv_with_info_and_seg_status status = {
2015-01-20 18:24:45 +03:00
. seg_status . type = SEG_STATUS_NONE ,
. lv = lv
2015-01-20 15:14:16 +03:00
} ;
2015-01-20 18:24:45 +03:00
if ( ! ( status . seg_status . mem = dm_pool_create ( " reporter_pool " , 1024 ) ) )
return_0 ;
2015-01-30 11:45:22 +03:00
if ( ! ( status . info_ok = lv_info_with_seg_status ( lv - > vg - > cmd , lv , first_seg ( lv ) , 1 , & status , 1 , 1 ) ) )
2015-01-20 18:24:45 +03:00
goto_bad ;
ret = lv_attr_dup_with_info_and_seg_status ( mem , & status ) ;
bad :
dm_pool_destroy ( status . seg_status . mem ) ;
2015-01-20 15:14:16 +03:00
2015-01-20 18:24:45 +03:00
return ret ;
2015-01-20 15:14:16 +03:00
}
2012-01-19 19:31:45 +04:00
int lv_set_creation ( struct logical_volume * lv ,
const char * hostname , uint64_t timestamp )
{
const char * hn ;
if ( ! hostname ) {
if ( ! _utsinit ) {
if ( uname ( & _utsname ) ) {
log_error ( " uname failed: %s " , strerror ( errno ) ) ;
memset ( & _utsname , 0 , sizeof ( _utsname ) ) ;
}
_utsinit = 1 ;
}
hostname = _utsname . nodename ;
}
if ( ! ( hn = dm_hash_lookup ( lv - > vg - > hostnames , hostname ) ) ) {
if ( ! ( hn = dm_pool_strdup ( lv - > vg - > vgmem , hostname ) ) ) {
log_error ( " Failed to duplicate hostname " ) ;
return 0 ;
}
if ( ! dm_hash_insert ( lv - > vg - > hostnames , hostname , ( void * ) hn ) )
return_0 ;
}
lv - > hostname = hn ;
2012-02-24 02:31:23 +04:00
lv - > timestamp = timestamp ? : ( uint64_t ) time ( NULL ) ;
2012-01-19 19:31:45 +04:00
return 1 ;
}
2015-05-25 17:13:07 +03:00
char * lv_time_dup ( struct dm_pool * mem , const struct logical_volume * lv , int iso_mode )
2012-01-19 19:31:45 +04:00
{
2015-06-29 16:24:00 +03:00
char buffer [ 4096 ] ;
2012-01-19 19:31:45 +04:00
struct tm * local_tm ;
time_t ts = ( time_t ) lv - > timestamp ;
2015-05-25 17:13:07 +03:00
const char * format = iso_mode ? DEFAULT_TIME_FORMAT : lv - > vg - > cmd - > time_format ;
2012-01-19 19:31:45 +04:00
if ( ! ts | |
! ( local_tm = localtime ( & ts ) ) | |
2015-05-25 17:13:07 +03:00
! strftime ( buffer , sizeof ( buffer ) , format , local_tm ) )
2012-01-19 19:31:45 +04:00
buffer [ 0 ] = 0 ;
return dm_pool_strdup ( mem , buffer ) ;
}
char * lv_host_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
return dm_pool_strdup ( mem , lv - > hostname ? : " " ) ;
}
2013-04-25 14:12:05 +04:00
2013-11-01 13:31:31 +04:00
static int _lv_is_exclusive ( struct logical_volume * lv )
{
2014-11-10 20:44:37 +03:00
struct lv_segment * seg ;
/* Some seg types require exclusive activation */
2014-11-11 17:13:00 +03:00
/* FIXME Scan recursively */
2014-11-10 20:44:37 +03:00
dm_list_iterate_items ( seg , & lv - > segments )
if ( seg_only_exclusive ( seg ) )
return 1 ;
/* Origin has no seg type require exlusiveness */
return lv_is_origin ( lv ) ;
2013-11-01 13:31:31 +04:00
}
2013-04-29 16:04:38 +04:00
int lv_active_change ( struct cmd_context * cmd , struct logical_volume * lv ,
2014-11-05 17:14:58 +03:00
enum activation_change activate , int needs_exclusive )
2013-04-29 16:04:38 +04:00
{
2015-03-05 23:00:44 +03:00
const char * ay_with_mode = NULL ;
if ( activate = = CHANGE_ASY )
ay_with_mode = " sh " ;
if ( activate = = CHANGE_AEY )
ay_with_mode = " ex " ;
if ( is_change_activating ( activate ) & &
! lockd_lv ( cmd , lv , ay_with_mode , LDLV_PERSISTENT ) ) {
log_error ( " Failed to lock logical volume %s/%s " , lv - > vg - > name , lv - > name ) ;
return 0 ;
}
2013-11-01 13:31:31 +04:00
switch ( activate ) {
case CHANGE_AN :
deactivate :
2013-04-29 16:04:38 +04:00
log_verbose ( " Deactivating logical volume \" %s \" " , lv - > name ) ;
if ( ! deactivate_lv ( cmd , lv ) )
return_0 ;
2013-11-01 13:31:31 +04:00
break ;
case CHANGE_ALN :
2014-11-05 17:14:58 +03:00
if ( vg_is_clustered ( lv - > vg ) & & ( needs_exclusive | | _lv_is_exclusive ( lv ) ) ) {
2013-11-01 13:31:31 +04:00
if ( ! lv_is_active_locally ( lv ) ) {
log_error ( " Cannot deactivate remotely exclusive device locally. " ) ;
return 0 ;
}
/* Unlock whole exclusive activation */
goto deactivate ;
2013-04-29 16:04:38 +04:00
}
2013-09-11 01:33:22 +04:00
log_verbose ( " Deactivating logical volume \" %s \" locally. " ,
lv - > name ) ;
2013-04-29 16:04:38 +04:00
if ( ! deactivate_lv_local ( cmd , lv ) )
return_0 ;
2013-11-01 13:31:31 +04:00
break ;
case CHANGE_ALY :
case CHANGE_AAY :
2014-11-05 17:14:58 +03:00
if ( needs_exclusive | | _lv_is_exclusive ( lv ) ) {
2013-11-01 13:31:31 +04:00
log_verbose ( " Activating logical volume \" %s \" exclusively locally. " ,
lv - > name ) ;
if ( ! activate_lv_excl_local ( cmd , lv ) )
return_0 ;
} else {
log_verbose ( " Activating logical volume \" %s \" locally. " ,
lv - > name ) ;
if ( ! activate_lv_local ( cmd , lv ) )
return_0 ;
}
break ;
2014-09-19 16:28:28 +04:00
case CHANGE_AEY :
2013-11-01 13:31:31 +04:00
exclusive :
log_verbose ( " Activating logical volume \" %s \" exclusively. " ,
2013-09-11 01:33:22 +04:00
lv - > name ) ;
2013-11-01 13:31:31 +04:00
if ( ! activate_lv_excl ( cmd , lv ) )
2013-04-29 16:04:38 +04:00
return_0 ;
2013-11-01 13:31:31 +04:00
break ;
2015-06-16 18:18:16 +03:00
case CHANGE_ASY :
case CHANGE_AY :
default :
2014-11-05 17:14:58 +03:00
if ( needs_exclusive | | _lv_is_exclusive ( lv ) )
2013-11-01 13:31:31 +04:00
goto exclusive ;
2013-04-29 16:04:38 +04:00
log_verbose ( " Activating logical volume \" %s \" . " , lv - > name ) ;
if ( ! activate_lv ( cmd , lv ) )
return_0 ;
}
2015-03-05 23:00:44 +03:00
if ( ! is_change_activating ( activate ) & &
! lockd_lv ( cmd , lv , " un " , LDLV_PERSISTENT ) )
log_error ( " Failed to unlock logical volume %s/%s " , lv - > vg - > name , lv - > name ) ;
2013-04-29 16:04:38 +04:00
return 1 ;
}
2013-04-25 14:12:05 +04:00
char * lv_active_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
const char * s ;
2014-07-11 13:15:06 +04:00
if ( ! activation ( ) ) {
s = " unknown " ;
goto out ;
}
2013-05-02 20:06:50 +04:00
if ( vg_is_clustered ( lv - > vg ) ) {
//const struct logical_volume *lvo = lv;
lv = lv_lock_holder ( lv ) ;
//log_debug("Holder for %s => %s.", lvo->name, lv->name);
}
2013-04-25 14:12:05 +04:00
if ( ! lv_is_active ( lv ) )
s = " " ; /* not active */
else if ( ! vg_is_clustered ( lv - > vg ) )
s = " active " ;
else if ( lv_is_active_exclusive ( lv ) )
/* exclusive cluster activation */
s = lv_is_active_exclusive_locally ( lv ) ?
" local exclusive " : " remote exclusive " ;
else /* locally active */
s = lv_is_active_but_not_locally ( lv ) ?
2013-05-02 20:06:50 +04:00
" remotely " : " locally " ;
2014-07-11 13:15:06 +04:00
out :
2013-04-25 14:12:05 +04:00
return dm_pool_strdup ( mem , s ) ;
}
2013-05-02 20:06:50 +04:00
2013-07-02 16:34:52 +04:00
char * lv_profile_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
const char * profile_name = lv - > profile ? lv - > profile - > name : " " ;
return dm_pool_strdup ( mem , profile_name ) ;
}
2015-03-05 23:00:44 +03:00
char * lv_lock_args_dup ( struct dm_pool * mem , const struct logical_volume * lv )
{
const char * lock_args = lv - > lock_args ? lv - > lock_args : " " ;
return dm_pool_strdup ( mem , lock_args ) ;
}
2013-05-02 20:06:50 +04:00
/* For given LV find recursively the LV which holds lock for it */
const struct logical_volume * lv_lock_holder ( const struct logical_volume * lv )
{
const struct seg_list * sl ;
if ( lv_is_cow ( lv ) )
return lv_lock_holder ( origin_from_cow ( lv ) ) ;
if ( lv_is_thin_pool ( lv ) )
/* Find any active LV from the pool */
dm_list_iterate_items ( sl , & lv - > segs_using_this_lv )
if ( lv_is_active ( sl - > seg - > lv ) ) {
log_debug ( " Thin volume \" %s \" is active. " , sl - > seg - > lv - > name ) ;
return sl - > seg - > lv ;
}
2015-01-28 15:34:41 +03:00
/* RAID changes visibility of splitted LVs but references them still as leg/meta */
if ( ( lv_is_raid_image ( lv ) | | lv_is_raid_metadata ( lv ) ) & & lv_is_visible ( lv ) )
return lv ;
2013-05-02 20:06:50 +04:00
/* For other types, by default look for the first user */
dm_list_iterate_items ( sl , & lv - > segs_using_this_lv ) {
/* FIXME: complete this exception list */
if ( lv_is_thin_volume ( lv ) & &
lv_is_thin_volume ( sl - > seg - > lv ) & &
first_seg ( lv ) - > pool_lv = = sl - > seg - > pool_lv )
continue ; /* Skip thin snaphost */
if ( lv_is_external_origin ( lv ) & &
lv_is_thin_volume ( sl - > seg - > lv ) )
continue ; /* Skip external origin */
2014-11-10 12:56:43 +03:00
if ( lv_is_pending_delete ( sl - > seg - > lv ) )
continue ; /* Skip deleted LVs */
2013-05-02 20:06:50 +04:00
return lv_lock_holder ( sl - > seg - > lv ) ;
}
return lv ;
}
2013-06-27 13:17:16 +04:00
struct profile * lv_config_profile ( const struct logical_volume * lv )
{
return lv - > profile ? : lv - > vg - > profile ;
}