/*
 * Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "base/memory/zalloc.h"
#include "lib/misc/lib.h"
#include "lib/metadata/segtype.h"
#include "lib/display/display.h"
#include "lib/format_text/text_export.h"
#include "lib/config/config.h"
#include "lib/datastruct/str_list.h"
#include "lib/activate/targets.h"
#include "lib/misc/lvm-string.h"
#include "lib/activate/activate.h"
#include "lib/metadata/metadata.h"
#include "lib/metadata/lv_alloc.h"

static void _raid_display(const struct lv_segment *seg)
{
	unsigned s;

	for (s = 0; s < seg->area_count; ++s) {
		log_print("  Raid Data LV%2d", s);
		display_stripe(seg, s, "    ");
	}

	if (seg->meta_areas)
		for (s = 0; s < seg->area_count; ++s)
			if (seg_metalv(seg, s))
				log_print("  Raid Metadata LV%2d\t%s", s,
					  seg_metalv(seg, s)->name);

	log_print(" ");
}
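
/*
 * Rough sketch of the output for a hypothetical 2-leg raid1 (the data-LV
 * detail lines come from display_stripe(), whose exact layout may differ):
 *
 *	  Raid Data LV 0
 *	    Logical volume	lv_rimage_0
 *	  Raid Data LV 1
 *	    Logical volume	lv_rimage_1
 *	  Raid Metadata LV 0	lv_rmeta_0
 *	  Raid Metadata LV 1	lv_rmeta_1
 */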

static int _raid_text_import_area_count(const struct dm_config_node *sn,
					uint32_t *area_count)
{
	uint32_t stripe_count = 0, device_count = 0;
	int stripe_count_found, device_count_found;

	device_count_found = dm_config_get_uint32(sn, "device_count", &device_count);
	stripe_count_found = dm_config_get_uint32(sn, "stripe_count", &stripe_count);

	if (!device_count_found && !stripe_count_found) {
		log_error("Couldn't read 'device_count' or 'stripe_count' for "
			  "segment '%s'.", dm_config_parent_name(sn));
		return 0;
	}

	if (device_count_found && stripe_count_found) {
		log_error("Only one of 'device_count' and 'stripe_count' allowed for "
			  "segment '%s'.", dm_config_parent_name(sn));
		return 0;
	}

	*area_count = stripe_count + device_count;

	return 1;
}
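
/*
 * For reference, a hypothetical fragment of the VG text metadata that the
 * import helpers here parse.  A raid segment carries "device_count" and an
 * alternating rmeta/rimage "raids" list; raid0 instead uses "stripe_count"
 * and a data-only "raid0_lvs" list:
 *
 *	segment1 {
 *		start_extent = 0
 *		extent_count = 125
 *		type = "raid1"
 *		device_count = 2
 *		region_size = 1024
 *		raids = [
 *			"lv_rmeta_0", "lv_rimage_0",
 *			"lv_rmeta_1", "lv_rimage_1"
 *		]
 *	}
 */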

static int _raid_text_import_areas(struct lv_segment *seg,
				   const struct dm_config_node *sn,
				   const struct dm_config_value *cv)
{
	unsigned int s;
	struct logical_volume *lv;
	const char *seg_name = dm_config_parent_name(sn);

	if (!seg->area_count) {
		log_error("No areas found for segment %s", seg_name);
		return 0;
	}

	for (s = 0; cv && s < seg->area_count; s++, cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			log_error("Bad volume name in areas array for segment %s.", seg_name);
			return 0;
		}

		/* Metadata device comes first. */
		if (!(lv = find_lv(seg->lv->vg, cv->v.str))) {
			log_error("Couldn't find volume '%s' for segment '%s'.",
				  cv->v.str ? : "NULL", seg_name);
			return 0;
		}

		if (strstr(lv->name, "_rmeta_")) {
			if (!set_lv_segment_area_lv(seg, s, lv, 0, RAID_META))
				return_0;
			cv = cv->next;
		}

		if (!cv) {
			log_error("Missing data device in areas array for segment %s.", seg_name);
			return 0;
		}

		/* Data device comes second. */
		if (!(lv = find_lv(seg->lv->vg, cv->v.str))) {
			log_error("Couldn't find volume '%s' for segment '%s'.",
				  cv->v.str ? : "NULL", seg_name);
			return 0;
		}

		if (!set_lv_segment_area_lv(seg, s, lv, 0, RAID_IMAGE))
			return_0;
	}

	/*
	 * Check we read the correct number of RAID data/meta pairs.
	 */
	if (cv || (s < seg->area_count)) {
		log_error("Incorrect number of areas in area array "
			  "for segment '%s'.", seg_name);
		return 0;
	}

	return 1;
}

static int _raid_text_import(struct lv_segment *seg,
			     const struct dm_config_node *sn,
			     struct dm_hash_table *pv_hash)
{
	const struct dm_config_value *cv;
	const struct {
		const char *name;
		uint32_t *var;
	} raid_attr_import[] = {
		{ "region_size", &seg->region_size },
		{ "stripe_size", &seg->stripe_size },
		{ "data_copies", &seg->data_copies },
		{ "writebehind", &seg->writebehind },
		{ "min_recovery_rate", &seg->min_recovery_rate },
		{ "max_recovery_rate", &seg->max_recovery_rate },
		{ "data_offset", &seg->data_offset },
	}, *aip = raid_attr_import;
	unsigned i;

	for (i = 0; i < DM_ARRAY_SIZE(raid_attr_import); i++, aip++) {
		if (dm_config_has_node(sn, aip->name)) {
			if (!dm_config_get_uint32(sn, aip->name, aip->var)) {
				if (!strcmp(aip->name, "data_copies") ||
				    !strcmp(aip->name, "data_offset")) {
					*aip->var = 0;
					continue;
				}
				log_error("Couldn't read '%s' for segment %s of logical volume %s.",
					  aip->name, dm_config_parent_name(sn), seg->lv->name);
				return 0;
			}

			if (!strcmp(aip->name, "data_offset") && !*aip->var)
				*aip->var = 1;
		}
	}

	if (!dm_config_get_list(sn, seg_is_raid0(seg) ? "raid0_lvs" : "raids", &cv)) {
		log_error("Couldn't find RAID array for "
			  "segment %s of logical volume %s.",
			  dm_config_parent_name(sn), seg->lv->name);
		return 0;
	}

	if (!_raid_text_import_areas(seg, sn, cv)) {
		log_error("Failed to import RAID component pairs.");
		return 0;
	}

	if (seg->data_copies < 2)
		seg->data_copies = lv_raid_data_copies(seg->segtype, seg->area_count);

	if (seg_is_any_raid0(seg))
		seg->area_len /= seg->area_count;

	return 1;
}
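
/*
 * Note on the data_offset quirk above: an on-disk "data_offset = 0" is
 * held internally as 1 ("offset is zero"), because an internal value of 0
 * means "not set"; _raid_text_export_raid() below undoes this mapping via
 * seg->data_offset == 1 ? 0 : seg->data_offset.
 */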

static int _raid_text_export_raid0(const struct lv_segment *seg, struct formatter *f)
{
	outf(f, "stripe_count = %u", seg->area_count);

	if (seg->stripe_size)
		outf(f, "stripe_size = %" PRIu32, seg->stripe_size);

	return out_areas(f, seg, seg_is_raid0(seg) ? "raid0_lv" : "raid");
}

static int _raid_text_export_raid(const struct lv_segment *seg, struct formatter *f)
{
	int raid0 = seg_is_any_raid0(seg);

	if (raid0)
		outfc(f, (seg->area_count == 1) ? "# linear" : NULL,
		      "stripe_count = %u", seg->area_count);
	else {
		outf(f, "device_count = %u", seg->area_count);

		if (seg_is_any_raid10(seg) && seg->data_copies > 0)
			outf(f, "data_copies = %" PRIu32, seg->data_copies);

		if (seg->region_size)
			outf(f, "region_size = %" PRIu32, seg->region_size);
	}

	if (seg->stripe_size)
		outf(f, "stripe_size = %" PRIu32, seg->stripe_size);

	if (!raid0) {
		if (seg_is_raid1(seg) && seg->writebehind)
			outf(f, "writebehind = %" PRIu32, seg->writebehind);

		if (seg->min_recovery_rate)
			outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);

		if (seg->max_recovery_rate)
			outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);

		if (seg->data_offset)
			outf(f, "data_offset = %" PRIu32, seg->data_offset == 1 ? 0 : seg->data_offset);
	}

	return out_areas(f, seg, "raid");
}

static int _raid_text_export(const struct lv_segment *seg, struct formatter *f)
{
	if (seg_is_any_raid0(seg))
		return _raid_text_export_raid0(seg, f);

	return _raid_text_export_raid(seg, f);
}
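
/*
 * Sketch of the text exported for a hypothetical 3-stripe raid0 segment
 * by _raid_text_export_raid0() (the "type" line is written by the caller):
 *
 *	stripe_count = 3
 *	stripe_size = 128
 *	raid0_lvs = [
 *		"lv_rimage_0", "lv_rimage_1", "lv_rimage_2"
 *	]
 */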

static int _raid_target_status_compatible(const char *type)
{
	return (strstr(type, "raid") != NULL);
}

static void _raid_destroy(struct segment_type *segtype)
{
	free((void *) segtype->dso);
	free(segtype);
}

#ifdef DEVMAPPER_SUPPORT

static int _raid_target_present(struct cmd_context *cmd,
				const struct lv_segment *seg __attribute__((unused)),
				unsigned *attributes);

static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
				 struct dm_pool *mem __attribute__((unused)),
				 struct cmd_context *cmd __attribute__((unused)),
				 void **target_state __attribute__((unused)),
				 struct lv_segment *seg,
				 const struct lv_activate_opts *laopts __attribute__((unused)),
				 struct dm_tree_node *node, uint64_t len,
				 uint32_t *pvmove_mirror_count __attribute__((unused)))
{
	int delta_disks = 0, delta_disks_minus = 0, delta_disks_plus = 0, data_offset = 0;
	uint32_t s;
	uint64_t flags = 0;
	uint64_t rebuilds[RAID_BITMAP_SIZE] = { 0 };
	uint64_t writemostly[RAID_BITMAP_SIZE] = { 0 };
	struct dm_tree_node_raid_params_v2 params = { 0 };
	unsigned attrs;

	if (seg_is_raid4(seg)) {
		if (!_raid_target_present(cmd, NULL, &attrs) ||
		    !(attrs & RAID_FEATURE_RAID4)) {
			log_error("RAID target does not support RAID4 for LV %s.",
				  display_lvname(seg->lv));
			return 0;
		}
	}

	if (!seg->area_count) {
		log_error(INTERNAL_ERROR "_raid_add_target_line called "
			  "with no areas for %s.", seg->lv->name);
		return 0;
	}

	/*
	 * 253 device restriction imposed by kernel due to MD and dm-raid
	 * bitfield limitation in superblock.  It is not strictly a
	 * userspace limitation.
	 */
	if (seg->area_count > DEFAULT_RAID_MAX_IMAGES) {
		log_error("Unable to handle more than %u devices in a "
			  "single RAID array", DEFAULT_RAID_MAX_IMAGES);
		return 0;
	}

	if (!seg_is_any_raid0(seg)) {
		if (!seg->region_size) {
			log_error("Missing region size for raid segment in %s.",
				  seg_lv(seg, 0)->name);
			return 0;
		}

		for (s = 0; s < seg->area_count; s++) {
			uint64_t status = seg_lv(seg, s)->status;

			if (status & LV_REBUILD)
				rebuilds[s / 64] |= 1ULL << (s % 64);

			if (status & LV_RESHAPE_DELTA_DISKS_PLUS) {
				delta_disks++;
				delta_disks_plus++;
			} else if (status & LV_RESHAPE_DELTA_DISKS_MINUS) {
				delta_disks--;
				delta_disks_minus++;
			}

			if (delta_disks_plus && delta_disks_minus) {
				log_error(INTERNAL_ERROR "Invalid request for delta disks minus and delta disks plus!");
				return 0;
			}

			if (status & LV_WRITEMOSTLY)
				writemostly[s / 64] |= 1ULL << (s % 64);
		}

		data_offset = seg->data_offset;

		if (mirror_in_sync())
			flags = DM_NOSYNC;
	}

	params.raid_type = lvseg_name(seg);

	if (seg->segtype->parity_devs) {
		/* RAID 4/5/6 */
		params.mirrors = 1;
		params.stripes = seg->area_count - seg->segtype->parity_devs;
	} else if (seg_is_any_raid0(seg)) {
		params.mirrors = 1;
		params.stripes = seg->area_count;
	} else if (seg_is_any_raid10(seg)) {
		params.data_copies = seg->data_copies;
		params.stripes = seg->area_count;
	} else {
		/* RAID 1 */
		params.mirrors = seg->data_copies;
		params.stripes = 1;
		params.writebehind = seg->writebehind;
		memcpy(params.writemostly, writemostly, sizeof(params.writemostly));
	}

	/* RAID 0 doesn't have a bitmap, thus no region_size, rebuilds etc. */
	if (!seg_is_any_raid0(seg)) {
		params.region_size = seg->region_size;
		memcpy(params.rebuilds, rebuilds, sizeof(params.rebuilds));
		params.min_recovery_rate = seg->min_recovery_rate;
		params.max_recovery_rate = seg->max_recovery_rate;
		params.delta_disks = delta_disks;
		params.data_offset = data_offset;
	}

	params.stripe_size = seg->stripe_size;
	params.flags = flags;

	if (!dm_tree_node_add_raid_target_with_params_v2(node, len, &params))
		return_0;

	return add_areas_line(dm, seg, node, 0u, seg->area_count);
}
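
/*
 * The params built above end up as a dm-raid table line.  Rough shape for
 * a hypothetical 2-leg raid1 (device numbers invented):
 *
 *	0 1024000 raid raid1 3 0 region_size 1024 2 253:2 253:3 253:4 253:5
 *
 * i.e. <start> <len> raid <raid_type> <#params> <params...> <#devs>
 * followed by <meta_dev data_dev> pairs.
 */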
static int _raid_target_percent(void **target_state,
				dm_percent_t *percent,
				struct dm_pool *mem,
				struct cmd_context *cmd,
				struct lv_segment *seg, char *params,
				uint64_t *total_numerator,
				uint64_t *total_denominator)
{
	struct dm_status_raid *sr;

	if (!dm_get_status_raid(mem, params, &sr))
		return_0;

	*total_numerator += sr->insync_regions;
	*total_denominator += sr->total_regions;

	if (seg)
		seg->extents_copied = (uint64_t) seg->area_len
			* dm_make_percent(sr->insync_regions, sr->total_regions) / DM_PERCENT_100;

	*percent = dm_make_percent(sr->insync_regions, sr->total_regions);

	dm_pool_free(mem, sr);

	return 1;
}
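
/*
 * dm_get_status_raid() parses the kernel's raid status line, e.g.
 * (hypothetical values; newer kernels append further fields):
 *
 *	raid1 2 AA 425984/425984 idle 0 0 -
 *
 * where 425984/425984 is <insync_regions>/<total_regions>, so the percent
 * computed above would be 100% here.
 */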

static int _raid_transient_status(struct dm_pool *mem,
				  struct lv_segment *seg,
				  char *params)
{
	int failed = 0, r = 0;
	unsigned i;
	struct lvinfo info;
	struct logical_volume *lv;
	struct dm_status_raid *sr;

	log_debug("Raid transient status %s.", params);

	if (!dm_get_status_raid(mem, params, &sr))
		return_0;

	if (sr->dev_count != seg->area_count) {
		log_error("Active raid has a wrong number of raid images!");
		log_error("Metadata says %u, kernel says %u.",
			  seg->area_count, sr->dev_count);
		goto out;
	}

	if (seg->meta_areas)
		for (i = 0; i < seg->area_count; ++i) {
			lv = seg_metalv(seg, i);
			if (!lv_info(lv->vg->cmd, lv, 0, &info, 0, 0)) {
				log_error("Check for existence of raid meta %s failed.",
					  display_lvname(lv));
				goto out;
			}
		}

	for (i = 0; i < seg->area_count; ++i) {
		lv = seg_lv(seg, i);
		if (!lv_info(lv->vg->cmd, lv, 0, &info, 0, 0)) {
			log_error("Check for existence of raid image %s failed.",
				  display_lvname(lv));
			goto out;
		}
		if (sr->dev_health[i] == 'D') {
			lv->status |= PARTIAL_LV;
			++failed;
		}
	}

	/* Update PARTIAL_LV flags across the VG */
	if (failed)
		vg_mark_partial_lvs(lv->vg, 0);

	r = 1;
out:
	dm_pool_free(mem, sr);

	return r;
}
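
/*
 * The dev_health characters checked above follow dm-raid: 'A' means alive
 * and in-sync, 'a' alive but not in-sync, 'D' dead/failed; only 'D' marks
 * an image PARTIAL_LV.
 */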

/* Define raid feature based on the tuple(major, minor, patchlevel) of raid target */
struct raid_feature {
	uint32_t maj;
	uint32_t min;
	uint32_t patchlevel;
	unsigned raid_feature;
	const char *feature;
};

/* Return true if tuple(@maj, @min, @patchlevel) is greater/equal to @*feature members */
static int _check_feature(const struct raid_feature *feature, uint32_t maj, uint32_t min, uint32_t patchlevel)
{
	return (maj > feature->maj) ||
	       (maj == feature->maj && min > feature->min) ||
	       (maj == feature->maj && min == feature->min && patchlevel >= feature->patchlevel);
}
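
/*
 * Usage sketch: with feature = { 1, 9, 0, RAID_FEATURE_SHRINK, "shrinking" },
 * _check_feature(&feature, 1, 12, 0) returns 1 (1.12.0 >= 1.9.0), while
 * _check_feature(&feature, 1, 8, 5) returns 0.
 */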

/* Check availability of raid10 taking data copies into consideration. */
static bool _raid10_is_available(const struct logical_volume *lv)
{
	uint32_t i, rebuilds_per_group = 0, s;
	const uint32_t copies = 2; /* FIXME: we only support 2-way mirrors (i.e. 2 data copies) in RAID10 for now. */
	struct lv_segment *seg = first_seg(lv); /* We only have one segment in RaidLVs for now. */

	for (i = 0; i < seg->area_count * copies; ++i) {
		s = i % seg->area_count;

		if (!(i % copies))
			rebuilds_per_group = 0;

		if (seg_type(seg, s) == AREA_LV &&
		    (lv_is_partial(seg_lv(seg, s)) ||
		     lv_is_virtual(seg_lv(seg, s))))
			rebuilds_per_group++;

		if (rebuilds_per_group >= copies)
			return false;
	}

	return true;
}
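
/*
 * Worked example (hypothetical): a 4-leg raid10 with 2 data copies forms
 * mirror groups {0,1} and {2,3}.  Losing legs 0 and 2 leaves one live copy
 * per group, so the LV stays available; losing both 0 and 1 empties a
 * group and the LV is unavailable.
 */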

/*
 * Return true in case RaidLV with specific RAID level is available.
 *
 * - raid0: all legs have to be live
 * - raid1: minimum of 1 leg live
 * - raid4/5: maximum of 1 leg unavailable
 * - raid6: maximum of 2 legs unavailable
 * - raid10: minimum of 1 leg per mirror group available
 */
bool raid_is_available(const struct logical_volume *lv)
{
	uint32_t s, missing_legs = 0;
	struct lv_segment *seg = first_seg(lv); /* We only have one segment in RaidLVs for now. */

	/* Be cautious about bogus calls. */
	if (!seg || !seg_is_raid(seg))
		return false;

	if (seg_is_any_raid10(seg))
		return _raid10_is_available(lv);

	/* Count missing RAID legs. */
	for (s = 0; s < seg->area_count; ++s)
		if (seg_type(seg, s) == AREA_LV &&
		    lv_is_partial(seg_lv(seg, s)))
			missing_legs++;

	/* Degradation: segtype raid1 may miss legs-1, raid0/4/5/6 may lose parity devices. */
	return missing_legs <= (seg_is_raid1(seg) ? seg->area_count - 1 : seg->segtype->parity_devs);
}
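
/*
 * Example: a 5-leg raid6 (parity_devs == 2) remains available with up to
 * 2 partial legs; a 3-leg raid1 survives down to a single live leg
 * (missing_legs <= area_count - 1 == 2).
 */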

static int _raid_target_present(struct cmd_context *cmd,
				const struct lv_segment *seg __attribute__((unused)),
				unsigned *attributes)
{
	/* List of features with their kernel target version */
	const struct raid_feature _features[] = {
		{ 1, 3, 0, RAID_FEATURE_RAID10, SEG_TYPE_NAME_RAID10 },
		{ 1, 7, 0, RAID_FEATURE_RAID0, SEG_TYPE_NAME_RAID0 },
		{ 1, 9, 0, RAID_FEATURE_SHRINK, "shrinking" },
		{ 1, 9, 0, RAID_FEATURE_NEW_DEVICES_ACCEPT_REBUILD, "rebuild+emptymeta" },
		{ 1, 12, 0, RAID_FEATURE_RESHAPE, "reshaping" },
	};

	static int _raid_checked = 0;
	static int _raid_present = 0;
	static unsigned _raid_attrs = 0;
	uint32_t maj, min, patchlevel;
	unsigned i;

	if (!activation())
		return 0;

	if (!_raid_checked) {
		_raid_checked = 1;

		if (!(_raid_present = target_present_version(cmd, TARGET_NAME_RAID, 1,
							     &maj, &min, &patchlevel)))
			return_0;

		for (i = 0; i < DM_ARRAY_SIZE(_features); ++i)
			if (_check_feature(_features + i, maj, min, patchlevel))
				_raid_attrs |= _features[i].raid_feature;
			else
				log_very_verbose("Target raid does not support %s.",
						 _features[i].feature);

		/*
		 * Separate check for proper raid4 mapping supported.
		 *
		 * If we get more of these range checks, avoid them
		 * altogether by enhancing 'struct raid_feature'
		 * and _check_feature() to handle them.
		 */
		if (!(maj == 1 && (min == 8 || (min == 9 && patchlevel == 0))))
			_raid_attrs |= RAID_FEATURE_RAID4;
		else
			log_very_verbose("Target raid does not support %s.",
					 SEG_TYPE_NAME_RAID4);
	}

	if (attributes)
		*attributes = _raid_attrs;

	return _raid_present;
}
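
/*
 * E.g. a kernel reporting dm-raid 1.8.x or exactly 1.9.0 falls into the
 * excluded range above and RAID_FEATURE_RAID4 stays unset; any other
 * target version (including pre-1.8) gets the flag.
 */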

static int _raid_modules_needed(struct dm_pool *mem,
				const struct lv_segment *seg __attribute__((unused)),
				struct dm_list *modules)
{
	if (!str_list_add(mem, modules, MODULE_NAME_RAID)) {
		log_error("raid module string list allocation failed");
		return 0;
	}

	return 1;
}

#  ifdef DMEVENTD
static int _raid_target_monitored(struct lv_segment *seg, int *pending, int *monitored)
{
	return target_registered_with_dmeventd(seg->lv->vg->cmd, seg->segtype->dso,
					       seg->lv, pending, monitored);
}

static int _raid_set_events(struct lv_segment *seg, int evmask, int set)
{
	return target_register_events(seg->lv->vg->cmd, seg->segtype->dso,
				      seg->lv, evmask, set, 0);
}

static int _raid_target_monitor_events(struct lv_segment *seg, int events)
{
	return _raid_set_events(seg, events, 1);
}

static int _raid_target_unmonitor_events(struct lv_segment *seg, int events)
{
	return _raid_set_events(seg, events, 0);
}
#  endif /* DMEVENTD */
#endif /* DEVMAPPER_SUPPORT */

static struct segtype_handler _raid_ops = {
	.display = _raid_display,
	.text_import_area_count = _raid_text_import_area_count,
	.text_import = _raid_text_import,
	.text_export = _raid_text_export,
	.target_status_compatible = _raid_target_status_compatible,
#ifdef DEVMAPPER_SUPPORT
	.add_target_line = _raid_add_target_line,
	.target_percent = _raid_target_percent,
	.target_present = _raid_target_present,
	.check_transient_status = _raid_transient_status,
	.modules_needed = _raid_modules_needed,
#  ifdef DMEVENTD
	.target_monitored = _raid_target_monitored,
	.target_monitor_events = _raid_target_monitor_events,
	.target_unmonitor_events = _raid_target_unmonitor_events,
#  endif /* DMEVENTD */
#endif
	.destroy = _raid_destroy,
};

static const struct raid_type {
	const char name[12];
	unsigned parity;
	uint64_t extra_flags;
} _raid_types[] = {
	{ SEG_TYPE_NAME_RAID0, 0, SEG_RAID0 | SEG_AREAS_STRIPED },
	{ SEG_TYPE_NAME_RAID0_META, 0, SEG_RAID0_META | SEG_AREAS_STRIPED },
	{ SEG_TYPE_NAME_RAID1, 0, SEG_RAID1 | SEG_AREAS_MIRRORED },
	{ SEG_TYPE_NAME_RAID10, 0, SEG_RAID10 | SEG_AREAS_MIRRORED },
	{ SEG_TYPE_NAME_RAID10_NEAR, 0, SEG_RAID10_NEAR | SEG_AREAS_MIRRORED },
	{ SEG_TYPE_NAME_RAID4, 1, SEG_RAID4 },
	{ SEG_TYPE_NAME_RAID5, 1, SEG_RAID5 },
	{ SEG_TYPE_NAME_RAID5_N, 1, SEG_RAID5_N },
	{ SEG_TYPE_NAME_RAID5_LA, 1, SEG_RAID5_LA },
	{ SEG_TYPE_NAME_RAID5_LS, 1, SEG_RAID5_LS },
	{ SEG_TYPE_NAME_RAID5_RA, 1, SEG_RAID5_RA },
	{ SEG_TYPE_NAME_RAID5_RS, 1, SEG_RAID5_RS },
	{ SEG_TYPE_NAME_RAID6, 2, SEG_RAID6 },
	{ SEG_TYPE_NAME_RAID6_N_6, 2, SEG_RAID6_N_6 },
	{ SEG_TYPE_NAME_RAID6_NC, 2, SEG_RAID6_NC },
	{ SEG_TYPE_NAME_RAID6_NR, 2, SEG_RAID6_NR },
	{ SEG_TYPE_NAME_RAID6_ZR, 2, SEG_RAID6_ZR },
	{ SEG_TYPE_NAME_RAID6_LS_6, 2, SEG_RAID6_LS_6 },
	{ SEG_TYPE_NAME_RAID6_RS_6, 2, SEG_RAID6_RS_6 },
	{ SEG_TYPE_NAME_RAID6_LA_6, 2, SEG_RAID6_LA_6 },
	{ SEG_TYPE_NAME_RAID6_RA_6, 2, SEG_RAID6_RA_6 }
};

static struct segment_type *_init_raid_segtype(struct cmd_context *cmd,
					       const struct raid_type *rt,
					       const char *dso,
					       uint64_t monitored)
{
	struct segment_type *segtype = zalloc(sizeof(*segtype));

	if (!segtype) {
		log_error("Failed to allocate memory for %s segtype",
			  rt->name);
		return NULL;
	}

	segtype->ops = &_raid_ops;
	segtype->name = rt->name;
	segtype->flags = SEG_RAID | SEG_ONLY_EXCLUSIVE | rt->extra_flags;

	/* Never monitor raid0 or raid0_meta LVs */
	if (!segtype_is_any_raid0(segtype) &&
	    dso && (dso = strdup(dso))) {
		segtype->dso = dso;
		segtype->flags |= monitored;
	}

	segtype->parity_devs = rt->parity;

	log_very_verbose("Initialised segtype: %s", segtype->name);

	return segtype;
}

#ifdef RAID_INTERNAL /* Shared */
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib)
#else
int init_multiple_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);

int init_multiple_segtypes(struct cmd_context *cmd, struct segtype_library *seglib)
#endif
{
	struct segment_type *segtype;
	char *dso = NULL;
	unsigned i;
	uint64_t monitored = 0;
	int r = 1;

#ifdef DEVMAPPER_SUPPORT
#  ifdef DMEVENTD
	dso = get_monitor_dso_path(cmd, dmeventd_raid_library_CFG);

	if (dso)
		monitored = SEG_MONITORED;
#  endif
#endif

	for (i = 0; i < DM_ARRAY_SIZE(_raid_types); ++i)
		if ((segtype = _init_raid_segtype(cmd, &_raid_types[i], dso, monitored)) &&
		    !lvm_register_segtype(seglib, segtype)) {
			/* segtype is already destroyed */
			stack;
			r = 0;
			break;
		}

	free(dso);

	return r;
}