/*
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "toolcontext.h"
#include "segtype.h"
#include "display.h"
#include "text_export.h"
#include "text_import.h"
#include "config.h"
#include "str_list.h"
#include "targets.h"
#include "lvm-string.h"
#include "activate.h"
#include "metadata.h"
#include "lv_alloc.h"
#include "defaults.h"

static const char *_raid_name(const struct lv_segment *seg)
{
	return seg->segtype->name;
}

static int _raid_text_import_area_count(const struct dm_config_node *sn,
					uint32_t *area_count)
{
	if (!dm_config_get_uint32(sn, "device_count", area_count)) {
		log_error("Couldn't read 'device_count' for "
			  "segment '%s'.", dm_config_parent_name(sn));
		return 0;
	}

	return 1;
}
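
/*
 * Import the "raids" area list.  Entries come in pairs: the metadata
 * sub-LV of an area is listed first, immediately followed by its data
 * sub-LV (e.g. "lv_rmeta_0", "lv_rimage_0", ...).
 */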
static int _raid_text_import_areas(struct lv_segment *seg,
				   const struct dm_config_node *sn,
				   const struct dm_config_value *cv)
{
	unsigned int s;
	struct logical_volume *lv1;
	const char *seg_name = dm_config_parent_name(sn);

	if (!seg->area_count) {
		log_error("No areas found for segment %s", seg_name);
		return 0;
	}

	for (s = 0; cv && s < seg->area_count; s++, cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			log_error("Bad volume name in areas array for segment %s.", seg_name);
			return 0;
		}

		if (!cv->next) {
			log_error("Missing data device in areas array for segment %s.", seg_name);
			return 0;
		}

		/* Metadata device comes first */
		if (!(lv1 = find_lv(seg->lv->vg, cv->v.str))) {
			log_error("Couldn't find volume '%s' for segment '%s'.",
				  cv->v.str ? : "NULL", seg_name);
			return 0;
		}
		if (!set_lv_segment_area_lv(seg, s, lv1, 0, RAID_META))
			return_0;

		/* Data device comes second */
		cv = cv->next;
		if (!(lv1 = find_lv(seg->lv->vg, cv->v.str))) {
			log_error("Couldn't find volume '%s' for segment '%s'.",
				  cv->v.str ? : "NULL", seg_name);
			return 0;
		}
		if (!set_lv_segment_area_lv(seg, s, lv1, 0, RAID_IMAGE))
			return_0;
	}

	/*
	 * Check we read the correct number of RAID data/meta pairs.
	 */
	if (cv || (s < seg->area_count)) {
		log_error("Incorrect number of areas in area array "
			  "for segment '%s'.", seg_name);
		return 0;
	}

	return 1;
}
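
/*
 * Import a RAID segment from the text metadata.  An illustrative (not
 * verbatim) segment as produced by _raid_text_export() below:
 *
 *     segment1 {
 *         start_extent = 0
 *         extent_count = 125
 *         type = "raid1"
 *         device_count = 2
 *         region_size = 1024
 *         raids = ["lv_rmeta_0", "lv_rimage_0", "lv_rmeta_1", "lv_rimage_1"]
 *     }
 *
 * 'region_size', 'stripe_size' and 'writebehind' are optional.
 */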
static int _raid_text_import(struct lv_segment *seg,
			     const struct dm_config_node *sn,
			     struct dm_hash_table *pv_hash)
{
	const struct dm_config_value *cv;

	if (dm_config_has_node(sn, "region_size")) {
		if (!dm_config_get_uint32(sn, "region_size", &seg->region_size)) {
			log_error("Couldn't read 'region_size' for "
				  "segment %s of logical volume %s.",
				  dm_config_parent_name(sn), seg->lv->name);
			return 0;
		}
	}

	if (dm_config_has_node(sn, "stripe_size")) {
		if (!dm_config_get_uint32(sn, "stripe_size", &seg->stripe_size)) {
			log_error("Couldn't read 'stripe_size' for "
				  "segment %s of logical volume %s.",
				  dm_config_parent_name(sn), seg->lv->name);
			return 0;
		}
	}
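
	/*
	 * 'writebehind' (raid1 only) limits the number of outstanding
	 * writes to devices marked write-mostly.
	 */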
	if (dm_config_has_node(sn, "writebehind")) {
		if (!dm_config_get_uint32(sn, "writebehind", &seg->writebehind)) {
			log_error("Couldn't read 'writebehind' for "
				  "segment %s of logical volume %s.",
				  dm_config_parent_name(sn), seg->lv->name);
			return 0;
		}
	}

	if (!dm_config_get_list(sn, "raids", &cv)) {
		log_error("Couldn't find RAID array for "
			  "segment %s of logical volume %s.",
			  dm_config_parent_name(sn), seg->lv->name);
		return 0;
	}

	if (!_raid_text_import_areas(seg, sn, cv)) {
		log_error("Failed to import RAID images");
		return 0;
	}

	seg->status |= RAID;

	return 1;
}
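
/*
 * Export the RAID-specific fields of a segment to the text metadata.
 * Optional fields (region_size, stripe_size, writebehind) are only
 * emitted when they have a non-zero value.
 */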
static int _raid_text_export(const struct lv_segment *seg, struct formatter *f)
{
	outf(f, "device_count = %u", seg->area_count);
	if (seg->region_size)
		outf(f, "region_size = %" PRIu32, seg->region_size);
	if (seg->stripe_size)
		outf(f, "stripe_size = %" PRIu32, seg->stripe_size);

	if (seg->writebehind)
		outf(f, "writebehind = %" PRIu32, seg->writebehind);

	return out_areas(f, seg, "raid");
}
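
/*
 * Build the device-mapper table entry for a RAID segment.  The values
 * collected in dm_tree_node_raid_params become the "raid" target line
 * emitted by libdevmapper; an illustrative (approximate) raid1 line:
 *
 *     0 1024000 raid raid1 3 0 region_size 1024 2 <meta0> <data0> <meta1> <data1>
 *
 * add_areas_line() supplies the metadata/data device pairs for each area.
 */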
static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
				 struct dm_pool *mem __attribute__((unused)),
				 struct cmd_context *cmd __attribute__((unused)),
				 void **target_state __attribute__((unused)),
				 struct lv_segment *seg,
				 const struct lv_activate_opts *laopts __attribute__((unused)),
				 struct dm_tree_node *node, uint64_t len,
				 uint32_t *pvmove_mirror_count __attribute__((unused)))
{
	uint32_t s;
	uint64_t flags = 0;
	uint64_t rebuilds = 0;
	uint64_t writemostly = 0;
	struct dm_tree_node_raid_params params;

	memset(&params, 0, sizeof(params));

	if (!seg->area_count) {
		log_error(INTERNAL_ERROR "_raid_add_target_line called "
			  "with no areas for %s.", seg->lv->name);
		return 0;
	}

	/*
	 * 64 device restriction imposed by kernel as well.  It is
	 * not strictly a userspace limitation.
	 */
	if (seg->area_count > 64) {
		log_error("Unable to handle more than 64 devices in a "
			  "single RAID array");
		return 0;
	}

	if (!seg->region_size) {
		log_error("Missing region size for mirror segment.");
		return 0;
	}

	for (s = 0; s < seg->area_count; s++)
		if (seg_lv(seg, s)->status & LV_REBUILD)
			rebuilds |= 1ULL << s;

	for (s = 0; s < seg->area_count; s++)
		if (seg_lv(seg, s)->status & LV_WRITEMOSTLY)
			writemostly |= 1ULL << s;

	if (mirror_in_sync())
		flags = DM_NOSYNC;

	params.raid_type = _raid_name(seg);
	if (seg->segtype->parity_devs) {
		/* RAID 4/5/6 */
		params.mirrors = 1;
		params.stripes = seg->area_count - seg->segtype->parity_devs;
	} else if (!strcmp(seg->segtype->name, "raid10")) {
		/* RAID 10 only supports 2 mirrors now */
		params.mirrors = 2;
		params.stripes = seg->area_count / 2;
	} else {
		/* RAID 1 */
		params.mirrors = seg->area_count;
		params.stripes = 1;
		params.writebehind = seg->writebehind;
	}

	params.region_size = seg->region_size;
	params.stripe_size = seg->stripe_size;
	params.rebuilds = rebuilds;
	params.writemostly = writemostly;
	params.flags = flags;

	if (!dm_tree_node_add_raid_target_with_params(node, len, &params))
		return_0;

	return add_areas_line(dm, seg, node, 0u, seg->area_count);
}

static int _raid_target_status_compatible(const char *type)
{
	return (strstr(type, "raid") != NULL);
}
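
/*
 * Parse the <synced>/<total> fraction from the kernel status line
 * (format sketched in the comment below) to report sync progress.
 */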
static int _raid_target_percent(void **target_state,
				percent_t *percent,
				struct dm_pool *mem,
				struct cmd_context *cmd,
				struct lv_segment *seg, char *params,
				uint64_t *total_numerator,
				uint64_t *total_denominator)
{
	int i;
	uint64_t numerator, denominator;
	char *pos = params;

	/*
	 * Status line:
	 *    <raid_type> <#devs> <status_chars> <synced>/<total>
	 * Example:
	 *    raid1 2 AA 1024000/1024000
	 */
	for (i = 0; i < 3; i++) {
		pos = strstr(pos, " ");
		if (pos)
			pos++;
		else
			break;
	}

	if (!pos || (sscanf(pos, "%" PRIu64 "/%" PRIu64 "%n",
			    &numerator, &denominator, &i) != 2)) {
		log_error("Failed to parse %s status fraction: %s",
			  (seg) ? seg->segtype->name : "segment", params);
		return 0;
	}

	*total_numerator += numerator;
	*total_denominator += denominator;

	if (seg)
		seg->extents_copied = seg->area_len * numerator / denominator;

	*percent = make_percent(numerator, denominator);

	return 1;
}
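
/* Check for the "raid" target; the result is cached after the first call. */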
static int _raid_target_present(struct cmd_context *cmd,
				const struct lv_segment *seg __attribute__((unused)),
				unsigned *attributes __attribute__((unused)))
{
	static int _raid_checked = 0;
	static int _raid_present = 0;

	if (!_raid_checked)
		_raid_present = target_present(cmd, "raid", 1);

	_raid_checked = 1;

	return _raid_present;
}

static int _raid_modules_needed(struct dm_pool *mem,
				const struct lv_segment *seg __attribute__((unused)),
				struct dm_list *modules)
{
	if (!str_list_add(mem, modules, "raid")) {
		log_error("raid module string list allocation failed");
		return 0;
	}

	return 1;
}

static void _raid_destroy(struct segment_type *segtype)
{
	dm_free((void *) segtype);
}

#ifdef DEVMAPPER_SUPPORT
#ifdef DMEVENTD
static const char *_get_raid_dso_path(struct cmd_context *cmd)
{
	const char *config_str = find_config_tree_str(cmd, dmeventd_raid_library_CFG);

	return get_monitor_dso_path(cmd, config_str);
}

static int _raid_target_monitored(struct lv_segment *seg, int *pending)
{
	struct cmd_context *cmd = seg->lv->vg->cmd;
	const char *dso_path = _get_raid_dso_path(cmd);

	return target_registered_with_dmeventd(cmd, dso_path, seg->lv, pending);
}

static int _raid_set_events(struct lv_segment *seg, int evmask, int set)
{
	struct cmd_context *cmd = seg->lv->vg->cmd;
	const char *dso_path = _get_raid_dso_path(cmd);

	return target_register_events(cmd, dso_path, seg->lv, evmask, set, 0);
}

static int _raid_target_monitor_events(struct lv_segment *seg, int events)
{
	return _raid_set_events(seg, events, 1);
}

static int _raid_target_unmonitor_events(struct lv_segment *seg, int events)
{
	return _raid_set_events(seg, events, 0);
}
#endif /* DMEVENTD */
#endif /* DEVMAPPER_SUPPORT */
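
/* Segment-type operations table shared by all raid* segment types. */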
static struct segtype_handler _raid_ops = {
	.name = _raid_name,
	.text_import_area_count = _raid_text_import_area_count,
	.text_import = _raid_text_import,
	.text_export = _raid_text_export,
	.add_target_line = _raid_add_target_line,
	.target_status_compatible = _raid_target_status_compatible,
#ifdef DEVMAPPER_SUPPORT
	.target_percent = _raid_target_percent,
	.target_present = _raid_target_present,
#ifdef DMEVENTD
	.target_monitored = _raid_target_monitored,
	.target_monitor_events = _raid_target_monitor_events,
	.target_unmonitor_events = _raid_target_unmonitor_events,
#endif /* DMEVENTD */
#endif
	.modules_needed = _raid_modules_needed,
	.destroy = _raid_destroy,
};
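
/*
 * Allocate and initialise a segment_type for the given raid type name.
 * parity_devs defaults to 2 for raid6 variants and 1 otherwise; the
 * raid1/raid10 constructors below reset it to 0.
 */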
static struct segment_type *_init_raid_segtype(struct cmd_context *cmd,
					       const char *raid_type)
{
	struct segment_type *segtype = dm_zalloc(sizeof(*segtype));

	if (!segtype) {
		log_error("Failed to allocate memory for %s segtype",
			  raid_type);
		return NULL;
	}

	segtype->cmd = cmd;

	segtype->flags = SEG_RAID;
#ifdef DEVMAPPER_SUPPORT
#ifdef DMEVENTD
	if (_get_raid_dso_path(cmd))
		segtype->flags |= SEG_MONITORED;
#endif
#endif
	segtype->parity_devs = strstr(raid_type, "raid6") ? 2 : 1;

	segtype->ops = &_raid_ops;
	segtype->name = raid_type;

	segtype->private = NULL;

	log_very_verbose("Initialised segtype: %s", segtype->name);

	return segtype;
}

static struct segment_type *_init_raid1_segtype(struct cmd_context *cmd)
{
	struct segment_type *segtype;

	segtype = _init_raid_segtype(cmd, "raid1");
	if (!segtype)
		return NULL;

	segtype->flags |= SEG_AREAS_MIRRORED;
	segtype->parity_devs = 0;

	return segtype;
}

static struct segment_type *_init_raid10_segtype(struct cmd_context *cmd)
{
	struct segment_type *segtype;

	segtype = _init_raid_segtype(cmd, "raid10");
	if (!segtype)
		return NULL;

	segtype->flags |= SEG_AREAS_MIRRORED;
	segtype->parity_devs = 0;

	return segtype;
}

static struct segment_type *_init_raid4_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid4");
}

static struct segment_type *_init_raid5_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid5");
}

static struct segment_type *_init_raid5_la_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid5_la");
}

static struct segment_type *_init_raid5_ra_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid5_ra");
}

static struct segment_type *_init_raid5_ls_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid5_ls");
}

static struct segment_type *_init_raid5_rs_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid5_rs");
}

static struct segment_type *_init_raid6_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid6");
}

static struct segment_type *_init_raid6_zr_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid6_zr");
}

static struct segment_type *_init_raid6_nr_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid6_nr");
}

static struct segment_type *_init_raid6_nc_segtype(struct cmd_context *cmd)
{
	return _init_raid_segtype(cmd, "raid6_nc");
}
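
/*
 * Register all RAID segment types: built in to the tools when
 * RAID_INTERNAL is defined, otherwise exposed as the entry point of a
 * shared segtype module.
 */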
#ifdef RAID_INTERNAL /* Shared */
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib)
#else
int init_multiple_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);

int init_multiple_segtypes(struct cmd_context *cmd, struct segtype_library *seglib)
#endif
{
	struct segment_type *segtype;
	unsigned i = 0;
	struct segment_type *(*raid_segtype_fn[])(struct cmd_context *) = {
		_init_raid1_segtype,
		_init_raid10_segtype,
		_init_raid4_segtype,
		_init_raid5_segtype,
		_init_raid5_la_segtype,
		_init_raid5_ra_segtype,
		_init_raid5_ls_segtype,
		_init_raid5_rs_segtype,
		_init_raid6_segtype,
		_init_raid6_zr_segtype,
		_init_raid6_nr_segtype,
		_init_raid6_nc_segtype,
		NULL,
	};

	do {
		if ((segtype = raid_segtype_fn[i](cmd)) &&
		    !lvm_register_segtype(seglib, segtype))
			/* segtype is already destroyed */
			return_0;
	} while (raid_segtype_fn[++i]);

	return 1;
}