2001-11-06 22:02:26 +03:00
/*
2008-01-30 17:00:02 +03:00
* Copyright ( C ) 2001 - 2004 Sistina Software , Inc . All rights reserved .
2023-12-14 16:20:19 +03:00
* Copyright ( C ) 2004 - 2023 Red Hat , Inc . All rights reserved .
2001-11-06 22:02:26 +03:00
*
2004-03-30 23:35:44 +04:00
* This file is part of LVM2 .
2001-11-06 22:02:26 +03:00
*
2004-03-30 23:35:44 +04:00
* This copyrighted material is made available to anyone wishing to use ,
* modify , copy , or redistribute it subject to the terms and conditions
2007-08-21 00:55:30 +04:00
* of the GNU Lesser General Public License v .2 .1 .
2004-03-30 23:35:44 +04:00
*
2007-08-21 00:55:30 +04:00
* You should have received a copy of the GNU Lesser General Public License
2004-03-30 23:35:44 +04:00
* along with this program ; if not , write to the Free Software Foundation ,
2016-01-21 13:49:46 +03:00
* Inc . , 51 Franklin Street , Fifth Floor , Boston , MA 02110 - 1301 USA
2001-11-06 22:02:26 +03:00
*/
# include "tools.h"
2002-01-01 00:27:39 +03:00
2001-11-16 18:38:52 +03:00
# include <fcntl.h>
2001-11-06 22:02:26 +03:00
2009-07-26 06:31:41 +04:00
/*
 * Size/placement options gathered from the command line before the VG is
 * opened; resolved into lvcreate_params extents in _update_extents_params().
 */
struct lvcreate_cmdline_params {
	uint64_t size;		/* requested size in sectors (0 = not given) */
	uint64_t virtual_size;	/* snapshot, thin */
	percent_type_t percent;	/* how a -l percentage is interpreted (e.g. %VG, %FREE) */
	char **pvs;		/* PVs listed on the command line restricting allocation */
	uint32_t pv_count;	/* number of entries in pvs */
};
2015-12-01 02:32:49 +03:00
struct processing_params {
struct lvcreate_params * lp ;
struct lvcreate_cmdline_params * lcp ;
} ;
2011-09-06 04:26:42 +04:00
static int _set_vg_name ( struct lvcreate_params * lp , const char * vg_name )
{
/* Can't do anything */
if ( ! vg_name )
return 1 ;
/* If VG name already known, ensure this 2nd copy is identical */
if ( lp - > vg_name & & strcmp ( lp - > vg_name , vg_name ) ) {
log_error ( " Inconsistent volume group names "
" given: \" %s \" and \" %s \" " ,
lp - > vg_name , vg_name ) ;
return 0 ;
}
lp - > vg_name = vg_name ;
return 1 ;
}
2014-10-24 17:26:41 +04:00
/*
 * Resolve VG, LV, pool and origin names from --name / --thinpool /
 * --vdopool / --cachepool and the first positional argument.
 * Consumes argv[0] (advancing *pargv / decrementing *pargc) whenever it
 * was used as a VG, pool or origin specifier.
 * Returns 1 on success, 0 on conflicting or missing name input.
 */
static int _lvcreate_name_params(struct cmd_context *cmd,
				 int *pargc, char ***pargv,
				 struct lvcreate_params *lp)
{
	int argc = *pargc;
	char **argv = *pargv;
	const char *vg_name;

	lp->lv_name = arg_str_value(cmd, name_ARG, NULL);

	if (!validate_restricted_lvname_param(cmd, &lp->vg_name, &lp->lv_name))
		return_0;

	/* First of --thinpool/--vdopool/--cachepool wins (GNU ?: extension) */
	lp->pool_name = arg_str_value(cmd, thinpool_ARG, NULL)
		? : arg_str_value(cmd, vdopool_ARG, NULL)
		? : arg_str_value(cmd, cachepool_ARG, NULL);
	if (!validate_lvname_param(cmd, &lp->vg_name, &lp->pool_name))
		return_0;

	if (seg_is_cache(lp)) {
		/*
		 * 2 ways of cache usage for lvcreate -H -l1 vg/lv
		 *
		 * vg/lv is existing cache pool:
		 *	cached LV is created using this cache pool
		 * vg/lv is not cache pool so it is cache origin
		 *	origin is cached with created cache pool
		 *
		 * We are looking for the vgname or cache pool or cache origin LV.
		 *
		 * lv name is stored in origin_name and pool_name and
		 * later with opened VG it's decided what should happen.
		 */
		if (!argc) {
			if (!lp->pool_name) {
				log_error("Please specify a logical volume to act as the cache pool or origin.");
				return 0;
			}
		} else {
			vg_name = skip_dev_dir(cmd, argv[0], NULL);
			if (!strchr(vg_name, '/')) {
				/* Lucky part - only vgname is here */
				if (!_set_vg_name(lp, vg_name))
					return_0;
			} else {
				/* Assume it's cache origin for now */
				lp->origin_name = vg_name;
				if (!validate_lvname_param(cmd, &lp->vg_name, &lp->origin_name))
					return_0;

				if (lp->pool_name) {
					/* Both positional LV and --cachepool given:
					 * only valid when they are the same LV */
					if (strcmp(lp->pool_name, lp->origin_name)) {
						log_error("Unsupported syntax, cannot use cache origin %s and --cachepool %s.",
							  lp->origin_name, lp->pool_name);
						return 0;
					}
					lp->origin_name = NULL;
				} else {
					/*
					 * Gambling here, could be cache pool or cache origin,
					 * detection is possible after opening vg,
					 * yet we need to parse pool args
					 */
					lp->pool_name = lp->origin_name;
					lp->create_pool = 1;
				}
			}
			(*pargv)++, (*pargc)--;	/* positional arg consumed */
		}

		if (!lp->vg_name &&
		    !_set_vg_name(lp, extract_vgname(cmd, NULL)))
			return_0;

		if (!lp->vg_name) {
			log_error("The cache pool or cache origin name should "
				  "include the volume group.");
			return 0;
		}

		if (!lp->pool_name) {
			log_error("Creation of cached volume and cache pool "
				  "in one command is not yet supported.");
			return 0;
		}
	} else if (lp->snapshot && !arg_is_set(cmd, virtualsize_ARG)) {
		/* argv[0] might be [vg/]origin */
		if (!argc) {
			log_error("Please specify a logical volume to act as "
				  "the snapshot origin.");
			return 0;
		}

		lp->origin_name = argv[0];
		if (!validate_lvname_param(cmd, &lp->vg_name, &lp->origin_name))
			return_0;

		if (!lp->vg_name &&
		    !_set_vg_name(lp, extract_vgname(cmd, NULL)))
			return_0;

		if (!lp->vg_name) {
			log_error("The origin name should include the "
				  "volume group.");
			return 0;
		}

		(*pargv)++, (*pargc)--;	/* origin consumed */
	} else if ((seg_is_pool(lp) || seg_is_thin(lp) || seg_is_vdo(lp)) && argc) {
		/* argv[0] might be [/dev.../]vg or [/dev../]vg/pool */

		vg_name = skip_dev_dir(cmd, argv[0], NULL);
		if (!strchr(vg_name, '/')) {
			if (lp->snapshot && arg_is_set(cmd, virtualsize_ARG))
				lp->snapshot = 0; /* Sparse volume via thin-pool */

			if (!_set_vg_name(lp, vg_name))
				return_0;
		} else {
			if (!validate_lvname_param(cmd, &lp->vg_name, &vg_name))
				return_0;

			/* Positional vg/pool must agree with --*pool option */
			if (lp->pool_name &&
			    (strcmp(vg_name, lp->pool_name) != 0)) {
				log_error("Ambiguous %s name specified, %s and %s.",
					  lp->segtype->name, vg_name, lp->pool_name);
				return 0;
			}
			lp->pool_name = vg_name;

			if (!lp->vg_name &&
			    !_set_vg_name(lp, extract_vgname(cmd, NULL)))
				return_0;

			if (!lp->vg_name) {
				log_error("The %s name should include the "
					  "volume group.", lp->segtype->name);
				return 0;
			}
		}
		(*pargv)++, (*pargc)--;	/* vg or vg/pool consumed */
	} else {
		/*
		 * If VG not on command line, try environment default.
		 */
		if (!argc) {
			if (!lp->vg_name && !(lp->vg_name = extract_vgname(cmd, NULL))) {
				log_error("Please provide a volume group name");
				return 0;
			}
		} else {
			vg_name = skip_dev_dir(cmd, argv[0], NULL);
			if (strchr(vg_name, '/')) {
				log_error("Volume group name expected "
					  "(no slash)");
				return 0;
			}

			if (!_set_vg_name(lp, vg_name))
				return_0;

			(*pargv)++, (*pargc)--;	/* vg name consumed */
		}
	}

	/* support --name & --type {thin|cache}-pool */
	if (seg_is_pool(lp) && lp->lv_name) {
		if (lp->pool_name && (strcmp(lp->lv_name, lp->pool_name) != 0)) {
			log_error("Ambiguous %s name specified, %s and %s.",
				  lp->segtype->name, lp->lv_name, lp->pool_name);
			return 0;
		}
		/* --name names the pool itself when creating a bare pool */
		lp->pool_name = lp->lv_name;
		lp->lv_name = NULL;
	}

	if (lp->pool_name && lp->lv_name && !strcmp(lp->pool_name, lp->lv_name)) {
		log_error("Logical volume name %s and pool name must be different.",
			  lp->lv_name);
		return 0;
	}

	if (!validate_name(lp->vg_name)) {
		log_error("Volume group name %s has invalid characters",
			  lp->vg_name);
		return 0;
	}

	return 1;
}
2009-07-26 06:31:18 +04:00
/*
 * Update extents parameters based on other parameters which affect the size
 * calculation.
 * NOTE: We must do this here because of the dm_percent_t typedef and because we
 * need the vg.
 *
 * Converts sizes/percentages from the command line (lcp) into concrete
 * extent counts in lp, applying per-segtype limits (VDO pool maximum,
 * snapshot COW maximum, stripe rounding, pool metadata reservation).
 */
static int _update_extents_params(struct volume_group *vg,
				  struct lvcreate_params *lp,
				  struct lvcreate_cmdline_params *lcp)
{
	uint32_t pv_extent_count;
	struct logical_volume *origin_lv = NULL;
	uint32_t size_rest;
	uint32_t stripesize_extents;
	uint32_t extents;
	uint32_t base_calc_extents = 0;	/* base of the percentage calculation, for logging */
	uint32_t vdo_pool_max_extents;

	if (lcp->size &&
	    !(lp->extents = extents_from_size(vg->cmd, lcp->size,
					      vg->extent_size)))
		return_0;

	if (lcp->virtual_size &&
	    !(lp->virtual_extents = extents_from_size(vg->cmd, lcp->virtual_size,
						      vg->extent_size)))
		return_0;

	/*
	 * Create the pv list before we parse lcp->percent - might be
	 * PERCENT_PVSs
	 */
	if (lcp->pv_count) {
		if (!(lp->pvh = create_pv_list(vg->cmd->mem, vg,
					       lcp->pv_count, lcp->pvs, 1)))
			return_0;
	} else
		lp->pvh = &vg->pvs;

	/* Resolve a percentage into an absolute extent count */
	switch (lcp->percent) {
	case PERCENT_VG:
		extents = percent_of_extents(lp->extents, base_calc_extents = vg->extent_count, 0);
		/* Cannot allocate more than remains free */
		if (extents > vg->free_count) {
			extents = vg->free_count;
			log_print_unless_silent("Reducing %u%%VG to remaining free space %s in VG.",
						lp->extents,
						display_size(vg->cmd, (uint64_t) vg->extent_size * extents));
		}
		break;
	case PERCENT_FREE:
		extents = percent_of_extents(lp->extents, base_calc_extents = vg->free_count, 0);
		break;
	case PERCENT_PVS:
		if (lcp->pv_count) {
			pv_extent_count = pv_list_extents_free(lp->pvh);
			extents = percent_of_extents(lp->extents, base_calc_extents = pv_extent_count, 0);
		} else
			/* No PV list given: fall back to whole-VG extent count */
			extents = percent_of_extents(lp->extents, base_calc_extents = vg->extent_count, 0);
		break;
	case PERCENT_LV:
		log_error("Please express size as %%FREE%s, %%PVS or %%VG.",
			  (lp->snapshot) ? ", %ORIGIN" : "");
		return 0;
	case PERCENT_ORIGIN:
		if (lp->snapshot && lp->origin_name &&
		    !(origin_lv = find_lv(vg, lp->origin_name))) {
			log_error("Couldn't find origin volume '%s'.",
				  lp->origin_name);
			return 0;
		}
		if (!origin_lv) {
			log_error(INTERNAL_ERROR "Couldn't find origin volume.");
			return 0;
		}
		/* Add whole metadata size estimation */
		extents = cow_max_extents(origin_lv, lp->chunk_size) - origin_lv->le_count +
			percent_of_extents(lp->extents, base_calc_extents = origin_lv->le_count, 1);
		break;
	case PERCENT_NONE:
		extents = lp->extents;
		break;
	default:
		log_error(INTERNAL_ERROR "Unsupported percent type %u.", lcp->percent);
		return 0;
	}

	if (seg_is_vdo(lp)) {
		/* VDO pools have a slab-size-dependent maximum physical size */
		vdo_pool_max_extents = get_vdo_pool_max_extents(&lp->vcp.vdo_params, vg->extent_size);
		if (extents > vdo_pool_max_extents) {
			if (lcp->percent == PERCENT_NONE) {
				/* Explicit size: reject rather than silently shrink */
				log_error("Can't use %s size. Maximal supported VDO POOL volume size with slab size %s is %s.",
					  display_size(vg->cmd, (uint64_t) vg->extent_size * extents),
					  display_size(vg->cmd, (uint64_t) lp->vcp.vdo_params.slab_size_mb << (20 - SECTOR_SHIFT)),
					  display_size(vg->cmd, (uint64_t) vg->extent_size * vdo_pool_max_extents));
				return 0;
			}

			extents = vdo_pool_max_extents;

			log_verbose("Using maximal supported VDO POOL volume size %s (with slab size %s).",
				    display_size(vg->cmd, (uint64_t) vg->extent_size * extents),
				    display_size(vg->cmd, (uint64_t) lp->vcp.vdo_params.slab_size_mb << (20 - SECTOR_SHIFT)));
		}
	}

	if (lcp->percent != PERCENT_NONE) {
		/* FIXME Don't do the adjustment for parallel allocation with PERCENT_ORIGIN! */
		lp->approx_alloc = 1;
		if (!extents) {
			log_error("Calculated size of logical volume is 0 extents. Needs to be larger.");
			return 0;
		}
		/* For mirrors and raid with percentages based on physical extents, convert the total number of PEs
		 * into the number of logical extents per image (minimum 1) */
		/* FIXME Handle all the supported raid layouts here based on already-known segtype. */
		if ((lcp->percent != PERCENT_ORIGIN) && lp->mirrors) {
			extents /= lp->mirrors;
			if (!extents)
				extents = 1;
		}
		log_verbose("Converted %" PRIu32 "%% of %s (%" PRIu32 ") extents into %" PRIu32 " (with mimages %" PRIu32 " and stripes %" PRIu32
			    " for segtype %s).", lp->extents, get_percent_string(lcp->percent), base_calc_extents,
			    extents, lp->mirrors, lp->stripes, lp->segtype->name);

		lp->extents = extents;
	}

	if (lp->snapshot && lp->origin_name && lp->extents) {
		if (!lp->chunk_size) {
			log_error(INTERNAL_ERROR "Missing snapshot chunk size.");
			return 0;
		}

		/* origin_lv may already be cached from the PERCENT_ORIGIN branch */
		if (!origin_lv && !(origin_lv = find_lv(vg, lp->origin_name))) {
			log_error("Couldn't find origin volume '%s'.",
				  lp->origin_name);
			return 0;
		}

		/* A COW larger than cow_max_extents() is never usable */
		extents = cow_max_extents(origin_lv, lp->chunk_size);

		if (extents < lp->extents) {
			log_print_unless_silent("Reducing COW size %s down to maximum usable size %s.",
						display_size(vg->cmd, (uint64_t) vg->extent_size * lp->extents),
						display_size(vg->cmd, (uint64_t) vg->extent_size * extents));
			lp->extents = extents;
		}
	}

	if (!(stripesize_extents = lp->stripe_size / vg->extent_size))
		stripesize_extents = 1;

	/* Round a percentage-derived size down to a stripe boundary when the
	 * rounded-up alternative would not fit in the VG's free space */
	if ((lcp->percent != PERCENT_NONE) && lp->stripes &&
	    (size_rest = lp->extents % (lp->stripes * stripesize_extents)) &&
	    (vg->free_count < lp->extents - size_rest + (lp->stripes * stripesize_extents))) {
		log_print_unless_silent("Rounding size (%d extents) down to stripe boundary "
					"size (%d extents)", lp->extents,
					lp->extents - size_rest);
		lp->extents = lp->extents - size_rest;
	}

	if (lp->create_pool) {
		if (lp->pool_metadata_size &&
		    !(lp->pool_metadata_extents =
		      extents_from_size(vg->cmd, lp->pool_metadata_size, vg->extent_size)))
			return_0;

		if (segtype_is_thin_pool(lp->segtype) || segtype_is_thin(lp->segtype)) {
			if (!update_thin_pool_params(vg->cmd, vg->profile, vg->extent_size,
						     lp->segtype, lp->target_attr,
						     lp->extents,
						     &lp->pool_metadata_extents,
						     NULL,
						     &lp->crop_metadata,
						     &lp->thin_chunk_size_calc_policy,
						     &lp->chunk_size,
						     &lp->discards,
						     &lp->zero_new_blocks))
				return_0;
		} else if (segtype_is_cache_pool(lp->segtype) || segtype_is_cache(lp->segtype)) {
			if (!update_cache_pool_params(vg->cmd, vg->profile, vg->extent_size,
						      lp->segtype, lp->target_attr,
						      lp->extents,
						      &lp->pool_metadata_extents,
						      NULL,
						      &lp->thin_chunk_size_calc_policy,
						      &lp->chunk_size))
				return_0;
		}

		/* Percentage of free space must also cover the (mirrored)
		 * metadata LV, so reserve 2 * metadata extents from the data */
		if (lcp->percent == PERCENT_FREE || lcp->percent == PERCENT_PVS) {
			if (lp->extents <= (2 * lp->pool_metadata_extents)) {
				log_error("Not enough space for thin pool creation.");
				return 0;
			}
			/* FIXME: persistent hidden space in VG wanted */
			lp->extents -= (2 * lp->pool_metadata_extents);
		}
	}

	if ((lcp->percent != PERCENT_NONE) && !lp->extents) {
		log_error("Adjusted size of logical volume is 0 extents. Needs to be larger.");
		return 0;
	}

	return 1;
}
2014-09-29 00:31:30 +04:00
/*
* Validate various common size arguments
*
* Note : at this place all volume types needs to be already
* identified , do not change them here .
*/
2014-10-24 17:26:41 +04:00
static int _read_size_params ( struct cmd_context * cmd ,
struct lvcreate_params * lp ,
struct lvcreate_cmdline_params * lcp )
2002-02-15 14:53:22 +03:00
{
2014-10-24 17:26:41 +04:00
if ( arg_from_list_is_negative ( cmd , " may not be negative " ,
chunksize_ARG , extents_ARG ,
mirrors_ARG ,
maxrecoveryrate_ARG ,
minrecoveryrate_ARG ,
regionsize_ARG ,
size_ARG ,
stripes_ARG , stripesize_ARG ,
virtualsize_ARG ,
- 1 ) )
return_0 ;
2001-11-06 22:02:26 +03:00
2014-10-24 17:26:41 +04:00
if ( arg_from_list_is_zero ( cmd , " may not be zero " ,
chunksize_ARG , extents_ARG ,
regionsize_ARG ,
size_ARG ,
stripes_ARG , stripesize_ARG ,
virtualsize_ARG ,
- 1 ) )
return_0 ;
2011-09-06 04:26:42 +04:00
2014-10-31 13:33:19 +03:00
lcp - > virtual_size = arg_uint64_value ( cmd , virtualsize_ARG , UINT64_C ( 0 ) ) ;
2014-09-29 00:31:30 +04:00
2016-06-22 00:24:52 +03:00
if ( arg_is_set ( cmd , extents_ARG ) ) {
if ( arg_is_set ( cmd , size_ARG ) ) {
2014-10-24 17:26:41 +04:00
log_error ( " Please specify either size or extents (not both). " ) ;
2003-09-15 19:04:39 +04:00
return 0 ;
}
2014-10-24 17:26:41 +04:00
lp - > extents = arg_uint_value ( cmd , extents_ARG , 0 ) ;
2009-07-26 06:31:41 +04:00
lcp - > percent = arg_percent_value ( cmd , extents_ARG , PERCENT_NONE ) ;
2016-06-22 00:24:52 +03:00
} else if ( arg_is_set ( cmd , size_ARG ) ) {
2014-10-24 17:26:41 +04:00
lcp - > size = arg_uint64_value ( cmd , size_ARG , UINT64_C ( 0 ) ) ;
2009-07-26 06:31:41 +04:00
lcp - > percent = PERCENT_NONE ;
2014-10-24 17:26:41 +04:00
} else if ( ! lp - > snapshot & & ! seg_is_thin_volume ( lp ) ) {
log_error ( " Please specify either size or extents. " ) ;
return 0 ;
2009-04-25 05:17:59 +04:00
}
2002-02-15 14:53:22 +03:00
return 1 ;
}
2007-09-24 17:29:49 +04:00
/*
2014-10-24 17:26:41 +04:00
* Read parameters related to mirrors
2007-09-24 17:29:49 +04:00
*/
2014-10-24 17:26:41 +04:00
static int _read_mirror_params ( struct cmd_context * cmd ,
struct lvcreate_params * lp )
2005-06-01 20:51:55 +04:00
{
2014-10-22 23:02:29 +04:00
int corelog = arg_is_set ( cmd , corelog_ARG ) ;
lp - > log_count = arg_int_value ( cmd , mirrorlog_ARG , corelog ? 0 : DEFAULT_MIRRORLOG ) ;
2007-08-02 01:01:06 +04:00
2014-10-24 17:26:41 +04:00
if ( corelog & & ( lp - > log_count ! = 0 ) ) {
log_error ( " Please use only one of --corelog or --mirrorlog. " ) ;
return 0 ;
2007-09-24 17:29:49 +04:00
}
2014-10-22 23:02:29 +04:00
log_verbose ( " Setting logging type to %s " , get_mirror_log_name ( lp - > log_count ) ) ;
2007-09-24 17:29:49 +04:00
2005-06-01 20:51:55 +04:00
return 1 ;
}
2014-10-24 17:26:41 +04:00
/*
 * Read parameters related to raids
 *
 * Applies per-raid-level stripe minimums (supplying too few stripes is an
 * error only when --stripes was given explicitly; otherwise the minimum is
 * assumed), recovery-rate limits, region size and integrity options.
 */
static int _read_raid_params(struct cmd_context *cmd,
			     struct lvcreate_params *lp)
{
	if (seg_is_mirrored(lp)) {
		if (segtype_is_raid10(lp->segtype)) {
			if (lp->stripes < 2) {
				/*
				 * RAID10 needs at least 2 stripes
				 * (doubled below to 4 images)
				 */
				if (lp->stripes_supplied) {
					log_error("Minimum of 2 stripes required for %s.",
						  lp->segtype->name);
					return 0;
				}

				log_verbose("Using 2 stripes for %s.", lp->segtype->name);
				lp->stripes = 2;
			}
			/*
			 * FIXME: _check_raid_parameters divides by 2, which
			 *	  needs to change if we start supporting
			 *	  odd numbers of stripes with raid10
			 */
			lp->stripes *= 2;
		} else if (lp->stripes > 1) {
			/*
			 * RAID1 does not take a stripe arg
			 */
			log_error("Stripes argument cannot be used with segment type, %s",
				  lp->segtype->name);
			return 0;
		}

	} else if (seg_is_any_raid6(lp) && lp->stripes < 3) {
		/* raid6 needs at least 3 data stripes (+2 parity) */
		if (lp->stripes_supplied) {
			log_error("Minimum of 3 stripes required for %s.", lp->segtype->name);
			return 0;
		}

		log_verbose("Using 3 stripes for %s.", lp->segtype->name);
		lp->stripes = 3;
	} else if (lp->stripes < 2) {
		/* Remaining striped raid levels need at least 2 stripes */
		if (lp->stripes_supplied) {
			log_error("Minimum of 2 stripes required for %s.", lp->segtype->name);
			return 0;
		}

		log_verbose("Using 2 stripes for %s.", lp->segtype->name);
		lp->stripes = 2;
	}

	if (seg_is_raid1(lp)) {
		/* raid1 has no striping, so a stripe size makes no sense */
		if (lp->stripe_size) {
			log_error("Stripe size argument cannot be used with segment type, %s",
				  lp->segtype->name);
			return 0;
		}
	}

	/* -m only applies to mirrored raid levels (raid1/raid10) */
	if (arg_is_set(cmd, mirrors_ARG) && segtype_is_raid(lp->segtype) &&
	    !segtype_is_raid1(lp->segtype) && !segtype_is_raid10(lp->segtype)) {
		log_error("Mirror argument cannot be used with segment type, %s",
			  lp->segtype->name);
		return 0;
	}

	if (seg_is_any_raid0(lp))
		/* raid0 keeps no resync state, so it has no region size */
		lp->region_size = 0;
	else {
		/* Rates are recorded in kiB/sec/disk, not sectors/sec/disk */
		lp->min_recovery_rate = arg_uint_value(cmd, minrecoveryrate_ARG, 0) / 2;
		lp->max_recovery_rate = arg_uint_value(cmd, maxrecoveryrate_ARG, 0) / 2;

		if (lp->min_recovery_rate > lp->max_recovery_rate) {
			log_print_unless_silent("Minimum recovery rate cannot be higher than maximum, adjusting.");
			lp->max_recovery_rate = lp->min_recovery_rate;
		}

		/* Region size must be at least the stripe size */
		if (lp->region_size < lp->stripe_size) {
			log_print_unless_silent("Adjusting %s %s region size to required minimum of stripe size %s.",
						lp->segtype->name, display_size(cmd, (uint64_t) lp->region_size),
						display_size(cmd, (uint64_t) lp->stripe_size));
			lp->region_size = lp->stripe_size;
		}
	}

	/* Optional dm-integrity layer beneath the raid images */
	if (arg_int_value(cmd, raidintegrity_ARG, 0)) {
		lp->raidintegrity = 1;
		if (arg_is_set(cmd, raidintegrityblocksize_ARG))
			lp->integrity_settings.block_size = arg_int_value(cmd, raidintegrityblocksize_ARG, 0);
		if (arg_is_set(cmd, raidintegritymode_ARG)) {
			if (!integrity_mode_set(arg_str_value(cmd, raidintegritymode_ARG, NULL), &lp->integrity_settings))
				return_0;
		}
	}

	return 1;
}
2013-05-31 20:25:52 +04:00
2014-10-24 17:26:41 +04:00
/*
 * Read parameters related to mirrors and raids
 *
 * Derives the per-segtype image/stripe limit, normalises --mirrors into
 * lp->mirrors (option value + 1), checks image counts against the limit,
 * reads --nosync and --regionsize, then delegates to the mirror- or
 * raid-specific readers.
 */
static int _read_mirror_and_raid_params(struct cmd_context *cmd,
					struct lvcreate_params *lp)
{
	unsigned max_images;

	if (seg_is_raid(lp)) {
		if (seg_is_raid1(lp))
			max_images = DEFAULT_RAID1_MAX_IMAGES;
		else {
			max_images = DEFAULT_RAID_MAX_IMAGES;
			/* Parity devices consume image slots */
			if (seg_is_raid4(lp) ||
			    seg_is_any_raid5(lp))
				max_images--;
			else if (seg_is_any_raid6(lp))
				max_images -= 2;
		}
	} else if (seg_is_mirrored(lp))
		max_images = DEFAULT_MIRROR_MAX_IMAGES;
	else
		max_images = MAX_STRIPES;

	/* Common mirror and raid params */
	if (arg_is_set(cmd, mirrors_ARG)) {
		/* '-m N' means N additional copies, hence N + 1 images */
		lp->mirrors = arg_uint_value(cmd, mirrors_ARG, 0) + 1;

		if ((lp->mirrors > 2) && segtype_is_raid10(lp->segtype)) {
			/*
			 * FIXME: When RAID10 is no longer limited to
			 *        2-way mirror, 'lv_mirror_count()'
			 *        must also change for RAID10.
			 */
			log_error("RAID10 currently supports "
				  "only 2-way mirroring (i.e. '-m 1')");
			return 0;
		}

		if (lp->mirrors == 1) {
			if (seg_is_mirrored(lp)) {
				log_error("--mirrors must be at least 1 with segment type %s.", lp->segtype->name);
				return 0;
			}
			log_print_unless_silent("Redundant mirrors argument: default is 0");
		}
	} else
		/* Default to 2 mirrored areas if '--type mirror|raid1|raid10' */
		lp->mirrors = seg_is_mirrored(lp) ? 2 : 1;

	/* FIXME: raid10 check has to change once we support data copies and odd numbers of stripes */
	if (seg_is_raid10(lp) && lp->mirrors * lp->stripes > max_images) {
		log_error("Only up to %u stripes in %s supported currently.",
			  max_images / lp->mirrors, lp->segtype->name);
		return 0;
	}

	if (seg_is_mirrored(lp)) {
		if (lp->mirrors > max_images) {
			log_error("Only up to %u mirrors in %s supported currently.",
				  max_images, lp->segtype->name);
			return 0;
		}
	} else if (lp->stripes > max_images) {
		log_error("Only up to %u stripes in %s supported currently.",
			  max_images, lp->segtype->name);
		return 0;
	}

	/* --nosync would leave raid6 parity unusable */
	if ((lp->nosync = arg_is_set(cmd, nosync_ARG)) && seg_is_any_raid6(lp)) {
		log_error("nosync option prohibited on RAID6.");
		return 0;
	}

	if (!(lp->region_size = arg_uint_value(cmd, regionsize_ARG, 0)) &&
	    ((lp->region_size = get_default_region_size(cmd)) <= 0)) {
		log_error("regionsize in configuration file is invalid.");
		return 0;
	}

	if (seg_is_mirror(lp) && !_read_mirror_params(cmd, lp))
		return_0;

	if (seg_is_raid(lp) && !_read_raid_params(cmd, lp))
		return_0;

	return 1;
}
2014-10-24 17:26:41 +04:00
static int _read_cache_params ( struct cmd_context * cmd ,
struct lvcreate_params * lp )
2014-02-04 21:57:08 +04:00
{
2014-10-18 13:01:29 +04:00
if ( ! seg_is_cache ( lp ) & & ! seg_is_cache_pool ( lp ) )
2014-02-04 21:57:08 +04:00
return 1 ;
2015-07-23 16:35:12 +03:00
if ( ! get_cache_params ( cmd ,
2017-03-09 18:20:44 +03:00
& lp - > chunk_size ,
2017-02-26 22:18:37 +03:00
& lp - > cache_metadata_format ,
2015-07-23 16:35:12 +03:00
& lp - > cache_mode ,
& lp - > policy_name ,
& lp - > policy_settings ) )
2015-08-12 15:53:00 +03:00
return_0 ;
2015-07-15 12:06:40 +03:00
2014-02-04 21:57:08 +04:00
return 1 ;
}
2022-04-13 16:09:08 +03:00
/*
 * Read VDO-related options. A no-op unless the new LV is a VDO segment
 * or a pool with VDO-backed data. Fills default target parameters first,
 * then applies any user-supplied --vdosettings overrides on top.
 */
static int _read_vdo_params(struct cmd_context *cmd,
			    struct lvcreate_params *lp,
			    struct lvcreate_cmdline_params *lcp)
{
	if (!seg_is_vdo(lp) &&
	    !lp->pool_data_vdo)
		return 1;

	// prefill default settings here
	if (!fill_vdo_target_params(cmd, &lp->vcp.vdo_params, &lp->vdo_pool_header_size, NULL))
		return_0;

	if (lp->pool_data_vdo) {
		/* The hidden VDO data LV is created deactivated and zeroed;
		 * carry over the user's wipe/force/yes answers */
		lp->vcp.activate = CHANGE_AN;
		lp->vcp.do_zero = 1;
		lp->vcp.do_wipe_signatures = lp->wipe_signatures;
		lp->vcp.force = lp->force;
		lp->vcp.yes = lp->yes;
		cmd->lvcreate_vcp = &lp->vcp;
	}

	/* If the header would push the virtual size past the VDO maximum,
	 * drop the header rather than reject the size */
	if ((lcp->virtual_size <= DM_VDO_LOGICAL_SIZE_MAXIMUM) &&
	    ((lcp->virtual_size + lp->vdo_pool_header_size) > DM_VDO_LOGICAL_SIZE_MAXIMUM)) {
		log_verbose("Dropping VDO pool header size to 0 to support maximal size %s.",
			    display_size(cmd, DM_VDO_LOGICAL_SIZE_MAXIMUM));
		lp->vdo_pool_header_size = 0;
	}

	// override with optional vdo settings
	if (!get_vdo_settings(cmd, &lp->vcp.vdo_params, NULL))
		return_0;

	return 1;
}
2014-10-24 17:26:41 +04:00
static int _read_activation_params ( struct cmd_context * cmd ,
2015-07-10 14:51:15 +03:00
struct volume_group * vg ,
2014-10-24 17:26:41 +04:00
struct lvcreate_params * lp )
2011-09-06 04:26:42 +04:00
{
2014-10-31 15:36:54 +03:00
unsigned pagesize = lvm_getpagesize ( ) > > SECTOR_SHIFT ;
2011-09-06 04:26:42 +04:00
2012-02-28 18:24:57 +04:00
lp - > activate = ( activation_change_t )
2012-06-27 15:48:31 +04:00
arg_uint_value ( cmd , activate_ARG , CHANGE_AY ) ;
2011-09-06 04:26:42 +04:00
2015-01-13 17:23:03 +03:00
/* Error when full */
if ( arg_is_set ( cmd , errorwhenfull_ARG ) ) {
lp - > error_when_full = arg_uint_value ( cmd , errorwhenfull_ARG , 0 ) ;
} else
lp - > error_when_full =
seg_can_error_when_full ( lp ) & &
2023-12-08 16:04:55 +03:00
find_config_tree_bool ( cmd , activation_error_when_full_CFG , vg - > profile ) ;
2015-01-13 17:23:03 +03:00
2014-10-18 13:01:29 +04:00
/* Read ahead */
2011-09-06 04:26:42 +04:00
lp - > read_ahead = arg_uint_value ( cmd , readahead_ARG ,
cmd - > default_settings . read_ahead ) ;
if ( lp - > read_ahead ! = DM_READ_AHEAD_AUTO & &
lp - > read_ahead ! = DM_READ_AHEAD_NONE & &
lp - > read_ahead % pagesize ) {
if ( lp - > read_ahead < pagesize )
lp - > read_ahead = pagesize ;
else
lp - > read_ahead = ( lp - > read_ahead / pagesize ) * pagesize ;
log_warn ( " WARNING: Overriding readahead to %u sectors, a multiple "
2013-07-06 13:04:19 +04:00
" of %uK page size. " , lp - > read_ahead , pagesize > > 1 ) ;
2011-09-06 04:26:42 +04:00
}
2014-10-23 16:26:16 +04:00
/* Persistent minor (and major), default 'n' */
if ( ! get_and_validate_major_minor ( cmd , vg - > fid - > fmt , & lp - > major , & lp - > minor ) )
return_0 ;
2011-08-03 02:07:20 +04:00
2014-10-21 14:03:07 +04:00
if ( arg_is_set ( cmd , setactivationskip_ARG ) ) {
2013-07-10 16:06:50 +04:00
lp - > activation_skip | = ACTIVATION_SKIP_SET ;
if ( arg_int_value ( cmd , setactivationskip_ARG , 0 ) )
lp - > activation_skip | = ACTIVATION_SKIP_SET_ENABLED ;
}
2014-10-21 14:03:07 +04:00
if ( arg_is_set ( cmd , ignoreactivationskip_ARG ) )
2013-07-10 16:06:50 +04:00
lp - > activation_skip | = ACTIVATION_SKIP_IGNORE ;
Add metadata-based autoactivation property for VG and LV
The autoactivation property can be specified in lvcreate
or vgcreate for new LVs/VGs, and the property can be changed
by lvchange or vgchange for existing LVs/VGs.
--setautoactivation y|n
enables|disables autoactivation of a VG or LV.
Autoactivation is enabled by default, which is consistent with
past behavior. The disabled state is stored as a new flag
in the VG metadata, and the absence of the flag allows
autoactivation.
If autoactivation is disabled for the VG, then no LVs in the VG
will be autoactivated (the LV autoactivation property will have
no effect.) When autoactivation is enabled for the VG, then
autoactivation can be controlled on individual LVs.
The state of this property can be reported for LVs/VGs using
the "-o autoactivation" option in lvs/vgs commands, which will
report "enabled", or "" for the disabled state.
Previous versions of lvm do not recognize this property. Since
autoactivation is enabled by default, the disabled setting will
have no effect in older lvm versions. If the VG is modified by
older lvm versions, the disabled state will also be dropped from
the metadata.
The autoactivation property is an alternative to using the lvm.conf
auto_activation_volume_list, which is still applied to to VGs/LVs
in addition to the new property.
If VG or LV autoactivation is disabled either in metadata or in
auto_activation_volume_list, it will not be autoactivated.
An autoactivation command will silently skip activating an LV
when the autoactivation property is disabled.
To determine the effective autoactivation behavior for a specific
LV, multiple settings would need to be checked:
the VG autoactivation property, the LV autoactivation property,
the auto_activation_volume_list. The "activation skip" property
would also be relevant, since it applies to both normal and auto
activation.
2021-04-02 01:20:00 +03:00
if ( arg_is_set ( cmd , setautoactivation_ARG ) & & ! arg_int_value ( cmd , setautoactivation_ARG , 1 ) )
lp - > noautoactivate = 1 ;
2011-08-03 02:07:20 +04:00
return 1 ;
}
2014-10-24 17:26:41 +04:00
/*
 * Parse and validate all lvcreate command-line options into 'lp' and 'lcp'.
 *
 * First the target segment type is resolved (--type is authoritative; the
 * ordering of the fallback shortcut tests below is IMPORTANT), then each
 * segment type validates that only its supported options were given, and
 * finally the shared size/stripe/pool/cache/vdo/mirror parameters are read.
 *
 * Remaining positional arguments (the PV list) are handed back via 'lcp'.
 * Returns 1 on success, 0 on any validation failure.
 */
static int _lvcreate_params(struct cmd_context *cmd,
			    int argc, char **argv,
			    struct lvcreate_params *lp,
			    struct lvcreate_cmdline_params *lcp)
{
	int contiguous;
	struct arg_value_group_list *current_group;
	const char *segtype_str;
	const char *tag;
	int only_linear = 0;	/* Set when user explicitly asked for 'linear' */
	int mirror_default_cfg;

	dm_list_init(&lp->tags);
	lp->target_attr = ~0;
	lp->yes = arg_count(cmd, yes_ARG);
	lp->force = (force_t) arg_count(cmd, force_ARG);
	lp->permission = arg_uint_value(cmd, permission_ARG,
					LVM_READ | LVM_WRITE);

	/*
	 * --type is the top most rule
	 *
	 * Ordering of following type tests is IMPORTANT
	 */
	if (lp->ignore_type) {
		segtype_str = SEG_TYPE_NAME_STRIPED;
	} else if ((segtype_str = arg_str_value(cmd, type_ARG, NULL))) {
		lp->type = 1;
		if (!strcmp(segtype_str, "linear")) {
			/* 'linear' is implemented by 'striped' with one stripe */
			segtype_str = "striped";
			only_linear = 1; /* User requested linear only target */
		}
	/* More estimations from options after shortcuts */
	} else if (arg_is_set(cmd, snapshot_ARG) &&
		   (arg_is_set(cmd, virtualoriginsize_ARG) ||
		    !arg_is_set(cmd, virtualsize_ARG)))
		/* Snapshot has higher priority then thin */
		segtype_str = SEG_TYPE_NAME_SNAPSHOT; /* --thinpool makes thin volume */
	else if (arg_is_set(cmd, cache_ARG) || arg_is_set(cmd, cachepool_ARG))
		segtype_str = SEG_TYPE_NAME_CACHE;
	else if (arg_is_set(cmd, thin_ARG) || arg_is_set(cmd, thinpool_ARG))
		segtype_str = SEG_TYPE_NAME_THIN;
	else if (arg_is_set(cmd, vdo_ARG) || arg_is_set(cmd, vdopool_ARG))
		segtype_str = SEG_TYPE_NAME_VDO;
	else if (arg_is_set(cmd, virtualsize_ARG)) {
		if (arg_is_set(cmd, virtualoriginsize_ARG))
			segtype_str = SEG_TYPE_NAME_SNAPSHOT; /* --virtualoriginsize incompatible with pools */
		else
			segtype_str = find_config_tree_str(cmd, global_sparse_segtype_default_CFG, NULL);
	} else if (arg_uint_value(cmd, mirrors_ARG, 0)) {
		/* Remember, '-m 0' implies stripe */
		mirror_default_cfg = (arg_uint_value(cmd, stripes_ARG, 1) > 1)
			? global_raid10_segtype_default_CFG : global_mirror_segtype_default_CFG;
		segtype_str = find_config_tree_str(cmd, mirror_default_cfg, NULL);
	} else
		segtype_str = SEG_TYPE_NAME_STRIPED;

	if (!(lp->segtype = get_segtype_from_string(cmd, segtype_str)))
		return_0;

	if (seg_unknown(lp)) {
		log_error("Unable to create LV with unknown segment type %s.", segtype_str);
		return 0;
	}

	/* Starts basic option validation for every segment type */

	/* FIXME Use these ARGS macros also in commands.h? */
	/* ARGS are disjoint! sets of options */
#define LVCREATE_ARGS \
	activate_ARG, \
	addtag_ARG, \
	alloc_ARG, \
	autobackup_ARG, \
	available_ARG, \
	cachesettings_ARG, \
	contiguous_ARG, \
	devices_ARG, \
	devicesfile_ARG, \
	ignoreactivationskip_ARG, \
	ignoremonitoring_ARG, \
	journal_ARG, \
	metadataprofile_ARG, \
	monitor_ARG, \
	mirrors_ARG, \
	name_ARG, \
	nohints_ARG, \
	noudevsync_ARG, \
	permission_ARG, \
	persistent_ARG, \
	readahead_ARG, \
	setactivationskip_ARG, \
	test_ARG, \
	type_ARG

#define CACHE_POOL_ARGS \
	cachemetadataformat_ARG, \
	cachemode_ARG, \
	cachepool_ARG, \
	cachepolicy_ARG

#define MIRROR_ARGS \
	corelog_ARG, \
	mirrorlog_ARG

#define MIRROR_RAID_ARGS \
	nosync_ARG, \
	regionsize_ARG

#define PERSISTENT_ARGS \
	major_ARG, \
	minor_ARG

#define POOL_ARGS \
	pooldatasize_ARG, \
	poolmetadatasize_ARG, \
	poolmetadataspare_ARG

#define RAID_ARGS \
	maxrecoveryrate_ARG, \
	minrecoveryrate_ARG, \
	raidmaxrecoveryrate_ARG, \
	raidminrecoveryrate_ARG, \
	raidintegrity_ARG, \
	raidintegritymode_ARG, \
	raidintegrityblocksize_ARG

#define SIZE_ARGS \
	extents_ARG, \
	size_ARG, \
	stripes_ARG, \
	stripesize_ARG

#define THIN_POOL_ARGS \
	discards_ARG, \
	pooldatavdo_ARG, \
	thinpool_ARG

#define VDO_POOL_ARGS \
	vdopool_ARG, \
	compression_ARG, \
	deduplication_ARG, \
	vdosettings_ARG

	/* Cache and cache-pool segment type */
	if (seg_is_cache(lp)) {
		/* Only supported with --type cache, -H, --cache */
		if (arg_outside_list_is_set(cmd, "is unsupported with cache",
					    CACHE_POOL_ARGS,
					    LVCREATE_ARGS,
					    PERSISTENT_ARGS,
					    POOL_ARGS,
					    SIZE_ARGS,
					    cache_ARG,
					    chunksize_ARG,
					    wipesignatures_ARG, zero_ARG,
					    -1))
			return_0;

		lp->create_pool = 1; /* Confirmed when opened VG */
	} else if (seg_is_cache_pool(lp)) {
		if (arg_outside_list_is_set(cmd, "is unsupported with cache pools",
					    CACHE_POOL_ARGS,
					    LVCREATE_ARGS,
					    POOL_ARGS,
					    extents_ARG,
					    size_ARG,
					    cache_ARG,
					    chunksize_ARG,
					    -1))
			return_0;

		if (!(lp->permission & LVM_WRITE)) {
			log_error("Cannot create read-only cache pool.");
			return 0;
		}

		lp->create_pool = 1;
	} else if (arg_from_list_is_set(cmd, "is supported only with cache",
					cache_ARG, CACHE_POOL_ARGS,
					-1))
		return_0;

	/* Snapshot segment type */
	if (seg_is_snapshot(lp)) {
		/* Only supported with --type snapshot, -s, --snapshot */
		if (arg_outside_list_is_set(cmd, "is unsupported with snapshots",
					    LVCREATE_ARGS,
					    PERSISTENT_ARGS,
					    SIZE_ARGS,
					    chunksize_ARG,
					    snapshot_ARG,
					    thinpool_ARG,
					    virtualoriginsize_ARG,
					    virtualsize_ARG,
					    -1))
			return_0;

		/* FIXME Resolve this ambiguous case with --pooldatasize */
		if (arg_is_set(cmd, thinpool_ARG)) {
			if (lp->type) {
				/* Unsupported with --type snapshot */
				log_error("Snapshot segment type is incompatible with thin pools.");
				return 0;
			}

			if (arg_from_list_is_set(cmd, "is unsupported with snapshots and --thinpool",
						 SIZE_ARGS,
						 chunksize_ARG,
						 virtualoriginsize_ARG,
						 virtualsize_ARG,
						 -1))
				return_0;
		}

		/* Snapshot segment type needs size/extents */
		if (lp->type && !arg_is_set(cmd, size_ARG) && !arg_is_set(cmd, extents_ARG)) {
			log_error("Snapshot segment type requires size or extents.");
			return 0;
		}

		lp->snapshot = 1; /* Free arg is snapshot origin */
	} else if (arg_from_list_is_set(cmd, "is supported only with sparse snapshots",
					virtualoriginsize_ARG,
					-1))
		return_0;

	/* Mirror segment type */
	if (seg_is_mirror(lp)) {
		if (arg_outside_list_is_set(cmd, "is unsupported with mirrors",
					    LVCREATE_ARGS,
					    MIRROR_ARGS,
					    MIRROR_RAID_ARGS,
					    PERSISTENT_ARGS,
					    SIZE_ARGS,
					    wipesignatures_ARG, zero_ARG,
					    -1))
			return_0;
	} else if (arg_from_list_is_set(cmd, "is supported only with mirrors",
					MIRROR_ARGS,
					-1))
		return_0;

	/* Raid segment type */
	if (seg_is_raid(lp)) {
		if (arg_outside_list_is_set(cmd, "is unsupported with raids",
					    LVCREATE_ARGS,
					    MIRROR_RAID_ARGS,
					    PERSISTENT_ARGS,
					    RAID_ARGS,
					    SIZE_ARGS,
					    wipesignatures_ARG, zero_ARG,
					    -1))
			return_0;
	} else if (arg_from_list_is_set(cmd, "is supported only with raids",
					RAID_ARGS,
					-1))
		return_0;

	/* Thin and thin-pool segment type */
	if (seg_is_thin_volume(lp)) {
		/* Only supported with --type thin, -T, --thin, -V */
		if (arg_outside_list_is_set(cmd, "is unsupported with thins",
					    LVCREATE_ARGS,
					    PERSISTENT_ARGS,
					    POOL_ARGS,
					    SIZE_ARGS,
					    THIN_POOL_ARGS,
					    VDO_POOL_ARGS,
					    chunksize_ARG,
					    errorwhenfull_ARG,
					    snapshot_ARG,
					    thin_ARG,
					    virtualsize_ARG,
					    wipesignatures_ARG, zero_ARG,
					    -1))
			return_0;

		/* If size/extents given with thin, then we are also creating a thin-pool */
		if (arg_is_set(cmd, size_ARG) || arg_is_set(cmd, extents_ARG)) {
			if (arg_is_set(cmd, pooldatasize_ARG)) {
				log_error("Please specify either size or pooldatasize.");
				return 0;
			}
			lp->create_pool = 1;
		} else if (arg_from_list_is_set(cmd, "is supported only with thin pool creation",
						POOL_ARGS,
						SIZE_ARGS,
						chunksize_ARG,
						discards_ARG,
						errorwhenfull_ARG,
						zero_ARG,
						-1))
			return_0;

		if (!arg_is_set(cmd, virtualsize_ARG)) {
			/* Without virtual size could be creation of thin-pool or snapshot */
			if (lp->create_pool) {
				if (lp->type) {
					log_error("Thin segment type requires --virtualsize.");
					return 0;
				}

				log_debug_metadata("Switching from thin to thin pool segment type.");
				if (!(lp->segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_THIN_POOL)))
					return_0;
			} else /* Parse free arg as snapshot origin */
				lp->snapshot = 1;
		} else if (arg_is_set(cmd, snapshot_ARG))
			lp->snapshot = 1;
	} else if (seg_is_thin_pool(lp)) {
		if (arg_outside_list_is_set(cmd, "is unsupported with thin pools",
					    LVCREATE_ARGS,
					    POOL_ARGS,
					    SIZE_ARGS,
					    THIN_POOL_ARGS,
					    chunksize_ARG,
					    zero_ARG,
					    -1))
			return_0;

		if (!(lp->permission & LVM_WRITE)) {
			log_error("Cannot create read-only thin pool.");
			return 0;
		}

		lp->create_pool = 1;
	} else if (!lp->snapshot &&
		   arg_from_list_is_set(cmd, "is supported only with thins",
					thin_ARG, THIN_POOL_ARGS,
					-1))
		return_0;
	else if (seg_is_vdo(lp)) {
		/* Only supported with --type thin, -T, --thin, -V */
		if (arg_outside_list_is_set(cmd, "is unsupported with VDOs",
					    LVCREATE_ARGS,
					    PERSISTENT_ARGS,
					    SIZE_ARGS,
					    VDO_POOL_ARGS,
					    vdo_ARG,
					    virtualsize_ARG,
					    wipesignatures_ARG, zero_ARG,
					    -1))
			return_0;

		/* If size/extents given with thin, then we are also creating a thin-pool */
		if (arg_is_set(cmd, size_ARG) || arg_is_set(cmd, extents_ARG)) {
			if (arg_is_set(cmd, pooldatasize_ARG)) {
				log_error("Please specify either size or pooldatasize.");
				return 0;
			}
			lp->create_pool = 1;
		} else if (arg_from_list_is_set(cmd, "is supported only with VDO pool creation",
						VDO_POOL_ARGS,
						SIZE_ARGS,
						zero_ARG,
						-1))
			return_0;
	}

	/* Check options shared between more segment types */
	if (!seg_is_mirror(lp) && !seg_is_raid(lp)) {
		if (arg_from_list_is_set(cmd, "is supported only with mirrors or raids",
					 nosync_ARG,
					 regionsize_ARG,
					 -1))
			return_0;

		/* Let -m0 pass */
		if (arg_int_value(cmd, mirrors_ARG, 0)) {
			log_error("--mirrors is supported only with mirrors or raids");
			return 0;
		}
	}

	if (!lp->create_pool && !lp->snapshot &&
	    arg_from_list_is_set(cmd, "is supported only with pools and snapshots",
				 chunksize_ARG,
				 -1))
		return_0;

	if (!lp->snapshot && !seg_is_thin_volume(lp) && !seg_is_vdo(lp) &&
	    arg_from_list_is_set(cmd, "is supported only with vdo, sparse snapshots and thins",
				 virtualsize_ARG,
				 -1))
		return_0;

	if (!seg_can_error_when_full(lp) && !lp->create_pool &&
	    arg_is_set(cmd, errorwhenfull_ARG)) {
		log_error("Segment type %s does not support --errorwhenfull.", lp->segtype->name);
		return 0;
	}

	/* Basic segment type validation finished here */

	/* Verify the kernel provides the target (and needed RAID features). */
	if (activation() && lp->segtype->ops->target_present) {
		if (!lp->segtype->ops->target_present(cmd, NULL, &lp->target_attr)) {
			log_error("%s: Required device-mapper target(s) not detected in your kernel.",
				  lp->segtype->name);
			return 0;
		}

		if (segtype_is_any_raid0(lp->segtype) &&
		    !(lp->target_attr & RAID_FEATURE_RAID0)) {
			log_error("RAID module does not support RAID0.");
			return 0;
		}

		if (segtype_is_raid4(lp->segtype) &&
		    !(lp->target_attr & RAID_FEATURE_RAID4)) {
			log_error("RAID module does not support RAID4.");
			return 0;
		}

		if (segtype_is_raid10(lp->segtype) && !(lp->target_attr & RAID_FEATURE_RAID10)) {
			log_error("RAID module does not support RAID10.");
			return 0;
		}
	}

	/* Should we zero/wipe signatures on the lv, default to 'y' */
	lp->zero = arg_int_value(cmd, zero_ARG, 1);

	if (arg_is_set(cmd, wipesignatures_ARG)) {
		/* If -W/--wipesignatures is given on command line directly, respect it. */
		lp->wipe_signatures = arg_int_value(cmd, wipesignatures_ARG, 1);
	} else {
		/*
		 * If -W/--wipesignatures is not given on command line,
		 * look at the allocation/wipe_signatures_when_zeroing_new_lvs
		 * to decide what should be done exactly.
		 */
		if (find_config_tree_bool(cmd, allocation_wipe_signatures_when_zeroing_new_lvs_CFG, NULL))
			lp->wipe_signatures = lp->zero;
		else
			lp->wipe_signatures = 0;
	}

	/* Read the remaining grouped parameters; each helper logs its own errors. */
	if (!_lvcreate_name_params(cmd, &argc, &argv, lp) ||
	    !_read_size_params(cmd, lp, lcp) ||
	    !get_stripe_params(cmd, lp->segtype, &lp->stripes, &lp->stripe_size, &lp->stripes_supplied, &lp->stripe_size_supplied) ||
	    (lp->create_pool &&
	     !get_pool_params(cmd, lp->segtype, &lp->pool_data_vdo,
			      &lp->pool_metadata_size, &lp->pool_metadata_spare,
			      &lp->chunk_size, &lp->discards, &lp->zero_new_blocks)) ||
	    !_read_cache_params(cmd, lp) ||
	    !_read_vdo_params(cmd, lp, lcp) ||
	    !_read_mirror_and_raid_params(cmd, lp))
		return_0;

	if (only_linear && lp->stripes > 1) {
		log_error("Cannot use stripes with linear type.");
		return 0;
	}

	/* COW snapshot chunk size: power of 2 in sectors (8..1024 = 4K..512K). */
	if (lp->snapshot && (lp->extents || lcp->size)) {
		lp->chunk_size = arg_uint_value(cmd, chunksize_ARG, 8);
		if (lp->chunk_size < 8 || lp->chunk_size > 1024 ||
		    !is_power_of_2(lp->chunk_size)) {
			log_error("Chunk size must be a power of 2 in the "
				  "range 4K to 512K.");
			return 0;
		}
		log_verbose("Setting chunksize to %s.", display_size(cmd, lp->chunk_size));
	}

	/* Allocation parameters */
	contiguous = arg_int_value(cmd, contiguous_ARG, 0);
	lp->alloc = contiguous ? ALLOC_CONTIGUOUS : ALLOC_INHERIT;
	lp->alloc = (alloc_policy_t) arg_uint_value(cmd, alloc_ARG, lp->alloc);

	if (contiguous && (lp->alloc != ALLOC_CONTIGUOUS)) {
		log_error("Conflicting contiguous and alloc arguments.");
		return 0;
	}

	/* Collect every --addtag value into lp->tags. */
	dm_list_iterate_items(current_group, &cmd->arg_value_groups) {
		if (!grouped_arg_is_set(current_group->arg_values, addtag_ARG))
			continue;

		if (!(tag = grouped_arg_str_value(current_group->arg_values, addtag_ARG, NULL))) {
			log_error("Failed to get tag.");
			return 0;
		}

		if (!str_list_add(cmd->mem, &lp->tags, tag)) {
			log_error("Unable to allocate memory for tag %s.", tag);
			return 0;
		}
	}

	/* Remaining positional args are the PVs to allocate from. */
	lcp->pv_count = argc;
	lcp->pvs = argv;

	return 1;
}
2014-10-22 15:53:36 +04:00
/*
* _determine_cache_argument
* @ vg
* @ lp
*
* ' lp - > pool_name ' is set with an LV that could be either the cache_pool name
* or the origin name of the cached LV which is being created .
* This function determines which it is and sets ' lp - > origin_name ' or
* ' lp - > pool_name ' appropriately .
*/
static int _determine_cache_argument ( struct volume_group * vg ,
struct lvcreate_params * lp )
{
2014-10-26 18:17:14 +03:00
struct cmd_context * cmd = vg - > cmd ;
2014-10-22 15:53:36 +04:00
struct logical_volume * lv ;
if ( ! lp - > pool_name ) {
lp - > pool_name = lp - > lv_name ;
2014-10-24 17:26:41 +04:00
} else if ( ( lv = find_lv ( vg , lp - > pool_name ) ) & & lv_is_cache_pool ( lv ) ) {
2014-10-22 23:01:03 +04:00
if ( ! validate_lv_cache_create_pool ( lv ) )
return_0 ;
2014-10-24 17:26:41 +04:00
/* Pool exists, create cache volume */
lp - > create_pool = 0 ;
lp - > origin_name = NULL ;
} else if ( lv ) {
2017-06-09 11:59:37 +03:00
if ( arg_is_set ( cmd , cachepool_ARG ) ) {
/* Argument of --cachepool has to be a cache-pool */
log_error ( " Logical volume %s is not a cache pool. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
2014-10-24 17:26:41 +04:00
/* Origin exists, create cache pool volume */
if ( ! validate_lv_cache_create_origin ( lv ) )
return_0 ;
2014-10-26 18:17:14 +03:00
if ( arg_is_set ( cmd , permission_ARG ) & &
2014-10-24 17:26:41 +04:00
( ( lp - > permission & LVM_WRITE ) ! = ( lv - > status & LVM_WRITE ) ) ) {
/* Reverting permissions on all error path is very complicated */
log_error ( " Change of volume permission is unsupported with cache conversion, use lvchange. " ) ;
return 0 ;
2014-10-22 15:53:36 +04:00
}
2014-11-11 17:13:00 +03:00
/* FIXME How to handle skip flag? */
2014-10-26 18:17:14 +03:00
if ( arg_from_list_is_set ( cmd , " is unsupported with cache conversion " ,
2015-09-10 17:33:11 +03:00
stripes_ARG ,
stripesize_ARG ,
2014-10-24 17:26:41 +04:00
setactivationskip_ARG ,
ignoreactivationskip_ARG ,
- 1 ) )
2014-11-11 17:13:00 +03:00
return_0 ; /* FIXME */
2014-10-24 17:26:41 +04:00
/* Put origin into resulting activation state first */
2018-03-05 14:54:39 +03:00
lv = ( struct logical_volume * ) lv_lock_holder ( lv ) ;
2014-10-24 17:26:41 +04:00
if ( is_change_activating ( lp - > activate ) ) {
if ( ( lp - > activate = = CHANGE_AAY ) & &
2014-10-26 18:17:14 +03:00
! lv_passes_auto_activation_filter ( cmd , lv ) ) {
2014-10-24 17:26:41 +04:00
log_verbose ( " Skipping activation of cache origin %s. " ,
display_lvname ( lv ) ) ;
return 1 ;
2018-05-31 22:20:11 +03:00
2018-06-01 18:04:54 +03:00
} else if ( vg_is_shared ( vg ) ) {
2018-06-05 21:21:28 +03:00
if ( ! lv_active_change ( cmd , lv , CHANGE_AEY ) ) {
2018-05-31 22:20:11 +03:00
log_error ( " Cannot activate cache origin %s. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
2018-06-05 21:21:28 +03:00
} else if ( ! activate_lv ( cmd , lv ) ) {
2014-10-24 17:26:41 +04:00
log_error ( " Cannot activate cache origin %s. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
2014-10-26 18:17:14 +03:00
} else if ( ! deactivate_lv ( cmd , lv ) ) {
2014-10-24 17:26:41 +04:00
log_error ( " Cannot deactivate activate cache origin %s. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
/* lp->origin_name is already equal to lp->pool_name */
lp - > pool_name = lp - > lv_name ; /* --name is cache pool name */
/* No zeroing of an existing origin! */
lp - > zero = lp - > wipe_signatures = 0 ;
} else {
/* Cache pool and cache volume needs to be created */
lp - > origin_name = NULL ;
/* --pooldatasize is needed here */
log_error ( " Ambiguous syntax, please create --type cache-pool %s separately. " ,
lp - > pool_name ) ;
return 0 ;
2014-10-22 15:53:36 +04:00
}
return 1 ;
}
/*
* Normal snapshot or thinly - provisioned snapshot ?
*/
static int _determine_snapshot_type ( struct volume_group * vg ,
2014-10-24 17:26:41 +04:00
struct lvcreate_params * lp ,
struct lvcreate_cmdline_params * lcp )
2014-10-22 15:53:36 +04:00
{
2014-10-24 17:26:41 +04:00
struct logical_volume * origin_lv , * pool_lv = NULL ;
2014-10-22 15:53:36 +04:00
2014-10-24 17:26:41 +04:00
if ( ! ( origin_lv = find_lv ( vg , lp - > origin_name ) ) ) {
2014-10-22 15:53:36 +04:00
log_error ( " Snapshot origin LV %s not found in Volume group %s. " ,
lp - > origin_name , vg - > name ) ;
return 0 ;
}
2014-10-24 17:26:41 +04:00
if ( lp - > extents | | lcp - > size )
return 1 ; /* Size specified */
2014-10-22 15:53:36 +04:00
2014-10-24 17:26:41 +04:00
/* Check if we could make thin snapshot */
2014-10-22 15:53:36 +04:00
if ( lp - > pool_name ) {
if ( ! ( pool_lv = find_lv ( vg , lp - > pool_name ) ) ) {
log_error ( " Thin pool volume %s not found in Volume group %s. " ,
lp - > pool_name , vg - > name ) ;
return 0 ;
}
if ( ! lv_is_thin_pool ( pool_lv ) ) {
log_error ( " Logical volume %s is not a thin pool volume. " ,
display_lvname ( pool_lv ) ) ;
return 0 ;
}
2024-03-04 01:23:04 +03:00
if ( ! validate_thin_external_origin ( origin_lv , pool_lv ) )
return_0 ;
2014-10-24 17:26:41 +04:00
} else {
if ( ! lv_is_thin_volume ( origin_lv ) ) {
if ( ! seg_is_thin ( lp ) )
log_error ( " Please specify either size or extents with snapshots. " ) ;
else
log_error ( " Logical volume %s is not a thin volume. "
" Thin snapshot supports only thin origins. " ,
display_lvname ( origin_lv ) ) ;
return 0 ;
}
/* Origin thin volume without size makes thin segment */
lp - > pool_name = first_seg ( origin_lv ) - > pool_lv - > name ;
2014-10-22 15:53:36 +04:00
}
2014-10-24 17:26:41 +04:00
log_debug_metadata ( " Switching from snapshot to thin segment type. " ) ;
2015-09-22 21:04:12 +03:00
if ( ! ( lp - > segtype = get_segtype_from_string ( vg - > cmd , SEG_TYPE_NAME_THIN ) ) )
2014-10-24 17:26:41 +04:00
return_0 ;
lp - > snapshot = 0 ;
2014-10-22 15:53:36 +04:00
2014-10-24 17:26:41 +04:00
return 1 ;
}
2014-10-22 15:53:36 +04:00
2014-10-24 17:26:41 +04:00
/*
 * Sanity-check and finalize RAID parameters once the VG is opened.
 *
 * If --stripes was not given and the compatibility setting
 * allocation/raid_stripe_all_devices is enabled, the stripe count is
 * inferred from the PVs listed on the command line (or from all PVs
 * in the VG when none were listed).
 *
 * Returns 1 on success, 0 (with a logged error) on invalid parameters.
 */
static int _check_raid_parameters(struct volume_group *vg,
				  struct lvcreate_params *lp,
				  struct lvcreate_cmdline_params *lcp)
{
	/* Devices available for allocation: cmdline PVs, or the whole VG. */
	unsigned devs = lcp->pv_count ? : dm_list_size(&vg->pvs);
	uint64_t page_sectors = lvm_getpagesize() >> SECTOR_SHIFT;
	struct cmd_context *cmd = vg->cmd;
	/* 'Old' behaviour: no --stripes given and the config option enabled. */
	int old_stripes = !arg_is_set(cmd, stripes_ARG) &&
		find_config_tree_bool(cmd, allocation_raid_stripe_all_devices_CFG, NULL);

	/* RAID requires VG extents of at least one memory page. */
	if (vg->extent_size < page_sectors) {
		log_error("Unable to create RAID LV: requires minimum VG extent size %s",
			  display_size(vg->cmd, page_sectors));
		return 0;
	}

	/*
	 * If we requested the previous behaviour by setting
	 * "allocation/raid_stripe_all_devices = 1" and the
	 * number of devices was not supplied, we can infer
	 * from the PVs given.
	 */
	if (old_stripes && seg_is_raid(lp) && !seg_is_raid1(lp))
		lp->stripes = devs;

	if (seg_is_raid10(lp)) {
		/*
		 * Each raid10 stripe consumes 'mirrors' devices, so the
		 * device-derived count is divided down to stripes.
		 * NOTE(review): this division is unconditional here, i.e. it
		 * also applies when --stripes was given explicitly — confirm
		 * that is the intended interpretation of -i for raid10.
		 */
		lp->stripes /= lp->mirrors;

		if (lp->stripes < 2) {
			log_error("Unable to create RAID(1)0 LV: "
				  "insufficient number of devices.");
			return 0;
		}
	} else if (!seg_is_mirrored(lp)) {
		/*
		 * The inferred device count includes parity devices; for
		 * raid4/5/6 subtract them (only when enough devices remain).
		 */
		if (old_stripes &&
		    lp->segtype->parity_devs &&
		    devs > 2 * lp->segtype->parity_devs)
			lp->stripes -= lp->segtype->parity_devs;

		if (seg_is_any_raid0(lp)) {
			if (lp->stripes < 2) {
				log_error("Segment type 'raid0' requires 2 or more stripes.");
				return 0;
			}
		} else if (lp->stripes <= lp->segtype->parity_devs) {
			/* Need at least one data stripe beyond parity. */
			log_error("Number of stripes must be at least %d for %s",
				  lp->segtype->parity_devs + 1,
				  lp->segtype->name);
			return 0;
		}
	}
	/* 'mirrors' defaults to 2 - not the number of PVs supplied */

	return 1;
}
2011-09-06 04:26:42 +04:00
static int _check_thin_parameters ( struct volume_group * vg , struct lvcreate_params * lp ,
struct lvcreate_cmdline_params * lcp )
{
2014-10-24 17:26:41 +04:00
if ( seg_is_thin_volume ( lp ) & & lp - > snapshot ) {
2013-05-31 13:02:52 +04:00
log_error ( " Please either create snapshot or thin volume. " ) ;
2013-04-02 16:53:58 +04:00
return 0 ;
}
2014-10-24 17:26:41 +04:00
if ( ! seg_is_thin_volume ( lp ) & & ! lp - > snapshot ) {
if ( ! lp - > create_pool ) {
/* Not even creating thin pool? */
log_error ( " Please specify device size(s). " ) ;
return 0 ;
}
} else if ( ! lp - > create_pool ) {
if ( arg_from_list_is_set ( vg - > cmd , " is only available when creating thin pool " ,
2014-10-06 15:13:01 +04:00
alloc_ARG ,
chunksize_ARG ,
contiguous_ARG ,
stripes_ARG ,
zero_ARG ,
- 1 ) )
return_0 ;
2011-09-06 04:26:42 +04:00
if ( lcp - > pv_count ) {
2014-10-24 17:26:41 +04:00
log_error ( " Only specify Physical volumes when allocating thin pool. " ) ;
2011-09-06 04:26:42 +04:00
return 0 ;
}
2014-10-24 17:26:41 +04:00
}
2011-09-06 04:26:42 +04:00
2014-10-24 17:26:41 +04:00
return 1 ;
}
/*
 * Validate pool-related parameters for thin/cache/VDO creation.
 *
 * Called twice: first before the VG is read (vg == NULL) for early command
 * line validation, then again with the opened VG so existing LV names and
 * the type of a referenced pool can be checked.
 *
 * May modify lp: for plain pool creation lp->lv_name is set from
 * lp->pool_name.
 *
 * Returns 1 on success, 0 (with a logged error) otherwise.
 */
static int _check_pool_parameters(struct cmd_context *cmd,
				  struct volume_group *vg,
				  struct lvcreate_params *lp,
				  struct lvcreate_cmdline_params *lcp)
{
	struct logical_volume *pool_lv;

	/* Pool-only options are rejected unless a pool is being created. */
	if (!lp->create_pool &&
	    arg_from_list_is_set(cmd, "is only available with pools",
				 POOL_ARGS,
				 discards_ARG,
				 -1))
		return_0;

	if (!seg_is_cache(lp) &&
	    !seg_is_thin_volume(lp) &&
	    !seg_is_vdo(lp) &&
	    !seg_is_pool(lp)) {
		/*
		 * Segment type is unrelated to pools; a pool name is only
		 * tolerated here for the (thin) snapshot case.
		 */
		if (lp->pool_name && !lp->snapshot) {
			log_error("Segment type %s cannot use pool %s.",
				  lp->segtype->name, lp->pool_name);
			return 0;
		}
		return 1; /* Pool unrelated types */
	}

	if (lp->create_pool) {
		/* Given pool name needs to follow restrictions for created LV */
		if (lp->pool_name) {
			if (!seg_is_cache(lp) && !apply_lvname_restrictions(lp->pool_name))
				return_0;
			/* We could check existence only when we have vg */
			if (vg && find_lv(vg, lp->pool_name)) {
				log_error("Logical volume %s already exists in Volume group %s.",
					  lp->pool_name, vg->name);
				return 0;
			}
		}

		if (seg_is_pool(lp) || seg_is_vdo(lp)) {
			if (lp->major != -1 || lp->minor != -1) {
				log_error("Persistent major and minor numbers are unsupported with pools.");
				return 0;
			}
			/* When creating just pool the pool_name needs to be in lv_name */
			if (seg_is_pool(lp))
				lp->lv_name = lp->pool_name;
		} else if (vg) {
			/* FIXME: what better to do with --readahead and pools? */
			if (arg_is_set(cmd, readahead_ARG)) {
				log_error("Ambigous --readahead parameter specified. Please use either with pool or volume.");
				return 0;
			}
		}

		return 1;
	}

	/* Not creating new pool, but existing pool is needed */
	if (!lp->pool_name) {
		if (lp->snapshot)
			/* Taking snapshot via 'lvcreate -T vg/origin' */
			return 1;
		log_error("Please specify name of existing pool.");
		return 0;
	}

	if (vg) {
		/* Validate pool has matching type */
		if (!(pool_lv = find_lv(vg, lp->pool_name))) {
			log_error("Pool %s not found in Volume group %s.",
				  lp->pool_name, vg->name);
			return 0;
		}
		if (seg_is_cache(lp) && !lv_is_cache_pool(pool_lv)) {
			log_error("Logical volume %s is not a cache pool.",
				  display_lvname(pool_lv));
			return 0;
		}
		if (seg_is_thin_volume(lp) && !lv_is_thin_pool(pool_lv)) {
			log_error("Logical volume %s is not a thin pool.",
				  display_lvname(pool_lv));
			return 0;
		}
	}

	return 1;
}
2018-06-29 14:16:08 +03:00
static int _check_vdo_parameters ( struct volume_group * vg , struct lvcreate_params * lp ,
2022-07-09 00:38:34 +03:00
struct lvcreate_cmdline_params * lcp )
2018-06-29 14:16:08 +03:00
{
2022-07-09 00:38:34 +03:00
if ( lp - > snapshot ) {
2018-06-29 14:16:08 +03:00
log_error ( " Please either create VDO or snapshot. " ) ;
return 0 ;
}
2022-07-09 00:38:34 +03:00
if ( lcp - > virtual_size > DM_VDO_LOGICAL_SIZE_MAXIMUM ) {
log_error ( " Maximal supported VDO virtual size is %s. " ,
display_size ( vg - > cmd , DM_VDO_LOGICAL_SIZE_MAXIMUM ) ) ;
return 0 ;
}
2018-06-29 14:16:08 +03:00
return 1 ;
}
2014-10-21 22:50:26 +04:00
/*
 * Check zero_ARG with default value set to value of wipesignatures_ARG
 * with its default set to 'n'. So if the user specifies either -Zy or -Wy
 * on the command line, check for incompatible options and report an error.
 *
 * Catching cases we cannot fulfill:
 *   lvcreate [-an][-pr][-aay][-ky] [-Zy][-Wy]
 */
static int _check_zero_parameters(struct cmd_context *cmd, struct lvcreate_params *lp)
{
	char buf[NAME_LEN + 128];

	/* -Z has different meaning for thins */
	if (seg_is_thin(lp))
		return 1;

	/*
	 * Collect every condition that prevents zeroing into one string;
	 * if there is some problem, buffer will not be empty.
	 */
	if (dm_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s",
			lp->origin_name ? "origin " : "",
			lp->origin_name ? : "",
			lp->origin_name ? " " : "",
			!(lp->permission & LVM_WRITE) ? "read-only " : "",
			!is_change_activating(lp->activate) ? "inactive " : "",
			(lp->activate == CHANGE_AAY) ? "auto activated " : "",
			((lp->activation_skip & ACTIVATION_SKIP_SET_ENABLED) &&
			 !(lp->activation_skip & ACTIVATION_SKIP_IGNORE))
			? "skipped from activation " : "") < 0) {
		log_error(INTERNAL_ERROR "Buffer is too small for dm_snprintf().");
		return 0;
	}

	if (buf[0] || (lp->segtype->flags & SEG_CANNOT_BE_ZEROED)) {
		/* Found condition that prevents zeroing */
		if (arg_int_value(cmd, zero_ARG, arg_int_value(cmd, wipesignatures_ARG, 0))) {
			/* User explicitly asked for zeroing/wiping we cannot perform. */
			if (!(lp->segtype->flags & SEG_CANNOT_BE_ZEROED)) {
				log_error("Cannot zero %slogical volume with option -Zy or -Wy.", buf);
				return 0;
			}
			log_print_unless_silent("Ignoring option -Zy or -Wy for unzeroable %s volume.",
						lp->segtype->name);
		}
		/* Implicitly disable zeroing and signature wiping. */
		lp->zero = lp->wipe_signatures = 0;
	}

	return 1;
}
RAID: Allow implicit stripe (and parity) when creating RAID LVs
There are typically 2 functions for the more advanced segment types that
deal with parameters in lvcreate.c: _get_*_params() and _check_*_params().
(Not all segment types name their functions according to this scheme.)
The former function is responsible for reading parameters before the VG
has been read. The latter is for sanity checking and possibly setting
parameters after the VG has been read.
This patch adds a _check_raid_parameters() function that will determine
if the user has specified 'stripe' or 'mirror' parameters. If not, the
proper number is computed from the list of PVs the user has supplied or
the number that are available in the VG. Now that _check_raid_parameters()
is available, we move the check for proper number of stripes from
_get_* to _check_*.
This gives the user the ability to create RAID LVs as follows:
# 5-device RAID5, 4-data, 1-parity (i.e. implicit '-i 4')
~> lvcreate --type raid5 -L 100G -n lv vg /dev/sd[abcde]1
# 5-device RAID6, 3-data, 2-parity (i.e. implicit '-i 3')
~> lvcreate --type raid6 -L 100G -n lv vg /dev/sd[abcde]1
# If 5 PVs in VG, 4-data, 1-parity RAID5
~> lvcreate --type raid5 -L 100G -n lv vg
Considerations:
This patch only affects RAID. It might also be useful to apply this to
the 'stripe' segment type. LVM RAID may include RAID0 at some point in
the future and the implicit stripes would apply there. It would be odd
to have RAID0 be able to auto-determine the stripe count while 'stripe'
could not.
The only draw-back of this patch that I can see is that there might be
less error checking. Rather than informing the user that they forgot
to supply an argument (e.g. '-i'), the value would be computed and it
may differ from what the user actually wanted. I don't see this as a
problem, because the user can check the device count after creation
and remove the LV if they have made an error.
2014-02-18 06:18:23 +04:00
2011-09-06 04:26:42 +04:00
/*
* Ensure the set of thin parameters extracted from the command line is consistent .
*/
static int _validate_internal_thin_processing ( const struct lvcreate_params * lp )
{
int r = 1 ;
/*
The final state should be one of :
2014-01-22 20:30:55 +04:00
thin create_pool snapshot origin pool
1 1 0 0 y / n - create new pool and a thin LV in it
1 0 0 0 y - create new thin LV in existing pool
0 1 0 0 y / n - create new pool only
1 0 1 1 y - create thin snapshot of existing thin LV
2011-09-06 04:26:42 +04:00
*/
2014-10-07 12:43:47 +04:00
if ( ! lp - > create_pool & & ! lp - > pool_name ) {
2011-09-06 04:26:42 +04:00
log_error ( INTERNAL_ERROR " --thinpool not identified. " ) ;
r = 0 ;
}
2014-10-24 17:26:41 +04:00
if ( ( ! lp - > origin_name & & lp - > snapshot ) | |
( lp - > origin_name & & ! lp - > snapshot & & ! seg_is_thin_volume ( lp ) ) ) {
2011-09-06 04:26:42 +04:00
log_error ( INTERNAL_ERROR " Inconsistent snapshot and origin parameters identified. " ) ;
r = 0 ;
}
2014-10-24 17:26:41 +04:00
if ( ! lp - > create_pool & & ! lp - > snapshot & & ! seg_is_thin_volume ( lp ) ) {
2011-09-06 04:26:42 +04:00
log_error ( INTERNAL_ERROR " Failed to identify what type of thin target to use. " ) ;
r = 0 ;
}
return r ;
}
2014-11-27 22:21:41 +03:00
static void _destroy_lvcreate_params ( struct lvcreate_params * lp )
{
2015-07-15 12:06:40 +03:00
if ( lp - > policy_settings ) {
dm_config_destroy ( lp - > policy_settings ) ;
lp - > policy_settings = NULL ;
}
2014-11-27 22:21:41 +03:00
}
2015-12-01 02:32:49 +03:00
/*
 * process_each_vg() callback creating one LV in the opened VG.
 *
 * Order matters: activation params are read first, then segment types are
 * resolved against the VG, then the per-type _check_* validations run, and
 * only after all checks pass is the LV actually created.
 *
 * Returns ECMD_PROCESSED on success, ECMD_FAILED otherwise.  On failure a
 * pool-metadata spare LV that this call created is removed again.
 */
static int _lvcreate_single(struct cmd_context *cmd, const char *vg_name,
			    struct volume_group *vg, struct processing_handle *handle)
{
	struct processing_params *pp = (struct processing_params *) handle->custom_handle;
	struct lvcreate_params *lp = pp->lp;
	struct lvcreate_cmdline_params *lcp = pp->lcp;
	/* Remember pre-existing spare so a newly created one can be undone on error. */
	struct logical_volume *spare = vg->pool_metadata_spare_lv;
	struct logical_volume *lv;
	int ret = ECMD_FAILED;

	if (!_read_activation_params(cmd, vg, lp))
		goto_out;

	/* Resolve segment types with opened VG */
	if (lp->snapshot && lp->origin_name && !_determine_snapshot_type(vg, lp, lcp))
		goto_out;

	if (seg_is_cache(lp) && !_determine_cache_argument(vg, lp))
		goto_out;

	/* All types resolved at this point, now only validation steps */
	if (seg_is_raid(lp) && !_check_raid_parameters(vg, lp, lcp))
		goto_out;

	if (seg_is_thin(lp) && !_check_thin_parameters(vg, lp, lcp))
		goto_out;

	if (seg_is_vdo(lp) && !_check_vdo_parameters(vg, lp, lcp))
		goto_out;

	/* Runs for every segtype (decides internally whether pools apply). */
	if (!_check_pool_parameters(cmd, vg, lp, lcp))
		goto_out;

	/* All types are checked */
	if (!_check_zero_parameters(cmd, lp))
		goto_out;

	/* Convert cmdline size/percent into extent counts for this VG. */
	if (!_update_extents_params(vg, lp, lcp))
		goto_out;

	/* Validate VDO size limits against the now-known extent count. */
	if (seg_is_vdo(lp) &&
	    !check_vdo_constrains(cmd, &(struct vdo_pool_size_config) {
			.physical_size = (uint64_t) lp->extents * vg->extent_size,
			.virtual_size = lcp->virtual_size,
			.block_map_cache_size_mb = lp->vcp.vdo_params.block_map_cache_size_mb,
			.index_memory_size_mb = lp->vcp.vdo_params.index_memory_size_mb }))
		goto_out;

	if (seg_is_thin(lp) && !_validate_internal_thin_processing(lp))
		goto_out;

	if (lp->create_pool && !seg_is_vdo(lp)) {
		/* TODO: VDO does not use spare LV ATM, maybe later for rescue resize ? */
		if (!handle_pool_metadata_spare(vg, lp->pool_metadata_extents,
						lp->pvh, lp->pool_metadata_spare))
			goto_out;

		log_verbose("Making pool %s in VG %s using segtype %s",
			    lp->pool_name ? : "with generated name", lp->vg_name, lp->segtype->name);
	}

	/* sanlock-based VGs need a lock slot for the new LV. */
	if (vg->lock_type && !strcmp(vg->lock_type, "sanlock")) {
		if (!handle_sanlock_lv(cmd, vg)) {
			log_error("No space for sanlock lock, extend the internal lvmlock LV.");
			goto out;
		}
	}

	if (seg_is_thin_volume(lp))
		log_verbose("Making thin LV %s in pool %s in VG %s%s%s using segtype %s.",
			    lp->lv_name ? : "with generated name",
			    lp->pool_name ? : "with generated name", lp->vg_name,
			    lp->snapshot ? " as snapshot of " : "",
			    lp->snapshot ? lp->origin_name : "", lp->segtype->name);

	if (vg_is_shared(vg)) {
		if (cmd->command->command_enum == lvcreate_thin_vol_with_thinpool_or_sparse_snapshot_CMD) {
			log_error("Use lvconvert to create thin pools and cache pools in a shared VG.");
			goto out;
		}
		lp->needs_lockd_init = 1;
	}

	if (!(lv = lv_create_single(vg, lp)))
		goto_out;

	if (!lp->lv_name)
		lp->lv_name = lv->name; /* Get created LV name when it was not specified */

	ret = ECMD_PROCESSED;
out:
	if (ret != ECMD_PROCESSED && !spare && vg->pool_metadata_spare_lv)
		/* Remove created spare volume for failed pool creation */
		if (!lvremove_single(cmd, vg->pool_metadata_spare_lv, NULL))
			log_error("Removal of created spare volume failed. "
				  "Manual intervention required.");

	return ret;
}
int lvcreate ( struct cmd_context * cmd , int argc , char * * argv )
{
struct processing_handle * handle = NULL ;
struct processing_params pp ;
struct lvcreate_params lp = {
. major = - 1 ,
. minor = - 1 ,
} ;
struct lvcreate_cmdline_params lcp = { 0 } ;
int ret ;
if ( ! _lvcreate_params ( cmd , argc , argv , & lp , & lcp ) ) {
stack ;
return EINVALID_CMD_LINE ;
}
if ( ! _check_pool_parameters ( cmd , NULL , & lp , & lcp ) ) {
stack ;
return EINVALID_CMD_LINE ;
}
pp . lp = & lp ;
pp . lcp = & lcp ;
2016-05-31 13:24:05 +03:00
if ( ! ( handle = init_processing_handle ( cmd , NULL ) ) ) {
2015-12-01 02:32:49 +03:00
log_error ( " Failed to initialize processing handle. " ) ;
return ECMD_FAILED ;
}
handle - > custom_handle = & pp ;
2016-05-03 12:46:28 +03:00
ret = process_each_vg ( cmd , 0 , NULL , lp . vg_name , NULL , READ_FOR_UPDATE , 0 , handle ,
2015-12-01 02:32:49 +03:00
& _lvcreate_single ) ;
2014-11-27 22:21:41 +03:00
_destroy_lvcreate_params ( & lp ) ;
2015-12-01 02:32:49 +03:00
destroy_processing_handle ( cmd , handle ) ;
return ret ;
2001-11-06 22:02:26 +03:00
}
lvcreate: new cache or writecache lv with single command
To create a new cache or writecache LV with a single command:
lvcreate --type cache|writecache
-n Name -L Size --cachedevice PVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, a new cachevol LV is created internally, using PVfast
specified by the cachedevice option.
- Then, the cachevol is attached to the main LV, converting the
main LV to type cache|writecache.
Include --cachesize Size to specify the size of cache|writecache
to create from the specified --cachedevice PVs, otherwise the
entire cachedevice PV is used. The --cachedevice option can be
repeated to create the cache from multiple devices, or the
cachedevice option can contain a tag name specifying a set of PVs
to allocate the cache from.
To create a new cache or writecache LV with a single command
using an existing cachevol LV:
lvcreate --type cache|writecache
-n Name -L Size --cachevol LVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, the cachevol LVfast is attached to the main LV, converting
the main LV to type cache|writecache.
In cases where more advanced types (for the main LV or cachevol LV)
are needed, they should be created independently and then combined
with lvconvert.
Example
-------
user creates a new VG with one slow device and one fast device:
$ vgcreate vg /dev/slow1 /dev/fast1
user creates a new 8G main LV on /dev/slow1 that uses all of
/dev/fast1 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1
-n main -L 8G vg /dev/slow1
Example
-------
user creates a new VG with two slow devs and two fast devs:
$ vgcreate vg /dev/slow1 /dev/slow2 /dev/fast1 /dev/fast2
user creates a new 8G main LV on /dev/slow1 and /dev/slow2
that uses all of /dev/fast1 and /dev/fast2 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1 --cachedevice /dev/fast2
-n main -L 8G vg /dev/slow1 /dev/slow2
Example
-------
A user has several slow devices and several fast devices in their VG,
the slow devs have tag @slow, the fast devs have tag @fast.
user creates a new 8G main LV on the slow devs with a
2G writecache on the fast devs:
$ lvcreate --type writecache -n main -L 8G
--cachedevice @fast --cachesize 2G vg @slow
2020-04-10 21:17:37 +03:00
static int _lvcreate_and_attach_writecache_single ( struct cmd_context * cmd ,
const char * vg_name , struct volume_group * vg , struct processing_handle * handle )
{
struct processing_params * pp = ( struct processing_params * ) handle - > custom_handle ;
struct lvcreate_params * lp = pp - > lp ;
struct logical_volume * lv ;
int ret ;
ret = _lvcreate_single ( cmd , vg_name , vg , handle ) ;
if ( ret = = ECMD_FAILED )
return ret ;
if ( ! ( lv = find_lv ( vg , lp - > lv_name ) ) ) {
log_error ( " Failed to find LV %s to add writecache. " , lp - > lv_name ) ;
return ECMD_FAILED ;
}
ret = lvconvert_writecache_attach_single ( cmd , lv , handle ) ;
if ( ret = = ECMD_FAILED ) {
log_error ( " Removing new LV after failing to add writecache. " ) ;
if ( ! deactivate_lv ( cmd , lv ) )
log_error ( " Failed to deactivate new LV %s. " , display_lvname ( lv ) ) ;
2020-08-29 22:53:05 +03:00
if ( ! lv_remove_with_dependencies ( cmd , lv , DONT_PROMPT , 0 ) )
lvcreate: new cache or writecache lv with single command
To create a new cache or writecache LV with a single command:
lvcreate --type cache|writecache
-n Name -L Size --cachedevice PVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, a new cachevol LV is created internally, using PVfast
specified by the cachedevice option.
- Then, the cachevol is attached to the main LV, converting the
main LV to type cache|writecache.
Include --cachesize Size to specify the size of cache|writecache
to create from the specified --cachedevice PVs, otherwise the
entire cachedevice PV is used. The --cachedevice option can be
repeated to create the cache from multiple devices, or the
cachedevice option can contain a tag name specifying a set of PVs
to allocate the cache from.
To create a new cache or writecache LV with a single command
using an existing cachevol LV:
lvcreate --type cache|writecache
-n Name -L Size --cachevol LVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, the cachevol LVfast is attached to the main LV, converting
the main LV to type cache|writecache.
In cases where more advanced types (for the main LV or cachevol LV)
are needed, they should be created independently and then combined
with lvconvert.
Example
-------
user creates a new VG with one slow device and one fast device:
$ vgcreate vg /dev/slow1 /dev/fast1
user creates a new 8G main LV on /dev/slow1 that uses all of
/dev/fast1 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1
-n main -L 8G vg /dev/slow1
Example
-------
user creates a new VG with two slow devs and two fast devs:
$ vgcreate vg /dev/slow1 /dev/slow2 /dev/fast1 /dev/fast2
user creates a new 8G main LV on /dev/slow1 and /dev/slow2
that uses all of /dev/fast1 and /dev/fast2 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1 --cachedevice /dev/fast2
-n main -L 8G vg /dev/slow1 /dev/slow2
Example
-------
A user has several slow devices and several fast devices in their VG,
the slow devs have tag @slow, the fast devs have tag @fast.
user creates a new 8G main LV on the slow devs with a
2G writecache on the fast devs:
$ lvcreate --type writecache -n main -L 8G
--cachedevice @fast --cachesize 2G vg @slow
2020-04-10 21:17:37 +03:00
log_error ( " Failed to remove new LV %s. " , display_lvname ( lv ) ) ;
return ECMD_FAILED ;
}
return ECMD_PROCESSED ;
}
int lvcreate_and_attach_writecache_cmd ( struct cmd_context * cmd , int argc , char * * argv )
{
struct processing_handle * handle = NULL ;
struct lvcreate_params lp = {
. major = - 1 ,
. minor = - 1 ,
2022-01-18 19:33:53 +03:00
/*
* Tell lvcreate to ignore - - type since we are using lvcreate
* to create a linear LV and using lvconvert to add cache .
* ( Would be better if lvcreate code was split up so we could
* call a specific function that just created a linear / striped LV . )
*/
. ignore_type = 1 ,
lvcreate: new cache or writecache lv with single command
To create a new cache or writecache LV with a single command:
lvcreate --type cache|writecache
-n Name -L Size --cachedevice PVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, a new cachevol LV is created internally, using PVfast
specified by the cachedevice option.
- Then, the cachevol is attached to the main LV, converting the
main LV to type cache|writecache.
Include --cachesize Size to specify the size of cache|writecache
to create from the specified --cachedevice PVs, otherwise the
entire cachedevice PV is used. The --cachedevice option can be
repeated to create the cache from multiple devices, or the
cachedevice option can contain a tag name specifying a set of PVs
to allocate the cache from.
To create a new cache or writecache LV with a single command
using an existing cachevol LV:
lvcreate --type cache|writecache
-n Name -L Size --cachevol LVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, the cachevol LVfast is attached to the main LV, converting
the main LV to type cache|writecache.
In cases where more advanced types (for the main LV or cachevol LV)
are needed, they should be created independently and then combined
with lvconvert.
Example
-------
user creates a new VG with one slow device and one fast device:
$ vgcreate vg /dev/slow1 /dev/fast1
user creates a new 8G main LV on /dev/slow1 that uses all of
/dev/fast1 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1
-n main -L 8G vg /dev/slow1
Example
-------
user creates a new VG with two slow devs and two fast devs:
$ vgcreate vg /dev/slow1 /dev/slow2 /dev/fast1 /dev/fast2
user creates a new 8G main LV on /dev/slow1 and /dev/slow2
that uses all of /dev/fast1 and /dev/fast2 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1 --cachedevice /dev/fast2
-n main -L 8G vg /dev/slow1 /dev/slow2
Example
-------
A user has several slow devices and several fast devices in their VG,
the slow devs have tag @slow, the fast devs have tag @fast.
user creates a new 8G main LV on the slow devs with a
2G writecache on the fast devs:
$ lvcreate --type writecache -n main -L 8G
--cachedevice @fast --cachesize 2G vg @slow
2020-04-10 21:17:37 +03:00
} ;
struct lvcreate_cmdline_params lcp = { 0 } ;
2022-01-18 19:33:53 +03:00
struct processing_params pp = {
. lp = & lp ,
. lcp = & lcp ,
} ;
lvcreate: new cache or writecache lv with single command
To create a new cache or writecache LV with a single command:
lvcreate --type cache|writecache
-n Name -L Size --cachedevice PVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, a new cachevol LV is created internally, using PVfast
specified by the cachedevice option.
- Then, the cachevol is attached to the main LV, converting the
main LV to type cache|writecache.
Include --cachesize Size to specify the size of cache|writecache
to create from the specified --cachedevice PVs, otherwise the
entire cachedevice PV is used. The --cachedevice option can be
repeated to create the cache from multiple devices, or the
cachedevice option can contain a tag name specifying a set of PVs
to allocate the cache from.
To create a new cache or writecache LV with a single command
using an existing cachevol LV:
lvcreate --type cache|writecache
-n Name -L Size --cachevol LVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, the cachevol LVfast is attached to the main LV, converting
the main LV to type cache|writecache.
In cases where more advanced types (for the main LV or cachevol LV)
are needed, they should be created independently and then combined
with lvconvert.
Example
-------
user creates a new VG with one slow device and one fast device:
$ vgcreate vg /dev/slow1 /dev/fast1
user creates a new 8G main LV on /dev/slow1 that uses all of
/dev/fast1 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1
-n main -L 8G vg /dev/slow1
Example
-------
user creates a new VG with two slow devs and two fast devs:
$ vgcreate vg /dev/slow1 /dev/slow2 /dev/fast1 /dev/fast2
user creates a new 8G main LV on /dev/slow1 and /dev/slow2
that uses all of /dev/fast1 and /dev/fast2 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1 --cachedevice /dev/fast2
-n main -L 8G vg /dev/slow1 /dev/slow2
Example
-------
A user has several slow devices and several fast devices in their VG,
the slow devs have tag @slow, the fast devs have tag @fast.
user creates a new 8G main LV on the slow devs with a
2G writecache on the fast devs:
$ lvcreate --type writecache -n main -L 8G
--cachedevice @fast --cachesize 2G vg @slow
2020-04-10 21:17:37 +03:00
int ret ;
if ( ! _lvcreate_params ( cmd , argc , argv , & lp , & lcp ) ) {
stack ;
return EINVALID_CMD_LINE ;
}
if ( ! ( handle = init_processing_handle ( cmd , NULL ) ) ) {
log_error ( " Failed to initialize processing handle. " ) ;
return ECMD_FAILED ;
}
handle - > custom_handle = & pp ;
ret = process_each_vg ( cmd , 0 , NULL , lp . vg_name , NULL , READ_FOR_UPDATE , 0 , handle ,
& _lvcreate_and_attach_writecache_single ) ;
_destroy_lvcreate_params ( & lp ) ;
destroy_processing_handle ( cmd , handle ) ;
return ret ;
}
static int _lvcreate_and_attach_cache_single ( struct cmd_context * cmd ,
const char * vg_name , struct volume_group * vg , struct processing_handle * handle )
{
struct processing_params * pp = ( struct processing_params * ) handle - > custom_handle ;
struct lvcreate_params * lp = pp - > lp ;
struct logical_volume * lv ;
int ret ;
ret = _lvcreate_single ( cmd , vg_name , vg , handle ) ;
if ( ret = = ECMD_FAILED )
return ret ;
if ( ! ( lv = find_lv ( vg , lp - > lv_name ) ) ) {
log_error ( " Failed to find LV %s to add cache. " , lp - > lv_name ) ;
return ECMD_FAILED ;
}
ret = lvconvert_cachevol_attach_single ( cmd , lv , handle ) ;
if ( ret = = ECMD_FAILED ) {
log_error ( " Removing new LV after failing to add cache. " ) ;
if ( ! deactivate_lv ( cmd , lv ) )
log_error ( " Failed to deactivate new LV %s. " , display_lvname ( lv ) ) ;
2020-08-29 22:53:05 +03:00
if ( ! lv_remove_with_dependencies ( cmd , lv , DONT_PROMPT , 0 ) )
lvcreate: new cache or writecache lv with single command
To create a new cache or writecache LV with a single command:
lvcreate --type cache|writecache
-n Name -L Size --cachedevice PVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, a new cachevol LV is created internally, using PVfast
specified by the cachedevice option.
- Then, the cachevol is attached to the main LV, converting the
main LV to type cache|writecache.
Include --cachesize Size to specify the size of cache|writecache
to create from the specified --cachedevice PVs, otherwise the
entire cachedevice PV is used. The --cachedevice option can be
repeated to create the cache from multiple devices, or the
cachedevice option can contain a tag name specifying a set of PVs
to allocate the cache from.
To create a new cache or writecache LV with a single command
using an existing cachevol LV:
lvcreate --type cache|writecache
-n Name -L Size --cachevol LVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, the cachevol LVfast is attached to the main LV, converting
the main LV to type cache|writecache.
In cases where more advanced types (for the main LV or cachevol LV)
are needed, they should be created independently and then combined
with lvconvert.
Example
-------
user creates a new VG with one slow device and one fast device:
$ vgcreate vg /dev/slow1 /dev/fast1
user creates a new 8G main LV on /dev/slow1 that uses all of
/dev/fast1 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1
-n main -L 8G vg /dev/slow1
Example
-------
user creates a new VG with two slow devs and two fast devs:
$ vgcreate vg /dev/slow1 /dev/slow2 /dev/fast1 /dev/fast2
user creates a new 8G main LV on /dev/slow1 and /dev/slow2
that uses all of /dev/fast1 and /dev/fast2 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1 --cachedevice /dev/fast2
-n main -L 8G vg /dev/slow1 /dev/slow2
Example
-------
A user has several slow devices and several fast devices in their VG,
the slow devs have tag @slow, the fast devs have tag @fast.
user creates a new 8G main LV on the slow devs with a
2G writecache on the fast devs:
$ lvcreate --type writecache -n main -L 8G
--cachedevice @fast --cachesize 2G vg @slow
2020-04-10 21:17:37 +03:00
log_error ( " Failed to remove new LV %s. " , display_lvname ( lv ) ) ;
return ECMD_FAILED ;
}
return ECMD_PROCESSED ;
}
int lvcreate_and_attach_cache_cmd ( struct cmd_context * cmd , int argc , char * * argv )
{
struct processing_handle * handle = NULL ;
struct lvcreate_params lp = {
. major = - 1 ,
. minor = - 1 ,
2022-01-18 19:33:53 +03:00
/*
* Tell lvcreate to ignore - - type since we are using lvcreate
* to create a linear LV and using lvconvert to add cache .
* ( Would be better if lvcreate code was split up so we could
* call a specific function that just created a linear / striped LV . )
*/
. ignore_type = 1 ,
lvcreate: new cache or writecache lv with single command
To create a new cache or writecache LV with a single command:
lvcreate --type cache|writecache
-n Name -L Size --cachedevice PVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, a new cachevol LV is created internally, using PVfast
specified by the cachedevice option.
- Then, the cachevol is attached to the main LV, converting the
main LV to type cache|writecache.
Include --cachesize Size to specify the size of cache|writecache
to create from the specified --cachedevice PVs, otherwise the
entire cachedevice PV is used. The --cachedevice option can be
repeated to create the cache from multiple devices, or the
cachedevice option can contain a tag name specifying a set of PVs
to allocate the cache from.
To create a new cache or writecache LV with a single command
using an existing cachevol LV:
lvcreate --type cache|writecache
-n Name -L Size --cachevol LVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, the cachevol LVfast is attached to the main LV, converting
the main LV to type cache|writecache.
In cases where more advanced types (for the main LV or cachevol LV)
are needed, they should be created independently and then combined
with lvconvert.
Example
-------
user creates a new VG with one slow device and one fast device:
$ vgcreate vg /dev/slow1 /dev/fast1
user creates a new 8G main LV on /dev/slow1 that uses all of
/dev/fast1 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1
-n main -L 8G vg /dev/slow1
Example
-------
user creates a new VG with two slow devs and two fast devs:
$ vgcreate vg /dev/slow1 /dev/slow2 /dev/fast1 /dev/fast2
user creates a new 8G main LV on /dev/slow1 and /dev/slow2
that uses all of /dev/fast1 and /dev/fast2 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1 --cachedevice /dev/fast2
-n main -L 8G vg /dev/slow1 /dev/slow2
Example
-------
A user has several slow devices and several fast devices in their VG,
the slow devs have tag @slow, the fast devs have tag @fast.
user creates a new 8G main LV on the slow devs with a
2G writecache on the fast devs:
$ lvcreate --type writecache -n main -L 8G
--cachedevice @fast --cachesize 2G vg @slow
2020-04-10 21:17:37 +03:00
} ;
struct lvcreate_cmdline_params lcp = { 0 } ;
2022-01-18 19:33:53 +03:00
struct processing_params pp = {
. lp = & lp ,
. lcp = & lcp ,
} ;
lvcreate: new cache or writecache lv with single command
To create a new cache or writecache LV with a single command:
lvcreate --type cache|writecache
-n Name -L Size --cachedevice PVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, a new cachevol LV is created internally, using PVfast
specified by the cachedevice option.
- Then, the cachevol is attached to the main LV, converting the
main LV to type cache|writecache.
Include --cachesize Size to specify the size of cache|writecache
to create from the specified --cachedevice PVs, otherwise the
entire cachedevice PV is used. The --cachedevice option can be
repeated to create the cache from multiple devices, or the
cachedevice option can contain a tag name specifying a set of PVs
to allocate the cache from.
To create a new cache or writecache LV with a single command
using an existing cachevol LV:
lvcreate --type cache|writecache
-n Name -L Size --cachevol LVfast VG [PVslow ...]
- A new main linear|striped LV is created as usual, using the
specified -n Name and -L Size, and using the optionally
specified PVslow devices.
- Then, the cachevol LVfast is attached to the main LV, converting
the main LV to type cache|writecache.
In cases where more advanced types (for the main LV or cachevol LV)
are needed, they should be created independently and then combined
with lvconvert.
Example
-------
user creates a new VG with one slow device and one fast device:
$ vgcreate vg /dev/slow1 /dev/fast1
user creates a new 8G main LV on /dev/slow1 that uses all of
/dev/fast1 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1
-n main -L 8G vg /dev/slow1
Example
-------
user creates a new VG with two slow devs and two fast devs:
$ vgcreate vg /dev/slow1 /dev/slow2 /dev/fast1 /dev/fast2
user creates a new 8G main LV on /dev/slow1 and /dev/slow2
that uses all of /dev/fast1 and /dev/fast2 as a writecache:
$ lvcreate --type writecache --cachedevice /dev/fast1 --cachedevice /dev/fast2
-n main -L 8G vg /dev/slow1 /dev/slow2
Example
-------
A user has several slow devices and several fast devices in their VG,
the slow devs have tag @slow, the fast devs have tag @fast.
user creates a new 8G main LV on the slow devs with a
2G writecache on the fast devs:
$ lvcreate --type writecache -n main -L 8G
--cachedevice @fast --cachesize 2G vg @slow
2020-04-10 21:17:37 +03:00
int ret ;
if ( ! _lvcreate_params ( cmd , argc , argv , & lp , & lcp ) ) {
stack ;
return EINVALID_CMD_LINE ;
}
if ( ! ( handle = init_processing_handle ( cmd , NULL ) ) ) {
log_error ( " Failed to initialize processing handle. " ) ;
return ECMD_FAILED ;
}
handle - > custom_handle = & pp ;
ret = process_each_vg ( cmd , 0 , NULL , lp . vg_name , NULL , READ_FOR_UPDATE , 0 , handle ,
& _lvcreate_and_attach_cache_single ) ;
_destroy_lvcreate_params ( & lp ) ;
destroy_processing_handle ( cmd , handle ) ;
return ret ;
}