/*
 * Copyright (C) 2011-2013 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "activate.h"
#include "locking.h"
#include "memlock.h"
#include "metadata.h"
#include "segtype.h"
#include "defaults.h"
#include "display.h"

int attach_pool_message(struct lv_segment *pool_seg, dm_thin_message_t type,
			struct logical_volume *lv, uint32_t delete_id,
			int no_update)
{
	struct lv_thin_message *tmsg;

	if (!seg_is_thin_pool(pool_seg)) {
		log_error(INTERNAL_ERROR "Cannot attach message to non-pool LV %s.", pool_seg->lv->name);
		return 0;
	}

	if (pool_has_message(pool_seg, lv, delete_id)) {
		if (lv)
			log_error("Message referring LV %s already queued in pool %s.",
				  lv->name, pool_seg->lv->name);
		else
			log_error("Delete for device %u already queued in pool %s.",
				  delete_id, pool_seg->lv->name);
		return 0;
	}

	if (!(tmsg = dm_pool_alloc(pool_seg->lv->vg->vgmem, sizeof(*tmsg)))) {
		log_error("Failed to allocate memory for message.");
		return 0;
	}

	switch (type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
	case DM_THIN_MESSAGE_CREATE_THIN:
		tmsg->u.lv = lv;
		break;
	case DM_THIN_MESSAGE_DELETE:
		tmsg->u.delete_id = delete_id;
		break;
	default:
		log_error(INTERNAL_ERROR "Unsupported message type %u.", type);
		return 0;
	}

	tmsg->type = type;

	/* If this is the first queued message and metadata updates are allowed, bump the transaction_id */
	if (!no_update && dm_list_empty(&pool_seg->thin_messages))
		pool_seg->transaction_id++;

	dm_list_add(&pool_seg->thin_messages, &tmsg->list);

	log_debug_metadata("Added %s message.",
			   (type == DM_THIN_MESSAGE_CREATE_SNAP ||
			    type == DM_THIN_MESSAGE_CREATE_THIN) ? "create" :
			   (type == DM_THIN_MESSAGE_DELETE) ? "delete" : "unknown");

	return 1;
}
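
/*
 * Usage sketch (illustrative only, not taken from the original source):
 * a caller creating a new thin volume typically queues a create message
 * on the pool segment and later flushes it via update_pool_lv(), e.g.
 *
 *	if (!attach_pool_message(first_seg(pool_lv), DM_THIN_MESSAGE_CREATE_THIN,
 *				 thin_lv, 0, 0))
 *		return_0;
 *	...
 *	if (!update_pool_lv(pool_lv, 1))
 *		return_0;
 *
 * Here 'pool_lv' and 'thin_lv' are hypothetical variables; both functions
 * are defined in this file.
 */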

int attach_thin_external_origin(struct lv_segment *seg,
				struct logical_volume *external_lv)
{
	if (seg->external_lv) {
		log_error(INTERNAL_ERROR "LV \"%s\" already has external origin.",
			  seg->lv->name);
		return 0;
	}

	seg->external_lv = external_lv;

	if (external_lv) {
		if (!add_seg_to_segs_using_this_lv(external_lv, seg))
			return_0;

		external_lv->external_count++;

		if (external_lv->status & LVM_WRITE) {
			log_verbose("Setting logical volume \"%s\" read-only.",
				    external_lv->name);
			external_lv->status &= ~LVM_WRITE;
		}
	}

	return 1;
}

int detach_thin_external_origin(struct lv_segment *seg)
{
	if (seg->external_lv) {
		if (!lv_is_external_origin(seg->external_lv)) {
			log_error(INTERNAL_ERROR "Inconsistent external origin.");
			return 0;
		}

		if (!remove_seg_from_segs_using_this_lv(seg->external_lv, seg))
			return_0;

		seg->external_lv->external_count--;
		seg->external_lv = NULL;
	}

	return 1;
}

int lv_is_merging_thin_snapshot(const struct logical_volume *lv)
{
	struct lv_segment *seg = first_seg(lv);

	return (seg && seg->status & MERGING) ? 1 : 0;
}

/*
 * Check whether the pool has any message queued for the given LV or device_id.
 * When lv is NULL and device_id is 0, it just checks for any queued message.
 */
int pool_has_message(const struct lv_segment *seg,
		     const struct logical_volume *lv, uint32_t device_id)
{
	const struct lv_thin_message *tmsg;

	if (!seg_is_thin_pool(seg)) {
		log_error(INTERNAL_ERROR "LV %s is not pool.", seg->lv->name);
		return 0;
	}

	if (!lv && !device_id)
		return !dm_list_empty(&seg->thin_messages);

	dm_list_iterate_items(tmsg, &seg->thin_messages) {
		switch (tmsg->type) {
		case DM_THIN_MESSAGE_CREATE_SNAP:
		case DM_THIN_MESSAGE_CREATE_THIN:
			if (tmsg->u.lv == lv)
				return 1;
			break;
		case DM_THIN_MESSAGE_DELETE:
			if (tmsg->u.delete_id == device_id)
				return 1;
			break;
		default:
			break;
		}
	}

	return 0;
}

int pool_is_active(const struct logical_volume *lv)
{
	struct lvinfo info;
	const struct seg_list *sl;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "pool_is_active called with non-pool LV %s.", lv->name);
		return 0;
	}

	/* On clustered VG, query every related thin pool volume */
	if (vg_is_clustered(lv->vg)) {
		if (lv_is_active(lv))
			return 1;

		dm_list_iterate_items(sl, &lv->segs_using_this_lv)
			if (lv_is_active(sl->seg->lv)) {
				log_debug("Thin volume \"%s\" is active.", sl->seg->lv->name);
				return 1;
			}
	} else if (lv_info(lv->vg->cmd, lv, 1, &info, 0, 0) && info.exists)
		return 1; /* Non clustered VG - just checks for '-tpool' */

	return 0;
}

int thin_pool_feature_supported(const struct logical_volume *lv, int feature)
{
	static unsigned attr = 0U;
	struct lv_segment *seg;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "LV %s is not thin pool.", lv->name);
		return 0;
	}

	seg = first_seg(lv);
	if ((attr == 0U) && activation() && seg->segtype &&
	    seg->segtype->ops->target_present &&
	    !seg->segtype->ops->target_present(lv->vg->cmd, NULL, &attr)) {
		log_error("%s: Required device-mapper target(s) not "
			  "detected in your kernel", seg->segtype->name);
		return 0;
	}

	return (attr & feature) ? 1 : 0;
}

int pool_below_threshold(const struct lv_segment *pool_seg)
{
	dm_percent_t percent;
	int threshold = DM_PERCENT_1 *
		find_config_tree_int(pool_seg->lv->vg->cmd, activation_thin_pool_autoextend_threshold_CFG,
				     lv_config_profile(pool_seg->lv));

	/* Data */
	if (!lv_thin_pool_percent(pool_seg->lv, 0, &percent))
		return_0;

	if (percent >= threshold)
		return 0;

	/* Metadata */
	if (!lv_thin_pool_percent(pool_seg->lv, 1, &percent))
		return_0;

	if (percent >= threshold)
		return 0;

	return 1;
}
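
/*
 * Illustrative example (hypothetical configuration value): with
 * activation/thin_pool_autoextend_threshold = 70, the function above
 * returns 1 only while both data and metadata usage stay below 70%;
 * once either one reaches 70% it returns 0, which callers such as
 * _check_pool_create() treat as "no new volumes allowed".
 */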

/*
 * Validate whether the given external origin can be used with the thin pool
 */
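/*
 * Illustrative example (hypothetical sizes): with a pool chunk_size of
 * 128 sectors (64 KiB), an external origin of 2097152 sectors (1 GiB)
 * passes the check below (2097152 % 128 == 0), while an origin of
 * 2097000 sectors is rejected because it is not a chunk-size multiple.
 */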
int pool_supports_external_origin(const struct lv_segment *pool_seg, const struct logical_volume *external_lv)
{
	uint32_t csize = pool_seg->chunk_size;

	if ((external_lv->size < csize) || (external_lv->size % csize)) {
		/* TODO: Validate with thin feature flag once, it will be supported */
		log_error("Can't use \"%s/%s\" as external origin with \"%s/%s\" pool. "
			  "Size %s is not a multiple of pool's chunk size %s.",
			  external_lv->vg->name, external_lv->name,
			  pool_seg->lv->vg->name, pool_seg->lv->name,
			  display_size(external_lv->vg->cmd, external_lv->size),
			  display_size(external_lv->vg->cmd, csize));
		return 0;
	}

	return 1;
}

struct logical_volume *find_pool_lv(const struct logical_volume *lv)
{
	struct lv_segment *seg;

	if (!(seg = first_seg(lv))) {
		log_error("LV %s has no segment", lv->name);
		return NULL;
	}

	if (!(seg = find_pool_seg(seg)))
		return_NULL;

	return seg->lv;
}

/*
 * Find a free device_id for the given thin_pool segment.
 *
 * \return
 * Free device_id, or 0 if no free device_id is found.
 *
 * FIXME: Improve naive search and keep the value cached
 * and updated during VG lifetime (so no const for lv_segment).
 */
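/*
 * Illustrative example (hypothetical pool state): if the thin volumes using
 * the pool currently hold device_ids {1, 2, 5}, the scan below finds
 * max_id = 5 and returns 6; the unused ids 3 and 4 are not reclaimed
 * (see the FIXME above).
 */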
uint32_t get_free_pool_device_id(struct lv_segment *thin_pool_seg)
{
	uint32_t max_id = 0;
	struct seg_list *sl;

	if (!seg_is_thin_pool(thin_pool_seg)) {
		log_error(INTERNAL_ERROR
			  "Segment in %s is not a thin pool segment.",
			  thin_pool_seg->lv->name);
		return 0;
	}

	dm_list_iterate_items(sl, &thin_pool_seg->lv->segs_using_this_lv)
		if (sl->seg->device_id > max_id)
			max_id = sl->seg->device_id;

	if (++max_id > DM_THIN_MAX_DEVICE_ID) {
		/* FIXME Find empty holes instead of aborting! */
		log_error("Cannot find free device_id.");
		return 0;
	}

	log_debug_metadata("Found free pool device_id %u.", max_id);

	return max_id;
}

static int _check_pool_create(const struct logical_volume *lv)
{
	const struct lv_thin_message *lmsg;
	struct lvinfo info;

	dm_list_iterate_items(lmsg, &first_seg(lv)->thin_messages) {
		if (lmsg->type != DM_THIN_MESSAGE_CREATE_THIN)
			continue;
		/* When creating a new thin LV, a threshold check against the active pool is needed */
		if (!lv_info(lv->vg->cmd, lv, 1, &info, 0, 0) ||
		    !info.exists) {
			log_error("Pool %s needs to be locally active for threshold check.",
				  display_lvname(lv));
			return 0;
		}
		if (!pool_below_threshold(first_seg(lv))) {
			log_error("Free space in pool %s is above threshold, new volumes are not allowed.",
				  display_lvname(lv));
			return 0;
		}
		break;
	}

	return 1;
}
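
/*
 * update_pool_lv() flushes the messages queued on the pool segment.
 * With 'activate' set, a pool that is not active yet is activated
 * temporarily (with dmeventd monitoring suppressed), the create threshold
 * is verified via _check_pool_create(), and the pool is deactivated again;
 * an already active pool is only resumed (origin_only) so the messages get
 * sent.  Finally the message list is cleared and the VG metadata committed.
 */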
int update_pool_lv(struct logical_volume *lv, int activate)
{
	int monitored;
	int ret = 1;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "Updated LV %s is not pool.", lv->name);
		return 0;
	}

	if (dm_list_empty(&(first_seg(lv)->thin_messages)))
		return 1; /* No messages */

	if (activate) {
		/* If the pool is not active, activate it temporarily and deactivate it afterwards */
		if (!lv_is_active(lv)) {
			monitored = dmeventd_monitor_mode();
			init_dmeventd_monitor(DMEVENTD_MONITOR_IGNORE);
			if (!activate_lv_excl(lv->vg->cmd, lv)) {
				init_dmeventd_monitor(monitored);
				return_0;
			}
			if (!lv_is_active(lv)) {
				init_dmeventd_monitor(monitored);
				log_error("Cannot activate thin pool %s, perhaps skipped in lvm.conf volume_list?",
					  display_lvname(lv));
				return 0;
			}
			if (!(ret = _check_pool_create(lv)))
				stack;
			if (!deactivate_lv(lv->vg->cmd, lv)) {
				init_dmeventd_monitor(monitored);
				return_0;
			}
			init_dmeventd_monitor(monitored);
			/* Unlock memory if possible */
			memlock_unlock(lv->vg->cmd);
		}
		/*
		 * Resume an already active pool to send the thin messages.
		 * origin_only is used to skip the check for resumed state.
		 */
		else if (!resume_lv_origin(lv->vg->cmd, lv)) {
			log_error("Failed to resume %s.", lv->name);
			return 0;
		} else if (!(ret = _check_pool_create(lv)))
			stack;
	}

	dm_list_init(&(first_seg(lv)->thin_messages));

	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	return ret;
}

/* Estimate thin pool chunk size from data and metadata size (in sector units) */
static size_t _estimate_chunk_size(uint64_t data_size, uint64_t metadata_size, int attr)
{
	/*
	 * nr_pool_blocks = data_size / metadata_size
	 * chunk_size = nr_pool_blocks * 64b / sector_size
	 */
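	/*
	 * Worked example (hypothetical sizes, for illustration only):
	 * data_size = 1 TiB = 2147483648 sectors, metadata_size = 128 MiB = 262144 sectors
	 * => chunk_size = 2147483648 / (262144 * (512 / 64)) = 1024 sectors (512 KiB),
	 * which is then rounded up below to a 64 KiB multiple or a power of 2.
	 */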
	size_t chunk_size = data_size / (metadata_size * (SECTOR_SIZE / 64));

	if (attr & THIN_FEATURE_BLOCK_SIZE) {
		/* Round up to 64KB */
		chunk_size += DM_THIN_MIN_DATA_BLOCK_SIZE - 1;
		chunk_size &= ~(size_t)(DM_THIN_MIN_DATA_BLOCK_SIZE - 1);
	} else {
		/* Round up to nearest power of 2 */
		chunk_size--;
		chunk_size |= chunk_size >> 1;
		chunk_size |= chunk_size >> 2;
		chunk_size |= chunk_size >> 4;
		chunk_size |= chunk_size >> 8;
		chunk_size |= chunk_size >> 16;
		chunk_size++;
	}

	return chunk_size;
}

int update_thin_pool_params(const struct segment_type *segtype,
			    struct volume_group *vg,
			    unsigned attr, int passed_args,
			    uint32_t pool_data_extents,
			    uint32_t *pool_metadata_extents,
			    int *chunk_size_calc_method, uint32_t *chunk_size,
			    thin_discards_t *discards, int *zero)
{
	struct cmd_context *cmd = vg->cmd;
	struct profile *profile = vg->profile;
	uint32_t extent_size = vg->extent_size;
	uint64_t pool_metadata_size = (uint64_t)*pool_metadata_extents * extent_size;
	size_t estimate_chunk_size;
	const char *str;

	if (!(passed_args & PASS_ARG_CHUNK_SIZE)) {
		if (!(*chunk_size = find_config_tree_int(cmd, allocation_thin_pool_chunk_size_CFG, profile) * 2)) {
			if (!(str = find_config_tree_str(cmd, allocation_thin_pool_chunk_size_policy_CFG, profile))) {
				log_error(INTERNAL_ERROR "Could not find configuration.");
				return 0;
			}

			if (!strcasecmp(str, "generic"))
				*chunk_size_calc_method = THIN_CHUNK_SIZE_CALC_METHOD_GENERIC;
			else if (!strcasecmp(str, "performance"))
				*chunk_size_calc_method = THIN_CHUNK_SIZE_CALC_METHOD_PERFORMANCE;
			else {
				log_error("Thin pool chunk size calculation policy \"%s\" is unrecognised.", str);
				return 0;
			}

			if (!(*chunk_size = get_default_allocation_thin_pool_chunk_size_CFG(cmd, profile)))
				return_0;
		}
	}

	if (!validate_pool_chunk_size(cmd, segtype, *chunk_size))
		return_0;

	if (!(passed_args & PASS_ARG_DISCARDS)) {
		if (!(str = find_config_tree_str(cmd, allocation_thin_pool_discards_CFG, profile))) {
			log_error(INTERNAL_ERROR "Could not find configuration.");
			return 0;
		}
		if (!set_pool_discards(discards, str))
			return_0;
	}

	if (!(passed_args & PASS_ARG_ZERO))
		*zero = find_config_tree_bool(cmd, allocation_thin_pool_zero_CFG, profile);

	if (!(attr & THIN_FEATURE_BLOCK_SIZE) &&
	    (*chunk_size & (*chunk_size - 1))) {
		log_error("Chunk size must be a power of 2 for this thin target version.");
		return 0;
	}

	if (!pool_metadata_size) {
		/* Defaults to nr_pool_blocks * 64b converted to size in sectors */
		pool_metadata_size = (uint64_t)pool_data_extents * extent_size /
			(*chunk_size * (SECTOR_SIZE / UINT64_C(64)));
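		/*
		 * Worked example (hypothetical sizes, for illustration only):
		 * 100 GiB of data (209715200 sectors) with 64 KiB chunks
		 * (*chunk_size == 128) gives 209715200 / (128 * 8) = 204800
		 * sectors, i.e. a 100 MiB default metadata device before the
		 * adjustments below.
		 */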

		/* Check if we could eventually use bigger chunk size */
		if (!(passed_args & PASS_ARG_CHUNK_SIZE)) {
			while ((pool_metadata_size >
				(DEFAULT_THIN_POOL_OPTIMAL_SIZE / SECTOR_SIZE)) &&
			       (*chunk_size < DM_THIN_MAX_DATA_BLOCK_SIZE)) {
				*chunk_size <<= 1;
				pool_metadata_size >>= 1;
			}
			log_verbose("Setting chunk size to %s.",
				    display_size(cmd, *chunk_size));
		} else if (pool_metadata_size > (DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2)) {
			/* Suggest bigger chunk size */
			estimate_chunk_size =
				_estimate_chunk_size((uint64_t)pool_data_extents * extent_size,
						     (DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2), attr);
			log_warn("WARNING: Chunk size is too small for pool, suggested minimum is %s.",
				 display_size(cmd, estimate_chunk_size));
		}

		/* Round up to extent size silently */
		if (pool_metadata_size % extent_size)
			pool_metadata_size += extent_size - pool_metadata_size % extent_size;
	} else {
		estimate_chunk_size =
			_estimate_chunk_size((uint64_t)pool_data_extents * extent_size,
					     pool_metadata_size, attr);
		if (estimate_chunk_size < DM_THIN_MIN_DATA_BLOCK_SIZE)
			estimate_chunk_size = DM_THIN_MIN_DATA_BLOCK_SIZE;
		else if (estimate_chunk_size > DM_THIN_MAX_DATA_BLOCK_SIZE)
			estimate_chunk_size = DM_THIN_MAX_DATA_BLOCK_SIZE;

		/* Check to eventually use bigger chunk size */
		if (!(passed_args & PASS_ARG_CHUNK_SIZE)) {
			*chunk_size = estimate_chunk_size;
			log_verbose("Setting chunk size %s.", display_size(cmd, *chunk_size));
		} else if (*chunk_size < estimate_chunk_size) {
			/* Suggest bigger chunk size */
			log_warn("WARNING: Chunk size is smaller than suggested minimum size %s.",
				 display_size(cmd, estimate_chunk_size));
		}
	}

	if (pool_metadata_size > (2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE)) {
		pool_metadata_size = 2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE;
		if (passed_args & PASS_ARG_POOL_METADATA_SIZE)
			log_warn("WARNING: Maximum supported pool metadata size is %s.",
				 display_size(cmd, pool_metadata_size));
	} else if (pool_metadata_size < (2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE)) {
		pool_metadata_size = 2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE;
		if (passed_args & PASS_ARG_POOL_METADATA_SIZE)
			log_warn("WARNING: Minimum supported pool metadata size is %s.",
				 display_size(cmd, pool_metadata_size));
	}

	if (!(*pool_metadata_extents =
	      extents_from_size(vg->cmd, pool_metadata_size, extent_size)))
		return_0;

	return 1;
}

int set_pool_discards(thin_discards_t *discards, const char *str)
{
	if (!strcasecmp(str, "passdown"))
		*discards = THIN_DISCARDS_PASSDOWN;
	else if (!strcasecmp(str, "nopassdown"))
		*discards = THIN_DISCARDS_NO_PASSDOWN;
	else if (!strcasecmp(str, "ignore"))
		*discards = THIN_DISCARDS_IGNORE;
	else {
		log_error("Thin pool discards type \"%s\" is unknown.", str);
		return 0;
	}

	return 1;
}

const char *get_pool_discards_name(thin_discards_t discards)
{
	switch (discards) {
	case THIN_DISCARDS_PASSDOWN:
		return "passdown";
	case THIN_DISCARDS_NO_PASSDOWN:
		return "nopassdown";
	case THIN_DISCARDS_IGNORE:
		return "ignore";
	}

	log_error(INTERNAL_ERROR "Unknown discards type encountered.");

	return "unknown";
}

int lv_is_thin_origin(const struct logical_volume *lv, unsigned int *snap_count)
{
	struct seg_list *segl;
	int r = 0;

	if (snap_count)
		*snap_count = 0;

	if (!lv_is_thin_volume(lv) ||
	    dm_list_empty(&lv->segs_using_this_lv))
		return 0;

	dm_list_iterate_items(segl, &lv->segs_using_this_lv) {
		if (segl->seg->origin == lv) {
			r = 1;
			if (snap_count)
				(*snap_count)++;
			else
				/* not interested in number of snapshots */
				break;
		}
	}

	return r;
}

/*
 * Explicit check of a new thin pool for usability
 *
 * Allow use of thin pools by external apps.  When lvm2 metadata has
 * transaction_id == 0 for a new thin pool, it explicitly validates that
 * the pool is still unused.
 *
 * To prevent lvm2 from creating thin volumes in an externally used thin
 * pool, simply increment its transaction_id.
 */
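/*
 * Illustrative scenario (not from the original source): if an external tool
 * has already created devices in the pool through its own device-mapper
 * messages, the kernel-reported transaction_id no longer matches the value
 * (0) stored in lvm2 metadata, so the check below fails and lvm2 refuses to
 * place thin volumes in that pool.
 */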
int check_new_thin_pool(const struct logical_volume *pool_lv)
{
	struct cmd_context *cmd = pool_lv->vg->cmd;
	uint64_t transaction_id;

	/* For the transaction_id check, LOCAL activation is required */
	if (!activate_lv_excl_local(cmd, pool_lv)) {
		log_error("Aborting. Failed to locally activate thin pool %s.",
			  display_lvname(pool_lv));
		return 0;
	}

	/* With volume lists, check pool really is locally active */
	if (!lv_thin_pool_transaction_id(pool_lv, &transaction_id)) {
		log_error("Cannot read thin pool %s transaction id locally, perhaps skipped in lvm.conf volume_list?",
			  display_lvname(pool_lv));
		return 0;
	}

	/* Require pool to have same transaction_id as new */
	if (first_seg(pool_lv)->transaction_id != transaction_id) {
		log_error("Cannot use thin pool %s with transaction id "
			  "%" PRIu64 " for thin volumes. "
			  "Expected transaction id %" PRIu64 ".",
			  display_lvname(pool_lv), transaction_id,
			  first_seg(pool_lv)->transaction_id);
		return 0;
	}

	log_verbose("Deactivating public thin pool %s",
		    display_lvname(pool_lv));

	/* Prevent any 'race' with in-use thin pool and always deactivate */
	if (!deactivate_lv(pool_lv->vg->cmd, pool_lv)) {
		log_error("Aborting. Could not deactivate thin pool %s.",
			  display_lvname(pool_lv));
		return 0;
	}

	return 1;
}