2011-09-06 23:25:42 +04:00
/*
2013-02-05 14:07:09 +04:00
* Copyright ( C ) 2011 - 2013 Red Hat , Inc . All rights reserved .
2011-09-06 23:25:42 +04:00
*
* This file is part of LVM2 .
*
* This copyrighted material is made available to anyone wishing to use ,
* modify , copy , or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v .2 .1 .
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program ; if not , write to the Free Software Foundation ,
2016-01-21 13:49:46 +03:00
* Inc . , 51 Franklin Street , Fifth Floor , Boston , MA 02110 - 1301 USA
2011-09-06 23:25:42 +04:00
*/
# include "lib.h"
2011-10-22 20:44:23 +04:00
# include "activate.h"
# include "locking.h"
2014-09-19 03:09:36 +04:00
# include "memlock.h"
2011-09-06 23:25:42 +04:00
# include "metadata.h"
2011-09-08 20:41:18 +04:00
# include "segtype.h"
2012-02-08 17:05:38 +04:00
# include "defaults.h"
2013-03-11 15:37:09 +04:00
# include "display.h"
2011-09-06 23:25:42 +04:00
thin: move pool messaging from resume to suspend
Existing messaging interface for thin-pool has a few 'weak' points:
* Messages were posted with each 'resume' operation, thus not allowing
activation of thin-pool with the existing state.
* Acceleration skipped suspend step has not worked in cluster,
since clvmd resumes only nodes which are suspended (have proper lock
state).
* Resume may fail and code is not really designed to 'fail' in this
phase (generic rule here is resume DOES NOT fail unless something serious
is wrong and lvm2 tool usually doesn't handle recovery path in this case.)
* Full thin-pool suspend happened, when taken a thin-volume snapshot.
With this patch the new method relocates message passing into suspend
state.
This has a few drawbacks with the current API, but overall it performs
better and gives more possibilities to deal with errors.
Patch introduces a new logic for 'origin-only' suspend of thin-pool and
this also relates to thin-volume when taking snapshot.
When suspend_origin_only operation is invoked on a pool with
queued messages then only those messages are posted to thin-pool and
actual suspend of thin pool and data and metadata volume is skipped.
This makes taking a snapshot of thin-volume lighter operation and
avoids blocking of other unrelated active thin volumes.
Also fail now happens in 'suspend' state where the 'Fail' is more expected
and it is better handled through error paths.
Activation of thin-pool now does not send any message and leaves it up to a tool
to decide later how to finish an unfinished double-commit transaction.
Problem which needs some API improvements relates to the lvm2 tree
construction. For the suspend tree we do not add target table line
into the tree, but only a device is inserted into a tree.
Current mechanism to attach messages for thin-pool requires the libdm
to know about thin-pool target, so lvm2 currently takes assumption, node
is really a thin-pool and fills in the table line for this node (which
should be ensured by the PRELOAD phase, but it's a misuse of internal API)
we would possibly need to be able to attach message to 'any' node.
Other thing to notice - current messaging interface in thin-pool
target requires to suspend thin volume origin first and then send
a create message, but this could not have any 'nice' solution on lvm2
side and IMHO we should introduce something like 'create_after_resume'
message.
Patch also changes the moment, where lvm2 transaction id is increased.
Now it happens only after successful finish of kernel transaction id
change. This change was needed to handle properly activation of pool,
which is in the middle of unfinished transaction, and also this corrects
usage of thin-pool by external apps like Docker.
2015-07-01 14:31:37 +03:00
/* TODO: drop unused no_update */
2012-01-25 12:55:19 +04:00
int attach_pool_message ( struct lv_segment * pool_seg , dm_thin_message_t type ,
2011-10-19 20:39:09 +04:00
struct logical_volume * lv , uint32_t delete_id ,
2012-01-25 12:55:19 +04:00
int no_update )
2011-10-17 18:17:09 +04:00
{
struct lv_thin_message * tmsg ;
2012-01-25 12:55:19 +04:00
if ( ! seg_is_thin_pool ( pool_seg ) ) {
2017-06-27 09:28:36 +03:00
log_error ( INTERNAL_ERROR " Cannot attach message to non-pool LV %s. " ,
display_lvname ( pool_seg - > lv ) ) ;
2012-01-25 12:55:19 +04:00
return 0 ;
}
2011-11-07 14:59:07 +04:00
2012-01-25 12:55:19 +04:00
if ( pool_has_message ( pool_seg , lv , delete_id ) ) {
if ( lv )
log_error ( " Message referring LV %s already queued in pool %s. " ,
2017-06-27 09:28:36 +03:00
display_lvname ( lv ) , display_lvname ( pool_seg - > lv ) ) ;
2012-01-25 12:55:19 +04:00
else
log_error ( " Delete for device %u already queued in pool %s. " ,
2017-06-27 09:28:36 +03:00
delete_id , display_lvname ( pool_seg - > lv ) ) ;
2012-01-25 12:55:19 +04:00
return 0 ;
2011-10-19 20:39:09 +04:00
}
2012-01-25 12:55:19 +04:00
if ( ! ( tmsg = dm_pool_alloc ( pool_seg - > lv - > vg - > vgmem , sizeof ( * tmsg ) ) ) ) {
2011-10-17 18:17:09 +04:00
log_error ( " Failed to allocate memory for message. " ) ;
return 0 ;
}
switch ( type ) {
case DM_THIN_MESSAGE_CREATE_SNAP :
case DM_THIN_MESSAGE_CREATE_THIN :
tmsg - > u . lv = lv ;
break ;
case DM_THIN_MESSAGE_DELETE :
2011-10-19 20:39:09 +04:00
tmsg - > u . delete_id = delete_id ;
2011-10-17 18:17:09 +04:00
break ;
default :
2011-10-19 20:42:14 +04:00
log_error ( INTERNAL_ERROR " Unsupported message type %u. " , type ) ;
2011-10-17 18:17:09 +04:00
return 0 ;
}
tmsg - > type = type ;
2015-08-14 18:41:27 +03:00
/* If the 1st message is add in non-read-only mode, modify transaction_id */
if ( ! no_update & & dm_list_empty ( & pool_seg - > thin_messages ) )
pool_seg - > transaction_id + + ;
2012-01-25 12:55:19 +04:00
dm_list_add ( & pool_seg - > thin_messages , & tmsg - > list ) ;
2011-10-17 18:17:09 +04:00
2014-01-24 13:49:31 +04:00
log_debug_metadata ( " Added %s message. " ,
2013-01-08 02:30:29 +04:00
( type = = DM_THIN_MESSAGE_CREATE_SNAP | |
2014-01-24 13:49:31 +04:00
type = = DM_THIN_MESSAGE_CREATE_THIN ) ? " create " :
2013-01-08 02:30:29 +04:00
( type = = DM_THIN_MESSAGE_DELETE ) ? " delete " : " unknown " ) ;
2011-10-17 18:17:09 +04:00
return 1 ;
}
2013-02-21 13:25:44 +04:00
int attach_thin_external_origin ( struct lv_segment * seg ,
struct logical_volume * external_lv )
{
if ( seg - > external_lv ) {
2017-06-27 09:28:36 +03:00
log_error ( INTERNAL_ERROR " LV %s already has external origin. " ,
display_lvname ( seg - > lv ) ) ;
2013-02-21 13:25:44 +04:00
return 0 ;
}
seg - > external_lv = external_lv ;
if ( external_lv ) {
if ( ! add_seg_to_segs_using_this_lv ( external_lv , seg ) )
return_0 ;
external_lv - > external_count + + ;
if ( external_lv - > status & LVM_WRITE ) {
log_verbose ( " Setting logical volume \" %s \" read-only. " ,
2017-06-27 09:28:36 +03:00
display_lvname ( external_lv ) ) ;
2013-02-21 13:25:44 +04:00
external_lv - > status & = ~ LVM_WRITE ;
}
2016-12-18 17:06:12 +03:00
2017-03-28 17:28:53 +03:00
/* FIXME Mark origin read-only?
if ( lv_is_cache ( external_lv ) ) // read-only corigin of cache LV
seg_lv ( first_seg ( external_lv ) , 0 ) - > status & = ~ LVM_WRITE ;
*/
2013-02-21 13:25:44 +04:00
}
return 1 ;
}
int detach_thin_external_origin ( struct lv_segment * seg )
{
if ( seg - > external_lv ) {
if ( ! lv_is_external_origin ( seg - > external_lv ) ) {
log_error ( INTERNAL_ERROR " Inconsitent external origin. " ) ;
return 0 ;
}
if ( ! remove_seg_from_segs_using_this_lv ( seg - > external_lv , seg ) )
return_0 ;
seg - > external_lv - > external_count - - ;
seg - > external_lv = NULL ;
}
return 1 ;
}
2013-11-29 18:51:28 +04:00
int lv_is_merging_thin_snapshot ( const struct logical_volume * lv )
{
2014-02-22 04:26:01 +04:00
struct lv_segment * seg = first_seg ( lv ) ;
return ( seg & & seg - > status & MERGING ) ? 1 : 0 ;
2013-11-29 18:51:28 +04:00
}
2012-01-25 12:55:19 +04:00
/*
 * Check whether pool has some message queued for LV or for device_id.
 * When LV is NULL and device_id is 0 it just checks for any message.
 *
 * \return 1 when a matching (or, with NULL/0, any) message is queued,
 *         0 otherwise or when seg is not a thin-pool segment.
 */
int pool_has_message(const struct lv_segment *seg,
		     const struct logical_volume *lv, uint32_t device_id)
{
	const struct lv_thin_message *tmsg;

	if (!seg_is_thin_pool(seg)) {
		log_error(INTERNAL_ERROR "LV %s is not pool.", display_lvname(seg->lv));
		return 0;
	}

	/* Wildcard query: is anything queued at all? */
	if (!lv && !device_id)
		return !dm_list_empty(&seg->thin_messages);

	dm_list_iterate_items(tmsg, &seg->thin_messages) {
		switch (tmsg->type) {
		case DM_THIN_MESSAGE_CREATE_SNAP:
		case DM_THIN_MESSAGE_CREATE_THIN:
			/* Create messages are keyed by the referenced LV. */
			if (tmsg->u.lv == lv)
				return 1;
			break;
		case DM_THIN_MESSAGE_DELETE:
			/* Delete messages are keyed by device_id. */
			if (tmsg->u.delete_id == device_id)
				return 1;
			break;
		default:
			break;
		}
	}

	return 0;
}
2013-02-05 19:49:09 +04:00
/*
 * Report whether a thin pool is active anywhere it matters.
 *
 * Clustered VG: the pool counts as active when the pool LV itself or
 * any thin volume using it is active (possibly on a remote node).
 * Non-clustered VG: query device-mapper state directly via lv_info().
 *
 * \return 1 when active, 0 when inactive or lv is not a thin pool.
 */
int pool_is_active(const struct logical_volume *lv)
{
	struct lvinfo info;
	const struct seg_list *sl;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "pool_is_active called with non-pool volume %s.",
			  display_lvname(lv));
		return 0;
	}

	/* On clustered VG, query every related thin pool volume */
	if (vg_is_clustered(lv->vg)) {
		if (lv_is_active(lv))
			return 1;

		/* Any active thin LV implies its pool is active too. */
		dm_list_iterate_items(sl, &lv->segs_using_this_lv)
			if (lv_is_active(sl->seg->lv)) {
				log_debug_activation("Pool's thin volume %s is active.",
						     display_lvname(sl->seg->lv));
				return 1;
			}
	} else if (lv_info(lv->vg->cmd, lv, 1, &info, 0, 0) && info.exists)
		return 1; /* Non clustered VG - just checks for '-tpool' */

	return 0;
}
2014-01-23 14:47:10 +04:00
/*
 * Test whether the kernel thin-pool target supports a given feature
 * (THIN_FEATURE_* bit).
 *
 * The target attribute bitmask is probed at most once per process and
 * cached in a function-local static; subsequent calls reuse it.
 * NOTE(review): the static cache is not thread-safe — acceptable only
 * under lvm2's single-threaded command model; confirm before reuse.
 *
 * \return 1 when the feature bit is present, 0 otherwise or on error.
 */
int thin_pool_feature_supported(const struct logical_volume *lv, int feature)
{
	static unsigned attr = 0U;	/* cached target_present() attrs, 0 = not yet probed */
	struct lv_segment *seg;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "LV %s is not thin pool.", display_lvname(lv));
		return 0;
	}

	seg = first_seg(lv);
	/* Probe the kernel target only once and only when activation is enabled. */
	if ((attr == 0U) && activation() && seg->segtype &&
	    seg->segtype->ops->target_present &&
	    !seg->segtype->ops->target_present(lv->vg->cmd, NULL, &attr)) {
		log_error("%s: Required device-mapper target(s) not "
			  "detected in your kernel.", lvseg_name(seg));
		return 0;
	}

	return (attr & feature) ? 1 : 0;
}
2016-09-16 22:50:14 +03:00
/*
 * Compute the usage threshold (as a dm_percent_t of used metadata space)
 * above which the thin-pool metadata LV is considered critically full.
 */
int pool_metadata_min_threshold(const struct lv_segment *pool_seg)
{
	/*
	 * Hardcoded minimal requirement for thin pool target.
	 *
	 * In the metadata LV there should be a minimum of either 4MiB of free space
	 * or at least 25% of free space, which applies when the size of the thin
	 * pool's metadata is less than 16MiB.
	 */
	const dm_percent_t meta_min = DM_PERCENT_1 * 25;
	/* 4MiB expressed as a percentage of the metadata LV's size. */
	dm_percent_t meta_free = dm_make_percent(((4096 * 1024) >> SECTOR_SHIFT),
						 pool_seg->metadata_lv->size);

	/* Keep the smaller of the two free-space requirements. */
	if (meta_min < meta_free)
		meta_free = meta_min;

	/* Convert required-free into allowed-used. */
	return DM_PERCENT_100 - meta_free;
}
2012-02-08 17:05:38 +04:00
/*
 * Check whether thin-pool data and metadata usage are both still below
 * the configured autoextend threshold (and the hardcoded metadata
 * minimum from pool_metadata_min_threshold()).
 *
 * \return 1 when the pool is comfortably below all thresholds,
 *         0 when any threshold has been reached (details are logged).
 */
int pool_below_threshold(const struct lv_segment *pool_seg)
{
	struct cmd_context *cmd = pool_seg->lv->vg->cmd;
	dm_percent_t percent;
	dm_percent_t min_threshold = pool_metadata_min_threshold(pool_seg);
	/* Configured autoextend threshold (profile-aware), scaled to dm_percent_t. */
	dm_percent_t threshold = DM_PERCENT_1 *
		find_config_tree_int(cmd, activation_thin_pool_autoextend_threshold_CFG,
				     lv_config_profile(pool_seg->lv));

	/* Data */
	if (!lv_thin_pool_percent(pool_seg->lv, 0, &percent))
		return_0;

	/* A completely full pool fails regardless of the configured threshold. */
	if (percent > threshold || percent >= DM_PERCENT_100) {
		log_debug("Threshold configured for free data space in "
			  "thin pool %s has been reached (%s%% >= %s%%).",
			  display_lvname(pool_seg->lv),
			  display_percent(cmd, percent),
			  display_percent(cmd, threshold));
		return 0;
	}

	/* Metadata */
	if (!lv_thin_pool_percent(pool_seg->lv, 1, &percent))
		return_0;

	/* Hardcoded metadata safety margin checked before the configured one. */
	if (percent >= min_threshold) {
		log_warn("WARNING: Remaining free space in metadata of thin pool %s "
			 "is too low (%s%% >= %s%%). "
			 "Resize is recommended.",
			 display_lvname(pool_seg->lv),
			 display_percent(cmd, percent),
			 display_percent(cmd, min_threshold));
		return 0;
	}

	if (percent > threshold) {
		log_debug("Threshold configured for free metadata space in "
			  "thin pool %s has been reached (%s%% > %s%%).",
			  display_lvname(pool_seg->lv),
			  display_percent(cmd, percent),
			  display_percent(cmd, threshold));
		return 0;
	}

	return 1;
}
2015-07-03 16:31:31 +03:00
/*
 * Detect overprovisioning and check lvm2 is configured for auto resize.
 *
 * If the passed LV is a thin volume/pool, check first only this one for
 * overprovisioning.  Lots of tests combined together.
 * Test is not detecting status of dmeventd, too complex for now...
 *
 * Always returns 1 — this routine only warns, it never blocks the caller.
 */
int pool_check_overprovisioning(const struct logical_volume *lv)
{
	const struct lv_list *lvl;
	const struct seg_list *sl;
	const struct logical_volume *pool_lv = NULL;
	struct cmd_context *cmd = lv->vg->cmd;
	const char *txt = "";
	uint64_t thinsum = 0, poolsum = 0, sz = ~0;	/* sz == ~0 means "no warning" */
	int threshold, max_threshold = 0;
	int percent, min_percent = 100;
	int more_pools = 0;

	/* When passed thin volume, check related pool first */
	if (lv_is_thin_volume(lv))
		pool_lv = first_seg(lv)->pool_lv;
	else if (lv_is_thin_pool(lv))
		pool_lv = lv;

	if (pool_lv) {
		poolsum += pool_lv->size;
		dm_list_iterate_items(sl, &pool_lv->segs_using_this_lv)
			thinsum += sl->seg->lv->size;

		if (thinsum <= poolsum)
			return 1; /* All thins fit into this thin pool */
	}

	/* Sum all thins and all thin pools in VG */
	dm_list_iterate_items(lvl, &lv->vg->lvs) {
		if (!lv_is_thin_pool(lvl->lv))
			continue;
		/* Track the worst autoextend settings across all pools. */
		threshold = find_config_tree_int(cmd, activation_thin_pool_autoextend_threshold_CFG,
						 lv_config_profile(lvl->lv));
		percent = find_config_tree_int(cmd, activation_thin_pool_autoextend_percent_CFG,
					       lv_config_profile(lvl->lv));
		if (threshold > max_threshold)
			max_threshold = threshold;
		if (percent < min_percent)
			min_percent = percent;
		if (lvl->lv == pool_lv)
			continue; /* Skip iteration for already checked thin pool */
		more_pools++;
		poolsum += lvl->lv->size;
		dm_list_iterate_items(sl, &lvl->lv->segs_using_this_lv)
			thinsum += sl->seg->lv->size;
	}

	if (thinsum <= poolsum)
		return 1; /* All fits for all pools */

	/* Pick the most alarming comparison baseline for the warning text. */
	if ((sz = vg_size(lv->vg)) < thinsum)
		/* Thin sum size is above VG size */
		txt = " and the size of whole volume group";
	else if ((sz = vg_free(lv->vg)) < thinsum)
		/* Thin sum size is more than free space in a VG */
		txt = !sz ? "" : " and the amount of free space in volume group";
	else if ((max_threshold > 99) || !min_percent)
		/* There is some free space in VG, but it is not configured
		 * for growing - threshold is 100% or percent is 0% */
		sz = poolsum;
	else
		sz = UINT64_C(~0); /* No warning */

	if (sz != UINT64_C(~0)) {
		log_warn("WARNING: Sum of all thin volume sizes (%s) exceeds the "
			 "size of thin pool%s%s%s (%s)!",
			 display_size(cmd, thinsum),
			 more_pools ? "" : " ",
			 more_pools ? "s" : display_lvname(pool_lv),
			 txt,
			 (sz > 0) ? display_size(cmd, sz) : "no free space in volume group");

		if (max_threshold > 99)
			log_print_unless_silent("For thin pool auto extension activation/thin_pool_autoextend_threshold should be below 100.");
		if (!min_percent)
			log_print_unless_silent("For thin pool auto extension activation/thin_pool_autoextend_percent should be above 0.");
	}

	return 1;
}
2014-01-29 17:27:13 +04:00
/*
* Validate given external origin could be used with thin pool
*/
int pool_supports_external_origin ( const struct lv_segment * pool_seg , const struct logical_volume * external_lv )
{
uint32_t csize = pool_seg - > chunk_size ;
2015-06-18 15:38:57 +03:00
if ( ( ( external_lv - > size < csize ) | | ( external_lv - > size % csize ) ) & &
! thin_pool_feature_supported ( pool_seg - > lv , THIN_FEATURE_EXTERNAL_ORIGIN_EXTEND ) ) {
log_error ( " Can't use \" %s \" as external origin with \" %s \" pool. "
2014-01-29 17:27:13 +04:00
" Size %s is not a multiple of pool's chunk size %s. " ,
2015-06-18 15:38:57 +03:00
display_lvname ( external_lv ) , display_lvname ( pool_seg - > lv ) ,
2014-01-29 17:27:13 +04:00
display_size ( external_lv - > vg - > cmd , external_lv - > size ) ,
display_size ( external_lv - > vg - > cmd , csize ) ) ;
return 0 ;
}
return 1 ;
}
2014-01-08 13:27:17 +04:00
struct logical_volume * find_pool_lv ( const struct logical_volume * lv )
2013-06-11 14:32:01 +04:00
{
struct lv_segment * seg ;
2013-06-15 00:02:12 +04:00
if ( ! ( seg = first_seg ( lv ) ) ) {
2017-06-27 09:28:36 +03:00
log_error ( " LV %s has no segment. " , display_lvname ( lv ) ) ;
2013-06-15 00:02:12 +04:00
return NULL ;
}
if ( ! ( seg = find_pool_seg ( seg ) ) )
2013-06-11 14:32:01 +04:00
return_NULL ;
return seg - > lv ;
}
2011-10-03 22:39:17 +04:00
/*
* Find a free device_id for given thin_pool segment .
*
* \ return
* Free device id , or 0 if free device_id is not found .
*
* FIXME : Improve naive search and keep the value cached
* and updated during VG lifetime ( so no const for lv_segment )
*/
uint32_t get_free_pool_device_id ( struct lv_segment * thin_pool_seg )
{
2011-11-03 18:36:40 +04:00
uint32_t max_id = 0 ;
struct seg_list * sl ;
2011-10-03 22:39:17 +04:00
if ( ! seg_is_thin_pool ( thin_pool_seg ) ) {
2011-10-31 02:00:57 +04:00
log_error ( INTERNAL_ERROR
" Segment in %s is not a thin pool segment. " ,
2017-06-27 09:28:36 +03:00
display_lvname ( thin_pool_seg - > lv ) ) ;
2011-10-03 22:39:17 +04:00
return 0 ;
}
2011-11-03 18:36:40 +04:00
dm_list_iterate_items ( sl , & thin_pool_seg - > lv - > segs_using_this_lv )
if ( sl - > seg - > device_id > max_id )
max_id = sl - > seg - > device_id ;
2011-10-03 22:39:17 +04:00
2011-10-31 02:00:57 +04:00
if ( + + max_id > DM_THIN_MAX_DEVICE_ID ) {
2012-01-23 21:46:31 +04:00
/* FIXME Find empty holes instead of aborting! */
2011-10-31 02:00:57 +04:00
log_error ( " Cannot find free device_id. " ) ;
2011-10-03 22:39:17 +04:00
return 0 ;
}
2013-01-08 02:30:29 +04:00
log_debug_metadata ( " Found free pool device_id %u. " , max_id ) ;
2011-10-03 22:39:17 +04:00
return max_id ;
}
2011-10-22 20:44:23 +04:00
2014-08-26 14:10:29 +04:00
/*
 * Before sending any queued create-thin message, make sure the pool is
 * locally active and still below its usage thresholds — creating a new
 * thin volume in an overfull pool is refused.
 *
 * \return 1 when creation may proceed (or no create message is queued),
 *         0 on failed precondition.
 */
static int _check_pool_create(const struct logical_volume *lv)
{
	const struct lv_thin_message *lmsg;
	struct lvinfo info;

	dm_list_iterate_items(lmsg, &first_seg(lv)->thin_messages) {
		if (lmsg->type != DM_THIN_MESSAGE_CREATE_THIN)
			continue;
		/* When creating new thin LV, check for size would be needed */
		if (!lv_info(lv->vg->cmd, lv, 1, &info, 0, 0) ||
		    !info.exists) {
			log_error("Pool %s needs to be locally active for threshold check.",
				  display_lvname(lv));
			return 0;
		}
		if (!pool_below_threshold(first_seg(lv))) {
			log_error("Free space in pool %s is above threshold, new volumes are not allowed.",
				  display_lvname(lv));
			return 0;
		}
		/* One create message is enough to trigger the check. */
		break;
	}

	return 1;
}
2011-11-03 18:53:58 +04:00
/*
 * Flush the pool's queued thin messages to the kernel and commit the
 * metadata.
 *
 * When 'activate' is set and the pool is inactive, it is temporarily
 * activated (with dmeventd monitoring suppressed) just to deliver the
 * messages, then deactivated again.  Messages themselves are delivered
 * by the suspend_lv_origin()/resume_lv_origin() pair — message passing
 * happens in the suspend phase (see the commit rationale above this
 * file's functions).
 *
 * \return 1 on success, 0 on failure (errors are logged).
 */
int update_pool_lv(struct logical_volume *lv, int activate)
{
	int monitored;
	int ret = 1;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "Updated LV %s is not pool.", display_lvname(lv));
		return 0;
	}

	if (dm_list_empty(&(first_seg(lv)->thin_messages)))
		return 1; /* No messages */

	if (activate) {
		/* If the pool is not active, do activate deactivate */
		/* Suppress dmeventd monitoring for the transient activation. */
		monitored = dmeventd_monitor_mode();
		init_dmeventd_monitor(DMEVENTD_MONITOR_IGNORE);
		if (!lv_is_active(lv)) {
			/*
			 * FIXME:
			 *   Rewrite activation code to handle whole tree of thinLVs
			 *   as this version has major problem when it does not know
			 *   which Node has pool active.
			 */
			if (!activate_lv_excl(lv->vg->cmd, lv)) {
				init_dmeventd_monitor(monitored);
				return_0;
			}
			/* Activation may be silently skipped (e.g. volume_list). */
			if (!lv_is_active(lv)) {
				init_dmeventd_monitor(monitored);
				log_error("Cannot activate thin pool %s, perhaps skipped in lvm.conf volume_list?",
					  display_lvname(lv));
				return 0;
			}
		} else
			activate = 0; /* Was already active */

		if (!(ret = _check_pool_create(lv)))
			stack; /* Safety guard, needs local presence of thin-pool target */
		else {
			/* Suspend of the pool origin is what posts the messages. */
			if (!(ret = suspend_lv_origin(lv->vg->cmd, lv)))
				/* Send messages */
				log_error("Failed to suspend %s with queued messages.",
					  display_lvname(lv));

			/* Even failing suspend needs resume */
			if (!resume_lv_origin(lv->vg->cmd, lv)) {
				log_error("Failed to resume %s.", display_lvname(lv));
				ret = 0;
			}
		}

		/* Undo the transient activation performed above. */
		if (activate &&
		    !deactivate_lv(lv->vg->cmd, lv)) {
			log_error("Failed to deactivate %s.", display_lvname(lv));
			ret = 0;
		}
		init_dmeventd_monitor(monitored);

		/* Unlock memory if possible */
		memlock_unlock(lv->vg->cmd);

		if (!ret)
			return_0;
	}

	/* Messages are delivered (or being discarded without activation). */
	dm_list_init(&(first_seg(lv)->thin_messages));

	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	return ret;
}
2012-06-28 16:47:34 +04:00
2017-03-09 17:02:07 +03:00
/*
 * Shared helper for the metadata-size / chunk-size estimations below:
 * divides the pool's data size (in sectors) by 'size' scaled so that
 * each pool block costs 64 bytes of metadata.
 */
static uint64_t _estimate_size(uint32_t data_extents, uint32_t extent_size, uint64_t size)
{
	/*
	 * nr_pool_blocks = data_size / metadata_size
	 * chunk_size = nr_pool_blocks * 64b / sector_size
	 */
	return (uint64_t) data_extents * extent_size / (size * (SECTOR_SIZE / UINT64_C(64)));
}
/* Estimate thin pool metadata size from data size and chunk size (in sector units) */
static uint64_t _estimate_metadata_size(uint32_t data_extents, uint32_t extent_size, uint32_t chunk_size)
{
	/* Symmetric with _estimate_chunk_size(): same formula, swapped unknown. */
	return _estimate_size(data_extents, extent_size, chunk_size);
}
2017-06-08 11:46:22 +03:00
/* Estimate maximal supportable thin pool data size for given chunk_size (sectors) */
static uint64_t _estimate_max_data_size(uint32_t chunk_size)
{
	/* Largest metadata device the thin target supports, in sectors. */
	uint64_t max_metadata_size = DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2;

	/* Inverse of the metadata estimation at the maximum metadata size. */
	return chunk_size * max_metadata_size * SECTOR_SIZE / UINT64_C(64);
}
2017-03-09 17:02:07 +03:00
/* Estimate thin pool chunk size from data and metadata size (in sector units) */
static uint32_t _estimate_chunk_size ( uint32_t data_extents , uint32_t extent_size ,
uint64_t metadata_size , int attr )
{
uint32_t chunk_size = _estimate_size ( data_extents , extent_size , metadata_size ) ;
2014-11-26 11:27:40 +03:00
if ( attr & THIN_FEATURE_BLOCK_SIZE ) {
/* Round up to 64KB */
chunk_size + = DM_THIN_MIN_DATA_BLOCK_SIZE - 1 ;
2017-03-09 17:02:07 +03:00
chunk_size & = ~ ( uint32_t ) ( DM_THIN_MIN_DATA_BLOCK_SIZE - 1 ) ;
2014-11-26 11:27:40 +03:00
} else {
/* Round up to nearest power of 2 */
chunk_size - - ;
chunk_size | = chunk_size > > 1 ;
chunk_size | = chunk_size > > 2 ;
chunk_size | = chunk_size > > 4 ;
chunk_size | = chunk_size > > 8 ;
chunk_size | = chunk_size > > 16 ;
chunk_size + + ;
}
2017-03-09 17:02:07 +03:00
if ( chunk_size < DM_THIN_MIN_DATA_BLOCK_SIZE )
chunk_size = DM_THIN_MIN_DATA_BLOCK_SIZE ;
else if ( chunk_size > DM_THIN_MAX_DATA_BLOCK_SIZE )
chunk_size = DM_THIN_MAX_DATA_BLOCK_SIZE ;
2014-11-26 11:27:40 +03:00
return chunk_size ;
}
2017-03-01 13:23:26 +03:00
int get_default_allocation_thin_pool_chunk_size ( struct cmd_context * cmd , struct profile * profile ,
uint32_t * chunk_size , int * chunk_size_calc_method )
{
const char * str ;
if ( ! ( str = find_config_tree_str ( cmd , allocation_thin_pool_chunk_size_policy_CFG , profile ) ) ) {
log_error ( INTERNAL_ERROR " Cannot find configuration. " ) ;
return 0 ;
}
if ( ! strcasecmp ( str , " generic " ) ) {
* chunk_size = DEFAULT_THIN_POOL_CHUNK_SIZE * 2 ;
* chunk_size_calc_method = THIN_CHUNK_SIZE_CALC_METHOD_GENERIC ;
} else if ( ! strcasecmp ( str , " performance " ) ) {
* chunk_size = DEFAULT_THIN_POOL_CHUNK_SIZE_PERFORMANCE * 2 ;
* chunk_size_calc_method = THIN_CHUNK_SIZE_CALC_METHOD_PERFORMANCE ;
} else {
log_error ( " Thin pool chunk size calculation policy \" %s \" is unrecognised. " , str ) ;
return 0 ;
}
return 1 ;
}
2017-03-09 18:24:28 +03:00
int update_thin_pool_params ( struct cmd_context * cmd ,
struct profile * profile ,
uint32_t extent_size ,
const struct segment_type * segtype ,
unsigned attr ,
2014-10-30 15:04:06 +03:00
uint32_t pool_data_extents ,
uint32_t * pool_metadata_extents ,
2014-07-23 00:20:18 +04:00
int * chunk_size_calc_method , uint32_t * chunk_size ,
2017-03-03 22:46:13 +03:00
thin_discards_t * discards , thin_zero_t * zero_new_blocks )
2013-08-06 13:42:40 +04:00
{
2014-10-30 15:04:06 +03:00
uint64_t pool_metadata_size = ( uint64_t ) * pool_metadata_extents * extent_size ;
2017-03-09 18:24:28 +03:00
uint32_t estimate_chunk_size ;
2017-06-08 11:46:22 +03:00
uint64_t max_pool_data_size ;
2013-09-25 18:00:52 +04:00
const char * str ;
2013-08-06 13:42:40 +04:00
2017-03-09 18:24:28 +03:00
if ( ! * chunk_size & &
find_config_tree_node ( cmd , allocation_thin_pool_chunk_size_CFG , profile ) )
* chunk_size = find_config_tree_int ( cmd , allocation_thin_pool_chunk_size_CFG , profile ) * 2 ;
2013-08-06 18:28:12 +04:00
2017-03-09 18:24:28 +03:00
if ( * chunk_size & & ! ( attr & THIN_FEATURE_BLOCK_SIZE ) & &
! is_power_of_2 ( * chunk_size ) ) {
log_error ( " Chunk size must be a power of 2 for this thin target version. " ) ;
return 0 ;
}
2013-08-06 13:42:40 +04:00
2017-03-09 18:24:28 +03:00
if ( ( * discards = = THIN_DISCARDS_UNSELECTED ) & &
2017-03-03 22:46:13 +03:00
find_config_tree_node ( cmd , allocation_thin_pool_discards_CFG , profile ) ) {
2014-05-07 12:52:00 +04:00
if ( ! ( str = find_config_tree_str ( cmd , allocation_thin_pool_discards_CFG , profile ) ) ) {
2014-05-13 12:33:17 +04:00
log_error ( INTERNAL_ERROR " Could not find configuration. " ) ;
2014-05-07 12:52:00 +04:00
return 0 ;
}
2014-11-08 03:28:38 +03:00
if ( ! set_pool_discards ( discards , str ) )
2013-08-06 13:42:40 +04:00
return_0 ;
}
2017-03-16 02:34:31 +03:00
if ( ( * zero_new_blocks = = THIN_ZERO_UNSELECTED ) & &
2017-03-03 22:46:13 +03:00
find_config_tree_node ( cmd , allocation_thin_pool_zero_CFG , profile ) )
* zero_new_blocks = find_config_tree_bool ( cmd , allocation_thin_pool_zero_CFG , profile )
? THIN_ZERO_YES : THIN_ZERO_NO ;
2013-08-06 13:42:40 +04:00
2014-10-30 15:04:06 +03:00
if ( ! pool_metadata_size ) {
2017-03-09 17:02:07 +03:00
if ( ! * chunk_size ) {
if ( ! get_default_allocation_thin_pool_chunk_size ( cmd , profile ,
chunk_size ,
chunk_size_calc_method ) )
return_0 ;
pool_metadata_size = _estimate_metadata_size ( pool_data_extents , extent_size , * chunk_size ) ;
/* Check if we should eventually use bigger chunk size */
2014-10-30 15:04:06 +03:00
while ( ( pool_metadata_size >
2017-06-09 22:29:34 +03:00
( DEFAULT_THIN_POOL_OPTIMAL_METADATA_SIZE * 2 ) ) & &
2013-03-11 15:37:09 +04:00
( * chunk_size < DM_THIN_MAX_DATA_BLOCK_SIZE ) ) {
* chunk_size < < = 1 ;
2014-10-30 15:04:06 +03:00
pool_metadata_size > > = 1 ;
2013-03-11 15:37:09 +04:00
}
log_verbose ( " Setting chunk size to %s. " ,
display_size ( cmd , * chunk_size ) ) ;
2017-03-09 17:02:07 +03:00
} else {
pool_metadata_size = _estimate_metadata_size ( pool_data_extents , extent_size , * chunk_size ) ;
if ( pool_metadata_size > ( DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2 ) ) {
/* Suggest bigger chunk size */
estimate_chunk_size =
_estimate_chunk_size ( pool_data_extents , extent_size ,
( DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2 ) , attr ) ;
log_warn ( " WARNING: Chunk size is too small for pool, suggested minimum is %s. " ,
display_size ( cmd , estimate_chunk_size ) ) ;
}
2013-03-11 15:37:09 +04:00
}
2014-10-30 15:04:06 +03:00
/* Round up to extent size silently */
if ( pool_metadata_size % extent_size )
pool_metadata_size + = extent_size - pool_metadata_size % extent_size ;
2013-03-11 15:37:09 +04:00
} else {
2017-03-09 17:02:07 +03:00
estimate_chunk_size = _estimate_chunk_size ( pool_data_extents , extent_size ,
pool_metadata_size , attr ) ;
2014-07-23 00:20:18 +04:00
2013-03-11 15:37:09 +04:00
/* Check to eventually use bigger chunk size */
2017-03-09 18:24:28 +03:00
if ( ! * chunk_size ) {
2013-03-11 15:37:09 +04:00
* chunk_size = estimate_chunk_size ;
2014-07-23 00:20:18 +04:00
log_verbose ( " Setting chunk size %s. " , display_size ( cmd , * chunk_size ) ) ;
2013-03-11 15:37:09 +04:00
} else if ( * chunk_size < estimate_chunk_size ) {
/* Suggest bigger chunk size */
log_warn ( " WARNING: Chunk size is smaller then suggested minimum size %s. " ,
display_size ( cmd , estimate_chunk_size ) ) ;
}
}
2017-06-08 11:46:22 +03:00
max_pool_data_size = _estimate_max_data_size ( * chunk_size ) ;
if ( ( max_pool_data_size / extent_size ) < pool_data_extents ) {
log_error ( " Selected chunk size %s cannot address more then %s of thin pool data space. " ,
display_size ( cmd , * chunk_size ) , display_size ( cmd , max_pool_data_size ) ) ;
return 0 ;
}
2017-06-09 22:31:02 +03:00
log_print_unless_silent ( " Thin pool volume with chunk size %s can address at most %s of data. " ,
2017-06-08 11:46:22 +03:00
display_size ( cmd , * chunk_size ) , display_size ( cmd , max_pool_data_size ) ) ;
2017-03-09 18:24:28 +03:00
if ( ! validate_thin_pool_chunk_size ( cmd , * chunk_size ) )
return_0 ;
2014-10-30 15:04:06 +03:00
if ( pool_metadata_size > ( 2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE ) ) {
pool_metadata_size = 2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE ;
2017-03-09 18:24:28 +03:00
if ( * pool_metadata_extents )
2013-03-11 15:37:09 +04:00
log_warn ( " WARNING: Maximum supported pool metadata size is %s. " ,
2014-10-30 15:04:06 +03:00
display_size ( cmd , pool_metadata_size ) ) ;
} else if ( pool_metadata_size < ( 2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE ) ) {
pool_metadata_size = 2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE ;
2017-03-09 18:24:28 +03:00
if ( * pool_metadata_extents )
2013-03-11 15:37:09 +04:00
log_warn ( " WARNING: Minimum supported pool metadata size is %s. " ,
2014-10-30 15:04:06 +03:00
display_size ( cmd , pool_metadata_size ) ) ;
2013-03-11 15:37:09 +04:00
}
2014-10-30 15:04:06 +03:00
if ( ! ( * pool_metadata_extents =
2017-03-09 18:24:28 +03:00
extents_from_size ( cmd , pool_metadata_size , extent_size ) ) )
2014-10-30 15:04:06 +03:00
return_0 ;
2017-03-09 18:24:28 +03:00
if ( ( uint64_t ) * chunk_size > ( uint64_t ) pool_data_extents * extent_size ) {
log_error ( " Size of %s data volume cannot be smaller than chunk size %s. " ,
segtype - > name , display_size ( cmd , * chunk_size ) ) ;
return 0 ;
}
2017-03-03 22:46:13 +03:00
2017-03-09 18:24:28 +03:00
if ( ( * discards = = THIN_DISCARDS_UNSELECTED ) & &
! set_pool_discards ( discards , DEFAULT_THIN_POOL_DISCARDS ) )
return_0 ;
2017-07-21 21:58:33 +03:00
if ( * zero_new_blocks = = THIN_ZERO_UNSELECTED ) {
2017-03-03 22:46:13 +03:00
* zero_new_blocks = ( DEFAULT_THIN_POOL_ZERO ) ? THIN_ZERO_YES : THIN_ZERO_NO ;
2017-07-21 21:58:33 +03:00
log_verbose ( " %s pool zeroing on default. " , ( * zero_new_blocks = = THIN_ZERO_YES ) ?
" Enabling " : " Disabling " ) ;
}
if ( ( * zero_new_blocks = = THIN_ZERO_YES ) & &
( * chunk_size > = DEFAULT_THIN_POOL_CHUNK_SIZE_PERFORMANCE * 2 ) ) {
log_warn ( " WARNING: Pool zeroing and %s large chunk size slows down thin provisioning. " ,
display_size ( cmd , * chunk_size ) ) ;
log_warn ( " WARNING: Consider disabling zeroing (-Zn) or using smaller chunk size (<%s). " ,
display_size ( cmd , DEFAULT_THIN_POOL_CHUNK_SIZE_PERFORMANCE * 2 ) ) ;
}
2017-03-03 22:46:13 +03:00
2017-03-09 18:24:28 +03:00
log_verbose ( " Preferred pool metadata size %s. " ,
display_size ( cmd , ( uint64_t ) * pool_metadata_extents * extent_size ) ) ;
2013-03-11 15:37:09 +04:00
return 1 ;
}
2014-11-08 03:28:38 +03:00
/*
 * Parse a discards mode name ("passdown", "nopassdown" or "ignore",
 * case-insensitive) into *discards.
 *
 * Returns 1 on success, 0 (with a logged error) for an unknown name.
 */
int set_pool_discards(thin_discards_t *discards, const char *str)
{
	if (!strcasecmp(str, "passdown")) {
		*discards = THIN_DISCARDS_PASSDOWN;
		return 1;
	}

	if (!strcasecmp(str, "nopassdown")) {
		*discards = THIN_DISCARDS_NO_PASSDOWN;
		return 1;
	}

	if (!strcasecmp(str, "ignore")) {
		*discards = THIN_DISCARDS_IGNORE;
		return 1;
	}

	log_error("Thin pool discards type \"%s\" is unknown.", str);

	return 0;
}
2012-08-08 00:24:41 +04:00
/* Map a thin_discards_t value back to its canonical lvm.conf name. */
const char *get_pool_discards_name(thin_discards_t discards)
{
	switch (discards) {
	case THIN_DISCARDS_IGNORE:
		return "ignore";
	case THIN_DISCARDS_NO_PASSDOWN:
		return "nopassdown";
	case THIN_DISCARDS_PASSDOWN:
		return "passdown";
	default:
		/* Should be unreachable - all enum values are handled above */
		log_error(INTERNAL_ERROR "Unknown discards type encountered.");
		return "unknown";
	}
}
2014-08-15 15:08:30 +04:00
2014-08-15 17:43:42 +04:00
int lv_is_thin_origin ( const struct logical_volume * lv , unsigned int * snap_count )
2014-08-15 15:08:30 +04:00
{
struct seg_list * segl ;
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
int r = 0 ;
2014-08-15 17:43:42 +04:00
if ( snap_count )
* snap_count = 0 ;
2014-08-15 15:08:30 +04:00
2017-03-08 16:28:28 +03:00
if ( lv_is_thin_volume ( lv ) )
dm_list_iterate_items ( segl , & lv - > segs_using_this_lv )
if ( segl - > seg - > origin = = lv ) {
r = 1 ;
if ( ! snap_count )
break ; /* not interested in number of snapshots */
2014-08-15 15:08:30 +04:00
2014-08-15 17:43:42 +04:00
( * snap_count ) + + ;
2017-03-08 16:28:28 +03:00
}
2014-08-15 15:08:30 +04:00
Add lv_layout_and_type fn, lv_layout and lv_type reporting fields.
The lv_layout and lv_type fields together help with LV identification.
We can do basic identification using the lv_attr field which provides
very condensed view. In contrast to that, the new lv_layout and lv_type
fields provide more detialed information on exact layout and type used
for LVs.
For top-level LVs which are pure types not combined with any
other LV types, the lv_layout value is equal to lv_type value.
For non-top-level LVs which may be combined with other types,
the lv_layout describes the underlying layout used, while the
lv_type describes the use/type/usage of the LV.
These two new fields are both string lists so selection (-S/--select)
criteria can be defined using the list operators easily:
[] for strict matching
{} for subset matching.
For example, let's consider this:
$ lvs -a -o name,vg_name,lv_attr,layout,type
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
pool vg twi-a-tz-- pool,thin pool,thin
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tdata_rimage_0] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_1] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_2] vg iwi-aor--- linear image,raid
[pool_tdata_rimage_3] vg iwi-aor--- linear image,raid
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rimage_0] vg iwi-aor--- linear image,raid
[pool_tmeta_rimage_1] vg iwi-aor--- linear image,raid
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
thin_vol1 vg Vwi-a-tz-- thin thin
thin_vol2 vg Vwi-a-tz-- thin multiple,origin,thin
Which is a situation with thin pool, thin volumes and thin snapshots.
We can see internal 'pool_tdata' volume that makes up thin pool has
actually a level10 raid layout and the internal 'pool_tmeta' has
level1 raid layout. Also, we can see that 'thin_snap1' and 'thin_snap2'
are both thin snapshots while 'thin_vol1' is thin origin (having
multiple snapshots).
Such reporting scheme provides much better base for selection criteria
in addition to providing more detailed information, for example:
$ lvs -a -o name,vg_name,lv_attr,layout,type -S 'type=metadata'
LV VG Attr Layout Type
[lvol1_pmspare] vg ewi------- linear metadata,pool,spare
[pool_tdata_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_1] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_2] vg ewi-aor--- linear metadata,raid
[pool_tdata_rmeta_3] vg ewi-aor--- linear metadata,raid
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
[pool_tmeta_rmeta_0] vg ewi-aor--- linear metadata,raid
[pool_tmeta_rmeta_1] vg ewi-aor--- linear metadata,raid
(selected all LVs which are related to metadata of any type)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={metadata,thin}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs which hold metadata related to thin)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'type={thin,snapshot}'
LV VG Attr Layout Type
thin_snap1 vg Vwi---tz-k thin snapshot,thin
thin_snap2 vg Vwi---tz-k thin snapshot,thin
(selected all LVs which are thin snapshots)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout=raid'
LV VG Attr Layout Type
[pool_tdata] vg rwi-aor--- level10,raid data,pool,thin
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid layout, any raid layout)
lvs -a -o name,vg_name,lv_attr,layout,type -S 'layout={raid,level1}'
LV VG Attr Layout Type
[pool_tmeta] vg ewi-aor--- level1,raid metadata,pool,thin
(selected all LVs with raid level1 layout exactly)
And so on...
2014-08-13 12:03:45 +04:00
return r ;
2014-08-15 15:08:30 +04:00
}
2014-11-04 17:06:55 +03:00
commands: new method for defining commands
. Define a prototype for every lvm command.
. Match every user command with one definition.
. Generate help text and man pages from them.
The new file command-lines.in defines a prototype for every
unique lvm command. A unique lvm command is a unique
combination of: command name + required option args +
required positional args. Each of these prototypes also
includes the optional option args and optional positional
args that the command will accept, a description, and a
unique string ID for the definition. Any valid command
will match one of the prototypes.
Here's an example of the lvresize command definitions from
command-lines.in, there are three unique lvresize commands:
lvresize --size SizeMB LV
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync, --reportformat String, --resizefs,
--stripes Number, --stripesize SizeKB, --poolmetadatasize SizeMB
OP: PV ...
ID: lvresize_by_size
DESC: Resize an LV by a specified size.
lvresize LV PV ...
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync,
--reportformat String, --resizefs, --stripes Number, --stripesize SizeKB
ID: lvresize_by_pv
DESC: Resize an LV by specified PV extents.
FLAGS: SECONDARY_SYNTAX
lvresize --poolmetadatasize SizeMB LV_thinpool
OO: --alloc Alloc, --autobackup Bool, --force,
--nofsck, --nosync, --noudevsync,
--reportformat String, --stripes Number, --stripesize SizeKB
OP: PV ...
ID: lvresize_pool_metadata_by_size
DESC: Resize a pool metadata SubLV by a specified size.
The three commands have separate definitions because they have
different required parameters. Required parameters are specified
on the first line of the definition. Optional options are
listed after OO, and optional positional args are listed after OP.
This data is used to generate corresponding command definition
structures for lvm in command-lines.h. usage/help output is also
auto generated, so it is always in sync with the definitions.
Every user-entered command is compared against the set of
command structures, and matched with one. An error is
reported if an entered command does not have the required
parameters for any definition. The closest match is printed
as a suggestion, and running lvresize --help will display
the usage for each possible lvresize command.
The prototype syntax used for help/man output includes
required --option and positional args on the first line,
and optional --option and positional args enclosed in [ ]
on subsequent lines.
command_name <required_opt_args> <required_pos_args>
[ <optional_opt_args> ]
[ <optional_pos_args> ]
Command definitions that are not to be advertised/suggested
have the flag SECONDARY_SYNTAX. These commands will not be
printed in the normal help output.
Man page prototypes are also generated from the same original
command definitions, and are always in sync with the code
and help text.
Very early in command execution, a matching command definition
is found. lvm then knows the operation being done, and that
the provided args conform to the definition. This will allow
lots of ad hoc checking/validation to be removed throughout
the code.
Each command definition can also be routed to a specific
function to implement it. The function is associated with
an enum value for the command definition (generated from
the ID string.) These per-command-definition implementation
functions have not yet been created, so all commands
currently fall back to the existing per-command-name
implementation functions.
Using per-command-definition functions will allow lots of
code to be removed which tries to figure out what the
command is meant to do. This is currently based on ad hoc
and complicated option analysis. When using the new
functions, what the command is doing is already known
from the associated command definition.
2016-08-12 23:52:18 +03:00
int lv_is_thin_snapshot ( const struct logical_volume * lv )
{
struct lv_segment * seg ;
if ( ! lv_is_thin_volume ( lv ) )
return 0 ;
if ( ( seg = first_seg ( lv ) ) & & ( seg - > origin | | seg - > external_lv ) )
return 1 ;
return 0 ;
}
2014-11-04 17:06:55 +03:00
/*
* Explict check of new thin pool for usability
*
* Allow use of thin pools by external apps . When lvm2 metadata has
* transaction_id = = 0 for a new thin pool , it will explicitely validate
* the pool is still unused .
*
* To prevent lvm2 to create thin volumes in externally used thin pools
* simply increment its transaction_id .
*/
int check_new_thin_pool ( const struct logical_volume * pool_lv )
{
struct cmd_context * cmd = pool_lv - > vg - > cmd ;
uint64_t transaction_id ;
/* For transaction_id check LOCAL activation is required */
if ( ! activate_lv_excl_local ( cmd , pool_lv ) ) {
log_error ( " Aborting. Failed to locally activate thin pool %s. " ,
display_lvname ( pool_lv ) ) ;
return 0 ;
}
/* With volume lists, check pool really is locally active */
if ( ! lv_thin_pool_transaction_id ( pool_lv , & transaction_id ) ) {
log_error ( " Cannot read thin pool %s transaction id locally, perhaps skipped in lvm.conf volume_list? " ,
display_lvname ( pool_lv ) ) ;
return 0 ;
}
/* Require pool to have same transaction_id as new */
if ( first_seg ( pool_lv ) - > transaction_id ! = transaction_id ) {
log_error ( " Cannot use thin pool %s with transaction id "
2015-07-06 17:09:17 +03:00
FMTu64 " for thin volumes. "
2014-11-04 17:06:55 +03:00
" Expected transaction id % " PRIu64 " . " ,
display_lvname ( pool_lv ) , transaction_id ,
first_seg ( pool_lv ) - > transaction_id ) ;
return 0 ;
}
2016-06-14 15:56:17 +03:00
log_verbose ( " Deactivating public thin pool %s. " ,
2014-11-04 17:06:55 +03:00
display_lvname ( pool_lv ) ) ;
/* Prevent any 'race' with in-use thin pool and always deactivate */
if ( ! deactivate_lv ( pool_lv - > vg - > cmd , pool_lv ) ) {
log_error ( " Aborting. Could not deactivate thin pool %s. " ,
display_lvname ( pool_lv ) ) ;
return 0 ;
}
return 1 ;
}
2017-03-05 19:41:16 +03:00
/*
 * Check chunk_size (in sectors) against the thin target's constraints:
 * it must lie within [DM_THIN_MIN_DATA_BLOCK_SIZE, DM_THIN_MAX_DATA_BLOCK_SIZE]
 * and be a multiple of DM_THIN_MIN_DATA_BLOCK_SIZE.
 *
 * Both problems are reported independently; returns 1 only when the
 * chunk size satisfies both constraints.
 */
int validate_thin_pool_chunk_size(struct cmd_context *cmd, uint32_t chunk_size)
{
	int valid = 1;

	if ((chunk_size < DM_THIN_MIN_DATA_BLOCK_SIZE) ||
	    (chunk_size > DM_THIN_MAX_DATA_BLOCK_SIZE)) {
		log_error("Thin pool chunk size %s is not in the range %s to %s.",
			  display_size(cmd, chunk_size),
			  display_size(cmd, DM_THIN_MIN_DATA_BLOCK_SIZE),
			  display_size(cmd, DM_THIN_MAX_DATA_BLOCK_SIZE));
		valid = 0;
	}

	/* Min block size is a power of 2, so the mask tests divisibility */
	if (chunk_size & (DM_THIN_MIN_DATA_BLOCK_SIZE - 1)) {
		log_error("Thin pool chunk size %s must be a multiple of %s.",
			  display_size(cmd, chunk_size),
			  display_size(cmd, DM_THIN_MIN_DATA_BLOCK_SIZE));
		valid = 0;
	}

	return valid;
}