/*
 * Copyright (C) 2011-2013 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "lib.h"
#include "activate.h"
#include "locking.h"
#include "memlock.h"
#include "metadata.h"
#include "segtype.h"
#include "defaults.h"
#include "display.h"
/* TODO: drop unused no_update */
int attach_pool_message(struct lv_segment *pool_seg, dm_thin_message_t type,
			struct logical_volume *lv, uint32_t delete_id,
			int no_update)
{
	struct lv_thin_message *tmsg;

	if (!seg_is_thin_pool(pool_seg)) {
		log_error(INTERNAL_ERROR "Cannot attach message to non-pool LV %s.",
			  pool_seg->lv->name);
		return 0;
	}

	if (pool_has_message(pool_seg, lv, delete_id)) {
		if (lv)
			log_error("Message referring LV %s already queued in pool %s.",
				  lv->name, pool_seg->lv->name);
		else
			log_error("Delete for device %u already queued in pool %s.",
				  delete_id, pool_seg->lv->name);
		return 0;
	}

	if (!(tmsg = dm_pool_alloc(pool_seg->lv->vg->vgmem, sizeof(*tmsg)))) {
		log_error("Failed to allocate memory for message.");
		return 0;
	}

	switch (type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
	case DM_THIN_MESSAGE_CREATE_THIN:
		tmsg->u.lv = lv;
		break;
	case DM_THIN_MESSAGE_DELETE:
		tmsg->u.delete_id = delete_id;
		break;
	default:
		log_error(INTERNAL_ERROR "Unsupported message type %u.", type);
		return 0;
	}

	tmsg->type = type;

	/* If the first message is added in non-read-only mode, modify transaction_id */
	if (!no_update && dm_list_empty(&pool_seg->thin_messages))
		pool_seg->transaction_id++;

	dm_list_add(&pool_seg->thin_messages, &tmsg->list);

	log_debug_metadata("Added %s message.",
			   (type == DM_THIN_MESSAGE_CREATE_SNAP ||
			    type == DM_THIN_MESSAGE_CREATE_THIN) ? "create" :
			   (type == DM_THIN_MESSAGE_DELETE) ? "delete" : "unknown");

	return 1;
}
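
/*
 * Usage sketch (illustrative names, not from this file): queueing a create
 * message for a new thin LV; the first queued message bumps
 * pool_seg->transaction_id once, and update_pool_lv() later posts the
 * whole batch to the kernel:
 *
 *	if (!attach_pool_message(first_seg(pool_lv), DM_THIN_MESSAGE_CREATE_THIN,
 *				 thin_lv, 0, 0))
 *		return_0;
 */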
int attach_thin_external_origin(struct lv_segment *seg,
				struct logical_volume *external_lv)
{
	if (seg->external_lv) {
		log_error(INTERNAL_ERROR "LV \"%s\" already has external origin.",
			  seg->lv->name);
		return 0;
	}

	seg->external_lv = external_lv;

	if (external_lv) {
		if (!add_seg_to_segs_using_this_lv(external_lv, seg))
			return_0;

		external_lv->external_count++;

		if (external_lv->status & LVM_WRITE) {
			log_verbose("Setting logical volume \"%s\" read-only.",
				    external_lv->name);
			external_lv->status &= ~LVM_WRITE;
		}
	}

	return 1;
}

int detach_thin_external_origin(struct lv_segment *seg)
{
	if (seg->external_lv) {
		if (!lv_is_external_origin(seg->external_lv)) {
			log_error(INTERNAL_ERROR "Inconsistent external origin.");
			return 0;
		}

		if (!remove_seg_from_segs_using_this_lv(seg->external_lv, seg))
			return_0;

		seg->external_lv->external_count--;
		seg->external_lv = NULL;
	}

	return 1;
}
int lv_is_merging_thin_snapshot(const struct logical_volume *lv)
{
	struct lv_segment *seg = first_seg(lv);

	return (seg && seg->status & MERGING) ? 1 : 0;
}
/*
 * Check whether pool has some message queued for LV or for device_id.
 * When LV is NULL and device_id is 0 it just checks for any message.
 */
int pool_has_message(const struct lv_segment *seg,
		     const struct logical_volume *lv, uint32_t device_id)
{
	const struct lv_thin_message *tmsg;

	if (!seg_is_thin_pool(seg)) {
		log_error(INTERNAL_ERROR "LV %s is not pool.", seg->lv->name);
		return 0;
	}

	if (!lv && !device_id)
		return !dm_list_empty(&seg->thin_messages);

	dm_list_iterate_items(tmsg, &seg->thin_messages) {
		switch (tmsg->type) {
		case DM_THIN_MESSAGE_CREATE_SNAP:
		case DM_THIN_MESSAGE_CREATE_THIN:
			if (tmsg->u.lv == lv)
				return 1;
			break;
		case DM_THIN_MESSAGE_DELETE:
			if (tmsg->u.delete_id == device_id)
				return 1;
			break;
		default:
			break;
		}
	}

	return 0;
}
int pool_is_active(const struct logical_volume *lv)
{
	struct lvinfo info;
	const struct seg_list *sl;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "pool_is_active called with non-pool LV %s.",
			  lv->name);
		return 0;
	}

	/* On clustered VG, query every related thin pool volume */
	if (vg_is_clustered(lv->vg)) {
		if (lv_is_active(lv))
			return 1;

		dm_list_iterate_items(sl, &lv->segs_using_this_lv)
			if (lv_is_active(sl->seg->lv)) {
				log_debug("Thin volume \"%s\" is active.",
					  sl->seg->lv->name);
				return 1;
			}
	} else if (lv_info(lv->vg->cmd, lv, 1, &info, 0, 0) && info.exists)
		return 1; /* Non clustered VG - just checks for '-tpool' */

	return 0;
}
int thin_pool_feature_supported(const struct logical_volume *lv, int feature)
{
	static unsigned attr = 0U;
	struct lv_segment *seg;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "LV %s is not thin pool.", lv->name);
		return 0;
	}

	seg = first_seg(lv);
	if ((attr == 0U) && activation() && seg->segtype &&
	    seg->segtype->ops->target_present &&
	    !seg->segtype->ops->target_present(lv->vg->cmd, NULL, &attr)) {
		log_error("%s: Required device-mapper target(s) not "
			  "detected in your kernel.", seg->segtype->name);
		return 0;
	}

	return (attr & feature) ? 1 : 0;
}
int pool_below_threshold(const struct lv_segment *pool_seg)
{
	dm_percent_t percent;
	dm_percent_t threshold = DM_PERCENT_1 *
		find_config_tree_int(pool_seg->lv->vg->cmd, activation_thin_pool_autoextend_threshold_CFG,
				     lv_config_profile(pool_seg->lv));

	/* Data */
	if (!lv_thin_pool_percent(pool_seg->lv, 0, &percent))
		return_0;

	if (percent > threshold) {
		log_debug("Threshold configured for free data space in "
			  "thin pool %s has been reached (%.2f%% > %.2f%%).",
			  display_lvname(pool_seg->lv),
			  dm_percent_to_float(percent),
			  dm_percent_to_float(threshold));
		return 0;
	}

	/* Metadata */
	if (!lv_thin_pool_percent(pool_seg->lv, 1, &percent))
		return_0;

	if (percent > threshold) {
		log_debug("Threshold configured for free metadata space in "
			  "thin pool %s has been reached (%.2f%% > %.2f%%).",
			  display_lvname(pool_seg->lv),
			  dm_percent_to_float(percent),
			  dm_percent_to_float(threshold));
		return 0;
	}

	return 1;
}
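
/*
 * Note (sketch): dm_percent_t is libdm's fixed-point percentage type, so a
 * thin_pool_autoextend_threshold of e.g. 70 scales to DM_PERCENT_1 * 70,
 * which is directly comparable with the usage percentage returned by
 * lv_thin_pool_percent() and printable via dm_percent_to_float().
 */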
/*
 * Detect overprovisioning and check lvm2 is configured for auto resize.
 *
 * If the passed LV is a thin volume/pool, check only this one first for
 * overprovisioning. Lots of tests combined together.
 * Test is not detecting status of dmeventd, too complex for now...
 */
int pool_check_overprovisioning(const struct logical_volume *lv)
{
	const struct lv_list *lvl;
	const struct seg_list *sl;
	const struct logical_volume *pool_lv = NULL;
	struct cmd_context *cmd = lv->vg->cmd;
	const char *txt = "";
	uint64_t thinsum = 0, poolsum = 0, sz = ~0;
	int threshold, max_threshold = 0;
	int percent, min_percent = 100;
	int more_pools = 0;

	/* When passed thin volume, check related pool first */
	if (lv_is_thin_volume(lv))
		pool_lv = first_seg(lv)->pool_lv;
	else if (lv_is_thin_pool(lv))
		pool_lv = lv;

	if (pool_lv) {
		poolsum += pool_lv->size;
		dm_list_iterate_items(sl, &pool_lv->segs_using_this_lv)
			thinsum += sl->seg->lv->size;

		if (thinsum <= poolsum)
			return 1; /* All thins fit into this thin pool */
	}

	/* Sum all thins and all thin pools in VG */
	dm_list_iterate_items(lvl, &lv->vg->lvs) {
		if (!lv_is_thin_pool(lvl->lv))
			continue;
		threshold = find_config_tree_int(cmd, activation_thin_pool_autoextend_threshold_CFG,
						 lv_config_profile(lvl->lv));
		percent = find_config_tree_int(cmd, activation_thin_pool_autoextend_percent_CFG,
					       lv_config_profile(lvl->lv));
		if (threshold > max_threshold)
			max_threshold = threshold;
		if (percent < min_percent)
			min_percent = percent;
		if (lvl->lv == pool_lv)
			continue; /* Skip iteration for already checked thin pool */
		more_pools++;
		poolsum += lvl->lv->size;
		dm_list_iterate_items(sl, &lvl->lv->segs_using_this_lv)
			thinsum += sl->seg->lv->size;
	}

	if (thinsum <= poolsum)
		return 1; /* All fits for all pools */

	if ((sz = vg_size(lv->vg)) < thinsum)
		/* Thin sum size is above VG size */
		txt = " and the size of whole volume group";
	else if ((sz = vg_free(lv->vg)) < thinsum)
		/* Thin sum size is more than free space in a VG */
		txt = !sz ? "" : " and the amount of free space in volume group";
	else if ((max_threshold > 99) || !min_percent)
		/* There is some free space in VG, but it is not configured
		 * for growing - threshold is 100% or percent is 0% */
		sz = poolsum;
	else
		sz = ~0; /* No warning */

	if (sz != ~0) {
		log_warn("WARNING: Sum of all thin volume sizes (%s) exceeds the "
			 "size of thin pool%s%s%s (%s)!",
			 display_size(cmd, thinsum),
			 more_pools ? "" : " ",
			 more_pools ? "s" : display_lvname(pool_lv),
			 txt,
			 (sz > 0) ? display_size(cmd, sz) : "no free space in volume group");

		if (max_threshold > 99)
			log_print_unless_silent("For thin pool auto extension activation/thin_pool_autoextend_threshold should be below 100.");
		if (!min_percent)
			log_print_unless_silent("For thin pool auto extension activation/thin_pool_autoextend_percent should be above 0.");
	}

	return 1;
}
/*
 * Validate whether given external origin could be used with thin pool
 */
int pool_supports_external_origin(const struct lv_segment *pool_seg, const struct logical_volume *external_lv)
{
	uint32_t csize = pool_seg->chunk_size;

	if (((external_lv->size < csize) || (external_lv->size % csize)) &&
	    !thin_pool_feature_supported(pool_seg->lv, THIN_FEATURE_EXTERNAL_ORIGIN_EXTEND)) {
		log_error("Can't use \"%s\" as external origin with \"%s\" pool. "
			  "Size %s is not a multiple of pool's chunk size %s.",
			  display_lvname(external_lv), display_lvname(pool_seg->lv),
			  display_size(external_lv->vg->cmd, external_lv->size),
			  display_size(external_lv->vg->cmd, csize));
		return 0;
	}

	return 1;
}
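
/*
 * Example (illustrative): with a 64KiB chunk size (csize == 128 sectors),
 * an external origin of 192 sectors is rejected on older thin targets,
 * since 192 % 128 != 0; with THIN_FEATURE_EXTERNAL_ORIGIN_EXTEND the
 * kernel can handle an origin that is not chunk-aligned.
 */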
struct logical_volume *find_pool_lv(const struct logical_volume *lv)
{
	struct lv_segment *seg;

	if (!(seg = first_seg(lv))) {
		log_error("LV %s has no segment.", lv->name);
		return NULL;
	}

	if (!(seg = find_pool_seg(seg)))
		return_NULL;

	return seg->lv;
}
/*
 * Find a free device_id for given thin_pool segment.
 *
 * \return
 * Free device id, or 0 if free device_id is not found.
 *
 * FIXME: Improve naive search and keep the value cached
 * and updated during VG lifetime (so no const for lv_segment).
 */
uint32_t get_free_pool_device_id(struct lv_segment *thin_pool_seg)
{
	uint32_t max_id = 0;
	struct seg_list *sl;

	if (!seg_is_thin_pool(thin_pool_seg)) {
		log_error(INTERNAL_ERROR
			  "Segment in %s is not a thin pool segment.",
			  thin_pool_seg->lv->name);
		return 0;
	}

	dm_list_iterate_items(sl, &thin_pool_seg->lv->segs_using_this_lv)
		if (sl->seg->device_id > max_id)
			max_id = sl->seg->device_id;

	if (++max_id > DM_THIN_MAX_DEVICE_ID) {
		/* FIXME Find empty holes instead of aborting! */
		log_error("Cannot find free device_id.");
		return 0;
	}

	log_debug_metadata("Found free pool device_id %u.", max_id);

	return max_id;
}
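
/*
 * Example (illustrative): with thin devices using ids {1, 2, 5}, the scan
 * above sets max_id = 5 and returns 6 - holes such as 3 and 4 are never
 * reused, which is exactly what the FIXME is about.
 */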
static int _check_pool_create(const struct logical_volume *lv)
{
	const struct lv_thin_message *lmsg;
	struct lvinfo info;

	dm_list_iterate_items(lmsg, &first_seg(lv)->thin_messages) {
		if (lmsg->type != DM_THIN_MESSAGE_CREATE_THIN)
			continue;
		/* When creating new thin LV, check for size would be needed */
		if (!lv_info(lv->vg->cmd, lv, 1, &info, 0, 0) ||
		    !info.exists) {
			log_error("Pool %s needs to be locally active for threshold check.",
				  display_lvname(lv));
			return 0;
		}
		if (!pool_below_threshold(first_seg(lv))) {
			log_error("Free space in pool %s is above threshold, new volumes are not allowed.",
				  display_lvname(lv));
			return 0;
		}
		break;
	}

	return 1;
}
int update_pool_lv(struct logical_volume *lv, int activate)
{
	int monitored = DMEVENTD_MONITOR_IGNORE;
	int ret = 1;

	if (!lv_is_thin_pool(lv)) {
		log_error(INTERNAL_ERROR "Updated LV %s is not pool.", lv->name);
		return 0;
	}

	if (dm_list_empty(&(first_seg(lv)->thin_messages)))
		return 1; /* No messages */

	if (activate) {
		/* If the pool is not active, activate it now and deactivate when done */
		if (!lv_is_active(lv)) {
			monitored = dmeventd_monitor_mode();
			init_dmeventd_monitor(DMEVENTD_MONITOR_IGNORE);
			if (!activate_lv_excl(lv->vg->cmd, lv)) {
				init_dmeventd_monitor(monitored);
				return_0;
			}
			if (!lv_is_active(lv)) {
				init_dmeventd_monitor(monitored);
				log_error("Cannot activate thin pool %s, perhaps skipped in lvm.conf volume_list?",
					  display_lvname(lv));
				return 0;
			}
		} else
			activate = 0; /* Was already active */

		if (!(ret = _check_pool_create(lv)))
			stack; /* Safety guard, needs local presence of thin-pool target */
		else if (!(ret = suspend_lv_origin(lv->vg->cmd, lv)))
			/* Send messages */
			log_error("Failed to suspend and send message %s.", display_lvname(lv));
		else if (!(ret = resume_lv_origin(lv->vg->cmd, lv)))
			log_error("Failed to resume %s.", display_lvname(lv));

		if (activate) {
			if (!deactivate_lv(lv->vg->cmd, lv)) {
				init_dmeventd_monitor(monitored);
				return_0;
			}
			init_dmeventd_monitor(monitored);
		}
		/* Unlock memory if possible */
		memlock_unlock(lv->vg->cmd);

		if (!ret)
			return_0;
	}

	dm_list_init(&(first_seg(lv)->thin_messages));

	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	return ret;
}
/* Estimate thin pool chunk size from data and metadata size (in sector units) */
static size_t _estimate_chunk_size(uint64_t data_size, uint64_t metadata_size, int attr)
{
	/*
	 * nr_pool_blocks = data_size / metadata_size
	 * chunk_size = nr_pool_blocks * 64b / sector_size
	 */
	size_t chunk_size = data_size / (metadata_size * (SECTOR_SIZE / 64));

	if (attr & THIN_FEATURE_BLOCK_SIZE) {
		/* Round up to 64KB */
		chunk_size += DM_THIN_MIN_DATA_BLOCK_SIZE - 1;
		chunk_size &= ~(size_t)(DM_THIN_MIN_DATA_BLOCK_SIZE - 1);
	} else {
		/* Round up to nearest power of 2 */
		chunk_size--;
		chunk_size |= chunk_size >> 1;
		chunk_size |= chunk_size >> 2;
		chunk_size |= chunk_size >> 4;
		chunk_size |= chunk_size >> 8;
		chunk_size |= chunk_size >> 16;
		chunk_size++;
	}

	return chunk_size;
}
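
/*
 * Worked example (illustrative, assuming 512-byte sectors, so
 * SECTOR_SIZE / 64 == 8): for data_size = 4294967296 sectors (2TiB) and
 * metadata_size = 262144 sectors (128MiB),
 * chunk_size = 4294967296 / (262144 * 8) = 2048 sectors (1MiB);
 * rounding to a multiple of DM_THIN_MIN_DATA_BLOCK_SIZE (or to a power
 * of 2) then leaves 2048 unchanged.
 */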
int update_thin_pool_params(const struct segment_type *segtype,
			    struct volume_group *vg,
			    unsigned attr, int passed_args,
			    uint32_t pool_data_extents,
			    uint32_t *pool_metadata_extents,
			    int *chunk_size_calc_method, uint32_t *chunk_size,
			    thin_discards_t *discards, int *zero)
{
	struct cmd_context *cmd = vg->cmd;
	struct profile *profile = vg->profile;
	uint32_t extent_size = vg->extent_size;
	uint64_t pool_metadata_size = (uint64_t) *pool_metadata_extents * extent_size;
	size_t estimate_chunk_size;
	const char *str;

	if (!(passed_args & PASS_ARG_CHUNK_SIZE)) {
		if (!(*chunk_size = find_config_tree_int(cmd, allocation_thin_pool_chunk_size_CFG, profile) * 2)) {
			if (!(str = find_config_tree_str(cmd, allocation_thin_pool_chunk_size_policy_CFG, profile))) {
				log_error(INTERNAL_ERROR "Could not find configuration.");
				return 0;
			}

			if (!strcasecmp(str, "generic"))
				*chunk_size_calc_method = THIN_CHUNK_SIZE_CALC_METHOD_GENERIC;
			else if (!strcasecmp(str, "performance"))
				*chunk_size_calc_method = THIN_CHUNK_SIZE_CALC_METHOD_PERFORMANCE;
			else {
				log_error("Thin pool chunk size calculation policy \"%s\" is unrecognised.", str);
				return 0;
			}

			if (!(*chunk_size = get_default_allocation_thin_pool_chunk_size_CFG(cmd, profile)))
				return_0;
		}
	}

	if (!validate_pool_chunk_size(cmd, segtype, *chunk_size))
		return_0;

	if (!(passed_args & PASS_ARG_DISCARDS)) {
		if (!(str = find_config_tree_str(cmd, allocation_thin_pool_discards_CFG, profile))) {
			log_error(INTERNAL_ERROR "Could not find configuration.");
			return 0;
		}
		if (!set_pool_discards(discards, str))
			return_0;
	}

	if (!(passed_args & PASS_ARG_ZERO))
		*zero = find_config_tree_bool(cmd, allocation_thin_pool_zero_CFG, profile);

	if (!(attr & THIN_FEATURE_BLOCK_SIZE) &&
	    (*chunk_size & (*chunk_size - 1))) {
		log_error("Chunk size must be a power of 2 for this thin target version.");
		return 0;
	}

	if (!pool_metadata_size) {
		/* Defaults to nr_pool_blocks * 64b converted to size in sectors */
		pool_metadata_size = (uint64_t) pool_data_extents * extent_size /
			(*chunk_size * (SECTOR_SIZE / UINT64_C(64)));
		/* Check if we could eventually use bigger chunk size */
		if (!(passed_args & PASS_ARG_CHUNK_SIZE)) {
			while ((pool_metadata_size >
				(DEFAULT_THIN_POOL_OPTIMAL_SIZE / SECTOR_SIZE)) &&
			       (*chunk_size < DM_THIN_MAX_DATA_BLOCK_SIZE)) {
				*chunk_size <<= 1;
				pool_metadata_size >>= 1;
			}
			log_verbose("Setting chunk size to %s.",
				    display_size(cmd, *chunk_size));
		} else if (pool_metadata_size > (DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2)) {
			/* Suggest bigger chunk size */
			estimate_chunk_size =
				_estimate_chunk_size((uint64_t) pool_data_extents * extent_size,
						     (DEFAULT_THIN_POOL_MAX_METADATA_SIZE * 2), attr);
			log_warn("WARNING: Chunk size is too small for pool, suggested minimum is %s.",
				 display_size(cmd, estimate_chunk_size));
		}

		/* Round up to extent size silently */
		if (pool_metadata_size % extent_size)
			pool_metadata_size += extent_size - pool_metadata_size % extent_size;
	} else {
		estimate_chunk_size =
			_estimate_chunk_size((uint64_t) pool_data_extents * extent_size,
					     pool_metadata_size, attr);

		if (estimate_chunk_size < DM_THIN_MIN_DATA_BLOCK_SIZE)
			estimate_chunk_size = DM_THIN_MIN_DATA_BLOCK_SIZE;
		else if (estimate_chunk_size > DM_THIN_MAX_DATA_BLOCK_SIZE)
			estimate_chunk_size = DM_THIN_MAX_DATA_BLOCK_SIZE;

		/* Check to eventually use bigger chunk size */
		if (!(passed_args & PASS_ARG_CHUNK_SIZE)) {
			*chunk_size = estimate_chunk_size;
			log_verbose("Setting chunk size %s.", display_size(cmd, *chunk_size));
		} else if (*chunk_size < estimate_chunk_size) {
			/* Suggest bigger chunk size */
			log_warn("WARNING: Chunk size is smaller than suggested minimum size %s.",
				 display_size(cmd, estimate_chunk_size));
		}
	}

	if (pool_metadata_size > (2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE)) {
		pool_metadata_size = 2 * DEFAULT_THIN_POOL_MAX_METADATA_SIZE;
		if (passed_args & PASS_ARG_POOL_METADATA_SIZE)
			log_warn("WARNING: Maximum supported pool metadata size is %s.",
				 display_size(cmd, pool_metadata_size));
	} else if (pool_metadata_size < (2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE)) {
		pool_metadata_size = 2 * DEFAULT_THIN_POOL_MIN_METADATA_SIZE;
		if (passed_args & PASS_ARG_POOL_METADATA_SIZE)
			log_warn("WARNING: Minimum supported pool metadata size is %s.",
				 display_size(cmd, pool_metadata_size));
	}

	if (!(*pool_metadata_extents =
	      extents_from_size(vg->cmd, pool_metadata_size, extent_size)))
		return_0;

	return 1;
}
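
/*
 * Worked example (illustrative, 512-byte sectors): for a 100GiB data LV
 * (209715200 sectors) with a 64KiB chunk size (128 sectors), the default
 * metadata size is 209715200 / (128 * 8) = 204800 sectors = 100MiB,
 * which is then rounded up to a whole number of extents.
 */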
int set_pool_discards(thin_discards_t *discards, const char *str)
{
	if (!strcasecmp(str, "passdown"))
		*discards = THIN_DISCARDS_PASSDOWN;
	else if (!strcasecmp(str, "nopassdown"))
		*discards = THIN_DISCARDS_NO_PASSDOWN;
	else if (!strcasecmp(str, "ignore"))
		*discards = THIN_DISCARDS_IGNORE;
	else {
		log_error("Thin pool discards type \"%s\" is unknown.", str);
		return 0;
	}

	return 1;
}
const char *get_pool_discards_name(thin_discards_t discards)
{
	switch (discards) {
	case THIN_DISCARDS_PASSDOWN:
		return "passdown";
	case THIN_DISCARDS_NO_PASSDOWN:
		return "nopassdown";
	case THIN_DISCARDS_IGNORE:
		return "ignore";
	}

	log_error(INTERNAL_ERROR "Unknown discards type encountered.");

	return "unknown";
}
int lv_is_thin_origin(const struct logical_volume *lv, unsigned int *snap_count)
{
	struct seg_list *segl;
	int r = 0;

	if (snap_count)
		*snap_count = 0;

	if (!lv_is_thin_volume(lv) ||
	    dm_list_empty(&lv->segs_using_this_lv))
		return 0;

	dm_list_iterate_items(segl, &lv->segs_using_this_lv) {
		if (segl->seg->origin == lv) {
			r = 1;
			if (snap_count)
				(*snap_count)++;
			else
				/* not interested in number of snapshots */
				break;
		}
	}
	return r;
}
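
/*
 * Example (illustrative names): for a thin volume with two snapshots taken
 * from it, lv_is_thin_origin(lv, &cnt) returns 1 and sets cnt = 2; passing
 * NULL for snap_count stops the scan at the first matching snapshot.
 */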
/*
 * Explicit check of new thin pool for usability
 *
 * Allow use of thin pools by external apps. When lvm2 metadata has
 * transaction_id == 0 for a new thin pool, it will explicitly validate
 * that the pool is still unused.
 *
 * To prevent lvm2 from creating thin volumes in externally used thin pools,
 * simply increment its transaction_id.
 */
int check_new_thin_pool(const struct logical_volume *pool_lv)
{
	struct cmd_context *cmd = pool_lv->vg->cmd;
	uint64_t transaction_id;

	/* For transaction_id check LOCAL activation is required */
	if (!activate_lv_excl_local(cmd, pool_lv)) {
		log_error("Aborting. Failed to locally activate thin pool %s.",
			  display_lvname(pool_lv));
		return 0;
	}

	/* With volume lists, check pool really is locally active */
	if (!lv_thin_pool_transaction_id(pool_lv, &transaction_id)) {
		log_error("Cannot read thin pool %s transaction id locally, perhaps skipped in lvm.conf volume_list?",
			  display_lvname(pool_lv));
		return 0;
	}

	/* Require pool to have same transaction_id as new */
	if (first_seg(pool_lv)->transaction_id != transaction_id) {
		log_error("Cannot use thin pool %s with transaction id "
			  FMTu64 " for thin volumes. "
			  "Expected transaction id %" PRIu64 ".",
			  display_lvname(pool_lv), transaction_id,
			  first_seg(pool_lv)->transaction_id);
		return 0;
	}

	log_verbose("Deactivating public thin pool %s.",
		    display_lvname(pool_lv));

	/* Prevent any 'race' with in-use thin pool and always deactivate */
	if (!deactivate_lv(pool_lv->vg->cmd, pool_lv)) {
		log_error("Aborting. Could not deactivate thin pool %s.",
			  display_lvname(pool_lv));
		return 0;
	}

	return 1;
}