/*
 * Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "lib/misc/lib.h"
#include "lib/metadata/metadata.h"
#include "lib/locking/locking.h"
#include "lib/misc/lvm-string.h"
#include "lib/commands/toolcontext.h"
#include "lib/display/display.h"
#include "lib/metadata/segtype.h"
#include "lib/activate/activate.h"
#include "lib/config/defaults.h"
#include "lib/metadata/lv_alloc.h"
#include "lib/misc/lvm-signal.h"
#include "lib/activate/dev_manager.h"

/* https://github.com/jthornber/thin-provisioning-tools/blob/master/caching/cache_metadata_size.cc */
#define DM_TRANSACTION_OVERHEAD		4096	/* KiB */
#define DM_BYTES_PER_BLOCK		16	/* bytes */
#define DM_HINT_OVERHEAD_PER_BLOCK	8	/* bytes */
#define DM_MAX_HINT_WIDTH		(4 + 16)	/* bytes.  FIXME Configurable? */
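
/*
 * Sizing note (illustrative, derived from the constants above): each cached
 * chunk costs DM_BYTES_PER_BLOCK + DM_MAX_HINT_WIDTH + DM_HINT_OVERHEAD_PER_BLOCK
 * = 16 + 20 + 8 = 44 bytes of metadata, plus a fixed 4 MiB transaction overhead.
 */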

const char *cache_mode_num_to_str(cache_mode_t mode)
{
	switch (mode) {
	case CACHE_MODE_WRITETHROUGH:
		return "writethrough";
	case CACHE_MODE_WRITEBACK:
		return "writeback";
	case CACHE_MODE_PASSTHROUGH:
		return "passthrough";
	default:
		return NULL;
	}
}

const char *get_cache_mode_name(const struct lv_segment *pool_seg)
{
	const char *str;

	if (!(str = cache_mode_num_to_str(pool_seg->cache_mode))) {
		log_error(INTERNAL_ERROR "Cache pool %s has undefined cache mode, using writethrough instead.",
			  display_lvname(pool_seg->lv));
		str = "writethrough";
	}

	return str;
}

const char *display_cache_mode(const struct lv_segment *seg)
{
	const struct lv_segment *setting_seg = NULL;

	if (seg_is_cache(seg) && lv_is_cache_vol(seg->pool_lv))
		setting_seg = seg;

	else if (seg_is_cache_pool(seg))
		setting_seg = seg;

	else if (seg_is_cache(seg))
		setting_seg = first_seg(seg->pool_lv);

	if (!setting_seg || (setting_seg->cache_mode == CACHE_MODE_UNSELECTED))
		return "";

	return cache_mode_num_to_str(setting_seg->cache_mode);
}
int set_cache_mode(cache_mode_t *mode, const char *cache_mode)
{
	if (!strcasecmp(cache_mode, "writethrough"))
		*mode = CACHE_MODE_WRITETHROUGH;
	else if (!strcasecmp(cache_mode, "writeback"))
		*mode = CACHE_MODE_WRITEBACK;
	else if (!strcasecmp(cache_mode, "passthrough"))
		*mode = CACHE_MODE_PASSTHROUGH;
	else {
		log_error("Unknown cache mode: %s.", cache_mode);
		return 0;
	}

	return 1;
}
static cache_mode_t _get_cache_mode_from_config(struct cmd_context *cmd,
						struct profile *profile,
						struct logical_volume *lv)
{
	cache_mode_t mode;
	const char *str;
	int id;

	/* Figure default settings from config/profiles */
	id = allocation_cache_mode_CFG;

	/* If present, check backward compatible settings */
	if (!find_config_node(cmd, cmd->cft, id) &&
	    find_config_node(cmd, cmd->cft, allocation_cache_pool_cachemode_CFG))
		id = allocation_cache_pool_cachemode_CFG;

	if (!(str = find_config_tree_str(cmd, id, profile))) {
		log_error(INTERNAL_ERROR "Cache mode is not determined.");
		return CACHE_MODE_WRITETHROUGH;
	}

	if (!(set_cache_mode(&mode, str)))
		return CACHE_MODE_WRITETHROUGH;

	return mode;
}

int cache_set_cache_mode(struct lv_segment *seg, cache_mode_t mode)
{
	struct cmd_context *cmd = seg->lv->vg->cmd;
	struct lv_segment *setting_seg;

	/*
	 * Don't set a cache mode on an unused cache pool; the
	 * cache mode will be set when it's attached.
	 */
	if (seg_is_cache_pool(seg) && (mode == CACHE_MODE_UNSELECTED))
		return 1;

	if (seg_is_cache(seg) && lv_is_cache_vol(seg->pool_lv))
		setting_seg = seg;

	else if (seg_is_cache_pool(seg))
		setting_seg = seg;

	else if (seg_is_cache(seg))
		setting_seg = first_seg(seg->pool_lv);

	else {
		log_error(INTERNAL_ERROR "Cannot set cache mode for non cache volume %s.",
			  display_lvname(seg->lv));
		return 0;
	}

	if (mode != CACHE_MODE_UNSELECTED) {
		setting_seg->cache_mode = mode;
		return 1;
	}

	if (setting_seg->cache_mode != CACHE_MODE_UNSELECTED)
		return 1;

	setting_seg->cache_mode = _get_cache_mode_from_config(cmd, seg->lv->profile, seg->lv);

	return 1;
}

/*
 * At least warn a user if certain cache stacks may present some problems.
 */
void cache_check_for_warns(const struct lv_segment *seg)
{
	struct logical_volume *origin_lv = seg_lv(seg, 0);

	if (lv_is_raid(origin_lv) &&
	    first_seg(seg->pool_lv)->cache_mode == CACHE_MODE_WRITEBACK)
		log_warn("WARNING: Data redundancy could be lost with writeback "
			 "caching of raid logical volume!");

	if (lv_is_thin_pool_data(seg->lv)) {
		log_warn("WARNING: thin pool data will not be automatically extended when cached.");
		log_warn("WARNING: manual splitcache is required before extending thin pool data.");
	}
}

/*
 * Returns the minimum size of a cache metadata volume for the given data and
 * chunk size (all values in sectors).
 * Default metadata size is: (overhead + mapping size + hint size).
 */
static uint64_t _cache_min_metadata_size(uint64_t data_size, uint32_t chunk_size)
{
	uint64_t min_meta_size;

	min_meta_size = data_size / chunk_size;		/* nr_chunks */
	min_meta_size *= (DM_BYTES_PER_BLOCK + DM_MAX_HINT_WIDTH + DM_HINT_OVERHEAD_PER_BLOCK);
	min_meta_size = (min_meta_size + (SECTOR_SIZE - 1)) >> SECTOR_SHIFT;	/* in sectors */
	min_meta_size += DM_TRANSACTION_OVERHEAD * (1024 >> SECTOR_SHIFT);

	return min_meta_size;
}
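
/*
 * Worked example for _cache_min_metadata_size() (illustrative numbers):
 * 100 GiB of data (209715200 sectors) with 64 KiB chunks (128 sectors)
 * gives 1638400 chunks at 44 bytes each = 72089600 bytes -> 140800 sectors,
 * plus the 4 MiB transaction overhead (8192 sectors) = 148992 sectors
 * (~72.75 MiB).
 */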

int update_cache_pool_params(struct cmd_context *cmd,
			     struct profile *profile,
			     uint32_t extent_size,
			     const struct segment_type *segtype,
			     unsigned attr,
			     uint32_t pool_data_extents,
			     uint32_t *pool_metadata_extents,
			     int *chunk_size_calc_method, uint32_t *chunk_size)
{
	uint64_t min_meta_size;
	uint64_t pool_metadata_size = (uint64_t) *pool_metadata_extents * extent_size;
	uint64_t pool_data_size = (uint64_t) pool_data_extents * extent_size;
	const uint64_t max_chunks =
		get_default_allocation_cache_pool_max_chunks_CFG(cmd, profile);
	/* min chunk size in a multiple of DM_CACHE_MIN_DATA_BLOCK_SIZE */
	uint64_t min_chunk_size = (((pool_data_size + max_chunks - 1) / max_chunks +
				    DM_CACHE_MIN_DATA_BLOCK_SIZE - 1) /
				   DM_CACHE_MIN_DATA_BLOCK_SIZE) * DM_CACHE_MIN_DATA_BLOCK_SIZE;
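	/*
	 * Illustrative example (assumed defaults, not taken from metadata):
	 * with a max_chunks limit of 1000000 and 1 TiB of pool data
	 * (2147483648 sectors), ceil(2147483648 / 1000000) = 2148 sectors,
	 * rounded up to a multiple of DM_CACHE_MIN_DATA_BLOCK_SIZE
	 * (64 sectors), gives 2176 sectors (1088 KiB).
	 */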

	if (!*chunk_size) {
		if (!(*chunk_size = find_config_tree_int(cmd, allocation_cache_pool_chunk_size_CFG,
							 profile) * 2)) {
			*chunk_size = get_default_allocation_cache_pool_chunk_size_CFG(cmd,
										       profile);
			/* Use power-of-2 for min chunk size when unspecified */
			min_chunk_size = 1 << (32 - clz(min_chunk_size - 1));
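			/*
			 * Illustrative example: a computed minimum of 2176
			 * sectors rounds up here to 4096 sectors (2 MiB),
			 * the next power of two.
			 */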
		}

		if (*chunk_size < min_chunk_size) {
			/*
			 * When using a bigger chunk size than the 'standard'
			 * default, keep the user informed that things might
			 * be heading in an unintended direction.
			 */
			log_print_unless_silent("Using %s chunk size instead of default %s, "
						"so cache pool has less than " FMTu64 " chunks.",
						display_size(cmd, min_chunk_size),
						display_size(cmd, *chunk_size),
						max_chunks);
			*chunk_size = min_chunk_size;
		} else
			log_verbose("Setting chunk size to %s.",
				    display_size(cmd, *chunk_size));
	} else if (*chunk_size < min_chunk_size) {
		log_error("Chunk size %s is less than required minimal chunk size %s "
			  "for a cache pool of %s size and limit " FMTu64 " chunks.",
			  display_size(cmd, *chunk_size),
			  display_size(cmd, min_chunk_size),
			  display_size(cmd, pool_data_size),
			  max_chunks);
		log_error("To allow use of more chunks, see setting allocation/cache_pool_max_chunks.");
		return 0;
	}

	if (!validate_cache_chunk_size(cmd, *chunk_size))
		return_0;

	min_meta_size = _cache_min_metadata_size((uint64_t) pool_data_extents * extent_size, *chunk_size);

	/* Round up to extent size */
	if (min_meta_size % extent_size)
		min_meta_size += extent_size - min_meta_size % extent_size;

	if (!pool_metadata_size)
		pool_metadata_size = min_meta_size;

	if (pool_metadata_size > (2 * DEFAULT_CACHE_POOL_MAX_METADATA_SIZE)) {
		pool_metadata_size = 2 * DEFAULT_CACHE_POOL_MAX_METADATA_SIZE;
		if (*pool_metadata_extents)
			log_warn("WARNING: Maximum supported pool metadata size is %s.",
				 display_size(cmd, pool_metadata_size));
	} else if (pool_metadata_size < min_meta_size) {
		if (*pool_metadata_extents)
			log_warn("WARNING: Minimum required pool metadata size is %s "
				 "(needs extra %s).",
				 display_size(cmd, min_meta_size),
				 display_size(cmd, min_meta_size - pool_metadata_size));
		pool_metadata_size = min_meta_size;
	}

	if (!(*pool_metadata_extents =
	      extents_from_size(cmd, pool_metadata_size, extent_size)))
		return_0;

	if ((uint64_t) *chunk_size > (uint64_t) pool_data_extents * extent_size) {
		log_error("Size of %s data volume cannot be smaller than chunk size %s.",
			  segtype->name, display_size(cmd, *chunk_size));
		return 0;
	}

	log_verbose("Preferred pool metadata size %s.",
		    display_size(cmd, (uint64_t) *pool_metadata_extents * extent_size));

	return 1;
}

/*
 * Validate whether an existing cache pool can be used with the given chunk
 * size, i.e. whether the cache pool metadata size can fit all the info.
 */
int validate_lv_cache_chunk_size(struct logical_volume *pool_lv, uint32_t chunk_size)
{
	struct volume_group *vg = pool_lv->vg;
	const uint64_t max_chunks = get_default_allocation_cache_pool_max_chunks_CFG(vg->cmd, pool_lv->profile);
	uint64_t min_size = _cache_min_metadata_size(pool_lv->size, chunk_size);
	uint64_t chunks = pool_lv->size / chunk_size;
	int r = 1;

	if (min_size > first_seg(pool_lv)->metadata_lv->size) {
		log_error("Cannot use chunk size %s with cache pool %s metadata size %s.",
			  display_size(vg->cmd, chunk_size),
			  display_lvname(pool_lv),
			  display_size(vg->cmd, first_seg(pool_lv)->metadata_lv->size));
		log_error("Minimal size for cache pool %s metadata with chunk size %s would be %s.",
			  display_lvname(pool_lv),
			  display_size(vg->cmd, chunk_size),
			  display_size(vg->cmd, min_size));
		r = 0;
	}

	if (chunks > max_chunks) {
		log_error("Cannot use too small chunk size %s with cache pool %s data volume size %s.",
			  display_size(vg->cmd, chunk_size),
			  display_lvname(pool_lv),
			  display_size(pool_lv->vg->cmd, pool_lv->size));
		log_error("Maximum configured chunks for a cache pool is " FMTu64 ".",
			  max_chunks);
		log_error("Use smaller cache pool (<%s) or bigger cache chunk size (>=%s) or enable higher "
			  "values in 'allocation/cache_pool_max_chunks'.",
			  display_size(vg->cmd, chunk_size * max_chunks),
			  display_size(vg->cmd, pool_lv->size / max_chunks));
		r = 0;
	}

	return r;
}

/*
 * Validate arguments for converting an origin into a cached volume with the
 * given cache pool.
 *
 * Always validates origin_lv, and when it is known also the cache pool_lv.
 */
int validate_lv_cache_create_pool(const struct logical_volume *pool_lv)
{
	struct lv_segment *seg;

	if (!lv_is_cache_pool(pool_lv) && !lv_is_cache_vol(pool_lv)) {
		log_error("Logical volume %s is not a cache pool.",
			  display_lvname(pool_lv));
		return 0;
	}

	if (lv_is_locked(pool_lv)) {
		log_error("Cannot use locked cache pool %s.",
			  display_lvname(pool_lv));
		return 0;
	}

	if (!dm_list_empty(&pool_lv->segs_using_this_lv)) {
		seg = get_only_segment_using_this_lv(pool_lv);
		log_error("Logical volume %s is already in use by %s.",
			  display_lvname(pool_lv),
			  seg ? display_lvname(seg->lv) : "another LV");
		return 0;
	}

	return 1;
}

int validate_lv_cache_create_origin(const struct logical_volume *origin_lv)
{
	if (lv_is_locked(origin_lv)) {
		log_error("Cannot use locked origin volume %s.",
			  display_lvname(origin_lv));
		return 0;
	}

	/* For now we only support conversion of thin pool data volume */
	if (!lv_is_visible(origin_lv) &&
	    !lv_is_thin_pool_data(origin_lv) &&
	    !lv_is_vdo_pool_data(origin_lv)) {
		log_error("Can't convert internal LV %s.", display_lvname(origin_lv));
		return 0;
	}

	/*
	 * Only linear, striped or raid supported.
	 * FIXME Tidy up all these type restrictions.
	 */
	if (lv_is_cache_type(origin_lv) ||
	    lv_is_mirror_type(origin_lv) ||
	    lv_is_thin_volume(origin_lv) || lv_is_thin_pool_metadata(origin_lv) ||
	    lv_is_merging_origin(origin_lv) ||
	    lv_is_cow(origin_lv) || lv_is_merging_cow(origin_lv) ||
	    /* TODO: think about enabling caching of a single thin volume */
	    (lv_is_virtual(origin_lv) && !lv_is_vdo(origin_lv))) {
		log_error("Cache is not supported with %s segment type of the original logical volume %s.",
			  lvseg_name(first_seg(origin_lv)), display_lvname(origin_lv));
		return 0;
	}

	return 1;
}

int validate_cache_chunk_size(struct cmd_context *cmd, uint32_t chunk_size)
{
	const uint32_t min_size = DM_CACHE_MIN_DATA_BLOCK_SIZE;
	const uint32_t max_size = DM_CACHE_MAX_DATA_BLOCK_SIZE;
	int r = 1;

	if ((chunk_size < min_size) || (chunk_size > max_size)) {
		log_error("Cache chunk size %s is not in the range %s to %s.",
			  display_size(cmd, chunk_size),
			  display_size(cmd, min_size),
			  display_size(cmd, max_size));
		r = 0;
	}

	if (chunk_size & (min_size - 1)) {
		log_error("Cache chunk size %s must be a multiple of %s.",
			  display_size(cmd, chunk_size),
			  display_size(cmd, min_size));
		r = 0;
	}

	return r;
}
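
/*
 * Example (assuming libdm's usual limits of DM_CACHE_MIN_DATA_BLOCK_SIZE =
 * 64 sectors (32 KiB) and DM_CACHE_MAX_DATA_BLOCK_SIZE = 2097152 sectors
 * (1 GiB)): a 48 KiB chunk (96 sectors) passes the range check but fails
 * the multiple-of-32 KiB check, since 96 & 63 == 32.
 */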

/*
 * lv_cache_create
 * @pool
 * @origin
 *
 * Given a cache_pool and an origin, link the two and create a
 * cached LV.
 *
 * Returns: cache LV on success, NULL on failure
 */
struct logical_volume *lv_cache_create(struct logical_volume *pool_lv,
				       struct logical_volume *origin_lv)
{
	const struct segment_type *segtype;
	struct cmd_context *cmd = pool_lv->vg->cmd;
	struct logical_volume *cache_lv = origin_lv;
	struct lv_segment *seg;

	if (!validate_lv_cache_create_pool(pool_lv) ||
	    !validate_lv_cache_create_origin(cache_lv))
		return_NULL;

	if (lv_is_thin_pool(cache_lv) || lv_is_vdo_pool(cache_lv))
		cache_lv = seg_lv(first_seg(cache_lv), 0); /* cache _tdata */

	if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_CACHE)))
		return_NULL;

	if (!insert_layer_for_lv(cmd, cache_lv, CACHE, "_corig"))
		return_NULL;

	seg = first_seg(cache_lv);
	seg->segtype = segtype;

	if (!attach_pool_lv(seg, pool_lv, NULL, NULL, NULL))
		return_NULL;

	if (!seg->lv->profile) /* Inherit profile from cache-pool */
		seg->lv->profile = seg->pool_lv->profile;

	return cache_lv;
}

/*
 * Checks cache status and loops until there are no dirty blocks.
 * Sets *is_clean to 1 when there are no dirty blocks on return.
 */
int lv_cache_wait_for_clean(struct logical_volume *cache_lv, int *is_clean)
{
	const struct logical_volume *lock_lv = lv_lock_holder(cache_lv);
	struct lv_segment *cache_seg = first_seg(cache_lv);
	struct lv_status_cache *status;
	int cleaner_policy, writeback;
	uint64_t dirty_blocks;

	*is_clean = 0;

	//FIXME: use polling to do this...
	for (;;) {
		sigint_allow();
		sigint_restore();
		if (sigint_caught()) {
			sigint_clear();
			log_error("Flushing of %s aborted.", display_lvname(cache_lv));
			if (cache_seg->cleaner_policy) {
				cache_seg->cleaner_policy = 0;
				/* Restore normal table */
				if (!lv_update_and_reload_origin(cache_lv))
					stack;
			}
			return 0;
		}

		if (!lv_cache_status(cache_lv, &status))
			return_0;

		if (status->cache->fail) {
			dm_pool_destroy(status->mem);
			log_warn("WARNING: Skipping flush for failed cache %s.",
				 display_lvname(cache_lv));
			return 1;
		}

		cleaner_policy = !strcmp(status->cache->policy_name, "cleaner");
		dirty_blocks = status->cache->dirty_blocks;
		writeback = (status->cache->feature_flags & DM_CACHE_FEATURE_WRITEBACK);
		dm_pool_destroy(status->mem);

		/* Done when there are no dirty blocks and either the 'cleaner'
		 * policy is active or mode != writeback */
		if (!dirty_blocks && (cleaner_policy || !writeback))
			break;

		log_print_unless_silent("Flushing " FMTu64 " blocks for cache %s.",
					dirty_blocks, display_lvname(cache_lv));

		if (cleaner_policy) {
			/* TODO: Use centralized place */
			sigint_allow();
			usleep(500000);
			sigint_restore();
			continue;
		}

		if (!(cache_lv->status & LVM_WRITE)) {
			log_warn("WARNING: Dirty blocks found on read-only cache volume %s.",
				 display_lvname(cache_lv));
			/* TODO: can we actually clean something? */
		}

		/* Switch to cleaner policy to flush the cache */
		cache_seg->cleaner_policy = 1;

		/* Reload cache volume with "cleaner" policy */
		if (!lv_update_and_reload_origin(cache_lv))
			return_0;

		if (!sync_local_dev_names(cache_lv->vg->cmd)) {
			log_error("Failed to sync local devices when clearing cache volume %s.",
				  display_lvname(cache_lv));
			return 0;
		}
	}

	/*
	 * TODO: add a check whether the extra suspend/resume is necessary.
	 * ATM this is a workaround for missing cache sync when the cache gets clean.
	 */
	if (1) {
		if (!lv_refresh_suspend_resume(lock_lv))
			return_0;

		if (!sync_local_dev_names(cache_lv->vg->cmd)) {
			log_error("Failed to sync local devices after final clearing of cache %s.",
				  display_lvname(cache_lv));
			return 0;
		}
	}

	cache_seg->cleaner_policy = 0;
	*is_clean = 1;

	return 1;
}

static int _lv_detach_cache_vol_while_active(struct cmd_context *cmd, struct logical_volume *cache_lv)
{
	struct lv_segment *cache_seg = first_seg(cache_lv);
	struct logical_volume *corigin_lv;
	struct logical_volume *cache_pool_lv;
	struct lvinfo corigin_info;
	struct dm_info info_meta;
	struct dm_info info_data;
	int is_clear;

	cache_pool_lv = cache_seg->pool_lv;
	corigin_lv = seg_lv(cache_seg, 0);

	/*
	 * This info is needed to remove the corigin lv at the end.
	 */
	if (!lv_info(cmd, corigin_lv, 1, &corigin_info, 0, 0))
		log_error("Failed to get info about corigin %s", display_lvname(corigin_lv));

	/*
	 * This info is needed to remove the cmeta/cdata devs at the end.
	 */
	if (!get_cache_vol_meta_data(cmd, cache_lv, cache_pool_lv, &info_meta, &info_data)) {
		log_error("Failed to get info about cdata/cmeta for %s", display_lvname(cache_pool_lv));
		return 0;
	}

	/*
	 * Flush the cache.
	 */
	if (!lv_cache_wait_for_clean(cache_lv, &is_clear)) {
		log_error("Failed to flush cache for detaching LV %s.", display_lvname(cache_lv));
		return_0;
	}

	/*
	 * The main job of detaching the cache.
	 */
	if (!detach_pool_lv(cache_seg)) {
		log_error("Failed to detach cache from %s", display_lvname(cache_lv));
		return_0;
	}

	cache_pool_lv->status &= ~LV_CACHE_VOL;

	if (!remove_layer_from_lv(cache_lv, corigin_lv)) {
		log_error("Failed to remove cache layer from %s", display_lvname(cache_lv));
		return_0;
	}

	if (!lv_update_and_reload(cache_lv)) {
		log_error("Failed to update and reload after detaching cache from %s", display_lvname(cache_lv));
		return 0;
	}

	/*
	 * Detaching the cache is done; now finish cleaning up what's left over
	 * from when the cache was attached: deactivate the cache_pool_lv, and
	 * remove the unused dm dev for corigin_lv.
	 */

	/* These cmeta/cdata dm devs need to be removed since they are using cache_pool_lv. */
	if (!remove_cache_vol_meta_data(cmd, &info_meta, &info_data))
		log_error("Failed to remove cdata/cmeta devs for %s", display_lvname(cache_pool_lv));

	if (!deactivate_lv(cmd, cache_pool_lv))
		log_error("Failed to deactivate the detached cache %s", display_lvname(cache_pool_lv));

	if (!corigin_info.major || !corigin_info.minor) {
		log_error("Invalid device number %u:%u for corigin %s",
			  corigin_info.major, corigin_info.minor, display_lvname(corigin_lv));
		return 1;
	}

	dm_udev_set_sync_support(0);

	if (!dev_manager_remove_dm_major_minor(corigin_info.major, corigin_info.minor))
		log_error("Failed to remove the unused corigin dev %s", display_lvname(corigin_lv));

	dm_udev_set_sync_support(1);

	if (!lv_remove(corigin_lv)) {
		log_error("Failed to remove unused cache layer %s for %s",
			  display_lvname(corigin_lv),
			  display_lvname(cache_lv));
		return_0;
	}

	return 1;
}
static int _lv_detach_cache_vol_while_inactive(struct cmd_context *cmd, struct logical_volume *cache_lv, int noflush)
{
	struct lv_segment *cache_seg = first_seg(cache_lv);
	struct logical_volume *corigin_lv;
	struct logical_volume *cache_pool_lv;
	int cache_mode;
	int is_clear;

	cache_pool_lv = cache_seg->pool_lv;
	corigin_lv = seg_lv(cache_seg, 0);
	cache_mode = cache_seg->cache_mode;

	/*
	 * With these modes there is no flush needed, so we can immediately
	 * detach without temporarily activating the LV to flush it.
	 */
	if ((cache_mode == CACHE_MODE_WRITETHROUGH) || (cache_mode == CACHE_MODE_PASSTHROUGH) || noflush)
		goto detach;

	/*
	 * With mode WRITEBACK we need to activate the cache LV to flush/clean
	 * it before detaching the cache.
	 *
	 * LV_TEMPORARY should prevent the active LV from being exposed and
	 * used outside of lvm.
	 */

	log_debug("Activating %s internally for cache flush.", display_lvname(cache_lv));

	cache_lv->status |= LV_TEMPORARY;

	if (!activate_lv(cmd, cache_lv)) {
		log_error("Failed to activate LV %s to flush cache.", display_lvname(cache_lv));
		return 0;
	}

	if (!lv_cache_wait_for_clean(cache_lv, &is_clear)) {
		log_error("Failed to flush cache for detaching LV %s.", display_lvname(cache_lv));
		return_0;
	}

	if (!deactivate_lv(cmd, cache_lv)) {
		log_error("Failed to deactivate LV %s for detaching cache.", display_lvname(cache_lv));
		return 0;
	}

	cache_lv->status &= ~LV_TEMPORARY;

detach:
	if (!detach_pool_lv(cache_seg)) {
		log_error("Failed to detach cache from %s", display_lvname(cache_lv));
		return_0;
	}

	cache_pool_lv->status &= ~LV_CACHE_VOL;

	if (!remove_layer_from_lv(cache_lv, corigin_lv)) {
		log_error("Failed to remove cache layer from %s", display_lvname(cache_lv));
		return_0;
	}

	if (!lv_remove(corigin_lv)) {
		log_error("Failed to remove unused cache layer %s for %s",
			  display_lvname(corigin_lv),
			  display_lvname(cache_lv));
		return_0;
	}

	return 1;
}
int lv_detach_cache_vol(struct logical_volume *cache_lv, int noflush)
{
	struct cmd_context *cmd = cache_lv->vg->cmd;

	if (lv_is_pending_delete(cache_lv)) {
		log_error("Already detaching cache pool from %s.", display_lvname(cache_lv));
		return 0;
	}

	if (lv_is_active(cache_lv))
		return _lv_detach_cache_vol_while_active(cmd, cache_lv);
	else
		return _lv_detach_cache_vol_while_inactive(cmd, cache_lv, noflush);
}

/*
 * lv_cache_remove
 * @cache_lv
 *
 * Given a cache LV, remove the cache layer. This will unlink
 * the origin and cache_pool, remove the cache LV layer, and promote
 * the origin to a usable non-cached LV of the same name as the
 * given cache_lv.
 *
 * Returns: 1 on success, 0 on failure
 */
int lv_cache_remove(struct logical_volume *cache_lv)
{
	struct lv_segment *cache_seg = first_seg(cache_lv);
	struct logical_volume *corigin_lv;
	struct logical_volume *cache_pool_lv;
	int is_clear;

	if (!lv_is_cache(cache_lv)) {
		log_error(INTERNAL_ERROR "LV %s is not a cache volume.",
			  display_lvname(cache_lv));
		return 0;
	}

	if (lv_is_cache_vol(cache_seg->pool_lv)) {
		log_error(INTERNAL_ERROR "Incorrect remove for cache single");
		return 0;
	}

	if (lv_is_pending_delete(cache_lv)) {
		log_debug(INTERNAL_ERROR "LV %s is already dropped cache volume.",
			  display_lvname(cache_lv));
		goto remove;  /* Already dropped */
	}

	/* Locally active volume is needed for writeback */
	if (!lv_info(cache_lv->vg->cmd, cache_lv, 1, NULL, 0, 0)) {
		/* Give up any remote locks */
		if (!deactivate_lv_with_sub_lv(cache_lv))
			return_0;

		switch (first_seg(cache_seg->pool_lv)->cache_mode) {
		case CACHE_MODE_WRITETHROUGH:
		case CACHE_MODE_PASSTHROUGH:
			/* For inactive pass/writethrough just drop cache layer */
			corigin_lv = seg_lv(cache_seg, 0);
			if (!detach_pool_lv(cache_seg))
				return_0;
			if (!remove_layer_from_lv(cache_lv, corigin_lv))
				return_0;
			if (!lv_remove(corigin_lv))
				return_0;
			return 1;
		default:
			/* Otherwise locally activate volume to sync dirty blocks */
			cache_lv->status |= LV_TEMPORARY;
			if (!activate_lv(cache_lv->vg->cmd, cache_lv) ||
			    !lv_is_active(cache_lv)) {
				log_error("Failed to activate %s to flush cache.", display_lvname(cache_lv));
				return 0;
			}
			cache_lv->status &= ~LV_TEMPORARY;
		}
	}

	/*
	 * FIXME:
	 * Before the link can be broken, we must ensure that the
	 * cache has been flushed. This may already be the case
	 * if the cache mode is writethrough (or the cleaner
	 * policy is in place from a previous half-finished attempt
	 * to remove the cache_pool). It could take a long time to
	 * flush the cache - it should probably be done in the background.
	 *
	 * Also, if we do perform the flush in the background and we
	 * happen to also be removing the cache/origin LV, then we
	 * could check if the cleaner policy is in place and simply
	 * remove the cache_pool then without waiting for the flush to
	 * complete.
	 */
	if (!lv_cache_wait_for_clean(cache_lv, &is_clear))
		return_0;

	cache_pool_lv = cache_seg->pool_lv;
	if (!detach_pool_lv(cache_seg))
		return_0;

	/*
	 * Drop the layer from the cache LV and make _corigin appear again as a
	 * regular LV, and use the 'existing' _corigin volume to keep a reference
	 * on the cache-pool. This way we still have a way to reference _corigin
	 * in the dm table, we know it's been a 'cache' LV, and we can drop all
	 * needed table entries via activation and deactivation of it.
	 *
	 * This 'cache' LV without an origin is a temporary LV, which can still
	 * be operated on by lvm2 commands - it can be activated/deactivated/removed.
	 * However, in the dm table it will use the 'error' target for the _corigin volume.
	 */
	corigin_lv = seg_lv(cache_seg, 0);
	lv_set_visible(corigin_lv);

	if (!remove_layer_from_lv(cache_lv, corigin_lv))
		return_0;

	/* Replace 'error' with 'cache' segtype */
	cache_seg = first_seg(corigin_lv);
	if (!(cache_seg->segtype = get_segtype_from_string(corigin_lv->vg->cmd, SEG_TYPE_NAME_CACHE)))
		return_0;

	if (!add_lv_segment_areas(cache_seg, 1))
		return_0;

	if (!set_lv_segment_area_lv(cache_seg, 0, cache_lv, 0, 0))
		return_0;

	corigin_lv->le_count = cache_lv->le_count;
	corigin_lv->size = cache_lv->size;
	corigin_lv->status |= LV_PENDING_DELETE;

	/* Reattach cache pool */
	if (!attach_pool_lv(cache_seg, cache_pool_lv, NULL, NULL, NULL))
		return_0;

	/* Suspend/resume also deactivates deleted LV via support of LV_PENDING_DELETE */
	if (!lv_update_and_reload(cache_lv))
		return_0;
	cache_lv = corigin_lv;

remove:
	if (!detach_pool_lv(cache_seg))
		return_0;

	if (!lv_remove(cache_lv)) /* Will use LV_PENDING_DELETE */
		return_0;

	return 1;
}
int lv_is_cache_origin(const struct logical_volume *lv)
{
	struct lv_segment *seg;

	/* Make sure there's exactly one segment in segs_using_this_lv! */
	if (dm_list_empty(&lv->segs_using_this_lv) ||
	    (dm_list_size(&lv->segs_using_this_lv) > 1))
		return 0;

	seg = get_only_segment_using_this_lv(lv);

	return seg && lv_is_cache(seg->lv) && !lv_is_pending_delete(seg->lv) && (seg_lv(seg, 0) == lv);
}

static const char *_get_default_cache_policy(struct cmd_context *cmd)
{
	const struct segment_type *segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_CACHE);
	unsigned attr = ~0;
	const char *def = NULL;

	if (!segtype ||
	    !segtype->ops->target_present ||
	    !segtype->ops->target_present(cmd, NULL, &attr)) {
		log_warn("WARNING: Cannot detect default cache policy, using \""
			 DEFAULT_CACHE_POLICY "\".");
		return DEFAULT_CACHE_POLICY;
	}

	if (attr & CACHE_FEATURE_POLICY_SMQ)
		def = "smq";
	else if (attr & CACHE_FEATURE_POLICY_MQ)
		def = "mq";
	else {
		log_error("Default cache policy is not available.");
		return NULL;
	}

	log_debug_metadata("Detected default cache_policy \"%s\".", def);

	return def;
}

/* Autodetect best available cache metadata format for a user */
static cache_metadata_format_t _get_default_cache_metadata_format(struct cmd_context *cmd)
{
	const struct segment_type *segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_CACHE);
	unsigned attr;
	cache_metadata_format_t f;

	if (!segtype ||
	    !segtype->ops->target_present ||
	    !segtype->ops->target_present(cmd, NULL, &attr)) {
		f = CACHE_METADATA_FORMAT_1;
		log_warn("WARNING: Cannot detect default cache metadata format, using format: %u.", f);
	} else {
		f = (attr & CACHE_FEATURE_METADATA2) ? CACHE_METADATA_FORMAT_2 : CACHE_METADATA_FORMAT_1;
		log_debug_metadata("Detected default cache metadata format: %u.", f);
	}

	return f;
}

int cache_set_policy(struct lv_segment *lvseg, const char *name,
		     const struct dm_config_tree *settings)
{
	struct lv_segment *seg;
	struct dm_config_node *cn;
	const struct dm_config_node *cns;
	struct dm_config_tree *old = NULL, *new = NULL, *tmp = NULL;
	int r = 0;
	struct profile *profile = lvseg->lv->profile;

	if (seg_is_cache_pool(lvseg)) {
		if (!name && !settings)
			return 1; /* Policy and settings can be selected later when caching LV */
	}

	if (seg_is_cache(lvseg) && lv_is_cache_vol(lvseg->pool_lv))
		seg = lvseg;
	else if (seg_is_cache_pool(lvseg))
		seg = lvseg;
	else if (seg_is_cache(lvseg))
		seg = first_seg(lvseg->pool_lv);
	else {
		log_error(INTERNAL_ERROR "Cannot set cache policy for non cache volume %s.",
			  display_lvname(lvseg->lv));
		return 0;
	}

	if (name) {
		if (!(seg->policy_name = dm_pool_strdup(seg->lv->vg->vgmem, name))) {
			log_error("Failed to duplicate policy name.");
			return 0;
		}
	} else if (!seg->policy_name) {
		if (!(seg->policy_name = find_config_tree_str(seg->lv->vg->cmd, allocation_cache_policy_CFG,
							      profile)) &&
		    !(seg->policy_name = _get_default_cache_policy(seg->lv->vg->cmd)))
			return_0;

		if (!seg->policy_name) {
			log_error(INTERNAL_ERROR "Can't set policy settings without policy name.");
			return 0;
		}
	}

	if (settings) {
		if (seg->policy_settings) {
			if (!(old = dm_config_create()))
				goto_out;
			if (!(new = dm_config_create()))
				goto_out;
			new->root = settings->root;
			old->root = seg->policy_settings;
			new->cascade = old;
			if (!(tmp = dm_config_flatten(new)))
				goto_out;
		}

		if ((cn = dm_config_find_node((tmp) ? tmp->root : settings->root, "policy_settings")) &&
		    !(seg->policy_settings = dm_config_clone_node_with_mem(seg->lv->vg->vgmem, cn, 0)))
			goto_out;
	} else if (!seg->policy_settings) {
		if ((cns = find_config_tree_node(seg->lv->vg->cmd, allocation_cache_settings_CFG_SECTION,
						 profile))) {
			/* Try to find our section for given policy */
			for (cn = cns->child; cn; cn = cn->sib) {
				if (!cn->child)
					continue; /* Ignore section without settings */

				if (cn->v || strcmp(cn->key, seg->policy_name) != 0)
					continue; /* Ignore mismatching sections */

				/* Clone nodes with policy name */
				if (!(seg->policy_settings = dm_config_clone_node_with_mem(seg->lv->vg->vgmem,
											    cn, 0)))
					return_0;

				/* Replace policy name key with 'policy_settings' */
				seg->policy_settings->key = "policy_settings";
				break; /* Only first match counts */
			}
		}
	}

restart: /* remove any 'default' nodes */
	cn = seg->policy_settings ? seg->policy_settings->child : NULL;
	while (cn) {
		if (cn->v->type == DM_CFG_STRING && !strcmp(cn->v->v.str, "default")) {
			dm_config_remove_node(seg->policy_settings, cn);
			goto restart;
		}
		cn = cn->sib;
	}

	r = 1;

out:
	if (tmp)
		dm_config_destroy(tmp);
	if (new)
		dm_config_destroy(new);
	if (old)
		dm_config_destroy(old);

	return r;
}

/*
 * Sets metadata format on a cache pool segment with these rules:
 * 1. When a 'cache-pool' segment is passed, sets only explicitly selected formats (1 or 2).
 * 2. For a 'cache' segment passed in, we know the cache pool segment.
 *    When the passed format is 0 (UNSELECTED) with a 'cache' segment - it's the moment
 *    lvm2 has to figure out the 'default' metadata format (1 or 2) from
 *    configuration or profiles.
 * 3. If still unselected, or the selected format is != 1, figure out the best supported
 *    format and either use it or validate that the user's setting is possible.
 *
 * Reasoning: A user may create a cache-pool and may or may not specify the CMFormat.
 * If the CMFormat has been selected (1 or 2), store this in metadata; otherwise
 * for an unused cache-pool the UNSELECTED CMFormat is used. When caching an LV, the
 * CMFormat must be decided, and from this moment it's always stored. To support backward
 * compatibility, 'CMFormat 1' is used when it is NOT specified for a cached LV in
 * lvm2 metadata (no metadata_format=#F element in the cache-pool segment).
 */
int cache_set_metadata_format(struct lv_segment *seg, cache_metadata_format_t format)
{
	cache_metadata_format_t best;
	struct profile *profile = seg->lv->profile;

	if (seg_is_cache(seg))
		seg = first_seg(seg->pool_lv);
	else if (seg_is_cache_pool(seg)) {
		if (format == CACHE_METADATA_FORMAT_UNSELECTED)
			return 1; /* Format can be selected later when caching LV */
	} else {
		log_error(INTERNAL_ERROR "Cannot set cache metadata format for non cache volume %s.",
			  display_lvname(seg->lv));
		return 0;
	}

	/*
	 * If policy is unselected, but format 2 is selected, policy smq is enforced.
	 */
	if (!seg->policy_name) {
		if (format == CACHE_METADATA_FORMAT_2)
			seg->policy_name = "smq";
	}

	/* Check if we need to search for configured cache metadata format */
	if (format == CACHE_METADATA_FORMAT_UNSELECTED) {
		if (seg->cache_metadata_format != CACHE_METADATA_FORMAT_UNSELECTED)
			return 1; /* Format already selected in cache pool */

		/* Check configurations and profiles */
		format = find_config_tree_int(seg->lv->vg->cmd, allocation_cache_metadata_format_CFG,
					      profile);
	}

	/* See what is the 'best' available cache metadata format
	 * when the specified format is other than the always existing CMFormat 1 */
	if (format != CACHE_METADATA_FORMAT_1) {
		best = _get_default_cache_metadata_format(seg->lv->vg->cmd);

		/* Format was not selected, so use the best present on the system */
		if (format == CACHE_METADATA_FORMAT_UNSELECTED)
			format = best;
		else if (format != best) {
			/* Format is not valid (only Format 1 or 2 is supported ATM) */
			log_error("Cache metadata format %u is not supported by kernel target.", format);
			return 0;
		}
	}

	switch (format) {
	case CACHE_METADATA_FORMAT_2: seg->lv->status |= LV_METADATA_FORMAT; break;
	case CACHE_METADATA_FORMAT_1: seg->lv->status &= ~LV_METADATA_FORMAT; break;
	default:
		log_error(INTERNAL_ERROR "Invalid cache metadata format %u for cache volume %s.",
			  format, display_lvname(seg->lv));
		return 0;
	}

	seg->cache_metadata_format = format;

	return 1;
}

#define ONE_MB_S 2048    /* 1MB in sectors */
#define ONE_GB_S 2097152 /* 1GB in sectors */

int cache_vol_set_params(struct cmd_context *cmd,
			 struct logical_volume *cache_lv,
			 struct logical_volume *pool_lv,
			 uint64_t poolmetadatasize,
			 uint32_t chunk_size,
			 cache_metadata_format_t format,
			 cache_mode_t mode,
			 const char *policy,
			 const struct dm_config_tree *settings)
{
	struct dm_pool *mem = cache_lv->vg->vgmem;
	struct profile *profile = cache_lv->profile;
	struct lv_segment *cache_seg = first_seg(cache_lv);
	struct logical_volume *corig_lv = seg_lv(cache_seg, 0);
	const char *policy_name = NULL;
	struct dm_config_node *policy_settings = NULL;
	const struct dm_config_node *cns;
	struct dm_config_node *cn;
	uint64_t meta_size = 0;
	uint64_t data_size = 0;
	uint64_t max_chunks;
	uint32_t min_meta_size;
	uint32_t max_meta_size;
	uint32_t extent_size;

	/* all _size variables in units of sectors (512 bytes) */

	/*
	 * cache format: only create new cache LVs with 2.
	 */
	if (format == CACHE_METADATA_FORMAT_UNSELECTED)
		format = CACHE_METADATA_FORMAT_2;

	if (format == CACHE_METADATA_FORMAT_1) {
		log_error("Use cache metadata format 2.");
		return 0;
	}

	/*
	 * cache mode: get_cache_params() gets mode from --cachemode or sets
	 * UNSEL.  When unspecified, it comes from config.
	 */
	if (mode == CACHE_MODE_UNSELECTED)
		mode = _get_cache_mode_from_config(cmd, profile, cache_lv);

	cache_seg->cache_mode = mode;

	/*
	 * chunk size: get_cache_params() gets chunk_size from --chunksize or
	 * sets 0.  When unspecified it comes from config or default.
	 *
	 * cache_pool_chunk_size in lvm.conf, DEFAULT_CACHE_POOL_CHUNK_SIZE,
	 * and DEFAULT_CACHE_POOL_MAX_METADATA_SIZE are in KiB, so *2 turns
	 * them into sectors.
	 */
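	/*
	 * Example of the KiB-to-sectors conversion (illustrative value):
	 * cache_pool_chunk_size = 512 (KiB) in lvm.conf becomes
	 * 512 * 2 = 1024 sectors, i.e. 512 KiB with 512-byte sectors.
	 */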
	if (!chunk_size)
		chunk_size = find_config_tree_int(cmd, allocation_cache_pool_chunk_size_CFG, cache_lv->profile) * 2;
	if (!chunk_size)
		chunk_size = get_default_allocation_cache_pool_chunk_size_CFG(cmd, profile);

	if (!validate_cache_chunk_size(cmd, chunk_size))
		return_0;

	/*
	 * metadata size: can be specified with --poolmetadatasize,
	 * otherwise it's set according to the size of the cache.
	 * data size: the LV size minus the metadata size.
	 */
	if (!(extent_size = pool_lv->vg->extent_size)) {
		log_error(INTERNAL_ERROR "Extent size can't be 0.");
		return 0;
	}

	min_meta_size = extent_size;
	max_meta_size = 2 * DEFAULT_CACHE_POOL_MAX_METADATA_SIZE; /* 2x for KiB to sectors */

	if (pool_lv->size < (extent_size * 2)) {
		log_error("The minimum cache size is two extents (%s bytes).",
			  display_size(cmd, extent_size * 2));
		return 0;
	}

	if (poolmetadatasize) {
		meta_size = poolmetadatasize; /* in sectors, from --poolmetadatasize, see _size_arg() */

		if (meta_size > max_meta_size) {
			meta_size = max_meta_size;
			log_print_unless_silent("Rounding down metadata size to max size %s",
						display_size(cmd, meta_size));
		}
		if (meta_size < min_meta_size) {
			meta_size = min_meta_size;
			log_print_unless_silent("Rounding up metadata size to min size %s",
						display_size(cmd, meta_size));
		}
		if (meta_size % extent_size) {
			meta_size += extent_size - meta_size % extent_size;
			log_print_unless_silent("Rounding up metadata size to full physical extent %s",
						display_size(cmd, meta_size));
		}
	}

	if (!meta_size) {
		if (pool_lv->size < (128 * ONE_MB_S))
			meta_size = 16 * ONE_MB_S;
		else if (pool_lv->size < ONE_GB_S)
			meta_size = 32 * ONE_MB_S;
		else if (pool_lv->size < (128 * ONE_GB_S))
			meta_size = 64 * ONE_MB_S;

		if (meta_size > (pool_lv->size / 2))
			meta_size = pool_lv->size / 2;

		if (meta_size < min_meta_size)
			meta_size = min_meta_size;

		if (meta_size % extent_size)
			meta_size += extent_size - meta_size % extent_size;
	}
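	/*
	 * Illustrative result of the defaults above (not read from metadata):
	 * a 10 GiB cachevol (< 128 GiB) gets 64 MiB of metadata, leaving
	 * ~9.94 GiB for data; a 512 MiB cachevol (< 1 GiB) gets 32 MiB.
	 */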

	data_size = pool_lv->size - meta_size;

	max_chunks = get_default_allocation_cache_pool_max_chunks_CFG(cmd, profile);

	if (data_size / chunk_size > max_chunks) {
		log_error("Cache data blocks %llu and chunk size %u exceed max chunks %llu.",
			  (unsigned long long)data_size, chunk_size, (unsigned long long)max_chunks);
		log_error("Use smaller cache, larger --chunksize or increase max chunks setting.");
		return 0;
	}

	/*
	 * cache policy: get_cache_params() gets policy from --cachepolicy,
	 * or sets NULL.
	 */
	if (!policy)
		policy = find_config_tree_str(cmd, allocation_cache_policy_CFG, profile);
	if (!policy)
		policy = _get_default_cache_policy(cmd);
	if (!policy) {
		log_error(INTERNAL_ERROR "Missing cache policy name.");
		return 0;
	}
	if (!(policy_name = dm_pool_strdup(mem, policy)))
		return_0;

	/*
	 * cache settings: get_cache_params() gets settings from --cachesettings,
	 * or sets NULL.
	 * FIXME: code for this is a mess, mostly copied from cache_set_policy
	 * which is even worse.
	 */
	if (settings) {
		if ((cn = dm_config_find_node(settings->root, "policy_settings"))) {
			if (!(policy_settings = dm_config_clone_node_with_mem(mem, cn, 0)))
				return_0;
		}
	} else {
		if ((cns = find_config_tree_node(cmd, allocation_cache_settings_CFG_SECTION, profile))) {
			/* Try to find our section for given policy */
			for (cn = cns->child; cn; cn = cn->sib) {
				if (!cn->child)
					continue; /* Ignore section without settings */

				if (cn->v || strcmp(cn->key, policy_name) != 0)
					continue; /* Ignore mismatching sections */

				/* Clone nodes with policy name */
				if (!(policy_settings = dm_config_clone_node_with_mem(mem, cn, 0)))
					return_0;

				/* Replace policy name key with 'policy_settings' */
				policy_settings->key = "policy_settings";
				break; /* Only first match counts */
			}
		}
	}

restart: /* remove any 'default' nodes */
	cn = policy_settings ? policy_settings->child : NULL;
	while (cn) {
		if (cn->v->type == DM_CFG_STRING && !strcmp(cn->v->v.str, "default")) {
			dm_config_remove_node(policy_settings, cn);
			goto restart;
		}
		cn = cn->sib;
	}

	log_debug("Setting LV %s cache on %s meta start 0 len %llu data start %llu len %llu sectors",
		  display_lvname(cache_lv), display_lvname(pool_lv),
		  (unsigned long long)meta_size,
		  (unsigned long long)meta_size,
		  (unsigned long long)data_size);

	log_debug("Setting LV %s cache format %u policy %s chunk_size %u sectors",
		  display_lvname(cache_lv), format, policy_name, chunk_size);

	if (lv_is_raid(corig_lv) && (mode == CACHE_MODE_WRITEBACK))
		log_warn("WARNING: Data redundancy could be lost with writeback caching of raid logical volume!");

	if (lv_is_thin_pool_data(cache_lv)) {
		log_warn("WARNING: thin pool data will not be automatically extended when cached.");
		log_warn("WARNING: manual splitcache is required before extending thin pool data.");
	}

	cache_seg->chunk_size = chunk_size;
	cache_seg->metadata_start = 0;
	cache_seg->metadata_len = meta_size;
	cache_seg->data_start = meta_size;
	cache_seg->data_len = data_size;
	cache_seg->cache_metadata_format = format;
	cache_seg->policy_name = policy_name;
	cache_seg->policy_settings = policy_settings;

	id_create(&cache_seg->metadata_id);
	id_create(&cache_seg->data_id);

	return 1;
}

int cache_set_params(struct lv_segment *seg,
		     uint32_t chunk_size,
		     cache_metadata_format_t format,
		     cache_mode_t mode,
		     const char *policy_name,
		     const struct dm_config_tree *policy_settings)
{
	struct lv_segment *pool_seg;
	struct cmd_context *cmd = seg->lv->vg->cmd;

	if (!cache_set_cache_mode(seg, mode))
		return_0;

	if (!cache_set_policy(seg, policy_name, policy_settings))
		return_0;

	if (!cache_set_metadata_format(seg, format))
		return_0;

	pool_seg = seg_is_cache(seg) ? first_seg(seg->pool_lv) : seg;

	if (chunk_size) {
		if (seg_is_cache(seg) &&
		    !validate_lv_cache_chunk_size(pool_seg->lv, chunk_size))
			return_0;
		pool_seg->chunk_size = chunk_size;
	} else if (seg_is_cache(seg)) {
		/* Chunk size in profile has priority over cache-pool chunk size */
		if ((chunk_size = find_config_tree_int(cmd, allocation_cache_pool_chunk_size_CFG,
						       seg->lv->profile) * 2)) {
			if (!validate_lv_cache_chunk_size(pool_seg->lv, chunk_size))
				return_0;
			if (pool_seg->chunk_size != chunk_size)
				log_verbose("Replacing chunk size %s in cache pool %s with "
					    "chunk size %s from profile.",
					    display_size(cmd, pool_seg->chunk_size),
					    display_lvname(seg->lv),
					    display_size(cmd, chunk_size));
			pool_seg->chunk_size = chunk_size;
		}
	} else if (seg_is_cache_pool(seg)) {
		if (!pool_seg->chunk_size &&
		    /* TODO: some calc_policy solution for cache ? */
		    !recalculate_pool_chunk_size_with_dev_hints(pool_seg->lv,
								THIN_CHUNK_SIZE_CALC_METHOD_GENERIC))
			return_0;
	}

	if (seg_is_cache(seg))
		cache_check_for_warns(seg);

	return 1;
}

/*
 * Wipe cache pool metadata area before use.
 *
 * Activates the metadata volume as 'cache-pool' so regular wiping
 * of the existing visible volume may proceed.
 */
int wipe_cache_pool(struct logical_volume *cache_pool_lv)
{
	int r;

	/* Only an unused cache-pool can be activated and wiped */
	if ((!lv_is_cache_pool(cache_pool_lv) && !lv_is_cache_vol(cache_pool_lv)) ||
	    !dm_list_empty(&cache_pool_lv->segs_using_this_lv)) {
		log_error(INTERNAL_ERROR "Failed to wipe cache pool for volume %s.",
			  display_lvname(cache_pool_lv));
		return 0;
	}

	cache_pool_lv->status |= LV_TEMPORARY;
	if (!activate_lv(cache_pool_lv->vg->cmd, cache_pool_lv)) {
		log_error("Aborting. Failed to activate cache pool %s.",
			  display_lvname(cache_pool_lv));
		return 0;
	}
	cache_pool_lv->status &= ~LV_TEMPORARY;

	if (!(r = wipe_lv(cache_pool_lv, (struct wipe_params) { .do_zero = 1 }))) {
		log_error("Aborting. Failed to wipe cache pool %s.",
			  display_lvname(cache_pool_lv));
		/* Delay return of error after deactivation */
	}

	/* Deactivate cleared cache-pool metadata */
	if (!deactivate_lv(cache_pool_lv->vg->cmd, cache_pool_lv)) {
		log_error("Aborting. Could not deactivate cache pool %s.",
			  display_lvname(cache_pool_lv));
		r = 0;
	}

	return r;
}