2011-09-06 23:25:42 +04:00
/*
 * Copyright (C) 2011-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "lib.h"
#include "activate.h"
#include "locking.h"
#include "metadata.h"
#include "segtype.h"
#include "lv_alloc.h"
#include "archiver.h"
#include "defaults.h"
2011-09-06 23:25:42 +04:00
2012-01-25 12:55:19 +04:00
int attach_pool_metadata_lv ( struct lv_segment * pool_seg , struct logical_volume * metadata_lv )
2011-09-06 23:25:42 +04:00
{
2012-01-25 12:55:19 +04:00
pool_seg - > metadata_lv = metadata_lv ;
2012-01-19 19:23:50 +04:00
metadata_lv - > status | = THIN_POOL_METADATA ;
lv_set_hidden ( metadata_lv ) ;
2011-09-06 23:25:42 +04:00
2012-01-25 12:55:19 +04:00
return add_seg_to_segs_using_this_lv ( metadata_lv , pool_seg ) ;
2011-09-06 23:25:42 +04:00
}
2012-01-25 12:55:19 +04:00
int attach_pool_data_lv ( struct lv_segment * pool_seg , struct logical_volume * pool_data_lv )
2011-09-06 23:25:42 +04:00
{
2012-01-25 12:55:19 +04:00
if ( ! set_lv_segment_area_lv ( pool_seg , 0 , pool_data_lv , 0 , THIN_POOL_DATA ) )
2011-09-08 20:41:18 +04:00
return_0 ;
2011-10-19 20:36:39 +04:00
lv_set_hidden ( pool_data_lv ) ;
2011-09-06 23:25:42 +04:00
2011-10-19 20:36:39 +04:00
return 1 ;
2011-09-06 23:25:42 +04:00
}
2011-11-07 15:03:47 +04:00
int attach_pool_lv ( struct lv_segment * seg , struct logical_volume * pool_lv ,
struct logical_volume * origin )
2011-09-07 02:43:56 +04:00
{
seg - > pool_lv = pool_lv ;
2011-09-08 20:41:18 +04:00
seg - > lv - > status | = THIN_VOLUME ;
2011-11-07 15:03:47 +04:00
seg - > origin = origin ;
if ( origin & & ! add_seg_to_segs_using_this_lv ( origin , seg ) )
return_0 ;
2011-09-07 02:43:56 +04:00
2011-10-19 20:36:39 +04:00
return add_seg_to_segs_using_this_lv ( pool_lv , seg ) ;
2011-09-07 02:43:56 +04:00
}
2011-09-08 20:41:18 +04:00
int detach_pool_lv ( struct lv_segment * seg )
{
2011-11-03 18:36:40 +04:00
struct lv_thin_message * tmsg , * tmp ;
2011-11-07 15:03:47 +04:00
struct seg_list * sl , * tsl ;
2012-01-25 12:55:19 +04:00
int no_update = 0 ;
2011-10-19 20:37:30 +04:00
2011-11-03 18:36:40 +04:00
if ( ! seg - > pool_lv | | ! lv_is_thin_pool ( seg - > pool_lv ) ) {
log_error ( INTERNAL_ERROR " LV %s is not a thin volume " ,
seg - > lv - > name ) ;
2011-09-08 20:41:18 +04:00
return 0 ;
}
2011-10-19 20:37:30 +04:00
/* Drop any message referencing removed segment */
2012-01-25 12:55:19 +04:00
dm_list_iterate_items_safe ( tmsg , tmp , & ( first_seg ( seg - > pool_lv ) - > thin_messages ) ) {
2011-10-19 20:37:30 +04:00
switch ( tmsg - > type ) {
case DM_THIN_MESSAGE_CREATE_SNAP :
case DM_THIN_MESSAGE_CREATE_THIN :
case DM_THIN_MESSAGE_TRIM :
2012-01-25 12:55:19 +04:00
if ( tmsg - > u . lv = = seg - > lv ) {
2011-10-19 20:37:30 +04:00
log_debug ( " Discarding message for LV %s. " ,
tmsg - > u . lv - > name ) ;
dm_list_del ( & tmsg - > list ) ;
2012-01-25 12:55:19 +04:00
no_update = 1 ; /* Replacing existing */
2011-10-19 20:37:30 +04:00
}
2011-11-07 15:04:45 +04:00
break ;
case DM_THIN_MESSAGE_DELETE :
if ( tmsg - > u . delete_id = = seg - > device_id ) {
log_error ( INTERNAL_ERROR " Trying to delete %u again. " ,
tmsg - > u . delete_id ) ;
return 0 ;
}
break ;
2011-10-19 20:37:30 +04:00
default :
2011-11-07 15:04:45 +04:00
log_error ( INTERNAL_ERROR " Unsupported message type %u. " , tmsg - > type ) ;
2011-10-19 20:37:30 +04:00
break ;
}
}
2011-10-17 18:17:09 +04:00
if ( ! attach_pool_message ( first_seg ( seg - > pool_lv ) ,
DM_THIN_MESSAGE_DELETE ,
2012-01-25 12:55:19 +04:00
NULL , seg - > device_id , no_update ) )
2011-10-17 18:17:09 +04:00
return_0 ;
2011-11-07 15:03:47 +04:00
if ( ! remove_seg_from_segs_using_this_lv ( seg - > pool_lv , seg ) )
return_0 ;
if ( seg - > origin & &
! remove_seg_from_segs_using_this_lv ( seg - > origin , seg ) )
return_0 ;
/* If thin origin, remove it from related thin snapshots */
/*
* TODO : map removal of origin as snapshot lvconvert - - merge ?
* i . e . rename thin snapshot to origin thin origin
*/
dm_list_iterate_items_safe ( sl , tsl , & seg - > lv - > segs_using_this_lv ) {
if ( ! seg_is_thin_volume ( sl - > seg ) | |
( seg - > lv ! = sl - > seg - > origin ) )
continue ;
if ( ! remove_seg_from_segs_using_this_lv ( seg - > lv , sl - > seg ) )
return_0 ;
/* Thin snapshot is now regular thin volume */
sl - > seg - > origin = NULL ;
}
return 1 ;
2011-09-08 20:41:18 +04:00
}
2012-01-25 12:55:19 +04:00
/*
 * Queue a thin-pool message on pool_seg.
 *
 * lv is used for CREATE_SNAP/CREATE_THIN/TRIM messages, delete_id for
 * DELETE.  A duplicate of an already-queued message is rejected.  When
 * this is the first queued message and no_update is not set, the pool's
 * transaction_id is bumped.  Returns 1 on success, 0 on error.
 */
int attach_pool_message(struct lv_segment *pool_seg, dm_thin_message_t type,
			struct logical_volume *lv, uint32_t delete_id,
			int no_update)
{
	struct lv_thin_message *tmsg;

	if (!seg_is_thin_pool(pool_seg)) {
		log_error(INTERNAL_ERROR "LV %s is not pool.", pool_seg->lv->name);
		return 0;
	}

	if (pool_has_message(pool_seg, lv, delete_id)) {
		if (lv)
			log_error("Message referring LV %s already queued in pool %s.",
				  lv->name, pool_seg->lv->name);
		else
			log_error("Delete for device %u already queued in pool %s.",
				  delete_id, pool_seg->lv->name);
		return 0;
	}

	if (!(tmsg = dm_pool_alloc(pool_seg->lv->vg->vgmem, sizeof(*tmsg)))) {
		log_error("Failed to allocate memory for message.");
		return 0;
	}

	switch (type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
	case DM_THIN_MESSAGE_CREATE_THIN:
	case DM_THIN_MESSAGE_TRIM:
		tmsg->u.lv = lv;
		break;
	case DM_THIN_MESSAGE_DELETE:
		tmsg->u.delete_id = delete_id;
		break;
	default:
		log_error(INTERNAL_ERROR "Unsupported message type %u.", type);
		return 0;
	}

	tmsg->type = type;

	/* If the 1st message is add in non-read-only mode, modify transaction_id */
	if (!no_update && dm_list_empty(&pool_seg->thin_messages))
		pool_seg->transaction_id++;

	dm_list_add(&pool_seg->thin_messages, &tmsg->list);

	log_debug("Added %s message",
		  (type == DM_THIN_MESSAGE_CREATE_SNAP ||
		   type == DM_THIN_MESSAGE_CREATE_THIN) ? "create" :
		  (type == DM_THIN_MESSAGE_TRIM) ? "trim" :
		  (type == DM_THIN_MESSAGE_DELETE) ? "delete" : "unknown");

	return 1;
}
2012-01-25 12:55:19 +04:00
/*
 * Check whether pool has some message queued for LV or for device_id.
 * When LV is NULL and device_id is 0 it just checks for any message.
 */
int pool_has_message(const struct lv_segment *seg,
		     const struct logical_volume *lv, uint32_t device_id)
{
	const struct lv_thin_message *tmsg;

	if (!seg_is_thin_pool(seg)) {
		log_error(INTERNAL_ERROR "LV %s is not pool.", seg->lv->name);
		return 0;
	}

	/* No specific target given - report whether the queue is empty */
	if (!lv && !device_id)
		return dm_list_empty(&seg->thin_messages);

	dm_list_iterate_items(tmsg, &seg->thin_messages)
		switch (tmsg->type) {
		case DM_THIN_MESSAGE_CREATE_SNAP:
		case DM_THIN_MESSAGE_CREATE_THIN:
		case DM_THIN_MESSAGE_TRIM:
			if (tmsg->u.lv == lv)
				return 1;
			break;
		case DM_THIN_MESSAGE_DELETE:
			if (tmsg->u.delete_id == device_id)
				return 1;
			break;
		default:
			break;
		}

	return 0;
}
2012-02-08 17:05:38 +04:00
int pool_below_threshold ( const struct lv_segment * pool_seg )
{
percent_t percent ;
int threshold = PERCENT_1 *
find_config_tree_int ( pool_seg - > lv - > vg - > cmd ,
" activation/thin_pool_autoextend_threshold " ,
DEFAULT_THIN_POOL_AUTOEXTEND_THRESHOLD ) ;
/* Data */
if ( ! lv_thin_pool_percent ( pool_seg - > lv , 0 , & percent ) ) {
stack ;
return 0 ;
}
if ( percent > = threshold )
return 0 ;
/* Metadata */
if ( ! lv_thin_pool_percent ( pool_seg - > lv , 1 , & percent ) ) {
stack ;
return 0 ;
}
if ( percent > = threshold )
return 0 ;
return 1 ;
}
2011-09-09 05:15:18 +04:00
struct lv_segment * find_pool_seg ( const struct lv_segment * seg )
2011-09-08 20:41:18 +04:00
{
2011-10-19 20:36:39 +04:00
struct lv_segment * pool_seg ;
2011-09-08 20:41:18 +04:00
2011-10-19 20:36:39 +04:00
pool_seg = get_only_segment_using_this_lv ( seg - > lv ) ;
2011-09-08 20:41:18 +04:00
2011-10-19 20:36:39 +04:00
if ( ! pool_seg ) {
log_error ( " Failed to find pool_seg for %s " , seg - > lv - > name ) ;
return NULL ;
}
2011-09-08 20:41:18 +04:00
2011-10-19 20:36:39 +04:00
if ( ! seg_is_thin_pool ( pool_seg ) ) {
log_error ( " %s on %s is not a pool segment " ,
pool_seg - > lv - > name , seg - > lv - > name ) ;
return NULL ;
}
2011-09-08 20:41:18 +04:00
2011-10-19 20:36:39 +04:00
return pool_seg ;
2011-09-08 20:41:18 +04:00
}
2011-10-03 22:39:17 +04:00
/*
* Find a free device_id for given thin_pool segment .
*
* \ return
* Free device id , or 0 if free device_id is not found .
*
* FIXME : Improve naive search and keep the value cached
* and updated during VG lifetime ( so no const for lv_segment )
*/
uint32_t get_free_pool_device_id ( struct lv_segment * thin_pool_seg )
{
2011-11-03 18:36:40 +04:00
uint32_t max_id = 0 ;
struct seg_list * sl ;
2011-10-03 22:39:17 +04:00
if ( ! seg_is_thin_pool ( thin_pool_seg ) ) {
2011-10-31 02:00:57 +04:00
log_error ( INTERNAL_ERROR
" Segment in %s is not a thin pool segment. " ,
2011-10-03 23:10:52 +04:00
thin_pool_seg - > lv - > name ) ;
2011-10-03 22:39:17 +04:00
return 0 ;
}
2011-11-03 18:36:40 +04:00
dm_list_iterate_items ( sl , & thin_pool_seg - > lv - > segs_using_this_lv )
if ( sl - > seg - > device_id > max_id )
max_id = sl - > seg - > device_id ;
2011-10-03 22:39:17 +04:00
2011-10-31 02:00:57 +04:00
if ( + + max_id > DM_THIN_MAX_DEVICE_ID ) {
2012-01-23 21:46:31 +04:00
/* FIXME Find empty holes instead of aborting! */
2011-10-31 02:00:57 +04:00
log_error ( " Cannot find free device_id. " ) ;
2011-10-03 22:39:17 +04:00
return 0 ;
}
log_debug ( " Found free pool device_id %u. " , max_id ) ;
return max_id ;
}
2011-10-22 20:44:23 +04:00
2011-12-10 04:47:23 +04:00
// FIXME Rename this fn: it doesn't extend an already-existing pool AFAICT
2011-10-22 20:44:23 +04:00
int extend_pool ( struct logical_volume * pool_lv , const struct segment_type * segtype ,
2011-10-29 00:32:54 +04:00
struct alloc_handle * ah , uint32_t stripes , uint32_t stripe_size )
2011-10-22 20:44:23 +04:00
{
const struct segment_type * striped ;
struct logical_volume * meta_lv , * data_lv ;
struct lv_segment * seg ;
const size_t len = strlen ( pool_lv - > name ) + 16 ;
char name [ len ] ;
2011-10-31 02:00:57 +04:00
if ( pool_lv - > le_count ) {
/* FIXME move code for manipulation from lv_manip.c */
log_error ( INTERNAL_ERROR " Pool %s has already extents. " , pool_lv - > name ) ;
2011-10-22 20:44:23 +04:00
return 0 ;
}
/* LV is not yet a pool, so it's extension from lvcreate */
if ( ! ( striped = get_segtype_from_string ( pool_lv - > vg - > cmd , " striped " ) ) )
return_0 ;
if ( activation ( ) & & segtype - > ops - > target_present & &
! segtype - > ops - > target_present ( pool_lv - > vg - > cmd , NULL , NULL ) ) {
log_error ( " %s: Required device-mapper target(s) not "
" detected in your kernel. " , segtype - > name ) ;
return 0 ;
}
/* Metadata segment */
2011-10-29 00:32:54 +04:00
if ( ! lv_add_segment ( ah , stripes , 1 , pool_lv , striped , 1 , 0 , 0 ) )
2011-10-22 20:44:23 +04:00
return_0 ;
if ( activation ( ) ) {
if ( ! vg_write ( pool_lv - > vg ) | | ! vg_commit ( pool_lv - > vg ) )
return_0 ;
/*
* If killed here , only the VISIBLE striped pool LV is left
* and user could easily remove it .
*
* FIXME : implement lazy clearing when activation is disabled
*/
2011-12-10 04:47:23 +04:00
/* pool_lv is a new LV so the VG lock protects us */
2011-10-22 20:44:23 +04:00
if ( ! activate_lv_local ( pool_lv - > vg - > cmd , pool_lv ) | |
/* Clear 4KB of metadata device for new thin-pool. */
! set_lv ( pool_lv - > vg - > cmd , pool_lv , UINT64_C ( 0 ) , 0 ) ) {
log_error ( " Aborting. Failed to wipe pool metadata %s. " ,
pool_lv - > name ) ;
return 0 ;
}
if ( ! deactivate_lv_local ( pool_lv - > vg - > cmd , pool_lv ) ) {
log_error ( " Aborting. Could not deactivate pool metadata %s. " ,
pool_lv - > name ) ;
return 0 ;
}
} else {
2011-10-31 02:00:57 +04:00
log_warn ( " WARNING: Pool %s is created without initilization. " , pool_lv - > name ) ;
2011-10-22 20:44:23 +04:00
}
if ( dm_snprintf ( name , len , " %s_tmeta " , pool_lv - > name ) < 0 )
return_0 ;
if ( ! ( meta_lv = lv_create_empty ( name , NULL , LVM_READ | LVM_WRITE ,
ALLOC_INHERIT , pool_lv - > vg ) ) )
return_0 ;
if ( ! move_lv_segments ( meta_lv , pool_lv , 0 , 0 ) )
return_0 ;
/* Pool data segment */
2011-10-29 00:32:54 +04:00
if ( ! lv_add_segment ( ah , 0 , stripes , pool_lv , striped , stripe_size , 0 , 0 ) )
2011-10-22 20:44:23 +04:00
return_0 ;
if ( ! ( data_lv = insert_layer_for_lv ( pool_lv - > vg - > cmd , pool_lv ,
2011-11-03 18:38:36 +04:00
pool_lv - > status , " _tdata " ) ) )
2011-10-22 20:44:23 +04:00
return_0 ;
seg = first_seg ( pool_lv ) ;
seg - > segtype = segtype ; /* Set as thin_pool segment */
seg - > lv - > status | = THIN_POOL ;
if ( ! attach_pool_metadata_lv ( seg , meta_lv ) )
return_0 ;
/* Drop reference as attach_pool_data_lv() takes it again */
remove_seg_from_segs_using_this_lv ( data_lv , seg ) ;
if ( ! attach_pool_data_lv ( seg , data_lv ) )
return_0 ;
return 1 ;
}
2011-11-03 18:53:58 +04:00
int update_pool_lv ( struct logical_volume * lv , int activate )
{
if ( ! lv_is_thin_pool ( lv ) ) {
log_error ( INTERNAL_ERROR " Updated LV %s is not pool. " , lv - > name ) ;
return 0 ;
}
2012-01-25 13:17:15 +04:00
if ( dm_list_empty ( & ( first_seg ( lv ) - > thin_messages ) ) )
return 1 ; /* No messages */
2011-11-03 18:53:58 +04:00
if ( activate ) {
2011-11-10 16:43:05 +04:00
/* If the pool is not active, do activate deactivate */
2011-11-03 19:58:20 +04:00
if ( ! lv_is_active ( lv ) ) {
if ( ! activate_lv_excl ( lv - > vg - > cmd , lv ) )
return_0 ;
if ( ! deactivate_lv ( lv - > vg - > cmd , lv ) )
return_0 ;
2011-11-03 18:53:58 +04:00
}
2012-01-25 13:13:10 +04:00
/*
* Resume active pool to send thin messages .
* origin_only is used to skip check for resumed state
2011-11-03 18:53:58 +04:00
*/
2012-01-25 13:13:10 +04:00
else if ( ! resume_lv_origin ( lv - > vg - > cmd , lv ) ) {
2011-11-03 18:53:58 +04:00
log_error ( " Failed to resume %s. " , lv - > name ) ;
return 0 ;
}
}
2012-01-25 13:17:15 +04:00
dm_list_init ( & ( first_seg ( lv ) - > thin_messages ) ) ;
2011-11-03 18:53:58 +04:00
2012-01-25 13:17:15 +04:00
if ( ! vg_write ( lv - > vg ) | | ! vg_commit ( lv - > vg ) )
return_0 ;
2011-11-03 18:53:58 +04:00
2012-01-25 13:17:15 +04:00
backup ( lv - > vg ) ;
2011-11-03 18:53:58 +04:00
return 1 ;
}