/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "tools.h"
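
/* Return 1 if 'lv' is non-NULL and belongs to 'vg', otherwise 0. */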
static int _lv_is_in_vg(struct volume_group *vg, struct logical_volume *lv)
{
	if (!lv || lv->vg != vg)
		return 0;

	return 1;
}
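
/*
 * Move a single LV from vg_from to vg_to: relink its lv_list entry onto
 * vg_to's LV list, update lv->vg and the VG part of the LVID, and hand
 * over the pool metadata spare if that is the LV being moved.  The LV
 * must be inactive.
 */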
static int _move_one_lv(struct volume_group *vg_from,
			struct volume_group *vg_to,
			struct dm_list *lvh)
{
	struct logical_volume *lv = dm_list_item(lvh, struct lv_list)->lv;
	struct logical_volume *parent_lv;

	if (lv_is_active(lv)) {
		if ((parent_lv = lv_parent(lv)))
			log_error("Logical volume %s (part of %s) must be inactive.",
				  display_lvname(lv), parent_lv->name);
		else
			log_error("Logical volume %s must be inactive.",
				  display_lvname(lv));
		return 0;
	}

	dm_list_move(&vg_to->lvs, lvh);
	lv->vg = vg_to;
	lv->lvid.id[0] = lv->vg->id;

	/* Moved pool metadata spare LV */
	if (vg_from->pool_metadata_spare_lv == lv) {
		vg_to->pool_metadata_spare_lv = lv;
		vg_from->pool_metadata_spare_lv = NULL;
	}

	return 1;
}
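
/*
 * Move the simple LVs - snapshots, RAID, mirrors, thin and cache LVs are
 * skipped here and handled by the dedicated helpers below.  An LV moves
 * only if all of the PVs backing its segments are already in vg_to; an LV
 * whose PVs end up split across both VGs is an error.
 */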
static int _move_lvs(struct volume_group *vg_from, struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv;
	struct lv_segment *seg;
	struct physical_volume *pv;
	struct volume_group *vg_with;
	unsigned s;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if ((lv->status & SNAPSHOT))
			continue;

		if (lv_is_raid(lv))
			continue;

		if (lv_is_mirrored(lv))
			continue;

		if (lv_is_thin_pool(lv) ||
		    lv_is_thin_volume(lv))
			continue;

		if (lv_is_cache(lv) || lv_is_cache_pool(lv))
			/* further checks by _move_cache() */
			continue;

		/* Ensure all the PVs used by this LV remain in the same */
		/* VG as each other */
		vg_with = NULL;
		dm_list_iterate_items(seg, &lv->segments) {
			for (s = 0; s < seg->area_count; s++) {
				/* FIXME Check AREA_LV too */
				if (seg_type(seg, s) != AREA_PV)
					continue;

				pv = seg_pv(seg, s);
				if (vg_with) {
					if (!pv_is_in_vg(vg_with, pv)) {
						log_error("Can't split Logical "
							  "Volume %s between "
							  "two Volume Groups",
							  lv->name);
						return 0;
					}
					continue;
				}

				if (pv_is_in_vg(vg_from, pv)) {
					vg_with = vg_from;
					continue;
				}
				if (pv_is_in_vg(vg_to, pv)) {
					vg_with = vg_to;
					continue;
				}
				log_error("Physical Volume %s not found",
					  pv_dev_name(pv));
				return 0;
			}
		}

		if (vg_with == vg_from)
			continue;

		/* Move this LV */
		if (!_move_one_lv(vg_from, vg_to, lvh))
			return_0;
	}

	/* FIXME Ensure no LVs contain segs pointing at LVs in the other VG */

	return 1;
}

/*
 * Move the hidden/internal "snapshotN" LVs from 'vg_from' to 'vg_to'.
 */
static int _move_snapshots(struct volume_group *vg_from,
			   struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv;
	struct lv_segment *seg;
	int cow_from = 0;
	int origin_from = 0;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (!(lv->status & SNAPSHOT))
			continue;

		dm_list_iterate_items(seg, &lv->segments) {
			cow_from = _lv_is_in_vg(vg_from, seg->cow);
			origin_from = _lv_is_in_vg(vg_from, seg->origin);

			if (cow_from && origin_from)
				continue;
			if ((!cow_from && origin_from) ||
			    (cow_from && !origin_from)) {
				log_error("Can't split snapshot %s between "
					  "two Volume Groups", seg->cow->name);
				return 0;
			}

			/*
			 * At this point, the cow and origin should already be
			 * in vg_to.
			 */
			if (_lv_is_in_vg(vg_to, seg->cow) &&
			    _lv_is_in_vg(vg_to, seg->origin)) {
				if (!_move_one_lv(vg_from, vg_to, lvh))
					return_0;
			}
		}
	}

	return 1;
}
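
/*
 * Move mirrored LVs.  A mirror moves only when every mirror image and its
 * log (including every image of a mirrored log) is already in vg_to;
 * anything in between is rejected as a split mirror.
 */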
static int _move_mirrors(struct volume_group *vg_from,
			 struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv;
	struct lv_segment *seg, *log_seg;
	unsigned s, seg_in, log_in;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (lv_is_raid(lv))
			continue;

		if (!lv_is_mirrored(lv))
			continue;

		seg = first_seg(lv);

		seg_in = 0;
		for (s = 0; s < seg->area_count; s++)
			if (_lv_is_in_vg(vg_to, seg_lv(seg, s)))
				seg_in++;

		log_in = !seg->log_lv;
		if (seg->log_lv) {
			log_seg = first_seg(seg->log_lv);
			if (seg_is_mirrored(log_seg)) {
				log_in = 1;

				/* Ensure each log dev is in vg_to */
				for (s = 0; s < log_seg->area_count; s++)
					log_in = log_in &&
						_lv_is_in_vg(vg_to,
							     seg_lv(log_seg, s));
			} else
				log_in = _lv_is_in_vg(vg_to, seg->log_lv);
		}

		if ((seg_in && seg_in < seg->area_count) ||
		    (seg_in && seg->log_lv && !log_in) ||
		    (!seg_in && seg->log_lv && log_in)) {
			log_error("Can't split mirror %s between "
				  "two Volume Groups", lv->name);
			return 0;
		}

		if (seg_in == seg->area_count && log_in) {
			if (!_move_one_lv(vg_from, vg_to, lvh))
				return_0;
		}
	}

	return 1;
}
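
/*
 * Move RAID LVs.  Each RAID LV carries area_count data images plus
 * area_count metadata images; if only some of those sub-LVs have been
 * assigned to vg_to, the split is rejected.
 */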
static int _move_raid(struct volume_group *vg_from,
		      struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv;
	struct lv_segment *seg;
	unsigned s, seg_in;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (!lv_is_raid(lv))
			continue;

		seg = first_seg(lv);

		seg_in = 0;
		for (s = 0; s < seg->area_count; s++) {
			if (_lv_is_in_vg(vg_to, seg_lv(seg, s)))
				seg_in++;
			if (_lv_is_in_vg(vg_to, seg_metalv(seg, s)))
				seg_in++;
		}

		if (seg_in && seg_in != (seg->area_count * 2)) {
			log_error("Can't split RAID %s between "
				  "two Volume Groups", lv->name);
			return 0;
		}

		if (!_move_one_lv(vg_from, vg_to, lvh))
			return_0;
	}

	return 1;
}
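
/*
 * Move thin volumes and thin pools.  A thin volume must stay with its
 * pool's data LV and any external origin; a thin pool must stay with its
 * data and metadata LVs.  If those components are spread over both VGs
 * the split is rejected.
 */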
static int _move_thins(struct volume_group *vg_from,
		       struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv, *data_lv;
	struct lv_segment *seg;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (lv_is_thin_volume(lv)) {
			seg = first_seg(lv);
			data_lv = seg_lv(first_seg(seg->pool_lv), 0);

			if ((_lv_is_in_vg(vg_to, data_lv) ||
			     _lv_is_in_vg(vg_to, seg->external_lv))) {
				if (_lv_is_in_vg(vg_from, seg->external_lv) ||
				    _lv_is_in_vg(vg_from, data_lv)) {
					log_error("Can't split external origin %s "
						  "and pool %s between two Volume Groups.",
						  seg->external_lv->name,
						  seg->pool_lv->name);
					return 0;
				}
				if (!_move_one_lv(vg_from, vg_to, lvh))
					return_0;
			}
		} else if (lv_is_thin_pool(lv)) {
			seg = first_seg(lv);
			data_lv = seg_lv(seg, 0);

			if (_lv_is_in_vg(vg_to, data_lv) ||
			    _lv_is_in_vg(vg_to, seg->metadata_lv)) {
				if (_lv_is_in_vg(vg_from, seg->metadata_lv) ||
				    _lv_is_in_vg(vg_from, data_lv)) {
					log_error("Can't split pool data and metadata %s "
						  "between two Volume Groups.",
						  lv->name);
					return 0;
				}
				if (!_move_one_lv(vg_from, vg_to, lvh))
					return_0;
			}
		}
	}

	return 1;
}
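
/*
 * Move cache LVs and cache pools together with their origin, data and
 * metadata sub-LVs.  The checks below are in place, but splitting a VG
 * that contains cache LVs is currently refused outright (see FIXME).
 */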
static int _move_cache(struct volume_group *vg_from,
		       struct volume_group *vg_to)
{
	int is_moving;
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv, *data, *meta, *orig;
	struct lv_segment *seg, *cache_seg;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;
		data = meta = orig = NULL;
		seg = first_seg(lv);

		if (!lv_is_cache(lv) && !lv_is_cache_pool(lv))
			continue;

		/*
		 * FIXME: The code seems to move cache LVs fine, but it
		 *        hasn't been well tested and it causes problems
		 *        when just splitting PVs that don't contain
		 *        cache LVs.
		 *        Waiting for next release before fixing and enabling.
		 */
		log_error("Unable to split VG while it contains cache LVs");
		return 0;
		/* NOTREACHED */

		if (lv_is_cache(lv)) {
			orig = seg_lv(seg, 0);
			data = seg_lv(first_seg(seg->pool_lv), 0);
			meta = first_seg(seg->pool_lv)->metadata_lv;
			/* Ensure all components are coming along */
			is_moving = _lv_is_in_vg(vg_to, orig);
		} else {
			if (!dm_list_empty(&seg->lv->segs_using_this_lv) &&
			    !(cache_seg = get_only_segment_using_this_lv(seg->lv)))
				return_0;
			orig = seg_lv(cache_seg, 0);
			data = seg_lv(seg, 0);
			meta = seg->metadata_lv;

			if (_lv_is_in_vg(vg_to, data) ||
			    _lv_is_in_vg(vg_to, meta))
				is_moving = 1;
		}

		if (orig && (_lv_is_in_vg(vg_to, orig) != is_moving)) {
			log_error("Can't split %s and its origin (%s)"
				  " into separate VGs", lv->name, orig->name);
			return 0;
		}

		if (data && (_lv_is_in_vg(vg_to, data) != is_moving)) {
			log_error("Can't split %s and its cache pool"
				  " data LV (%s) into separate VGs",
				  lv->name, data->name);
			return 0;
		}

		if (meta && (_lv_is_in_vg(vg_to, meta) != is_moving)) {
			log_error("Can't split %s and its cache pool"
				  " metadata LV (%s) into separate VGs",
				  lv->name, meta->name);
			return 0;
		}

		if (!_move_one_lv(vg_from, vg_to, lvh))
			return_0;
	}

	return 1;
}

/*
 * Create or open the destination of the vgsplit operation.
 * Returns
 * - non-NULL: VG handle w/ VG lock held
 * - NULL: no VG lock held
 */
static struct volume_group *_vgsplit_to(struct cmd_context *cmd,
					const char *vg_name_to,
					int *existing_vg)
{
	struct volume_group *vg_to = NULL;

	log_verbose("Checking for new volume group \"%s\"", vg_name_to);
	/*
	 * First try to create a new VG.  If we cannot create it,
	 * and we get FAILED_EXIST (we will not be holding a lock),
	 * a VG must already exist with this name.  We then try to
	 * read the existing VG - the vgsplit will be into an existing VG.
	 *
	 * Otherwise, if the lock was successful, it must be the case that
	 * we obtained a WRITE lock and could not find the vgname in the
	 * system.  Thus, the split will be into a new VG.
	 */
	vg_to = vg_create(cmd, vg_name_to);
	if (vg_read_error(vg_to) == FAILED_LOCKING) {
		log_error("Can't get lock for %s", vg_name_to);
		release_vg(vg_to);
		return NULL;
	}
	if (vg_read_error(vg_to) == FAILED_EXIST) {
		*existing_vg = 1;
		release_vg(vg_to);
		vg_to = vg_read_for_update(cmd, vg_name_to, NULL, 0, 0);

		if (vg_read_error(vg_to)) {
			release_vg(vg_to);
			return_NULL;
		}
	} else if (vg_read_error(vg_to) == SUCCESS) {
		*existing_vg = 0;
	}
	return vg_to;
}

/*
 * Open the source of the vgsplit operation.
 * Returns
 * - non-NULL: VG handle w/ VG lock held
 * - NULL: no VG lock held
 */
static struct volume_group *_vgsplit_from(struct cmd_context *cmd,
					  const char *vg_name_from)
{
	struct volume_group *vg_from;

	log_verbose("Checking for volume group \"%s\"", vg_name_from);

	vg_from = vg_read_for_update(cmd, vg_name_from, NULL, 0, 0);
	if (vg_read_error(vg_from)) {
		release_vg(vg_from);
		return NULL;
	}

	if (is_lockd_type(vg_from->lock_type)) {
		log_error("vgsplit not allowed for lock_type %s", vg_from->lock_type);
		unlock_and_release_vg(cmd, vg_from, vg_name_from);
		return NULL;
	}

	return vg_from;
}

/*
 * Has the user given an option related to a new vg as the split destination?
 */
static int new_vg_option_specified(struct cmd_context *cmd)
{
	return (arg_count(cmd, clustered_ARG) ||
		arg_count(cmd, alloc_ARG) ||
		arg_count(cmd, maxphysicalvolumes_ARG) ||
		arg_count(cmd, maxlogicalvolumes_ARG) ||
		arg_count(cmd, vgmetadatacopies_ARG));
}
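
/*
 * vgsplit: move the PVs listed on the command line (or the PVs used by
 * the LV given with --name) from an existing VG into a new or existing
 * destination VG, together with any LVs that reside entirely on them.
 */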
int vgsplit(struct cmd_context *cmd, int argc, char **argv)
{
	struct vgcreate_params vp_new;
	struct vgcreate_params vp_def;
	const char *vg_name_from, *vg_name_to;
	struct volume_group *vg_to = NULL, *vg_from = NULL;
	int opt;
	int existing_vg = 0;
	int r = ECMD_FAILED;
	const char *lv_name;
	int lock_vg_from_first = 1;

	if ((arg_count(cmd, name_ARG) + argc) < 3) {
		log_error("Existing VG, new VG and either physical volumes "
			  "or logical volume required.");
		return EINVALID_CMD_LINE;
	}

	if (arg_count(cmd, name_ARG) && (argc > 2)) {
		log_error("A logical volume name cannot be given with "
			  "physical volumes.");
		return ECMD_FAILED;
	}

	/* Needed to change the global VG namespace. */
	if (!lockd_gl(cmd, "ex", LDGL_UPDATE_NAMES))
		return_ECMD_FAILED;

	if (arg_count(cmd, name_ARG))
		lv_name = arg_value(cmd, name_ARG);
	else
		lv_name = NULL;

	vg_name_from = skip_dev_dir(cmd, argv[0], NULL);
	vg_name_to = skip_dev_dir(cmd, argv[1], NULL);
	argc -= 2;
	argv += 2;

	if (!strcmp(vg_name_to, vg_name_from)) {
		log_error("Duplicate volume group name \"%s\"", vg_name_from);
		return ECMD_FAILED;
	}
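
	/*
	 * Lock the two VGs in alphabetical order of their names so that
	 * concurrent commands acting on the same pair always take the locks
	 * in the same order and cannot deadlock.
	 */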
	if (strcmp(vg_name_to, vg_name_from) < 0)
		lock_vg_from_first = 0;

	if (lock_vg_from_first) {
		if (!(vg_from = _vgsplit_from(cmd, vg_name_from)))
			return_ECMD_FAILED;
		/*
		 * Set metadata format of original VG.
		 * NOTE: We must set the format before calling vg_create()
		 * since vg_create() calls the per-format constructor.
		 */
		cmd->fmt = vg_from->fid->fmt;

		if (!(vg_to = _vgsplit_to(cmd, vg_name_to, &existing_vg))) {
			unlock_and_release_vg(cmd, vg_from, vg_name_from);
			return_ECMD_FAILED;
		}
	} else {
		if (!(vg_to = _vgsplit_to(cmd, vg_name_to, &existing_vg)))
			return_ECMD_FAILED;
		if (!(vg_from = _vgsplit_from(cmd, vg_name_from))) {
			unlock_and_release_vg(cmd, vg_to, vg_name_to);
			return_ECMD_FAILED;
		}

		if (cmd->fmt != vg_from->fid->fmt) {
			/* In this case we don't know the vg_from->fid->fmt */
			log_error("Unable to set new VG metadata type based on "
				  "source VG format - use -M option.");
			goto bad;
		}
	}
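
	/*
	 * An existing destination VG must be compatible with the source VG
	 * and must not be combined with options that only apply when a new
	 * VG is created; for a new destination the vgcreate-style parameters
	 * come from the command line, with defaults derived from vg_from.
	 */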
	if (existing_vg) {
		if (new_vg_option_specified(cmd)) {
			log_error("Volume group \"%s\" exists, but new VG "
				  "option specified", vg_name_to);
			goto bad;
		}
		if (!vgs_are_compatible(cmd, vg_from, vg_to))
			goto_bad;
	} else {
		if (!vgcreate_params_set_defaults(cmd, &vp_def, vg_from)) {
			r = EINVALID_CMD_LINE;
			goto_bad;
		}
		vp_def.vg_name = vg_name_to;
		if (!vgcreate_params_set_from_args(cmd, &vp_new, &vp_def)) {
			r = EINVALID_CMD_LINE;
			goto_bad;
		}

		if (!vgcreate_params_validate(cmd, &vp_new)) {
			r = EINVALID_CMD_LINE;
			goto_bad;
		}

		if (!vg_set_extent_size(vg_to, vp_new.extent_size) ||
		    !vg_set_max_lv(vg_to, vp_new.max_lv) ||
		    !vg_set_max_pv(vg_to, vp_new.max_pv) ||
		    !vg_set_alloc_policy(vg_to, vp_new.alloc) ||
		    !vg_set_clustered(vg_to, vp_new.clustered) ||
		    !vg_set_system_id(vg_to, vp_new.system_id) ||
		    !vg_set_mda_copies(vg_to, vp_new.vgmetadatacopies))
			goto_bad;
	}

	/* Archive vg_from before changing it */
	if (!archive(vg_from))
		goto_bad;

	/* Move PVs across to new structure */
	for (opt = 0; opt < argc; opt++) {
		dm_unescape_colons_and_at_signs(argv[opt], NULL, NULL);
		if (!move_pv(vg_from, vg_to, argv[opt]))
			goto_bad;
	}

	/* If an LV given on the cmdline, move used_by PVs */
	if (lv_name && !move_pvs_used_by_lv(vg_from, vg_to, lv_name))
		goto_bad;

	/* Move required LVs across, checking consistency */
	if (!(_move_lvs(vg_from, vg_to)))
		goto_bad;

	/* FIXME Separate the 'move' from the 'validation' to fix dev stacks */

	/* Move required RAID across */
	if (!(_move_raid(vg_from, vg_to)))
		goto_bad;

	/* Move required mirrors across */
	if (!(_move_mirrors(vg_from, vg_to)))
		goto_bad;

	/* Move required snapshots across */
	if (!(_move_snapshots(vg_from, vg_to)))
		goto_bad;

	/* Move required pools across */
	if (!(_move_thins(vg_from, vg_to)))
		goto_bad;

	if (!(_move_cache(vg_from, vg_to)))
		goto_bad;

	/* Split metadata areas and check if both vgs have at least one area */
	if (!(vg_split_mdas(cmd, vg_from, vg_to)) && vg_from->pv_count) {
		log_error("Cannot split: Nowhere to store metadata for new Volume Group");
		goto bad;
	}

	/* Set proper name for all PVs in new VG */
	if (!vg_rename(cmd, vg_to, vg_name_to))
		goto_bad;

	/* store it on disks */
	log_verbose("Writing out updated volume groups");

	/*
	 * First, write out the new VG as EXPORTED.  We do this first in case
	 * there is a crash - we will still have the new VG information, in an
	 * exported state.  Recovery after this point would be removal of the
	 * new VG and redoing the vgsplit.
	 * FIXME: recover automatically or instruct the user?
	 */
	vg_to->status |= EXPORTED_VG;

	if (!archive(vg_to))
		goto_bad;

	if (!vg_write(vg_to) || !vg_commit(vg_to))
		goto_bad;

	backup(vg_to);

	/*
	 * Next, write out the updated old VG.  If we crash after this point,
	 * recovery is a vgimport on the new VG.
	 * FIXME: recover automatically or instruct the user?
	 */
	if (vg_from->pv_count) {
		if (!vg_write(vg_from) || !vg_commit(vg_from))
			goto_bad;

		backup(vg_from);
	}

	/*
	 * Finally, remove the EXPORTED flag from the new VG and write it out.
	 */
	if (!test_mode()) {
		release_vg(vg_to);
		vg_to = vg_read_for_update(cmd, vg_name_to, NULL,
					   READ_ALLOW_EXPORTED, 0);
		if (vg_read_error(vg_to)) {
			log_error("Volume group \"%s\" became inconsistent: "
				  "please fix manually", vg_name_to);
			goto bad;
		}
	}

	vg_to->status &= ~EXPORTED_VG;

	if (!vg_write(vg_to) || !vg_commit(vg_to))
		goto_bad;

	backup(vg_to);

	log_print_unless_silent("%s volume group \"%s\" successfully split from \"%s\"",
				existing_vg ? "Existing" : "New",
				vg_to->name, vg_from->name);

	r = ECMD_PROCESSED;

bad:
	/*
	 * vg_to references elements moved from vg_from
	 * so vg_to has to be freed first.
	 */
	unlock_and_release_vg(cmd, vg_to, vg_name_to);
	unlock_and_release_vg(cmd, vg_from, vg_name_from);

	return r;
}