/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2019 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "tools.h"
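
/* Return 1 if @lv exists and belongs to @vg, 0 otherwise. */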
static int _lv_is_in_vg(struct volume_group *vg, struct logical_volume *lv)
{
	if (!lv || lv->vg != vg)
		return 0;

	return 1;
}
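
/*
 * Return the dm_list node of @lv on the @vg->lvs list,
 * or NULL when @lv is not listed in @vg.
 */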
static struct dm_list *_lvh_in_vg(struct logical_volume *lv, struct volume_group *vg)
{
	struct dm_list *lvh;

	dm_list_iterate(lvh, &vg->lvs)
		if (lv == dm_list_item(lvh, struct lv_list)->lv)
			return lvh;

	return NULL;
}
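
/*
 * Move @lv (referenced by list node @lvh) from @vg_from to @vg_to and
 * recurse into any sub-LVs referenced by the areas of its first segment.
 * @lvht is the caller's dm_list_iterate_safe() lookahead pointer; it is
 * advanced if it points at the node being moved, so the caller's
 * iteration stays valid.
 */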
static int _lv_tree_move(struct dm_list *lvh,
			 struct dm_list **lvht,
			 struct volume_group *vg_from,
			 struct volume_group *vg_to)
{
	uint32_t s;
	struct logical_volume *lv = dm_list_item(lvh, struct lv_list)->lv;
	struct lv_segment *seg = first_seg(lv);
	struct dm_list *lvh1;

	/* Update the list pointer referring to the item moving to @vg_to. */
	if (lvh == *lvht)
		*lvht = dm_list_next(lvh, lvh);

	dm_list_move(&vg_to->lvs, lvh);
	lv->vg = vg_to;
	lv->lvid.id[0] = lv->vg->id;

	if (seg)
		for (s = 0; s < seg->area_count; s++)
			if (seg_type(seg, s) == AREA_LV && seg_lv(seg, s)) {
				if ((lvh1 = _lvh_in_vg(seg_lv(seg, s), vg_from))) {
					if (!_lv_tree_move(lvh1, lvht, vg_from, vg_to))
						return 0;
				} else if (!_lvh_in_vg(seg_lv(seg, s), vg_to))
					return 0;
			}

	return 1;
}
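
/*
 * Move one LV from @vg_from to @vg_to together with its sub-LV tree.
 * Refuses to move an active LV or an LV whose extents still live on
 * PVs of @vg_from; pool metadata spare status follows the LV across.
 */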
static int _move_one_lv(struct volume_group *vg_from,
			struct volume_group *vg_to,
			struct dm_list *lvh,
			struct dm_list **lvht)
{
	struct logical_volume *lv = dm_list_item(lvh, struct lv_list)->lv;
	struct logical_volume *parent_lv;

	if (lv_is_active(lv)) {
		if ((parent_lv = lv_parent(lv)))
			log_error("Logical volume %s (part of %s) must be inactive.",
				  display_lvname(lv), parent_lv->name);
		else
			log_error("Logical volume %s must be inactive.",
				  display_lvname(lv));
		return 0;
	}

	/* Bail out if any allocations of @lv are still on PVs of @vg_from */
	if (lv_is_on_pvs(lv, &vg_from->pvs)) {
		log_error("Can't split LV %s between "
			  "two Volume Groups", lv->name);
		return 0;
	}

	if (!_lv_tree_move(lvh, lvht, vg_from, vg_to))
		return 0;

	/* Moved pool metadata spare LV */
	if (vg_from->pool_metadata_spare_lv == lv) {
		vg_to->pool_metadata_spare_lv = lv;
		vg_from->pool_metadata_spare_lv = NULL;
	}

	return 1;
}
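
/*
 * Move the "plain" LVs whose PVs were all given to @vg_to.  Snapshots,
 * RAID, mirrors, thin and VDO LVs and caches are skipped here and
 * handled by the type-specific movers below; an LV mapped to PVs in
 * both VGs is rejected.
 */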
static int _move_lvs(struct volume_group *vg_from, struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv;
	struct lv_segment *seg;
	struct physical_volume *pv;
	struct volume_group *vg_with;
	unsigned s;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (lv_is_snapshot(lv))
			continue;

		if (lv_is_raid(lv))
			continue;

		if (lv_is_mirrored(lv))
			continue;

		if (lv_is_thin_pool(lv) ||
		    lv_is_thin_volume(lv))
			continue;

		if (lv_is_vdo_pool(lv) ||
		    lv_is_vdo(lv))
			continue;

		if (lv_is_cache(lv) || lv_is_cache_pool(lv))
			/* further checks by _move_cache() */
			continue;

		/* Ensure all the PVs used by this LV remain in the same VG as each other */
		vg_with = NULL;
		dm_list_iterate_items(seg, &lv->segments) {
			for (s = 0; s < seg->area_count; s++) {
				/* FIXME Check AREA_LV too */
				if (seg_type(seg, s) != AREA_PV)
					continue;

				pv = seg_pv(seg, s);
				if (vg_with) {
					if (!pv_is_in_vg(vg_with, pv)) {
						log_error("Can't split Logical "
							  "Volume %s between "
							  "two Volume Groups",
							  lv->name);
						return 0;
					}
					continue;
				}

				if (pv_is_in_vg(vg_from, pv)) {
					vg_with = vg_from;
					continue;
				}
				if (pv_is_in_vg(vg_to, pv)) {
					vg_with = vg_to;
					continue;
				}
				log_error("Physical Volume %s not found",
					  pv_dev_name(pv));
				return 0;
			}
		}

		if (vg_with == vg_from)
			continue;

		/* Move this LV */
		if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
			return_0;
	}

	/* FIXME Ensure no LVs contain segs pointing at LVs in the other VG */

	return 1;
}

/*
 * Move the hidden/internal "snapshotN" LVs from 'vg_from' to 'vg_to'.
 */
static int _move_snapshots(struct volume_group *vg_from,
			   struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv;
	struct lv_segment *seg;
	int cow_from = 0;
	int origin_from = 0;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (!lv_is_snapshot(lv))
			continue;

		dm_list_iterate_items(seg, &lv->segments) {
			cow_from = _lv_is_in_vg(vg_from, seg->cow);
			origin_from = _lv_is_in_vg(vg_from, seg->origin);

			if (cow_from && origin_from)
				continue;
			if ((!cow_from && origin_from) ||
			    (cow_from && !origin_from)) {
				log_error("Can't split snapshot %s between "
					  "two Volume Groups", seg->cow->name);
				return 0;
			}

			/*
			 * At this point, the cow and origin should already be
			 * in vg_to.
			 */
			if (_lv_is_in_vg(vg_to, seg->cow) &&
			    _lv_is_in_vg(vg_to, seg->origin)) {
				if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
					return_0;
			}
		}
	}

	return 1;
}
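
/*
 * Move a mirror LV when all its images and its (possibly mirrored) log
 * ended up on PVs of @vg_to; reject mirrors whose images or log would
 * be split between the two VGs.
 */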
static int _move_mirrors(struct volume_group *vg_from,
			 struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv;
	struct lv_segment *seg, *log_seg;
	unsigned s, seg_in, log_in;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (lv_is_raid(lv))
			continue;

		if (!lv_is_mirrored(lv))
			continue;

		/* Ignore if no allocations on PVs of @vg_to */
		if (!lv_is_on_pvs(lv, &vg_to->pvs))
			continue;

		seg = first_seg(lv);

		seg_in = 0;
		for (s = 0; s < seg->area_count; s++)
			if (_lv_is_in_vg(vg_to, seg_lv(seg, s)))
				seg_in++;

		log_in = !seg->log_lv;
		if (seg->log_lv) {
			log_seg = first_seg(seg->log_lv);
			if (seg_is_mirrored(log_seg)) {
				log_in = 1;

				/* Ensure each log dev is in vg_to */
				for (s = 0; s < log_seg->area_count; s++)
					log_in = log_in &&
						_lv_is_in_vg(vg_to,
							     seg_lv(log_seg, s));
			} else
				log_in = _lv_is_in_vg(vg_to, seg->log_lv);
		}

		if ((seg_in && seg_in < seg->area_count) ||
		    (seg_in && seg->log_lv && !log_in) ||
		    (!seg_in && seg->log_lv && log_in)) {
			log_error("Can't split mirror %s between "
				  "two Volume Groups", lv->name);
			return 0;
		}

		if (seg_in == seg->area_count && log_in) {
			if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
				return_0;
		}
	}

	return 1;
}

/*
 * Check for any RAID LVs with allocations on PVs of @vg_to.
 *
 * If these don't have any allocations on PVs of @vg_from,
 * move their whole LV stack across to @vg_to including the
 * top-level RAID LV.
 */
static int _move_raids(struct volume_group *vg_from,
		       struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (!lv_is_raid(lv))
			continue;

		/* Ignore if no allocations on PVs of @vg_to */
		if (!lv_is_on_pvs(lv, &vg_to->pvs))
			continue;

		/* If allocations are on PVs of @vg_to -> move RAID LV stack across */
		if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
			return_0;
	}

	return 1;
}
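
/*
 * Move thin volumes and thin pools whose data (and any external origin)
 * ended up on PVs of @vg_to; reject splits separating a pool's data
 * from its metadata or a thin volume from its external origin.
 */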
static int _move_thins(struct volume_group *vg_from,
		       struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv, *data_lv;
	struct lv_segment *seg;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (lv_is_thin_volume(lv)) {
			seg = first_seg(lv);
			data_lv = seg_lv(first_seg(seg->pool_lv), 0);

			/* Ignore if no allocations on PVs of @vg_to */
			if (!lv_is_on_pvs(data_lv, &vg_to->pvs) &&
			    (seg->external_lv && !lv_is_on_pvs(seg->external_lv, &vg_to->pvs)))
				continue;

			if ((_lv_is_in_vg(vg_to, data_lv) ||
			     _lv_is_in_vg(vg_to, seg->external_lv))) {
				if (seg->external_lv &&
				    (_lv_is_in_vg(vg_from, seg->external_lv) ||
				     _lv_is_in_vg(vg_from, data_lv))) {
					log_error("Can't split external origin %s "
						  "and pool %s between two Volume Groups.",
						  display_lvname(seg->external_lv),
						  display_lvname(seg->pool_lv));
					return 0;
				}

				if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
					return_0;
			}
		} else if (lv_is_thin_pool(lv)) {
			seg = first_seg(lv);
			data_lv = seg_lv(seg, 0);

			/* Ignore if no allocations on PVs of @vg_to */
			if (!lv_is_on_pvs(data_lv, &vg_to->pvs))
				continue;

			if (_lv_is_in_vg(vg_to, data_lv) ||
			    _lv_is_in_vg(vg_to, seg->metadata_lv)) {
				if (_lv_is_in_vg(vg_from, seg->metadata_lv) ||
				    _lv_is_in_vg(vg_from, data_lv)) {
					log_error("Can't split pool data and metadata %s "
						  "between two Volume Groups.",
						  lv->name);
					return 0;
				}

				if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
					return_0;
			}
		}
	}

	return 1;
}
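
/*
 * Move VDO LVs and VDO pools whose backing data LV ended up on PVs
 * of @vg_to; _move_one_lv() pulls the rest of the stack across.
 */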
static int _move_vdos(struct volume_group *vg_from,
		      struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv, *vdo_data_lv;
	struct lv_segment *seg;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (lv_is_vdo(lv)) {
			seg = first_seg(lv);
			vdo_data_lv = seg_lv(first_seg(seg_lv(seg, 0)), 0);

			/* Ignore if no allocations on PVs of @vg_to */
			if (!lv_is_on_pvs(vdo_data_lv, &vg_to->pvs))
				continue;

			if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
				return_0;
		} else if (lv_is_vdo_pool(lv)) {
			seg = first_seg(lv);
			vdo_data_lv = seg_lv(seg, 0);

			/* Ignore if no allocations on PVs of @vg_to */
			if (!lv_is_on_pvs(vdo_data_lv, &vg_to->pvs))
				continue;

			if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
				return_0;
		}
	}

	return 1;
}
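
/*
 * Move cache, writecache, cache pool and cachevol LVs, requiring that
 * the origin and its cache data/metadata (or cachevol) end up together
 * in the same VG.
 */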
static int _move_cache(struct volume_group *vg_from,
		       struct volume_group *vg_to)
{
	int is_moving;
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv, *data = NULL, *meta = NULL, *orig = NULL, *fast = NULL;
	struct lv_segment *seg, *cache_seg;

	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;
		seg = first_seg(lv);

		if (!lv_is_cache(lv) && !lv_is_writecache(lv) && !lv_is_cache_pool(lv) && !lv_is_cache_vol(lv))
			continue;

		if (lv_is_cache(lv)) {
			orig = seg_lv(seg, 0);
			seg = first_seg(seg->pool_lv);
		} else if (lv_is_writecache(lv)) {
			orig = seg_lv(seg, 0);
			seg = first_seg(seg->writecache);
		} else if (lv_is_cache_pool(lv) || lv_is_cache_vol(lv)) {
			orig = NULL;
			if (!dm_list_empty(&seg->lv->segs_using_this_lv)) {
				if (!(cache_seg = get_only_segment_using_this_lv(seg->lv)))
					return_0;
				orig = seg_lv(cache_seg, 0);
			}
		}

		if (lv_is_cache_vol(lv)) {
			fast = lv;
		} else {
			data = seg_lv(seg, 0);
			meta = seg->metadata_lv;
		}

		if (data && meta) {
			if ((orig && !lv_is_on_pvs(orig, &vg_to->pvs)) &&
			    !lv_is_on_pvs(data, &vg_to->pvs) &&
			    !lv_is_on_pvs(meta, &vg_to->pvs))
				continue;
		}

		if (fast && orig &&
		    !lv_is_on_pvs(orig, &vg_to->pvs) && !lv_is_on_pvs(fast, &vg_to->pvs))
			continue;

		/* Ensure all components are coming along */
		if (orig && data && meta) {
			is_moving = _lv_is_in_vg(vg_to, orig);

			if (_lv_is_in_vg(vg_to, data) != is_moving) {
				log_error("Cannot split cache origin %s and its cache pool data %s "
					  "into separate VGs.",
					  display_lvname(orig), display_lvname(data));
				return 0;
			}

			if (_lv_is_in_vg(vg_to, meta) != is_moving) {
				log_error("Cannot split cache origin %s and its cache pool metadata %s "
					  "into separate VGs.",
					  display_lvname(orig), display_lvname(meta));
				return 0;
			}
		} else if (data && meta && (_lv_is_in_vg(vg_to, data) != _lv_is_in_vg(vg_to, meta))) {
			log_error("Cannot split cache pool data %s and its metadata %s "
				  "into separate VGs.",
				  display_lvname(data), display_lvname(meta));
			return 0;
		} else if (orig && fast && (_lv_is_in_vg(vg_to, orig) != _lv_is_in_vg(vg_to, fast))) {
			log_error("Cannot split cache origin %s and its cachevol %s into separate VGs.",
				  display_lvname(orig), display_lvname(fast));
			return 0;
		}

		if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
			return_0;
	}

	return 1;
}

/*
 * Has the user given an option related to a new VG as the split destination?
 */
static int _new_vg_option_specified(struct cmd_context *cmd)
{
	return (arg_is_set(cmd, clustered_ARG) ||
		arg_is_set(cmd, alloc_ARG) ||
		arg_is_set(cmd, maxphysicalvolumes_ARG) ||
		arg_is_set(cmd, maxlogicalvolumes_ARG) ||
		arg_is_set(cmd, vgmetadatacopies_ARG));
}
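
/*
 * vgsplit: move the named PVs (or the PVs used by a named LV) from an
 * existing VG into a new or existing VG, carrying across any LVs whose
 * allocations moved with them.  The new VG is first committed in an
 * EXPORTED state so a crash mid-way leaves recoverable metadata.
 */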
int vgsplit(struct cmd_context *cmd, int argc, char **argv)
{
	struct vgcreate_params vp_new;
	struct vgcreate_params vp_def;
	const char *vg_name_from, *vg_name_to;
	struct volume_group *vg_to = NULL, *vg_from = NULL;
	struct lvmcache_vginfo *vginfo_to;
	int opt;
	int existing_vg = 0;
	int r = ECMD_FAILED;
	const char *lv_name;
	int poolmetadataspare = arg_int_value(cmd, poolmetadataspare_ARG, DEFAULT_POOL_METADATA_SPARE);

	if ((arg_is_set(cmd, name_ARG) + argc) < 3) {
		log_error("Existing VG, new VG and either physical volumes "
			  "or logical volume required.");
		return EINVALID_CMD_LINE;
	}

	if (arg_is_set(cmd, name_ARG) && (argc > 2)) {
		log_error("A logical volume name cannot be given with "
			  "physical volumes.");
		return ECMD_FAILED;
	}

	if (!lock_global(cmd, "ex"))
		return_ECMD_FAILED;

	clear_hint_file(cmd);

	if (arg_is_set(cmd, name_ARG))
		lv_name = arg_value(cmd, name_ARG);
	else
		lv_name = NULL;

	vg_name_from = skip_dev_dir(cmd, argv[0], NULL);
	vg_name_to = skip_dev_dir(cmd, argv[1], NULL);
	argc -= 2;
	argv += 2;

	if (!strcmp(vg_name_to, vg_name_from)) {
		log_error("Duplicate volume group name \"%s\"", vg_name_from);
		return ECMD_FAILED;
	}

	if (!lvmcache_label_scan(cmd))
		return_ECMD_FAILED;

	if (!(vginfo_to = lvmcache_vginfo_from_vgname(vg_name_to, NULL))) {
		if (!validate_name(vg_name_to)) {
			log_error("Invalid vg name %s.", vg_name_to);
			return ECMD_FAILED;
		}

		if (!lock_vol(cmd, vg_name_to, LCK_VG_WRITE, NULL)) {
			log_error("Failed to lock new VG name %s.", vg_name_to);
			return ECMD_FAILED;
		}

		if (!(vg_to = vg_create(cmd, vg_name_to))) {
			log_error("Failed to create new VG %s.", vg_name_to);
			unlock_vg(cmd, NULL, vg_name_to);
			return ECMD_FAILED;
		}
	} else {
		if (!(vg_to = vg_read_for_update(cmd, vg_name_to, NULL, 0, 0))) {
			log_error("Failed to read VG %s.", vg_name_to);
			return ECMD_FAILED;
		}
		existing_vg = 1;
	}

	if (!(vg_from = vg_read_for_update(cmd, vg_name_from, NULL, 0, 0))) {
		log_error("Failed to read VG %s.", vg_name_from);
		unlock_and_release_vg(cmd, vg_to, vg_name_to);
		return ECMD_FAILED;
	}

	cmd->fmt = vg_from->fid->fmt;

	if (existing_vg) {
		if (_new_vg_option_specified(cmd)) {
			log_error("Volume group \"%s\" exists, but new VG option specified", vg_name_to);
			goto bad;
		}

		if (!vgs_are_compatible(cmd, vg_from, vg_to))
			goto_bad;
	} else {
		if (!vgcreate_params_set_defaults(cmd, &vp_def, vg_from)) {
			r = EINVALID_CMD_LINE;
			goto_bad;
		}

		vp_def.vg_name = vg_name_to;
		if (!vgcreate_params_set_from_args(cmd, &vp_new, &vp_def)) {
			r = EINVALID_CMD_LINE;
			goto_bad;
		}

		if (!vgcreate_params_validate(cmd, &vp_new)) {
			r = EINVALID_CMD_LINE;
			goto_bad;
		}

		if (!vg_set_extent_size(vg_to, vp_new.extent_size) ||
		    !vg_set_max_lv(vg_to, vp_new.max_lv) ||
		    !vg_set_max_pv(vg_to, vp_new.max_pv) ||
		    !vg_set_alloc_policy(vg_to, vp_new.alloc) ||
		    !vg_set_system_id(vg_to, vp_new.system_id) ||
		    !vg_set_mda_copies(vg_to, vp_new.vgmetadatacopies))
			goto_bad;
	}

	/* Archive vg_from before changing it */
	if (!archive(vg_from))
		goto_bad;

	/* Move PVs across to new structure */
	for (opt = 0; opt < argc; opt++) {
		dm_unescape_colons_and_at_signs(argv[opt], NULL, NULL);
		if (!move_pv(vg_from, vg_to, argv[opt]))
			goto_bad;
	}

	/* If an LV given on the cmdline, move used_by PVs */
	if (lv_name && !move_pvs_used_by_lv(vg_from, vg_to, lv_name))
		goto_bad;

	/*
	 * First move any required RAID LVs across recursively.
	 * Reject if they get split between VGs.
	 *
	 * This moves the whole LV stack across, so _move_lvs() below
	 * won't hit any of their MetaLVs/DataLVs any more, but it will
	 * still work for all other type-specific moves following it.
	 */
	if (!(_move_raids(vg_from, vg_to)))
		goto_bad;

	/* Move required sub LVs across, checking consistency */
	if (!(_move_lvs(vg_from, vg_to)))
		goto_bad;

	/* Move required mirrors across */
	if (!(_move_mirrors(vg_from, vg_to)))
		goto_bad;

	/* Move required pools across */
	if (!(_move_thins(vg_from, vg_to)))
		goto_bad;

	/* Move required vdo pools across */
	if (!(_move_vdos(vg_from, vg_to)))
		goto_bad;

	/* Move required cache LVs across */
	if (!(_move_cache(vg_from, vg_to)))
		goto_bad;

	/* Move required snapshots across */
	if (!(_move_snapshots(vg_from, vg_to)))
		goto_bad;

	/* Split metadata areas and check if both VGs have at least one area */
	if (!(vg_split_mdas(cmd, vg_from, vg_to)) && vg_from->pv_count) {
		log_error("Cannot split: Nowhere to store metadata for new Volume Group");
		goto bad;
	}

	/* Set proper name for all PVs in new VG */
	if (!vg_rename(cmd, vg_to, vg_name_to))
		goto_bad;

	/* Set old VG name so the metadata operations recognise that the PVs are in an existing VG */
	vg_to->old_name = vg_from->name;

	/* store it on disks */
	log_verbose("Writing out updated volume groups");

	/*
	 * First, write out the new VG as EXPORTED.  We do this first in case
	 * there is a crash - we will still have the new VG information, in an
	 * exported state.  Recovery after this point would be importing and
	 * removing the new VG and redoing the vgsplit.
	 * FIXME: recover automatically or instruct the user?
	 */
	vg_to->status |= EXPORTED_VG;

	if (!handle_pool_metadata_spare(vg_to, 0, &vg_to->pvs, poolmetadataspare))
		goto_bad;

	if (!handle_pool_metadata_spare(vg_from, 0, &vg_from->pvs, poolmetadataspare))
		goto_bad;

	if (!archive(vg_to))
		goto_bad;

	if (!vg_write(vg_to) || !vg_commit(vg_to))
		goto_bad;

	backup(vg_to);

	/*
	 * Next, write out the updated old VG.  If we crash after this point,
	 * recovery is a vgimport on the new VG.
	 * FIXME: recover automatically or instruct the user?
	 */
	if (vg_from->pv_count) {
		if (!vg_write(vg_from) || !vg_commit(vg_from))
			goto_bad;

		backup(vg_from);
	}

	vg_to->status &= ~EXPORTED_VG;

	if (!vg_write(vg_to) || !vg_commit(vg_to))
		goto_bad;

	backup(vg_to);

	log_print_unless_silent("%s volume group \"%s\" successfully split from \"%s\"",
				existing_vg ? "Existing" : "New",
				vg_to->name, vg_from->name);

	r = ECMD_PROCESSED;

bad:
	/*
	 * vg_to references elements moved from vg_from
	 * so vg_to has to be freed first.
	 */
	unlock_and_release_vg(cmd, vg_to, vg_name_to);
	unlock_and_release_vg(cmd, vg_from, vg_name_from);

	return r;
}