/*
 * Copyright (C) 2008-2013 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
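
/*
 * Illustrative sketch (not compiled here) of the VG lifecycle exposed by
 * this file, using lvm_init()/lvm_quit() from lvm2app.h.  The VG name and
 * the device path "/dev/sdb1" are placeholders, and error handling is
 * abbreviated.
 *
 *	lvm_t libh = lvm_init(NULL);
 *	vg_t vg = lvm_vg_create(libh, "vg_example");
 *
 *	if (vg && !lvm_vg_extend(vg, "/dev/sdb1") && !lvm_vg_write(vg))
 *		printf("created %s\n", lvm_vg_get_name(vg));
 *	if (vg)
 *		lvm_vg_close(vg);
 *	lvm_quit(libh);
 */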

#include "lib.h"
#include "toolcontext.h"
#include "metadata.h"
#include "archiver.h"
#include "locking.h"
#include "lvmcache.h"
#include "lvmetad.h"
#include "lvm_misc.h"
#include "lvm2app.h"
#include "display.h"

int lvm_vg_add_tag(vg_t vg, const char *tag)
{
	int rc = -1;
	struct saved_env e = store_user_env(vg->cmd);

	if (!vg_read_error(vg) && vg_check_write_mode(vg) &&
	    vg_change_tag(vg, tag, 1))
		rc = 0;
	restore_user_env(&e);
	return rc;
}

int lvm_vg_remove_tag(vg_t vg, const char *tag)
{
	int rc = -1;
	struct saved_env e = store_user_env(vg->cmd);

	if (!vg_read_error(vg) && vg_check_write_mode(vg) &&
	    vg_change_tag(vg, tag, 0))
		rc = 0;
	restore_user_env(&e);
	return rc;
}

vg_t lvm_vg_create(lvm_t libh, const char *vg_name)
{
	struct volume_group *vg = NULL;
	struct saved_env e = store_user_env((struct cmd_context *)libh);

	vg = vg_lock_and_create((struct cmd_context *)libh, vg_name);
	/* FIXME: error handling is still TBD */
	if (vg_read_error(vg)) {
		release_vg(vg);
		vg = NULL;
	} else {
		vg->open_mode = 'w';
	}

	restore_user_env(&e);
	return (vg_t) vg;
}

static int _lvm_vg_extend(vg_t vg, const char *device)
{
	struct pvcreate_params pp;

	if (vg_read_error(vg))
		return -1;

	if (!vg_check_write_mode(vg))
		return -1;

	if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
		log_error("Can't get lock for orphan PVs");
		return -1;
	}

	pvcreate_params_set_defaults(&pp);

	if (!vg_extend(vg, 1, &device, &pp)) {
		unlock_vg(vg->cmd, NULL, VG_ORPHANS);
		return -1;
	}

	/*
	 * FIXME: Either commit to disk, or keep holding VG_ORPHANS and
	 * release in lvm_vg_close().
	 */
	unlock_vg(vg->cmd, NULL, VG_ORPHANS);
	return 0;
}

int lvm_vg_extend(vg_t vg, const char *device)
{
	int rc = 0;
	struct saved_env e = store_user_env(vg->cmd);
	rc = _lvm_vg_extend(vg, device);
	restore_user_env(&e);
	return rc;
}

int lvm_vg_reduce(vg_t vg, const char *device)
{
	int rc = -1;
	struct saved_env e = store_user_env(vg->cmd);

	if (!vg_read_error(vg) && vg_check_write_mode(vg) && vg_reduce(vg, device))
		rc = 0;
	restore_user_env(&e);
	return rc;
}
2009-08-13 16:16:45 +04:00
int lvm_vg_set_extent_size ( vg_t vg , uint32_t new_size )
2009-07-14 07:02:14 +04:00
{
2013-12-18 02:51:11 +04:00
int rc = - 1 ;
struct saved_env e = store_user_env ( vg - > cmd ) ;
2009-07-14 07:02:14 +04:00
2013-12-18 02:51:11 +04:00
if ( ! vg_read_error ( vg ) & & vg_check_write_mode ( vg ) & &
vg_set_extent_size ( vg , new_size / SECTOR_SIZE ) )
rc = 0 ;
restore_user_env ( & e ) ;
return rc ;
2009-07-14 07:02:14 +04:00
}

static int _lvm_vg_write(vg_t vg)
{
	struct pv_list *pvl;

	if (vg_read_error(vg))
		return -1;
	if (!vg_check_write_mode(vg))
		return -1;

	if (dm_list_empty(&vg->pvs)) {
		if (!vg_remove(vg))
			return -1;
		return 0;
	}

	if (!dm_list_empty(&vg->removed_pvs)) {
		if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
			log_error("Can't get lock for orphan PVs");
			return 0;
		}
	}

	if (!archive(vg))
		return -1;

	/* Store VG on disk(s) */
	if (!vg_write(vg) || !vg_commit(vg))
		return -1;

	if (!dm_list_empty(&vg->removed_pvs)) {
		dm_list_iterate_items(pvl, &vg->removed_pvs) {
			pv_write_orphan(vg->cmd, pvl->pv);
			pv_set_fid(pvl->pv, NULL);
			/* FIXME: do pvremove / label_remove()? */
		}
		dm_list_init(&vg->removed_pvs);
		unlock_vg(vg->cmd, NULL, VG_ORPHANS);
	}

	return 0;
}

int lvm_vg_write(vg_t vg)
{
	int rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = _lvm_vg_write(vg);
	restore_user_env(&e);
	return rc;
}

int lvm_vg_close(vg_t vg)
{
	struct saved_env e = store_user_env(vg->cmd);

	if (vg_read_error(vg) == FAILED_LOCKING)
		release_vg(vg);
	else
		unlock_and_release_vg(vg->cmd, vg, vg->name);
	restore_user_env(&e);
	return 0;
}

int lvm_vg_remove(vg_t vg)
{
	int rc = -1;
	struct saved_env e = store_user_env(vg->cmd);

	if (!vg_read_error(vg) && vg_check_write_mode(vg) && vg_remove_check(vg)) {
		vg_remove_pvs(vg);
		rc = 0;
	}

	restore_user_env(&e);
	return rc;
}

static vg_t _lvm_vg_open(lvm_t libh, const char *vgname, const char *mode,
			 uint32_t flags)
{
	uint32_t internal_flags = 0;
	struct volume_group *vg;

	if (!strncmp(mode, "w", 1))
		internal_flags |= READ_FOR_UPDATE;
	else if (strncmp(mode, "r", 1)) {
		log_errno(EINVAL, "Invalid VG open mode");
		return NULL;
	}

	vg = vg_read((struct cmd_context *)libh, vgname, NULL, internal_flags, 0);
	if (vg_read_error(vg)) {
		/* FIXME: use log_errno either here or inside vg_read */
		release_vg(vg);
		return NULL;
	}

	/* FIXME: combine this with locking? */
	vg->open_mode = mode[0];

	return (vg_t) vg;
}
2009-07-24 03:39:02 +04:00
2013-12-18 02:51:11 +04:00
vg_t lvm_vg_open ( lvm_t libh , const char * vgname , const char * mode ,
uint32_t flags )
{
vg_t rc ;
struct saved_env e = store_user_env ( ( struct cmd_context * ) libh ) ;
rc = _lvm_vg_open ( libh , vgname , mode , flags ) ;
restore_user_env ( & e ) ;
return rc ;
}

static struct dm_list *_lvm_vg_list_pvs(vg_t vg)
{
	struct dm_list *list;
	pv_list_t *pvs;
	struct pv_list *pvl;

	if (dm_list_empty(&vg->pvs))
		return NULL;

	if (!(list = dm_pool_zalloc(vg->vgmem, sizeof(*list)))) {
		log_errno(ENOMEM, "Memory allocation fail for dm_list.");
		return NULL;
	}
	dm_list_init(list);

	dm_list_iterate_items(pvl, &vg->pvs) {
		if (!(pvs = dm_pool_zalloc(vg->vgmem, sizeof(*pvs)))) {
			log_errno(ENOMEM,
				  "Memory allocation fail for lvm_pv_list.");
			return NULL;
		}
		pvs->pv = pvl->pv;
		dm_list_add(list, &pvs->list);
	}

	return list;
}

struct dm_list *lvm_vg_list_pvs(vg_t vg)
{
	struct dm_list *rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = _lvm_vg_list_pvs(vg);
	restore_user_env(&e);
	return rc;
}

static struct dm_list *_lvm_vg_list_lvs(vg_t vg)
{
	struct dm_list *list;
	lv_list_t *lvs;
	struct lv_list *lvl;

	if (dm_list_empty(&vg->lvs))
		return NULL;

	if (!(list = dm_pool_zalloc(vg->vgmem, sizeof(*list)))) {
		log_errno(ENOMEM, "Memory allocation fail for dm_list.");
		return NULL;
	}
	dm_list_init(list);

	dm_list_iterate_items(lvl, &vg->lvs) {
		if (!(lvs = dm_pool_zalloc(vg->vgmem, sizeof(*lvs)))) {
			log_errno(ENOMEM,
				  "Memory allocation fail for lvm_lv_list.");
			return NULL;
		}
		lvs->lv = lvl->lv;
		dm_list_add(list, &lvs->list);
	}

	return list;
}

struct dm_list *lvm_vg_list_lvs(vg_t vg)
{
	struct dm_list *rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = _lvm_vg_list_lvs(vg);
	restore_user_env(&e);
	return rc;
}
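
/*
 * Illustrative sketch (not compiled here): walking the lists returned by
 * lvm_vg_list_pvs()/lvm_vg_list_lvs() with dm_list_iterate_items(), given
 * an already opened handle "vg".  The lists are allocated from vg->vgmem,
 * so they must not be used after lvm_vg_close().  lvm_lv_get_name() is
 * assumed from lvm2app.h.
 *
 *	struct dm_list *lvs = lvm_vg_list_lvs(vg);
 *	lv_list_t *lvl;
 *
 *	if (lvs)
 *		dm_list_iterate_items(lvl, lvs)
 *			printf("LV %s\n", lvm_lv_get_name(lvl->lv));
 */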

struct dm_list *lvm_vg_get_tags(const vg_t vg)
{
	struct dm_list *rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = tag_list_copy(vg->vgmem, &vg->tags);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_get_seqno(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_seqno(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_is_clustered(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_is_clustered(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_is_exported(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_is_exported(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_is_partial(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = (vg_missing_pv_count(vg) != 0);
	restore_user_env(&e);
	return rc;
}

/* FIXME: invalid handle? return INTMAX? */
uint64_t lvm_vg_get_size(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = SECTOR_SIZE * vg_size(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_get_free_size(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = SECTOR_SIZE * vg_free(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_get_extent_size(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = SECTOR_SIZE * vg_extent_size(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_get_extent_count(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_extent_count(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_get_free_extent_count(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_free_count(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_get_pv_count(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_pv_count(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_get_max_pv(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_max_pv(vg);
	restore_user_env(&e);
	return rc;
}

uint64_t lvm_vg_get_max_lv(const vg_t vg)
{
	uint64_t rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_max_lv(vg);
	restore_user_env(&e);
	return rc;
}

const char *lvm_vg_get_uuid(const vg_t vg)
{
	const char *rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = vg_uuid_dup(vg);
	restore_user_env(&e);
	return rc;
}

const char *lvm_vg_get_name(const vg_t vg)
{
	const char *rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = dm_pool_strndup(vg->vgmem, (const char *)vg->name, NAME_LEN + 1);
	restore_user_env(&e);
	return rc;
}
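
/*
 * Illustrative sketch (not compiled here): the size getters above return
 * bytes (sector counts scaled by SECTOR_SIZE), and strings such as the
 * name are copied into vg->vgmem, so they remain valid only until the VG
 * handle is closed.
 *
 *	printf("%s: %" PRIu64 " bytes total, %" PRIu64 " bytes free\n",
 *	       lvm_vg_get_name(vg), lvm_vg_get_size(vg),
 *	       lvm_vg_get_free_size(vg));
 */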

struct lvm_property_value lvm_vg_get_property(const vg_t vg, const char *name)
{
	struct lvm_property_value rc;
	struct saved_env e = store_user_env(vg->cmd);
	rc = get_property(NULL, vg, NULL, NULL, NULL, NULL, NULL, name);
	restore_user_env(&e);
	return rc;
}

int lvm_vg_set_property(const vg_t vg, const char *name,
			struct lvm_property_value *value)
{
	/*
	 * At this point it is unknown if all property set paths make the
	 * appropriate copy of the string.  We will allocate a copy on the vg
	 * so that worst case we have two copies which will get freed when
	 * the vg gets released.
	 */
	int rc;
	struct saved_env e = store_user_env(vg->cmd);

	if (value->is_valid && value->is_string && value->value.string) {
		value->value.string = dm_pool_strndup(vg->vgmem, value->value.string,
						      strlen(value->value.string) + 1);
	}

	rc = set_property(NULL, vg, NULL, NULL, NULL, name, value);
	restore_user_env(&e);
	return rc;
}
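
/*
 * Illustrative sketch (not compiled here): reading and updating a property
 * through struct lvm_property_value (lvm2app.h).  The property name is an
 * example, the is_settable flag reports whether a property may be changed,
 * and an updated VG still has to be committed with lvm_vg_write().
 *
 *	struct lvm_property_value v = lvm_vg_get_property(vg, "vg_mda_copies");
 *
 *	if (v.is_valid && v.is_settable && v.is_integer) {
 *		v.value.integer = 2;
 *		lvm_vg_set_property(vg, "vg_mda_copies", &v);
 *		lvm_vg_write(vg);
 *	}
 */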

struct dm_list *lvm_list_vg_names(lvm_t libh)
{
	struct dm_list *rc = NULL;
	struct saved_env e = store_user_env((struct cmd_context *)libh);

	if (lvmetad_vg_list_to_lvmcache((struct cmd_context *)libh)) {
		rc = get_vgnames((struct cmd_context *)libh, 0);
	}

	restore_user_env(&e);
	return rc;
}

struct dm_list *lvm_list_vg_uuids(lvm_t libh)
{
	struct dm_list *rc = NULL;
	struct saved_env e = store_user_env((struct cmd_context *)libh);

	if (lvmetad_vg_list_to_lvmcache((struct cmd_context *)libh)) {
		rc = get_vgids((struct cmd_context *)libh, 0);
	}

	restore_user_env(&e);
	return rc;
}
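
/*
 * Illustrative sketch (not compiled here): the name/uuid lists above are
 * iterated as struct lvm_str_list entries (lvm2app.h), in the same way as
 * the PV/LV lists.
 *
 *	struct dm_list *names = lvm_list_vg_names(libh);
 *	struct lvm_str_list *sl;
 *
 *	if (names)
 *		dm_list_iterate_items(sl, names)
 *			printf("VG %s\n", sl->str);
 */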

/*
 * FIXME: Elaborate on when to use, side-effects, .cache file, etc
 */
int lvm_scan(lvm_t libh)
{
	int rc = 0;
	struct saved_env e = store_user_env((struct cmd_context *)libh);

	lvmcache_force_next_label_scan();
	if (!lvmcache_label_scan((struct cmd_context *)libh))
		rc = -1;
	restore_user_env(&e);
	return rc;
}

int lvm_lv_name_validate(const vg_t vg, const char *name)
{
	int rc = -1;
	name_error_t name_error;
	int historical;

	struct saved_env e = store_user_env(vg->cmd);

	name_error = validate_name_detailed(name);
	if (NAME_VALID == name_error) {
		if (apply_lvname_restrictions(name)) {
			if (!lv_name_is_used_in_vg(vg, name, &historical)) {
				rc = 0;
			} else {
				log_errno(EINVAL, "%sLV name exists in VG",
					  historical ? "historical " : "");
			}
		}
	} else {
		display_name_error(name_error);
	}

	restore_user_env(&e);
	return rc;
}

int lvm_vg_name_validate(lvm_t libh, const char *name)
{
	int rc = -1;
	struct cmd_context *cmd = (struct cmd_context *)libh;
	struct saved_env e = store_user_env(cmd);

	if (validate_new_vg_name(cmd, name))
		rc = 0;
	restore_user_env(&e);
	return rc;
}
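
/*
 * Illustrative sketch (not compiled here): validating names before using
 * them.  Both calls return 0 when the name is acceptable and -1 otherwise;
 * "vg_example" and "lv_example" are placeholders.
 *
 *	if (!lvm_vg_name_validate(libh, "vg_example"))
 *		vg = lvm_vg_create(libh, "vg_example");
 *
 *	if (vg && !lvm_lv_name_validate(vg, "lv_example"))
 *		printf("lv_example is available in %s\n", lvm_vg_get_name(vg));
 */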