/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "tools.h"

#include "lib/cache/lvmcache.h"
#include "lib/metadata/metadata.h"
#include "lib/label/hints.h"

#include <dirent.h>
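
/*
 * Totals and formatting state accumulated by the plain "pvscan" listing
 * (pvscan_display_cmd) while it processes each PV.
 */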
struct pvscan_params {
	int new_pvs_found;
	int pvs_found;
	uint64_t size_total;
	uint64_t size_new;
	unsigned pv_max_name_len;
	unsigned vg_max_name_len;
	unsigned pv_tmp_namelen;
	char *pv_tmp_name;
};
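
/*
 * Per-command state passed to the autoactivation callbacks through
 * processing_handle->custom_handle; counts VGs whose autoactivation failed.
 */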
struct pvscan_aa_params {
	unsigned int activate_errors;
};
/*
 * Used by _pvscan_aa_quick() which is an optimization used
 * when one vg is being activated.
 */
static struct volume_group *saved_vg;
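
/*
 * Runtime state kept under DEFAULT_RUN_DIR (not persistent):
 *   pvs_online/<pvid>   - created once a PV has been scanned and is online
 *   vgs_online/<vgname> - created by the pvscan that wins the right to autoactivate the VG
 *   pvs_lookup/<vgname> - list of the VG's PVIDs, used when a scanned PV has no metadata
 */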
static const char *_pvs_online_dir = DEFAULT_RUN_DIR "/pvs_online";
static const char *_vgs_online_dir = DEFAULT_RUN_DIR "/vgs_online";
static const char *_pvs_lookup_dir = DEFAULT_RUN_DIR "/pvs_lookup";
static int _pvscan_display_pv(struct cmd_context *cmd,
			      struct physical_volume *pv,
			      struct pvscan_params *params)
{
	/* XXXXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXXXX */
	char uuid[40] __attribute__((aligned(8)));
	const unsigned suffix_len = sizeof(uuid) + 10;
	unsigned pv_len;
	const char *pvdevname = pv_dev_name(pv);

	/* short listing? */
	if (arg_is_set(cmd, short_ARG)) {
		log_print_unless_silent("%s", pvdevname);
		return ECMD_PROCESSED;
	}

	if (!params->pv_max_name_len) {
		lvmcache_get_max_name_lengths(cmd, &params->pv_max_name_len, &params->vg_max_name_len);

		params->pv_max_name_len += 2;
		params->vg_max_name_len += 2;
		params->pv_tmp_namelen = params->pv_max_name_len + suffix_len;

		if (!(params->pv_tmp_name = dm_pool_alloc(cmd->mem, params->pv_tmp_namelen)))
			return ECMD_FAILED;
	}

	pv_len = params->pv_max_name_len;
	memset(params->pv_tmp_name, 0, params->pv_tmp_namelen);

	if (arg_is_set(cmd, uuid_ARG)) {
		if (!id_write_format(&pv->id, uuid, sizeof(uuid))) {
			stack;
			return ECMD_FAILED;
		}

		if (dm_snprintf(params->pv_tmp_name, params->pv_tmp_namelen, "%-*s with UUID %s",
				params->pv_max_name_len - 2, pvdevname, uuid) < 0) {
			log_error("Invalid PV name with uuid.");
			return ECMD_FAILED;
		}
		pvdevname = params->pv_tmp_name;
		pv_len += suffix_len;
	}

	if (is_orphan(pv))
		log_print_unless_silent("PV %-*s %-*s %s [%s]",
					pv_len, pvdevname,
					params->vg_max_name_len, " ",
					pv->fmt ? pv->fmt->name : " ",
					display_size(cmd, pv_size(pv)));
	else if (pv_status(pv) & EXPORTED_VG)
		log_print_unless_silent("PV %-*s is in exported VG %s [%s / %s free]",
					pv_len, pvdevname, pv_vg_name(pv),
					display_size(cmd, (uint64_t) pv_pe_count(pv) * pv_pe_size(pv)),
					display_size(cmd, (uint64_t) (pv_pe_count(pv) - pv_pe_alloc_count(pv)) * pv_pe_size(pv)));
	else
		log_print_unless_silent("PV %-*s VG %-*s %s [%s / %s free]",
					pv_len, pvdevname,
					params->vg_max_name_len, pv_vg_name(pv),
					pv->fmt ? pv->fmt->name : " ",
					display_size(cmd, (uint64_t) pv_pe_count(pv) * pv_pe_size(pv)),
					display_size(cmd, (uint64_t) (pv_pe_count(pv) - pv_pe_alloc_count(pv)) * pv_pe_size(pv)));

	return ECMD_PROCESSED;
}
static int _pvscan_display_single(struct cmd_context *cmd, struct volume_group *vg,
				  struct physical_volume *pv, struct processing_handle *handle)
{
	struct pvscan_params *params = (struct pvscan_params *) handle->custom_handle;

	if ((arg_is_set(cmd, exported_ARG) && !(pv_status(pv) & EXPORTED_VG)) ||
	    (arg_is_set(cmd, novolumegroup_ARG) && (!is_orphan(pv)))) {
		return ECMD_PROCESSED;
	}

	params->pvs_found++;

	if (is_orphan(pv)) {
		params->new_pvs_found++;
		params->size_new += pv_size(pv);
		params->size_total += pv_size(pv);
	} else {
		params->size_total += (uint64_t) pv_pe_count(pv) * pv_pe_size(pv);
	}

	_pvscan_display_pv(cmd, pv, params);

	return ECMD_PROCESSED;
}
int pvscan_display_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	struct pvscan_params params = { 0 };
	struct processing_handle *handle = NULL;
	int ret;

	if (arg_is_set(cmd, novolumegroup_ARG) && arg_is_set(cmd, exported_ARG)) {
		log_error("Options -e and -n are incompatible");
		return EINVALID_CMD_LINE;
	}

	if (arg_is_set(cmd, exported_ARG) || arg_is_set(cmd, novolumegroup_ARG))
		log_warn("WARNING: only considering physical volumes %s",
			 arg_is_set(cmd, exported_ARG) ?
			 "of exported volume group(s)" : "in no volume group");

	if (!(handle = init_processing_handle(cmd, NULL))) {
		log_error("Failed to initialize processing handle.");
		ret = ECMD_FAILED;
		goto out;
	}

	handle->custom_handle = &params;

	ret = process_each_pv(cmd, argc, argv, NULL, 0, 0, handle, _pvscan_display_single);

	if (!params.pvs_found)
		log_print_unless_silent("No matching physical volumes found");
	else
		log_print_unless_silent("Total: %d [%s] / in use: %d [%s] / in no VG: %d [%s]",
					params.pvs_found,
					display_size(cmd, params.size_total),
					params.pvs_found - params.new_pvs_found,
					display_size(cmd, (params.size_total - params.size_new)),
					params.new_pvs_found, display_size(cmd, params.size_new));

out:
	destroy_processing_handle(cmd, handle);

	return ret;
}
static char *_vgname_in_pvid_file_buf(char *buf)
{
	char *p, *n;

	/*
	 * file contains:
	 * <major>:<minor>\n
	 * vg:<vgname>\n\0
	 */

	if (!(p = strchr(buf, '\n')))
		return NULL;

	p++; /* skip \n */

	if (*p && !strncmp(p, "vg:", 3)) {
		if ((n = strchr(p, '\n')))
			*n = '\0';
		return p + 3;
	}
	return NULL;
}

#define MAX_PVID_FILE_SIZE 512
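
/*
 * Illustrative contents of a pvs_online/<pvid> file in the format described
 * above; the device numbers and VG name here are examples only:
 *
 *   253:3
 *   vg:vg00
 */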
static int _online_pvid_file_read(char *path, int *major, int *minor, char *vgname)
{
	char buf[MAX_PVID_FILE_SIZE] = { 0 };
	char *name;
	int fd, rv;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		log_warn("Failed to open %s", path);
		return 0;
	}

	rv = read(fd, buf, sizeof(buf) - 1);
	if (close(fd))
		log_sys_debug("close", path);
	if (!rv || rv < 0) {
		log_warn("No info in %s", path);
		return 0;
	}

	if (sscanf(buf, "%d:%d", major, minor) != 2) {
		log_warn("No device numbers in %s", path);
		return 0;
	}

	/* vgname points to an offset in buf */
	if ((name = _vgname_in_pvid_file_buf(buf)))
		strncpy(vgname, name, NAME_LEN);
	else
		log_debug("No vgname in %s", path);

	return 1;
}
static void _lookup_file_remove(char *vgname)
{
	char path[PATH_MAX];

	if (dm_snprintf(path, sizeof(path), "%s/%s", _pvs_lookup_dir, vgname) < 0) {
		log_error("Path %s/%s is too long.", _pvs_lookup_dir, vgname);
		return;
	}

	log_debug("Unlink pvs_lookup: %s", path);

	if (unlink(path))
		log_sys_debug("unlink", path);
}
/*
 * When a PV goes offline, remove the vg online file for that VG
 * (even if other PVs for the VG are still online).  This means
 * that the vg will be activated again when it becomes complete.
 */
static void _online_vg_file_remove(const char *vgname)
{
	char path[PATH_MAX];

	if (dm_snprintf(path, sizeof(path), "%s/%s", _vgs_online_dir, vgname) < 0) {
		log_error("Path %s/%s is too long.", _vgs_online_dir, vgname);
		return;
	}

	log_debug("Unlink vg online: %s", path);

	if (unlink(path))
		log_sys_debug("unlink", path);
}
/*
 * When a device goes offline we only know its major:minor, not its PVID.
 * Since the dev isn't around, we can't read it to get its PVID, so we have to
 * read the PVID files to find the one containing this major:minor and remove
 * that one.  This means that the PVID files need to contain the devno's they
 * were created from.
 */
static void _online_pvid_file_remove_devno(int major, int minor)
{
	char path[PATH_MAX];
	char file_vgname[NAME_LEN];
	DIR *dir;
	struct dirent *de;
	int file_major = 0, file_minor = 0;

	log_debug("Remove pv online devno %d:%d", major, minor);

	if (!(dir = opendir(_pvs_online_dir)))
		return;

	while ((de = readdir(dir))) {
		if (de->d_name[0] == '.')
			continue;

		memset(path, 0, sizeof(path));
		snprintf(path, sizeof(path), "%s/%s", _pvs_online_dir, de->d_name);

		file_major = 0;
		file_minor = 0;
		memset(file_vgname, 0, sizeof(file_vgname));

		_online_pvid_file_read(path, &file_major, &file_minor, file_vgname);

		if ((file_major == major) && (file_minor == minor)) {
			log_debug("Unlink pv online %s", path);
			if (unlink(path))
				log_sys_debug("unlink", path);

			if (file_vgname[0]) {
				_online_vg_file_remove(file_vgname);
				_lookup_file_remove(file_vgname);
			}
		}
	}
	if (closedir(dir))
		log_sys_debug("closedir", _pvs_online_dir);
}
static void _online_files_remove(const char *dirpath)
{
	char path[PATH_MAX];
	DIR *dir;
	struct dirent *de;

	if (!(dir = opendir(dirpath)))
		return;

	while ((de = readdir(dir))) {
		if (de->d_name[0] == '.')
			continue;

		memset(path, 0, sizeof(path));
		snprintf(path, sizeof(path), "%s/%s", dirpath, de->d_name);
		if (unlink(path))
			log_sys_debug("unlink", path);
	}
	if (closedir(dir))
		log_sys_debug("closedir", dirpath);
}
static int _online_pvid_file_create(struct device *dev, const char *vgname)
{
	char path[PATH_MAX];
	char buf[MAX_PVID_FILE_SIZE] = { 0 };
	char file_vgname[NAME_LEN];
	int file_major = 0, file_minor = 0;
	int major, minor;
	int fd;
	int rv;
	int len;
	int len1 = 0;
	int len2 = 0;

	major = (int) MAJOR(dev->dev);
	minor = (int) MINOR(dev->dev);

	if (dm_snprintf(path, sizeof(path), "%s/%s", _pvs_online_dir, dev->pvid) < 0) {
		log_error("Path %s/%s is too long.", _pvs_online_dir, dev->pvid);
		return 0;
	}

	if ((len1 = dm_snprintf(buf, sizeof(buf), "%d:%d\n", major, minor)) < 0) {
		log_error("Cannot create online file path for %s %d:%d.", dev_name(dev), major, minor);
		return 0;
	}

	if (vgname) {
		if ((len2 = dm_snprintf(buf + len1, sizeof(buf) - len1, "vg:%s\n", vgname)) < 0) {
			log_warn("Incomplete online file for %s %d:%d vg %s.", dev_name(dev), major, minor, vgname);
			/* can still continue without vgname */
			len2 = 0;
		}
	}

	len = len1 + len2;

	log_debug("Create pv online: %s %d:%d %s", path, major, minor, dev_name(dev));

	fd = open(path, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		if (errno == EEXIST)
			goto check_duplicate;
		log_error("Failed to create online file for %s path %s error %d", dev_name(dev), path, errno);
		return 0;
	}

	while (len > 0) {
		rv = write(fd, buf, len);
		if (rv < 0) {
			/* file exists so it still works in part */
			log_warn("Cannot write online file for %s to %s error %d",
				 dev_name(dev), path, errno);
			if (close(fd))
				log_sys_debug("close", path);
			return 1;
		}
		len -= rv;
	}

	/* We don't care about syncing, these files are not even persistent. */

	if (close(fd))
		log_sys_debug("close", path);

	return 1;

check_duplicate:

	/*
	 * If a PVID online file already exists for this PVID, check if the
	 * file contains a different device number, and if so we may have a
	 * duplicate PV.
	 *
	 * FIXME: disable autoactivation of the VG somehow?
	 * The VG may or may not already be activated when a duplicate appears.
	 * Perhaps write a new field in the pv online or vg online file?
	 */

	memset(file_vgname, 0, sizeof(file_vgname));

	_online_pvid_file_read(path, &file_major, &file_minor, file_vgname);

	if ((file_major == major) && (file_minor == minor)) {
		log_debug("Existing online file for %d:%d", major, minor);
		return 1;
	}

	/* Don't know how vgname might not match, but it's not good so fail. */

	if ((file_major != major) || (file_minor != minor))
		log_error("pvscan[%d] PV %s is duplicate for PVID %s on %d:%d and %d:%d.",
			  getpid(), dev_name(dev), dev->pvid, major, minor, file_major, file_minor);

	if (file_vgname[0] && vgname && strcmp(file_vgname, vgname))
		log_error("pvscan[%d] PV %s has unexpected VG %s vs %s.",
			  getpid(), dev_name(dev), vgname, file_vgname);

	return 0;
}
static int _online_pvid_file_exists(const char *pvid)
{
	char path[PATH_MAX] = { 0 };
	struct stat buf;
	int rv;

	if (dm_snprintf(path, sizeof(path), "%s/%s", _pvs_online_dir, pvid) < 0) {
		log_debug(INTERNAL_ERROR "Path %s/%s is too long.", _pvs_online_dir, pvid);
		return 0;
	}

	log_debug("Check pv online: %s", path);

	rv = stat(path, &buf);
	if (!rv) {
		log_debug("Check pv online %s: yes", pvid);
		return 1;
	}
	log_debug("Check pv online %s: no", pvid);
	return 0;
}
static int _write_lookup_file(struct cmd_context *cmd, struct volume_group *vg)
{
	char path[PATH_MAX];
	char line[ID_LEN + 2];
	struct pv_list *pvl;
	int fd;

	if (dm_snprintf(path, sizeof(path), "%s/%s", _pvs_lookup_dir, vg->name) < 0) {
		log_error("Path %s/%s is too long.", _pvs_lookup_dir, vg->name);
		return 0;
	}

	fd = open(path, O_CREAT | O_EXCL | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		/* not a problem, can happen when multiple pvscans run at once */
		log_debug("Did not create %s: %d", path, errno);
		return 0;
	}

	log_debug("write_lookup_file %s", path);

	dm_list_iterate_items(pvl, &vg->pvs) {
		memcpy(&line, &pvl->pv->id.uuid, ID_LEN);
		line[ID_LEN] = '\n';
		line[ID_LEN + 1] = '\0';

		if (write(fd, &line, ID_LEN + 1) < 0)
			log_sys_debug("write", path);
	}

	if (close(fd))
		log_sys_debug("close", path);

	return 1;
}
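
/*
 * Illustrative pvs_lookup/<vgname> contents written above: one PVID
 * (ID_LEN characters, no hyphens) per line, for every PV in the VG, e.g.
 *
 *   2O9Rg2kfSIXXAIuk2eitp8VcvLZmCSqz
 *   a0k2HLuCTWDIeJJS0CNkI1orCrlNAOWd
 *
 * (The PVIDs shown are made up for illustration.)
 */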
static int _lookup_file_contains_pvid(FILE *fp, char *pvid)
{
	char line[64];

	while (fgets(line, sizeof(line), fp)) {
		if (!memcmp(pvid, line, ID_LEN))
			return 1;
	}
	return 0;
}
static void _lookup_file_count_pvid_files(FILE *fp, const char *vgname, int *pvs_online, int *pvs_offline)
{
	char line[64];
	char pvid[ID_LEN + 1] = { 0 };

	log_debug("checking all pvid files using lookup file for %s", vgname);

	rewind(fp);

	while (fgets(line, sizeof(line), fp)) {
		memcpy(pvid, line, ID_LEN);

		if (strlen(pvid) != ID_LEN) {
			log_debug("ignore lookup file line %s", line);
			continue;
		}

		if (_online_pvid_file_exists((const char *)pvid))
			(*pvs_online)++;
		else
			(*pvs_offline)++;
	}
}
/*
 * There is no synchronization between the one doing write_lookup_file and the
 * other doing check_lookup_file.  The pvscan doing write thinks the VG is
 * incomplete, and the pvscan doing check may also conclude the VG is
 * incomplete if it happens prior to the write.  If neither pvscan thinks the
 * VG is complete then neither will activate it.  To solve this race, the
 * pvscan doing write will recheck pvid online files after the write in which
 * case it would find the pvid online file from the pvscan doing check.
 */

/*
 * The VG is not yet complete, more PVs need to arrive, and
 * some of those PVs may not have metadata on them.  Without
 * metadata, pvscan for those PVs will be unable to determine
 * if the VG is complete.  We don't know if other PVs will have
 * metadata or not.
 *
 * Save a temp file with a list of pvids in the vg, to be used
 * by a later pvscan on a PV without metadata.  The later
 * pvscan will check for vg completeness using the temp file
 * since it has no vg metadata to use.
 *
 * Only the first pvscan for the VG creates the temp file.  If
 * there are multiple pvscans for the same VG running at once,
 * they all race to create the lookup file, and only the first
 * to create the file will write it.
 *
 * After writing the temp file, we count pvid online files for
 * the VG again - the VG could now be complete since multiple
 * pvscans will run concurrently.  We repeat this to cover a
 * race where another pvscan was running _check_lookup_file
 * during our _write_lookup_file.  _write_lookup_file may not
 * have finished before _check_lookup_file, which would cause
 * the other pvscan to not find the pvid it's looking for, and
 * conclude the VG is incomplete, while we also think the VG is
 * incomplete.  If we recheck online files after write_lookup,
 * we will see the pvid online file from the other pvscan and
 * see the VG is complete.
 */
static int _count_pvid_files_from_lookup_file(struct cmd_context *cmd, struct device *dev,
					      int *pvs_online, int *pvs_offline,
					      const char **vgname_out)
{
	char path[PATH_MAX] = { 0 };
	FILE *fp;
	DIR *dir;
	struct dirent *de;
	const char *vgname = NULL;

	*vgname_out = NULL;
	*pvs_online = 0;
	*pvs_offline = 0;

	if (!(dir = opendir(_pvs_lookup_dir))) {
		log_sys_debug("opendir", _pvs_lookup_dir);
		return 0;
	}

	/*
	 * Read each file in pvs_lookup to find dev->pvid, and if it's
	 * found save the vgname of the file it's found in.
	 */
	while (!vgname && (de = readdir(dir))) {
		if (de->d_name[0] == '.')
			continue;

		if (dm_snprintf(path, sizeof(path), "%s/%s", _pvs_lookup_dir, de->d_name) < 0) {
			log_warn("WARNING: Path %s/%s is too long.", _pvs_lookup_dir, de->d_name);
			continue;
		}

		if (!(fp = fopen(path, "r"))) {
			log_warn("WARNING: Failed to open %s.", path);
			continue;
		}

		if (_lookup_file_contains_pvid(fp, dev->pvid)) {
			if ((vgname = dm_pool_strdup(cmd->mem, de->d_name)))
				/*
				 * stat pvid online file of each pvid listed in this file
				 * the list of pvids from the file is the alternative to
				 * using vg->pvs
				 */
				_lookup_file_count_pvid_files(fp, vgname, pvs_online, pvs_offline);
			else
				log_warn("WARNING: Failed to strdup vgname.");
		}

		if (fclose(fp))
			log_sys_debug("fclose", path);
	}
	if (closedir(dir))
		log_sys_debug("closedir", _pvs_lookup_dir);

	*vgname_out = vgname;
	return (vgname) ? 1 : 0;
}
static void _online_dir_setup(void)
{
	struct stat st;
	int rv;

	if (!stat(DEFAULT_RUN_DIR, &st))
		goto do_pvs;

	log_debug("Creating run_dir.");
	dm_prepare_selinux_context(DEFAULT_RUN_DIR, S_IFDIR);
	rv = mkdir(DEFAULT_RUN_DIR, 0755);
	dm_prepare_selinux_context(NULL, 0);

	if ((rv < 0) && stat(DEFAULT_RUN_DIR, &st))
		log_error("Failed to create %s %d", DEFAULT_RUN_DIR, errno);

do_pvs:
	if (!stat(_pvs_online_dir, &st))
		goto do_vgs;

	log_debug("Creating pvs_online_dir.");
	dm_prepare_selinux_context(_pvs_online_dir, S_IFDIR);
	rv = mkdir(_pvs_online_dir, 0755);
	dm_prepare_selinux_context(NULL, 0);

	if ((rv < 0) && stat(_pvs_online_dir, &st))
		log_error("Failed to create %s %d", _pvs_online_dir, errno);

do_vgs:
	if (!stat(_vgs_online_dir, &st))
		goto do_lookup;

	log_debug("Creating vgs_online_dir.");
	dm_prepare_selinux_context(_vgs_online_dir, S_IFDIR);
	rv = mkdir(_vgs_online_dir, 0755);
	dm_prepare_selinux_context(NULL, 0);

	if ((rv < 0) && stat(_vgs_online_dir, &st))
		log_error("Failed to create %s %d", _vgs_online_dir, errno);

do_lookup:
	if (!stat(_pvs_lookup_dir, &st))
		return;

	log_debug("Creating pvs_lookup_dir.");
	dm_prepare_selinux_context(_pvs_lookup_dir, S_IFDIR);
	rv = mkdir(_pvs_lookup_dir, 0755);
	dm_prepare_selinux_context(NULL, 0);

	if ((rv < 0) && stat(_pvs_lookup_dir, &st))
		log_error("Failed to create %s %d", _pvs_lookup_dir, errno);
}
static void _count_pvid_files(struct volume_group *vg, int *pvs_online, int *pvs_offline)
{
	struct pv_list *pvl;

	*pvs_online = 0;
	*pvs_offline = 0;

	dm_list_iterate_items(pvl, &vg->pvs) {
		if (_online_pvid_file_exists((const char *)&pvl->pv->id.uuid))
			(*pvs_online)++;
		else
			(*pvs_offline)++;
	}
}
static int _pvscan_aa_single(struct cmd_context *cmd, const char *vg_name,
			     struct volume_group *vg, struct processing_handle *handle)
{
	struct pvscan_aa_params *pp = (struct pvscan_aa_params *) handle->custom_handle;

	if (vg_is_clustered(vg))
		return ECMD_PROCESSED;

	if (vg_is_exported(vg))
		return ECMD_PROCESSED;

	if (vg_is_shared(vg))
		return ECMD_PROCESSED;

	log_debug("pvscan autoactivating VG %s.", vg_name);

	if (!vgchange_activate(cmd, vg, CHANGE_AAY)) {
		log_error("%s: autoactivation failed.", vg->name);
		pp->activate_errors++;
	}

	return ECMD_PROCESSED;
}
static int _online_vg_file_create(struct cmd_context *cmd, const char *vgname)
{
	char path[PATH_MAX];
	int fd;

	if (dm_snprintf(path, sizeof(path), "%s/%s", _vgs_online_dir, vgname) < 0) {
		log_error("Path %s/%s is too long.", _vgs_online_dir, vgname);
		return 0;
	}

	log_debug("Create vg online: %s", path);

	fd = open(path, O_CREAT | O_EXCL | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		log_debug("Failed to create %s: %d", path, errno);
		return 0;
	}

	/* We don't care about syncing, these files are not even persistent. */

	if (close(fd))
		log_sys_debug("close", path);

	return 1;
}
/*
 * This is a very unconventional way of doing things because
 * we want to figure out which devices to read the VG from
 * without first scanning devices.  It's usually the reverse;
 * we have to scan all devs, which tells us which devs we
 * need to read to get the VG.
 *
 * We can do it this way only by cheating and using the pvid
 * online files for devs that have been scanned by prior pvscan
 * instances.
 *
 * This is similar to the hints file, but the hints file is
 * always a full picture of PV state, and is only ever created
 * by scanning all devs, whereas the online files are only
 * created incrementally by scanning one device at a time.
 * The online files are only used for determining complete VGs
 * for the purpose of autoactivation, and no attempt is made
 * to keep them in sync with lvm state once autoactivation
 * is complete, but much effort is made to always ensure hints
 * will accurately reflect PV state.
 *
 * The saved VG metadata tells us which PVIDs are needed to
 * complete the VG.  The pv online files tell us which of those
 * PVIDs are online, and the content of those pv online files
 * tells us which major:minor number holds that PVID.  The
 * dev_cache tells us which struct device has the major:minor.
 * We end up with a list of struct devices that we need to
 * scan/read in order to process/activate the VG.
 */
static int _get_devs_from_saved_vg(struct cmd_context *cmd, const char *vgname,
				   struct dm_list *devs)
{
	char path[PATH_MAX];
	char file_vgname[NAME_LEN];
	char uuidstr[64] __attribute__((aligned(8)));
	struct pv_list *pvl;
	struct device_list *devl;
	struct device *dev;
	struct volume_group *vg;
	const char *pvid;
	const char *name1, *name2;
	dev_t devno;
	int file_major = 0, file_minor = 0;

	/*
	 * We previously saved the metadata (as a struct vg) from the device
	 * arg that was scanned.  Now use that metadata to put together a list
	 * of devices for this VG.  (This could alternatively be worked out by
	 * reading all the pvid online files, see which have a matching vg
	 * name, and getting the device numbers from those files.)
	 */

	if (!(vg = saved_vg))
		goto_bad;

	dm_list_iterate_items(pvl, &vg->pvs) {
		pvid = (const char *)&pvl->pv->id.uuid;

		memset(path, 0, sizeof(path));
		snprintf(path, sizeof(path), "%s/%s", _pvs_online_dir, pvid);

		file_major = 0;
		file_minor = 0;
		memset(file_vgname, 0, sizeof(file_vgname));

		_online_pvid_file_read(path, &file_major, &file_minor, file_vgname);

		if (file_vgname[0] && strcmp(vgname, file_vgname)) {
			log_error("Wrong VG found for %d:%d PVID %s: %s vs %s",
				  file_major, file_minor, pvid, vgname, file_vgname);
			goto bad;
		}

		devno = MKDEV(file_major, file_minor);

		if (!(dev = dev_cache_get_by_devt(cmd, devno, NULL, NULL))) {
			log_error("No device found for %d:%d PVID %s", file_major, file_minor, pvid);
			goto bad;
		}

		name1 = dev_name(dev);
		name2 = pvl->pv->device_hint;

		if (strcmp(name1, name2)) {
			if (!id_write_format((const struct id *)pvid, uuidstr, sizeof(uuidstr)))
				uuidstr[0] = '\0';
			log_print("PVID %s read from %s last written to %s.", uuidstr, name1, name2);
			goto bad;
		}

		if (!(devl = zalloc(sizeof(*devl))))
			goto_bad;

		devl->dev = dev;
		dm_list_add(devs, &devl->list);
		log_debug("pvscan using %s for PVID %s in VG %s", dev_name(dev), pvid, vgname);
	}

	return 1;

bad:
	if (saved_vg) {
		release_vg(saved_vg);
		saved_vg = NULL;
	}
	return 0;
}
/*
 * When there's a single VG to activate (the common case),
 * optimize things by cutting out the process_each_vg().
 *
 * The main point of this optimization is to avoid extra
 * device scanning in the common case where we're
 * activating a completed VG after scanning a single PV.
 * The scanning overhead of hundreds of concurrent
 * activations from hundreds of PVs appearing together
 * can be overwhelming, and scanning needs to be reduced
 * as much as possible.
 *
 * The common process_each_vg will generally do:
 * label scan all devs
 * vg_read
 * lock vg
 * label rescan of only vg devs (often skipped)
 * read metadata
 * set pv devices (find devs for each PVID)
 * do command (vgchange_activate)
 * unlock vg
 *
 * In this optimized version (with process_each cut out) we do:
 * lock vg
 * label scan of only vg devs
 * vg_read
 * read metadata
 * set pv devices (find devs for each PVID)
 * do command (vgchange_activate)
 * unlock vg
 *
 * The optimized version avoids scanning all devs, which
 * is important when there are many devs.
 */
static int _pvscan_aa_quick(struct cmd_context *cmd, struct pvscan_aa_params *pp, const char *vgname,
			    int *no_quick)
{
	struct dm_list devs; /* device_list */
	struct volume_group *vg;
	struct pv_list *pvl;
	const char *vgid;
	uint32_t lockd_state = 0;
	uint32_t error_flags = 0;
	int ret = ECMD_PROCESSED;

	dm_list_init(&devs);

	/*
	 * Get list of devices for this VG so we can label scan them.
	 * The saved VG struct gives the list of PVIDs in the VG.
	 * The pvs_online/PVID files give us the devnums for PVIDs.
	 * The dev_cache gives us struct devices from the devnums.
	 */
	if (!_get_devs_from_saved_vg(cmd, vgname, &devs)) {
		log_print("pvscan[%d] VG %s not using quick activation.", getpid(), vgname);
		*no_quick = 1;
		return ECMD_FAILED;
	}

	/*
	 * Lock the VG before scanning so we don't need to
	 * rescan in _vg_read.  (The lock_vol and the
	 * label rescan are then disabled in vg_read.)
	 */
	if (!lock_vol(cmd, vgname, LCK_VG_WRITE, NULL)) {
		log_error("pvscan activation for VG %s failed to lock VG.", vgname);
		return ECMD_FAILED;
	}

	/*
	 * Drop lvmcache info about the PV/VG that was saved
	 * when originally identifying the PV.
	 */
	lvmcache_destroy(cmd, 1, 1);

	label_scan_devs(cmd, NULL, &devs);

	if (!(vgid = lvmcache_vgid_from_vgname(cmd, vgname))) {
		log_error("pvscan activation for VG %s failed to find vgid.", vgname);
		return ECMD_FAILED;
	}

	/*
	 * can_use_one_scan and READ_WITHOUT_LOCK are both important key
	 * changes made to vg_read that are possible because the VG is locked
	 * above (lock_vol).
	 */
	cmd->can_use_one_scan = 1;

	vg = vg_read(cmd, vgname, vgid, READ_WITHOUT_LOCK | READ_FOR_ACTIVATE, lockd_state, &error_flags, NULL);

	if (!vg) {
		/*
		 * The common cases would already have been caught during the
		 * original device arg scan.  There will be very few and unusual
		 * cases that would be caught here.
		 */
		log_error("pvscan activation for VG %s cannot read (%x).", vgname, error_flags);
		return ECMD_FAILED;
	}

	/*
	 * These cases would already have been caught during the original
	 * device arg scan.
	 */
	if (vg_is_clustered(vg))
		goto_out;
	if (vg_is_exported(vg))
		goto_out;
	if (vg_is_shared(vg))
		goto_out;

	/*
	 * Verify that the devices we scanned above for the VG are in fact the
	 * devices used by the VG we read.
	 */
	dm_list_iterate_items(pvl, &vg->pvs) {
		if (dev_in_device_list(pvl->pv->dev, &devs))
			continue;
		log_error("pvscan activation for VG %s found different devices.", vgname);
		ret = ECMD_FAILED;
		goto out;
	}

	log_debug("pvscan autoactivating VG %s.", vgname);

	if (!vgchange_activate(cmd, vg, CHANGE_AAY)) {
		log_error("%s: autoactivation failed.", vg->name);
		pp->activate_errors++;
	}

out:
	unlock_vg(cmd, vg, vgname);
	release_vg(vg);
	return ret;
}
static int _pvscan_aa(struct cmd_context *cmd, struct pvscan_aa_params *pp,
		      int do_all, struct dm_list *vgnames)
{
	struct processing_handle *handle = NULL;
	struct dm_str_list *sl, *sl2;
	int no_quick = 0;
	int ret = ECMD_FAILED;

	if (!(handle = init_processing_handle(cmd, NULL))) {
		log_error("Failed to initialize processing handle.");
		goto out;
	}

	handle->custom_handle = pp;

	/*
	 * For each complete vg that can be autoactivated, see if this
	 * particular pvscan command should activate the vg.  There can be
	 * multiple concurrent pvscans for the same completed vg (when all the
	 * PVs for the VG appear at once), and we want only one of the pvscans
	 * to run the activation.  The first to create the file will do it.
	 */
	dm_list_iterate_items_safe(sl, sl2, vgnames) {
		if (!_online_vg_file_create(cmd, sl->str)) {
			log_print("pvscan[%d] VG %s skip autoactivation.", getpid(), sl->str);
			str_list_del(vgnames, sl->str);
			continue;
		}
		log_print("pvscan[%d] VG %s run autoactivation.", getpid(), sl->str);
	}

	if (dm_list_empty(vgnames)) {
		destroy_processing_handle(cmd, handle);
		ret = ECMD_PROCESSED;
		goto out;
	}

	/*
	 * When saved_vg is set there should only be one vgname.
	 * If the optimized "quick" function finds something amiss
	 * it will set no_quick and return so that the slow version
	 * can be used.
	 */
	if (!do_all && saved_vg && (dm_list_size(vgnames) == 1)) {
		log_debug("autoactivate quick");
		ret = _pvscan_aa_quick(cmd, pp, saved_vg->name, &no_quick);
	}

	/*
	 * do_all indicates 'pvscan --cache' in which case
	 * pvscan_cache_all() has already done lvmcache_label_scan
	 * which does not need to be repeated by process_each_vg.
	 */
	if (!saved_vg || (dm_list_size(vgnames) > 1) || no_quick) {
		uint32_t read_flags = READ_FOR_ACTIVATE;
		if (do_all)
			read_flags |= PROCESS_SKIP_SCAN;
		log_debug("autoactivate slow");
		ret = process_each_vg(cmd, 0, NULL, NULL, vgnames, read_flags, 0, handle, _pvscan_aa_single);
	}

	destroy_processing_handle(cmd, handle);
out:
	if (saved_vg) {
		release_vg(saved_vg);
		saved_vg = NULL;
	}
	return ret;
}
struct pvscan_arg {
	struct dm_list list;
	const char *devname;
	dev_t devno;
	struct device *dev;
};

static int _get_args(struct cmd_context *cmd, int argc, char **argv,
		     struct dm_list *pvscan_args)
{
	struct arg_value_group_list *current_group;
	struct pvscan_arg *arg;
	const char *arg_name;
	int major = -1, minor = -1;

	/* Process position args, which can be /dev/name or major:minor */

	while (argc) {
		argc--;
		arg_name = *argv++;

		if (arg_name[0] == '/') {
			if (!(arg = dm_pool_zalloc(cmd->mem, sizeof(*arg))))
				return_0;
			arg->devname = arg_name;
			dm_list_add(pvscan_args, &arg->list);
			continue;
		}

		if (sscanf(arg_name, "%d:%d", &major, &minor) != 2) {
			log_warn("WARNING: Failed to parse major:minor from %s, skipping.", arg_name);
			continue;
		}

		if (!(arg = dm_pool_zalloc(cmd->mem, sizeof(*arg))))
			return_0;
		arg->devno = MKDEV(major, minor);
		dm_list_add(pvscan_args, &arg->list);
	}

	/* Process any grouped --major --minor args */

	dm_list_iterate_items(current_group, &cmd->arg_value_groups) {
		major = grouped_arg_int_value(current_group->arg_values, major_ARG, major);
		minor = grouped_arg_int_value(current_group->arg_values, minor_ARG, minor);

		if (major < 0 || minor < 0)
			continue;

		if (!(arg = dm_pool_zalloc(cmd->mem, sizeof(*arg))))
			return_0;
		arg->devno = MKDEV(major, minor);
		dm_list_add(pvscan_args, &arg->list);
	}

	return 1;
}
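
/*
 * Illustrative invocations whose args end up in _get_args(); the device
 * name and numbers are examples only:
 *
 *   pvscan --cache /dev/sdb
 *   pvscan --cache 8:16
 *   pvscan --cache --major 8 --minor 16
 */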
static int _get_args_devs(struct cmd_context *cmd, struct dm_list *pvscan_args,
			  struct dm_list *pvscan_devs)
{
	struct pvscan_arg *arg;
	struct device_list *devl;

	/* pass NULL filter when getting devs from dev-cache, filtering is done separately */

	/* in common usage, no dev will be found for a devno */
	dm_list_iterate_items(arg, pvscan_args) {
		if (arg->devname)
			arg->dev = dev_cache_get(cmd, arg->devname, NULL);
		else if (arg->devno)
			arg->dev = dev_cache_get_by_devt(cmd, arg->devno, NULL, NULL);
		else
			return_0;
	}

	dm_list_iterate_items(arg, pvscan_args) {
		if (!arg->dev)
			continue;

		if (!(devl = dm_pool_zalloc(cmd->mem, sizeof(*devl))))
			return_0;

		devl->dev = arg->dev;
		dm_list_add(pvscan_devs, &devl->list);
	}

	return 1;
}
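
/*
 * For each device on pvscan_devs: read its VG metadata (if any), record the
 * PV as online by creating pvs_online/<pvid>, and, when activation was
 * requested, work out whether this arrival completes its VG; the names of
 * newly complete VGs are added to complete_vgnames for the activation phase.
 */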
static int _online_devs(struct cmd_context *cmd, int do_all, struct dm_list *pvscan_devs,
			int *pv_count, struct dm_list *complete_vgnames)
{
	struct device_list *devl, *devl2;
	struct device *dev;
	struct lvmcache_info *info;
	const struct format_type *fmt;
	struct format_instance_ctx fic = { .type = 0 };
	struct format_instance *fid;
	struct metadata_area *mda1, *mda2;
	struct volume_group *vg;
	struct physical_volume *pv;
	const char *vgname;
	uint32_t ext_version, ext_flags;
	uint64_t devsize;
	int do_activate = arg_is_set(cmd, activate_ARG);
	int do_full_check;
	int pvs_online;
	int pvs_offline;
	int pvs_unknown;
	int vg_complete;
	int ret = 1;

	dm_list_iterate_items_safe(devl, devl2, pvscan_devs) {
		dev = devl->dev;

		log_debug("online_devs %s %s", dev_name(dev), dev->pvid);

		/*
		 * This should already have been done by the filter, but make
		 * another check directly with udev in case the filter was not
		 * using udev and the native version didn't catch it.
		 */
		if (udev_dev_is_mpath_component(dev)) {
			log_print("pvscan[%d] ignore multipath component %s.", getpid(), dev_name(dev));
			continue;
		}

		if (!(info = lvmcache_info_from_pvid(dev->pvid, dev, 0))) {
			if (!do_all)
				log_print("pvscan[%d] ignore %s with no lvm info.", getpid(), dev_name(dev));
			continue;
		}

		ext_version = lvmcache_ext_version(info);
		ext_flags = lvmcache_ext_flags(info);
		if ((ext_version >= 2) && !(ext_flags & PV_EXT_USED)) {
			log_print("pvscan[%d] PV %s not used.", getpid(), dev_name(dev));
			(*pv_count)++;
			continue;
		}

		fmt = lvmcache_fmt(info);
		fid = fmt->ops->create_instance(fmt, &fic);
		vg = NULL;

		mda1 = lvmcache_get_dev_mda(dev, 1);
		mda2 = lvmcache_get_dev_mda(dev, 2);

		if (mda1 && !mda_is_ignored(mda1))
			vg = mda1->ops->vg_read(cmd, fid, "", mda1, NULL, NULL);

		if (!vg && mda2 && !mda_is_ignored(mda2))
			vg = mda2->ops->vg_read(cmd, fid, "", mda2, NULL, NULL);

		if (!vg) {
			log_print("pvscan[%d] PV %s has no VG metadata.", getpid(), dev_name(dev));
			fmt->ops->destroy_instance(fid);
			goto online;
		}

		set_pv_devices(fid, vg);

		if (!(pv = find_pv(vg, dev))) {
			log_print("pvscan[%d] PV %s not found in VG %s.", getpid(), dev_name(dev), vg->name);
			release_vg(vg);
			continue;
		}

		devsize = dev->size;
		if (!devsize)
			dev_get_size(dev, &devsize);

		do_full_check = 0;

		/* If use_full_md_check is set then this has already been done by filter. */
		if (!cmd->use_full_md_check) {
			if (devsize && (pv->size != devsize))
				do_full_check = 1;
			if (pv->device_hint && !strncmp(pv->device_hint, "/dev/md", 7))
				do_full_check = 1;
		}

		if (do_full_check && dev_is_md_component(dev, NULL, 1)) {
			log_print("pvscan[%d] ignore md component %s.", getpid(), dev_name(dev));
			release_vg(vg);
			continue;
		}

		if (vg_is_shared(vg)) {
			log_print("pvscan[%d] PV %s ignore shared VG.", getpid(), dev_name(dev));
			release_vg(vg);
			continue;
		}

		if (vg->system_id && vg->system_id[0] &&
		    cmd->system_id && cmd->system_id[0] &&
		    vg_is_foreign(vg)) {
			log_verbose("Ignore PV %s with VG system id %s with our system id %s",
				    dev_name(dev), vg->system_id, cmd->system_id);
			log_print("pvscan[%d] PV %s ignore foreign VG.", getpid(), dev_name(dev));
			release_vg(vg);
			continue;
		}

		/*
		 * online file phase
		 * create pvs_online/<pvid>
		 * check if vg is complete: stat pvs_online/<pvid> for each vg->pvs
		 * if vg is complete, save vg name in list for activation phase
		 * if vg not complete, create pvs_lookup/<vgname> listing all pvids from vg->pvs
		 * (only if pvs_lookup/<vgname> does not yet exist)
		 * if no vg metadata, read pvs_lookup files for pvid, use that list to check if complete
		 */
 online:
		(*pv_count)++;

		/*
		 * Create file named for pvid to record this PV is online.
		 */
		if (!_online_pvid_file_create(dev, vg ? vg->name : NULL)) {
			log_error("pvscan[%d] PV %s failed to create online file.", getpid(), dev_name(dev));
			release_vg(vg);
			ret = 0;
			continue;
		}

		/*
		 * When not activating we don't need to know about vg completeness.
		 */
		if (!do_activate) {
			log_print("pvscan[%d] PV %s online.", getpid(), dev_name(dev));
			release_vg(vg);
			continue;
		}

		/*
		 * Check if all the PVs for this VG are online.  If the arrival
		 * of this dev completes the VG, then save the vgname in
		 * complete_vgnames so it will be activated.
		 */
		pvs_online = 0;
		pvs_offline = 0;
		pvs_unknown = 0;
		vg_complete = 0;

		if (vg) {
			/*
			 * Use the VG metadata from this PV for a list of all
			 * PVIDs.  Write a lookup file of PVIDs in case another
			 * pvscan needs it.  After writing lookup file, recheck
			 * pvid files to resolve a possible race with another
			 * pvscan reading the lookup file that missed it.
			 */
			log_debug("checking all pvid files from vg %s", vg->name);
			_count_pvid_files(vg, &pvs_online, &pvs_offline);

			if (pvs_offline && _write_lookup_file(cmd, vg)) {
				log_debug("rechecking all pvid files from vg %s", vg->name);
				_count_pvid_files(vg, &pvs_online, &pvs_offline);
				if (!pvs_offline)
					log_print("pvscan[%d] VG %s complete after recheck.", getpid(), vg->name);
			}

			vgname = vg->name;
		} else {
			/*
			 * No VG metadata on this PV, so try to use a lookup
			 * file written by a prior pvscan for a list of all
			 * PVIDs.  A lookup file may not exist for this PV if
			 * it's the first to appear from the VG.
			 */
			log_debug("checking all pvid files from lookup file");
			if (!_count_pvid_files_from_lookup_file(cmd, dev, &pvs_online, &pvs_offline, &vgname))
				pvs_unknown = 1;
		}

		if (pvs_unknown) {
			log_print("pvscan[%d] PV %s online, VG unknown.",
				  getpid(), dev_name(dev));
		} else if (pvs_offline) {
			log_print("pvscan[%d] PV %s online, VG %s incomplete (need %d).",
				  getpid(), dev_name(dev), vgname, pvs_offline);
		} else {
			log_print("pvscan[%d] PV %s online, VG %s is complete.", getpid(), dev_name(dev), vgname);
			if (!str_list_add(cmd->mem, complete_vgnames, dm_pool_strdup(cmd->mem, vgname)))
				stack;
			vg_complete = 1;
		}

		if (!saved_vg && vg && vg_complete && !do_all && (dm_list_size(pvscan_devs) == 1))
			saved_vg = vg;
		else
			release_vg(vg);
	}

	return ret;
}
static int _pvscan_cache_all(struct cmd_context *cmd, int argc, char **argv,
			     struct dm_list *complete_vgnames)
{
	struct dm_list pvscan_devs;
	struct dev_iter *iter;
	struct device_list *devl;
	struct device *dev;
	int pv_count = 0;

	dm_list_init(&pvscan_devs);

	_online_files_remove(_pvs_online_dir);
	_online_files_remove(_vgs_online_dir);
	_online_files_remove(_pvs_lookup_dir);
device usage based on devices file
The LVM devices file lists devices that lvm can use. The default
file is /etc/lvm/devices/system.devices, and the lvmdevices(8)
command is used to add or remove device entries. If the file
does not exist, or if lvm.conf includes use_devicesfile=0, then
lvm will not use a devices file. When the devices file is in use,
the regex filter is not used, and the filter settings in lvm.conf
or on the command line are ignored.
LVM records devices in the devices file using hardware-specific
IDs, such as the WWID, and attempts to use subsystem-specific
IDs for virtual device types. These device IDs are also written
in the VG metadata. When no hardware or virtual ID is available,
lvm falls back to using the unstable device name as the device ID.
When devnames are used, lvm performs extra scanning to find
devices if their devname changes, e.g. after reboot.
When proper device IDs are used, an lvm command will not look
at devices outside the devices file, but when devnames are used
as a fallback, lvm will scan devices outside the devices file
to locate PVs on renamed devices. A config setting
search_for_devnames can be used to control the scanning for
renamed devname entries.
Related to the devices file, the new command option
--devices <devnames> allows a list of devices to be specified for
the command to use, overriding the devices file. The listed
devices act as a sort of devices file in terms of limiting which
devices lvm will see and use. Devices that are not listed will
appear to be missing to the lvm command.
Multiple devices files can be kept in /etc/lvm/devices, which
allows lvm to be used with different sets of devices, e.g.
system devices do not need to be exposed to a specific application,
and the application can use lvm on its own set of devices that are
not exposed to the system. The option --devicesfile <filename> is
used to select the devices file to use with the command. Without
the option set, the default system devices file is used.
Setting --devicesfile "" causes lvm to not use a devices file.
An existing, empty devices file means lvm will see no devices.
The new command vgimportdevices adds PVs from a VG to the devices
file and updates the VG metadata to include the device IDs.
vgimportdevices -a will import all VGs into the system devices file.
LVM commands run by dmeventd do not use a devices file by default,
and will look at all devices on the system. A devices file can
be created for dmeventd (/etc/lvm/devices/dmeventd.devices). If
this file exists, lvm commands run by dmeventd will use it.
Internal implementation (a call-sequence sketch follows this list):
- device_ids_read - read the devices file
. add struct dev_use (du) to cmd->use_devices for each devices file entry
- dev_cache_scan - get /dev entries
. add struct device (dev) to dev_cache for each device on the system
- device_ids_match - match devices file entries to /dev entries
. match each du in cmd->use_devices to a dev in dev_cache, using device ID
. on match, set du->dev, dev->id, dev->flags MATCHED_USE_ID
- label_scan - read lvm headers and metadata from devices
. filters are applied, those that do not need data from the device
. filter-deviceid skips devs without MATCHED_USE_ID, i.e.
skips /dev entries that are not listed in the devices file
. read lvm label from dev
. filters are applied, those that use data from the device
. read lvm metadata from dev
. add info/vginfo structs for PVs/VGs (info is "lvmcache")
- device_ids_find_renamed_devs - handle devices with unstable devname ID
where devname changed
. this step is only needed when devs do not have proper device IDs,
and their dev names change, e.g. after reboot sdb becomes sdc.
. detect an incorrect match because the PVID in the devices file entry
does not match the PVID found when the device was read above
. undo the incorrect match between du and dev above
. search system devices for the new location of the PVID
. update the devices file with new devnames for PVIDs on renamed devices
. label_scan the renamed devs
- continue with command processing
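The steps above map onto roughly the call sequence sketched below. This is a
minimal illustration only: the function names are the ones listed above, but
the declarations and argument lists are simplified assumptions rather than the
real lvm2 prototypes.

/*
 * Rough sketch of the startup sequence described in the list above.
 * The prototypes are simplified assumptions for illustration only.
 */
struct cmd_context;

int device_ids_read(struct cmd_context *cmd);                /* assumed signature */
void dev_cache_scan(struct cmd_context *cmd);                /* assumed signature */
void device_ids_match(struct cmd_context *cmd);              /* assumed signature */
int lvmcache_label_scan(struct cmd_context *cmd);            /* assumed signature */
void device_ids_find_renamed_devs(struct cmd_context *cmd);  /* assumed signature */

static int _devices_file_startup_sketch(struct cmd_context *cmd)
{
	/* read /etc/lvm/devices/system.devices; one struct dev_use (du)
	   is added to cmd->use_devices per devices file entry */
	if (!device_ids_read(cmd))
		return 0;

	/* add a struct device (dev) to dev_cache for each /dev entry */
	dev_cache_scan(cmd);

	/* pair each du with a dev using its device ID; on a match set
	   du->dev, dev->id and the MATCHED_USE_ID flag */
	device_ids_match(cmd);

	/* apply filters (filter-deviceid drops devs without MATCHED_USE_ID),
	   read labels and metadata, build lvmcache info/vginfo structs */
	if (!lvmcache_label_scan(cmd))
		return 0;

	/* only needed for idtype=devname entries whose device name changed,
	   e.g. sdb became sdc after reboot: fix the match, update the
	   devices file, and label_scan the renamed devs */
	device_ids_find_renamed_devs(cmd);

	return 1;
}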
2020-06-23 21:25:41 +03:00
unlink_searched_devnames ( cmd ) ;
2019-02-27 01:35:16 +03:00
/*
2020-10-15 22:11:08 +03:00
* pvscan - - cache removes existing hints and recreates new ones .
* We begin by clearing hints at the start of the command .
* The pvscan_recreate_hints flag is used to enable the
* special case hint recreation in label_scan .
2019-02-27 01:35:16 +03:00
*/
2020-10-15 22:11:08 +03:00
cmd - > pvscan_recreate_hints = 1 ;
pvscan_recreate_hints_begin ( cmd ) ;
2019-02-27 01:35:16 +03:00
2020-10-15 22:11:08 +03:00
log_debug ( " pvscan_cache_all: label scan all " ) ;
2019-02-27 01:35:16 +03:00
2020-10-15 22:11:08 +03:00
/*
* use lvmcache_label_scan ( ) instead of just label_scan_devs ( )
* because label_scan ( ) has the ability to update hints ,
* which we want ' pvscan - - cache ' to do , and that uses
* info from lvmcache , e . g . duplicate pv info .
*/
lvmcache_label_scan ( cmd ) ;
cmd - > pvscan_recreate_hints = 0 ;
cmd - > use_hints = 0 ;
log_debug ( " pvscan_cache_all: create list of devices " ) ;
/*
* The use of filter here will just reuse the existing
* ( persistent ) filter info label_scan has already set up .
*/
if ( ! ( iter = dev_iter_create ( cmd - > filter , 1 ) ) )
return_0 ;
2019-02-27 01:35:16 +03:00
2020-10-15 22:11:08 +03:00
while ( ( dev = dev_iter_get ( cmd , iter ) ) ) {
if ( ! ( devl = dm_pool_zalloc ( cmd - > mem , sizeof ( * devl ) ) ) ) {
dev_iter_destroy ( iter ) ;
return_0 ;
}
devl - > dev = dev ;
dm_list_add ( & pvscan_devs , & devl - > list ) ;
2018-06-28 22:48:03 +03:00
}
2020-10-15 22:11:08 +03:00
dev_iter_destroy ( iter ) ;
2018-06-28 22:48:03 +03:00
2020-10-15 22:11:08 +03:00
_online_devs ( cmd , 1 , & pvscan_devs , & pv_count , complete_vgnames ) ;
return 1 ;
}
static int _pvscan_cache_args ( struct cmd_context * cmd , int argc , char * * argv ,
struct dm_list * complete_vgnames )
{
struct dm_list pvscan_args ; /* struct pvscan_arg */
struct dm_list pvscan_devs ; /* struct device_list */
struct pvscan_arg * arg ;
struct device_list * devl , * devl2 ;
2020-06-23 21:25:41 +03:00
int relax_deviceid_filter = 0 ;
2020-10-15 22:11:08 +03:00
int pv_count = 0 ;
int ret ;
dm_list_init ( & pvscan_args ) ;
dm_list_init ( & pvscan_devs ) ;
cmd - > pvscan_cache_single = 1 ;
2020-06-23 21:25:41 +03:00
if ( ! setup_devices ( cmd ) ) {
log_error ( " Failed to set up devices. " ) ;
2021-03-09 13:42:29 +03:00
return 0 ;
2020-06-23 21:25:41 +03:00
}
2019-02-27 01:35:16 +03:00
/*
2020-10-15 22:11:08 +03:00
* Get list of args . Do not use filters .
2019-02-27 01:35:16 +03:00
*/
2020-10-15 22:11:08 +03:00
if ( ! _get_args ( cmd , argc , argv , & pvscan_args ) )
return_0 ;
2019-02-27 01:35:16 +03:00
/*
2020-10-15 22:11:08 +03:00
* Get list of devs for args . Do not use filters .
2019-02-27 01:35:16 +03:00
*/
2020-10-15 22:11:08 +03:00
if ( ! _get_args_devs ( cmd , & pvscan_args , & pvscan_devs ) )
return_0 ;
2018-06-28 22:48:03 +03:00
2020-10-15 22:11:08 +03:00
/*
* Remove pvid online files for major / minor args for which the dev has
* been removed .
*/
dm_list_iterate_items ( arg , & pvscan_args ) {
if ( arg - > dev | | ! arg - > devno )
continue ;
_online_pvid_file_remove_devno ( ( int ) MAJOR ( arg - > devno ) , ( int ) MINOR ( arg - > devno ) ) ;
}
2018-06-28 22:48:03 +03:00
2020-10-15 22:11:08 +03:00
/*
* A common pvscan removal of a single dev is done here .
*/
if ( dm_list_empty ( & pvscan_devs ) )
return 1 ;
if ( cmd - > md_component_detection & & ! cmd - > use_full_md_check & &
! strcmp ( cmd - > md_component_checks , " auto " ) & &
dev_cache_has_md_with_end_superblock ( cmd - > dev_types ) ) {
log_debug ( " Enable full md component check. " ) ;
cmd - > use_full_md_check = 1 ;
}
2018-06-28 22:48:03 +03:00
2020-10-15 22:11:08 +03:00
/*
* Apply nodata filters .
2020-06-23 21:25:41 +03:00
*
* We want pvscan autoactivation to work when using a devices file
* containing idtype = devname , in cases when the devname changes
* after reboot . To make this work , we have to relax the devices
* file restrictions somewhat here in cases where the devices file
* contains entries with idtype = devname : disable filter - deviceid
* when applying the nodata filters here , and read the label header .
* Once the label header is read , check if the label header pvid
* is in the devices file , and ignore the device if it ' s not .
* The downside of this is that pvscans from the system will read
* devs belonging to other devices files .
* Enable / disable this behavior with a config setting ?
2020-10-15 22:11:08 +03:00
*/
2020-06-23 21:25:41 +03:00
2020-10-15 22:11:08 +03:00
log_debug ( " pvscan_cache_args: filter devs nodata " ) ;
2020-06-23 21:25:41 +03:00
if ( cmd - > enable_devices_file & & device_ids_use_devname ( cmd ) ) {
relax_deviceid_filter = 1 ;
cmd - > filter_deviceid_skip = 1 ;
}
2020-10-15 22:11:08 +03:00
cmd - > filter_nodata_only = 1 ;
2019-01-16 23:19:09 +03:00
2020-10-15 22:11:08 +03:00
dm_list_iterate_items_safe ( devl , devl2 , & pvscan_devs ) {
if ( ! cmd - > filter - > passes_filter ( cmd , cmd - > filter , devl - > dev , NULL ) ) {
log_print ( " pvscan[%d] %s excluded by filters: %s. " , getpid ( ) ,
dev_name ( devl - > dev ) , dev_filtered_reason ( devl - > dev ) ) ;
dm_list_del ( & devl - > list ) ;
2018-06-28 22:48:03 +03:00
}
}
2020-10-15 22:11:08 +03:00
cmd - > filter_nodata_only = 0 ;
/*
* Clear the results of nodata filters that were saved by the
* persistent filter so that the complete set of filters will
* be checked by passes_filter below .
*/
dm_list_iterate_items ( devl , & pvscan_devs )
cmd - > filter - > wipe ( cmd , cmd - > filter , devl - > dev , NULL ) ;
2019-02-27 01:35:16 +03:00
/*
2020-10-15 22:11:08 +03:00
* Read header from each dev .
* Eliminate non - lvm devs .
* Apply all filters .
2019-02-27 01:35:16 +03:00
*/
2020-10-15 22:11:08 +03:00
log_debug ( " pvscan_cache_args: read and filter devs " ) ;
label_scan_setup_bcache ( ) ;
dm_list_iterate_items_safe ( devl , devl2 , & pvscan_devs ) {
if ( ! label_read_pvid ( devl - > dev ) ) {
/* Not an lvm device */
log_print ( " pvscan[%d] %s not an lvm device. " , getpid ( ) , dev_name ( devl - > dev ) ) ;
dm_list_del ( & devl - > list ) ;
continue ;
}
2020-06-23 21:25:41 +03:00
/*
* filter - deviceid is not being used because of unstable devnames ,
* so in place of that check if the pvid is in the devices file .
*/
if ( relax_deviceid_filter ) {
if ( ! get_du_for_pvid ( cmd , devl - > dev - > pvid ) ) {
log_print ( " pvscan[%d] %s excluded by devices file (checking PVID). " ,
getpid ( ) , dev_name ( devl - > dev ) ) ;
dm_list_del ( & devl - > list ) ;
continue ;
}
}
2020-10-15 22:11:08 +03:00
/* Applies all filters, including those that need data from dev. */
if ( ! cmd - > filter - > passes_filter ( cmd , cmd - > filter , devl - > dev , NULL ) ) {
log_print ( " pvscan[%d] %s excluded by filters: %s. " , getpid ( ) ,
dev_name ( devl - > dev ) , dev_filtered_reason ( devl - > dev ) ) ;
dm_list_del ( & devl - > list ) ;
}
2019-02-27 01:35:16 +03:00
}
2020-06-23 21:25:41 +03:00
if ( relax_deviceid_filter )
cmd - > filter_deviceid_skip = 0 ;
2020-10-15 22:11:08 +03:00
if ( dm_list_empty ( & pvscan_devs ) )
return 1 ;
log_debug ( " pvscan_cache_args: label scan devs " ) ;
/*
* Scan devs to populate lvmcache info , which includes the mda info that ' s
* needed to read vg metadata in the next step . The _cached variant of
* label_scan is used so the existing bcache data from label_read_pvid above
* can be reused ( although more data may need to be read depending on how
* much of the metadata was covered by reading the pvid . )
*/
label_scan_devs_cached ( cmd , NULL , & pvscan_devs ) ;
ret = _online_devs ( cmd , 0 , & pvscan_devs , & pv_count , complete_vgnames ) ;
2019-01-16 23:19:09 +03:00
/*
* When a new PV appears , the system runs pvscan - - cache dev .
* This also means that existing hints are invalid , and
* we can force hints to be refreshed here . There may be
* cases where this detects a change that the other methods
* of detecting invalid hints doesn ' t catch .
*/
2020-06-23 21:25:41 +03:00
if ( pv_count ) {
2019-01-16 23:19:09 +03:00
invalidate_hints ( cmd ) ;
2020-06-23 21:25:41 +03:00
unlink_searched_devnames ( cmd ) ;
}
2019-01-16 23:19:09 +03:00
2020-10-15 22:11:08 +03:00
return ret ;
}
int pvscan_cache_cmd ( struct cmd_context * cmd , int argc , char * * argv )
{
struct pvscan_aa_params pp = { 0 } ;
struct dm_list complete_vgnames ;
int do_activate = arg_is_set ( cmd , activate_ARG ) ;
int devno_args = 0 ;
int do_all ;
int ret ;
dm_list_init ( & complete_vgnames ) ;
2021-02-08 18:28:18 +03:00
if ( do_activate & &
! find_config_tree_bool ( cmd , global_event_activation_CFG , NULL ) ) {
log_verbose ( " Ignoring pvscan --cache -aay because event_activation is disabled. " ) ;
return ECMD_PROCESSED ;
}
2020-10-15 22:11:08 +03:00
if ( arg_is_set ( cmd , major_ARG ) + arg_is_set ( cmd , minor_ARG ) )
devno_args = 1 ;
if ( devno_args & & ( ! arg_is_set ( cmd , major_ARG ) | | ! arg_is_set ( cmd , minor_ARG ) ) ) {
log_error ( " Both --major and --minor required to identify devices. " ) ;
return EINVALID_CMD_LINE ;
}
do_all = ! argc & & ! devno_args ;
_online_dir_setup ( ) ;
if ( do_all ) {
if ( ! _pvscan_cache_all ( cmd , argc , argv , & complete_vgnames ) )
return ECMD_FAILED ;
} else {
if ( ! _pvscan_cache_args ( cmd , argc , argv , & complete_vgnames ) )
return ECMD_FAILED ;
}
if ( ! do_activate )
return ECMD_PROCESSED ;
if ( dm_list_empty ( & complete_vgnames ) ) {
log_debug ( " No VGs to autoactivate. " ) ;
return ECMD_PROCESSED ;
}
2018-06-28 22:48:03 +03:00
/*
2020-10-15 22:11:08 +03:00
* When the PV was recorded online , we check if all the PVs for the VG
* are online . If so , the vgname was added to the list , and we can
* attempt to autoactivate LVs in the VG .
2018-06-28 22:48:03 +03:00
*/
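As an aside, the completeness test described in the comment above can be
pictured with the following minimal, standalone sketch. It assumes that each
online PV is recorded as a file named by its PVID under /run/lvm/pvs_online;
the helper below is hypothetical and is not one of the functions used in this
file.

#include <limits.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

/*
 * Hypothetical illustration: a VG is considered "complete" once every PV
 * listed in its metadata has an online file under /run/lvm/pvs_online.
 */
static int _vg_pvs_all_online_sketch(const char *online_dir,
				     const char *const *pvids, int pv_count)
{
	char path[PATH_MAX];
	struct stat st;
	int i;

	for (i = 0; i < pv_count; i++) {
		if (snprintf(path, sizeof(path), "%s/%s", online_dir, pvids[i]) < 0)
			return 0;
		if (stat(path, &st))
			return 0;	/* this PV has not been seen online yet */
	}

	return 1;	/* all PVs online: the VG can be autoactivated */
}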
2020-10-15 22:11:08 +03:00
ret = _pvscan_aa ( cmd , & pp , do_all , & complete_vgnames ) ;
2018-06-28 22:48:03 +03:00
2020-10-15 22:11:08 +03:00
if ( pp . activate_errors )
2018-06-28 22:48:03 +03:00
ret = ECMD_FAILED ;
if ( ! sync_local_dev_names ( cmd ) )
stack ;
return ret ;
}
2018-07-10 21:39:29 +03:00
int pvscan ( struct cmd_context * cmd , int argc , char * * argv )
{
log_error ( INTERNAL_ERROR " Missing function for command definition %d:%s. " ,
cmd - > command - > command_index , cmd - > command - > command_id ) ;
return ECMD_FAILED ;
}