#!@PYTHON3@
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import pprint as prettyprint
import os
from lvmdbusd import cmdhandler
from lvmdbusd.utils import log_debug, log_error, lvm_column_key, LvmBug


class DataStore(object):
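	"""
	In-memory cache of lvm state.  refresh() issues a single lvm full report
	query, parses the JSON into per PV/VG/LV dictionaries plus the
	name/path -> uuid lookup tables, and the fetch_* and *_in_vg methods read
	from that cache.
	"""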
	def __init__(self, vdo_support=False):
		self.pvs = {}
		self.vgs = {}
		self.lvs = {}
		self.pv_lvs = {}
		self.lv_pvs = {}
		self.lvs_hidden = {}

		self.pv_path_to_uuid = {}
		self.vg_name_to_uuid = {}
		self.lv_full_name_to_uuid = {}

		self.lvs_in_vgs = {}
		self.pvs_in_vgs = {}

		self.num_refreshes = 0
		self.vdo_support = vdo_support

	@staticmethod
	def _pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup):
		for p in c_pvs.values():
			# Capture which PVs are associated with which VG
			if p['vg_uuid'] not in c_pvs_in_vgs:
				c_pvs_in_vgs[p['vg_uuid']] = []

			if p['vg_name']:
				c_pvs_in_vgs[p['vg_uuid']].append(
					(p['pv_name'], p['pv_uuid']))

			# Lookup for translating between /dev/<name> and pv uuid
			c_lookup[p['pv_name']] = p['pv_uuid']

	@staticmethod
	def _parse_pvs_json(_all):
		c_pvs = OrderedDict()
		c_lookup = {}
		c_pvs_in_vgs = {}

		# Each item in the report is a collection of information pertaining
		# to the vg
		for r in _all['report']:
			tmp_pv = []

			# Get the pv data for this VG.
			if 'pv' in r:
				tmp_pv.extend(r['pv'])

			# Sort them
			sorted_tmp_pv = sorted(tmp_pv, key=lambda pk: pk['pv_name'])

			# Add them to result set
			for p in sorted_tmp_pv:
				c_pvs[p['pv_uuid']] = p

			if 'pvseg' in r:
				for s in r['pvseg']:
					r = c_pvs[s['pv_uuid']]
					r.setdefault('pvseg_start', []).append(s['pvseg_start'])
					r.setdefault('pvseg_size', []).append(s['pvseg_size'])
					r.setdefault('segtype', []).append(s['segtype'])

			# TODO: Remove this bug work around when we have orphan segs.
			for i in c_pvs.values():
				if 'pvseg_start' not in i:
					i['pvseg_start'] = '0'
					i['pvseg_size'] = i['pv_pe_count']
					i['segtype'] = 'free'

		DataStore._pvs_parse_common(c_pvs, c_pvs_in_vgs, c_lookup)

		return c_pvs, c_lookup, c_pvs_in_vgs

	@staticmethod
	def _parse_vgs_json(_all):

		tmp_vg = []
		for r in _all['report']:
			# Get the vg data for this VG.
			if 'vg' in r:
				tmp_vg.extend(r['vg'])

		# Sort for consistent output, however this is optional
		vgs = sorted(tmp_vg, key=lambda vk: vk['vg_uuid'])

		c_vgs = OrderedDict()
		c_lookup = {}

		for i in vgs:
			vg_name = i['vg_name']

			# Lvm allows duplicate vg names.  When this occurs, each subsequent
			# matching VG name will be called vg_name:vg_uuid.  Note: ':' is an
			# invalid character for lvm VG names
			if vg_name in c_lookup:
				vg_name = "%s:%s" % (vg_name, i['vg_uuid'])
				i['vg_name'] = vg_name

			c_lookup[vg_name] = i['vg_uuid']
			c_vgs[i['vg_uuid']] = i

		return c_vgs, c_lookup

	@staticmethod
	def _parse_lvs_common(c_lvs, c_lv_full_lookup):

		c_lvs_in_vgs = OrderedDict()
		c_lvs_hidden = OrderedDict()

		for i in c_lvs.values():
			if i['vg_uuid'] not in c_lvs_in_vgs:
				c_lvs_in_vgs[i['vg_uuid']] = []

			c_lvs_in_vgs[i['vg_uuid']].append(
				(i['lv_name'],
				(i['lv_attr'], i['lv_layout'], i['lv_role']),
				i['lv_uuid']))

			if i['lv_parent']:
				# Lookup what the parent refers to
				parent_name = i['lv_parent']
				full_parent_name = "%s/%s" % (i['vg_name'], parent_name)
				if full_parent_name not in c_lv_full_lookup:
					parent_name = '[%s]' % (parent_name)
					full_parent_name = "%s/%s" % (i['vg_name'], parent_name)

				parent_uuid = c_lv_full_lookup[full_parent_name]

				if parent_uuid not in c_lvs_hidden:
					c_lvs_hidden[parent_uuid] = []

				c_lvs_hidden[parent_uuid].append(
					(i['lv_uuid'], i['lv_name']))

		return c_lvs, c_lvs_in_vgs, c_lvs_hidden, c_lv_full_lookup

	def _parse_lvs_json(self, _all):

		c_lvs = OrderedDict()
		c_lv_full_lookup = {}

		# Each item in the report is a collection of information pertaining
		# to the vg
		for r in _all['report']:
			# Get the lv data for this VG.
			if 'lv' in r:
				# Add them to result set
				for i in r['lv']:
					full_name = "%s/%s" % (i['vg_name'], i['lv_name'])
					c_lv_full_lookup[full_name] = i['lv_uuid']
					c_lvs[i['lv_uuid']] = i

			# Add in the segment data
			if 'seg' in r:
				for s in r['seg']:
					r = c_lvs[s['lv_uuid']]
					r.setdefault('seg_pe_ranges', []).append(s['seg_pe_ranges'])
					r.setdefault('segtype', []).append(s['segtype'])

					if self.vdo_support:
						for seg_key, seg_val in s.items():
							if seg_key.startswith("vdo_"):
								r[seg_key] = seg_val

		return DataStore._parse_lvs_common(c_lvs, c_lv_full_lookup)

	@staticmethod
	def _make_list(l):
		if not isinstance(l, list):
			l = [l]
		return l
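
	# Note: each seg_pe_ranges entry from lvm is expected to look like
	# "device:start-end" (e.g. "/dev/sda:0-1279"), which is what the
	# split(":") / split('-') below assumes.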
	@staticmethod
	def _parse_seg_entry(se, segtype):
		if se:
			# print("_parse_seg_entry %s %s" % (str(se), str(segtype)))
			device, segs = se.split(":")
			start, end = segs.split('-')
			return (device, (start, end), segtype)
		else:
			return ("", (), segtype)
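
	# Pair each pe range with its segment type and flatten the result into a
	# list of (device, (start, end), segtype) tuples.  A single range entry
	# may contain several space separated ranges (e.g. when one segment spans
	# more than one device), hence the split on ' '.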
	@staticmethod
	def _build_segments(l, seg_types):
		rc = []
		l = DataStore._make_list(l)
		s = DataStore._make_list(seg_types)

		assert len(l) == len(s)
		ls = list(zip(l, s))

		for i in ls:
			if ' ' in i[0]:
				tmp = i[0].split(' ')
				for t in tmp:
					rc.append(DataStore._parse_seg_entry(t, i[1]))
			else:
				rc.append(DataStore._parse_seg_entry(*i))
		return rc
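
	# Build up table[pv_device][lv_uuid] = {'segs': [...], 'name': <lv name>,
	# 'meta': <(lv_attr, lv_layout, lv_role)>}, appending further segments
	# when the LV already has an entry on this PV.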
	@staticmethod
	def _pv_device_lv_entry(table, pv_device, lv_uuid, meta, lv_attr,
			segment_info):
		if pv_device not in table:
			table[pv_device] = {}

		if lv_uuid not in table[pv_device]:
			table[pv_device][lv_uuid] = {}
			table[pv_device][lv_uuid]['segs'] = [segment_info]
			table[pv_device][lv_uuid]['name'] = meta
			table[pv_device][lv_uuid]['meta'] = lv_attr
		else:
			table[pv_device][lv_uuid]['segs'].append(segment_info)

	@staticmethod
	def _pv_device_lv_format(pv_device_lvs):
		rc = {}

		for pv_device, pd in pv_device_lvs.items():
			lvs = []
			for lv_uuid, ld in sorted(pd.items()):
				lvs.append((lv_uuid, ld['name'], ld['meta'], ld['segs']))

			rc[pv_device] = lvs
		return rc
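
	# Build up the reverse mapping: table[lv_uuid][pv_device] =
	# {'segs': [...], 'pv_uuid': ...}, appending further segments when the LV
	# already has an entry on that device.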
	@staticmethod
	def _lvs_device_pv_entry(table, lv_uuid, pv_device, pv_uuid, segment_info):
		if lv_uuid not in table:
			table[lv_uuid] = {}

		if pv_device not in table[lv_uuid]:
			table[lv_uuid][pv_device] = {}
			table[lv_uuid][pv_device]['segs'] = [segment_info]
			table[lv_uuid][pv_device]['pv_uuid'] = pv_uuid
		else:
			table[lv_uuid][pv_device]['segs'].append(segment_info)

	@staticmethod
	def _lvs_device_pv_format(lvs_device_pvs):
		rc = {}

		for lv_uuid, ld in lvs_device_pvs.items():
			pvs = []
			for pv_device, pd in sorted(ld.items()):
				pvs.append((pd['pv_uuid'], pv_device, pd['segs']))

			rc[lv_uuid] = pvs
		return rc
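
	# Walk the cached LV segment data and build both directions of the
	# PV <-> LV mapping, returning (pv_device -> LVs, lv_uuid -> PVs).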
	def _parse_pv_in_lvs(self):
		pv_device_lvs = {}  # What LVs are stored on a PV
		lvs_device_pv = {}  # Where LV data is stored

		for i in self.lvs.values():
			segs = self._build_segments(i['seg_pe_ranges'], i['segtype'])
			for s in segs:
				# We are referring to a physical device
				if '/dev/' in s[0]:
					device, r, seg_type = s

					DataStore._pv_device_lv_entry(
						pv_device_lvs, device, i['lv_uuid'], i['lv_name'],
						(i['lv_attr'], i['lv_layout'], i['lv_role']),
						(r[0], r[1], seg_type))

					# (pv_name, pv_segs, pv_uuid)
					DataStore._lvs_device_pv_entry(
						lvs_device_pv, i['lv_uuid'], device,
						self.pv_path_to_uuid[device], (r[0], r[1], seg_type))
				else:
					# TODO Handle the case where the segments refer to a LV
					# and not a PV
					pass
					# print("Handle this %s %s %s" % (s[0], s[1], s[2]))

		# Convert to the format needed for consumption
		pv_device_lvs_result = DataStore._pv_device_lv_format(pv_device_lvs)
		lvs_device_pv_result = DataStore._lvs_device_pv_format(lvs_device_pv)

		return pv_device_lvs_result, lvs_device_pv_result

	def refresh(self, log=True):
		"""
		Go out and query lvm for the latest data in as few trips as possible
		:param log: Add debug log entry/exit messages
		:return: None
		"""
		try:
			self.num_refreshes += 1

			if log:
				log_debug("lvmdb - refresh entry")

			# Grab everything first then parse it
			# Do a single lvm retrieve for everything in json
			a = cmdhandler.lvm_full_report_json()

			_pvs, _pvs_lookup, _pvs_in_vgs = self._parse_pvs_json(a)
			_vgs, _vgs_lookup = self._parse_vgs_json(a)
			_lvs, _lvs_in_vgs, _lvs_hidden, _lvs_lookup = self._parse_lvs_json(a)

			# Set all
			self.pvs = _pvs
			self.pv_path_to_uuid = _pvs_lookup
			self.vg_name_to_uuid = _vgs_lookup
			self.lv_full_name_to_uuid = _lvs_lookup

			self.vgs = _vgs
			self.lvs = _lvs
			self.lvs_in_vgs = _lvs_in_vgs
			self.pvs_in_vgs = _pvs_in_vgs
			self.lvs_hidden = _lvs_hidden

			# Create lookup table for which LV and segments are on each PV
			self.pv_lvs, self.lv_pvs = self._parse_pv_in_lvs()
		except KeyError as ke:
			key = ke.args[0]
			if lvm_column_key(key):
				raise LvmBug("missing JSON key: '%s'" % key)
			raise ke

		if log:
			log_debug("lvmdb - refresh exit")
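
	# Return the cached PV records, either all of them (no names supplied) or
	# only those matching the supplied device names.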
	def fetch_pvs(self, pv_name):
		if not pv_name:
			return self.pvs.values()
		else:
			rc = []
			for s in pv_name:
				# The user could be using a symlink instead of the actual
				# block device, make sure we are using actual block device file
				# if the pv name isn't in the lookup
				if s not in self.pv_path_to_uuid:
					s = os.path.realpath(s)
				rc.append(self.pvs[self.pv_path_to_uuid[s]])
			return rc

	def pv_missing(self, pv_uuid):
		# The uuid might not be a PV, default to false
		if pv_uuid in self.pvs:
			if self.pvs[pv_uuid]['pv_missing'] == '':
				return False
			else:
				return True
		return False

	def fetch_vgs(self, vg_name):
		if not vg_name:
			return self.vgs.values()
		else:
			rc = []
			for s in vg_name:
				rc.append(self.vgs[self.vg_name_to_uuid[s]])
			return rc

	def fetch_lvs(self, lv_names):
		try:
			if not lv_names:
				return self.lvs.values()
			else:
				rc = []
				for s in lv_names:
					rc.append(self.lvs[self.lv_full_name_to_uuid[s]])
				return rc
		except KeyError as ke:
			log_error("Key %s not found!" % (str(lv_names)))
			log_error("lv name to uuid lookup")
			for keys in sorted(self.lv_full_name_to_uuid.keys()):
				log_error("%s" % (keys))
			log_error("lvs entries by uuid")
			for keys in sorted(self.lvs.keys()):
				log_error("%s" % (keys))
			raise ke
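
	# Return the (pvseg_start, pvseg_size) pairs collected for this PV.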
	def pv_pe_segments(self, pv_uuid):
		pv = self.pvs[pv_uuid]
		return list(zip(pv['pvseg_start'], pv['pvseg_size']))

	def pv_contained_lv(self, pv_device):
		rc = []
		if pv_device in self.pv_lvs:
			rc = self.pv_lvs[pv_device]
		return rc

	def lv_contained_pv(self, lv_uuid):
		rc = []
		if lv_uuid in self.lv_pvs:
			rc = self.lv_pvs[lv_uuid]
		return rc

	def lvs_in_vg(self, vg_uuid):
		# Return an array of
		# (lv_name, (lv_attr, lv_layout, lv_role), lv_uuid)
		rc = []
		if vg_uuid in self.lvs_in_vgs:
			rc = self.lvs_in_vgs[vg_uuid]
		return rc

	def pvs_in_vg(self, vg_uuid):
		# Returns an array of (pv_name, pv_uuid)
		rc = []
		if vg_uuid in self.pvs_in_vgs:
			rc = self.pvs_in_vgs[vg_uuid]
		return rc

	def hidden_lvs(self, lv_uuid):
		# For a specified LV, return a list of hidden (lv_uuid, lv_name)
		# pairs for it
		rc = []
		if lv_uuid in self.lvs_hidden:
			rc = self.lvs_hidden[lv_uuid]
		return rc
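

# Simple debug harness: when run directly, refresh once and dump the parsed
# data structures.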
if __name__ == "__main__":
	os.environ["LC_ALL"] = "C"
	os.environ["LVM_COMMAND_PROFILE"] = "lvmdbusd"

	pp = prettyprint.PrettyPrinter(indent=4)

	ds = DataStore()
	ds.refresh()

	print("PVS")
	for v in ds.pvs.values():
		pp.pprint(v)
		print('PV missing is %s' % ds.pv_missing(v['pv_uuid']))

	print("VGS")
	for v in ds.vgs.values():
		pp.pprint(v)

	print("VG name to UUID")
	for k, v in ds.vg_name_to_uuid.items():
		print("%s: %s" % (k, v))

	print("LVS")
	for v in ds.lvs.values():
		pp.pprint(v)

	print("LVS in VG")
	for k, v in ds.lvs_in_vgs.items():
		print("VG uuid = %s" % (k))
		pp.pprint(v)

	print("pv_in_lvs")
	for k, v in ds.pv_lvs.items():
		print("PV %s contains LVS:" % (k))
		pp.pprint(v)

	for k, v in ds.lv_pvs.items():
		print("LV device = %s" % (k))
		pp.pprint(v)