#!/usr/bin/python3

# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import signal
# noinspection PyUnresolvedReferences
import subprocess
import unittest
import tempfile
from glob import glob
from subprocess import Popen, PIPE

import dbus
import pyudev
# noinspection PyUnresolvedReferences
from dbus.mainloop.glib import DBusGMainLoop

import testlib
from testlib import *

g_tmo = 0

g_lvm_shell = False

# Approx. min size
VDO_MIN_SIZE = mib(8192)

VG_TEST_SUFFIX = "_vg_LvMdBuS_TEST"

EXE_NAME = "/lvmdbusd"

# Prefix on created objects to enable easier clean-up
g_prefix = os.getenv('PREFIX', '')

# Check dev dir prefix for test suite (LVM_TEST_DEVDIR)
dm_dev_dir = os.getenv('DM_DEV_DIR', '/dev')

# Use the session bus instead of the system bus
use_session = os.getenv('LVM_DBUSD_USE_SESSION', False)

# Only use the devices listed in the ENV variable
pv_device_list = os.getenv('LVM_DBUSD_PV_DEVICE_LIST', None)

# Default is to test all modes
# 0 == Only test fork & exec mode
# 1 == Only test lvm shell mode
# 2 == Test both fork & exec & lvm shell mode (default)
# Other == Test just lvm shell mode
test_shell = os.getenv('LVM_DBUSD_TEST_MODE', 2)

# LVM binary to use
LVM_EXECUTABLE = os.getenv('LVM_BINARY', '/usr/sbin/lvm')
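
# A minimal invocation sketch showing how the environment variables above are
# meant to be combined (the script name, device paths, list separator and
# PYTHONPATH value are assumptions, not part of this module; adjust for the
# local setup):
#   PYTHONPATH=.. LVM_DBUSD_PV_DEVICE_LIST="/dev/sdb,/dev/sdc" \
#       LVM_DBUSD_TEST_MODE=0 python3 lvmdbustest.py -v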

# Empty options dictionary (EOD)
EOD = dbus.Dictionary({}, signature=dbus.Signature('sv'))
# Base interfaces on LV objects
LV_BASE_INT = (LV_COMMON_INT, LV_INT)

if use_session:
	bus = dbus.SessionBus(mainloop=DBusGMainLoop())
else:
	bus = dbus.SystemBus(mainloop=DBusGMainLoop())

# If we have multiple clients we will globally disable introspection
# validation to limit the massive amount of introspection calls we make as
# that method prevents things from executing concurrently
if pv_device_list:
	testlib.validate_introspection = False


def vg_n(prefix=None):
	name = rs(8, VG_TEST_SUFFIX)
	if prefix:
		name = prefix + name
	return g_prefix + name


def lv_n(suffix=None):
	if not suffix:
		s = '_lv'
	else:
		s = suffix
	return rs(8, s)


def _is_testsuite_pv(pv_name):
	return g_prefix != "" and pv_name[-1].isdigit() and \
		pv_name[:-1].endswith(g_prefix + "pv")


def is_nested_pv(pv_name):
	return pv_name.count('/') == 3 and not _is_testsuite_pv(pv_name)


def _root_pv_name(res, pv_name):
	if not is_nested_pv(pv_name):
		return pv_name

	vg_name = pv_name.split('/')[2]
	for v in res[VG_INT]:
		if v.Vg.Name == vg_name:
			for pv in res[PV_INT]:
				if pv.object_path in v.Vg.Pvs:
					return _root_pv_name(res, pv.Pv.Name)
	return None


def _prune_lvs(res, interface, vg_object_path):
	lvs = [lv for lv in res[interface] if lv.LvCommon.Vg == vg_object_path]
	res[interface] = lvs


def _prune(res, pv_filter):
	if pv_filter:
		pv_lookup = {}

		pv_list = []
		for p in res[PV_INT]:
			if _root_pv_name(res, p.Pv.Name) in pv_filter:
				pv_list.append(p)
				pv_lookup[p.object_path] = p

		res[PV_INT] = pv_list

		vg_list = []
		for v in res[VG_INT]:
			if v.Vg.Pvs[0] in pv_lookup:
				vg_list.append(v)

				for interface in \
					[LV_INT, THINPOOL_INT, LV_COMMON_INT,
						CACHE_POOL_INT, CACHE_LV_INT, VDOPOOL_INT]:
					_prune_lvs(res, interface, v.object_path)

		res[VG_INT] = vg_list

	return res


def get_objects():
	rc = {
		MANAGER_INT: [], PV_INT: [], VG_INT: [], LV_INT: [],
		THINPOOL_INT: [], JOB_INT: [], SNAPSHOT_INT: [], LV_COMMON_INT: [],
		CACHE_POOL_INT: [], CACHE_LV_INT: [], VG_VDO_INT: [], VDOPOOL_INT: []}

	object_manager_object = bus.get_object(
		BUS_NAME, "/com/redhat/lvmdbus1", introspect=False)

	manager_interface = dbus.Interface(
		object_manager_object, "org.freedesktop.DBus.ObjectManager")

	objects = manager_interface.GetManagedObjects()

	for object_path, v in objects.items():
		proxy = ClientProxy(bus, object_path, v)
		for interface in v.keys():
			rc[interface].append(proxy)

	# At this point we have a full population of everything, we now need to
	# prune the objects if we are filtering PVs with a sub selection.
	return _prune(rc, pv_device_list), bus


def set_exec_mode(lvmshell):
	lvm_manager = dbus.Interface(bus.get_object(
		BUS_NAME, "/com/redhat/lvmdbus1/Manager", introspect=False),
		"com.redhat.lvmdbus1.Manager")
	return lvm_manager.UseLvmShell(lvmshell)


def set_execution(lvmshell, test_result):
	global g_lvm_shell
	if lvmshell:
		m = 'lvm shell (non-fork)'
	else:
		m = "forking & exec'ing"

	rc = set_exec_mode(lvmshell)
	if rc:
		g_lvm_shell = lvmshell
		std_err_print('Successfully changed execution mode to "%s"' % m)
	else:
		std_err_print('ERROR: Failed to change execution mode to "%s"' % m)
		test_result.register_fail()
	return rc


def call_lvm(command):
	"""
	Call lvm executable and return a tuple of exitcode, stdout, stderr
	:param command: Command to execute
	:type command: list
	:returns (exitcode, stdout, stderr)
	:rtype (int, str, str)
	"""
	# Prepend the full lvm executable so that we can run different versions
	# in different locations on the same box
	command.insert(0, LVM_EXECUTABLE)

	process = Popen(
		command, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)
	out = process.communicate()

	stdout_text = bytes(out[0]).decode("utf-8")
	stderr_text = bytes(out[1]).decode("utf-8")
	return process.returncode, stdout_text, stderr_text
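
# A small usage sketch for call_lvm(); the particular lvm sub-command is only
# an illustration:
#   rc, out, err = call_lvm(["version"])
#   if rc != 0:
#       std_err_print(err)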


def supports_vdo():
	cmd = ['segtypes']
	modprobe = Popen(["modprobe", "kvdo"], stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)
	modprobe.communicate()
	if modprobe.returncode != 0:
		return False
	rc, out, err = call_lvm(cmd)
	if rc != 0 or "vdo" not in out:
		return False
	return True


def process_exists(name):
	# Walk the process table looking for executable 'name'
	for p in [pid for pid in os.listdir('/proc') if pid.isdigit()]:
		try:
			cmdline_args = read_file_split_nuls("/proc/%s/cmdline" % p)
		except OSError:
			continue
		for arg in cmdline_args:
			if name in arg:
				return int(p)
	return None


def read_file_split_nuls(fn):
	with open(fn, "rb") as fh:
		return [p.decode("utf-8") for p in fh.read().split(b'\x00') if len(p) > 0]


def read_file_build_hash(fn):
	rc = dict()
	lines = read_file_split_nuls(fn)
	for line in lines:
		if line.count("=") == 1:
			k, v = line.split("=")
			rc[k] = v
	return rc


def remove_lvm_debug():
	# If we are running the lvmdbusd daemon and collecting lvm debug data,
	# check and clean-up after the tests.
	tmpdir = tempfile.gettempdir()
	fp = os.path.join(tmpdir, "lvmdbusd.lvm.debug.*.log")
	for f in glob(fp):
		os.unlink(f)


class DaemonInfo(object):
	def __init__(self, pid):
		# The daemon is running, we have a pid, lets see how it's being run.
		# When running under systemd, fd 0 -> /dev/null, fd 1&2 -> socket
		# when ran manually it may have output re-directed to a file etc.
		# we need the following
		# command line arguments
		# cwd
		# where the output is going (in case it's directed to a file)
		# Which lvm binary is being used (check LVM_BINARY env. variable)
		# PYTHONPATH
		base = "/proc/%d" % pid
		self.cwd = os.readlink("%s/cwd" % base)
		self.cmdline = read_file_split_nuls("%s/cmdline" % (base))[1:]
		self.env = read_file_build_hash("%s/environ" % base)
		self.stdin = os.readlink("%s/fd/0" % base)
		self.stdout = os.readlink("%s/fd/1" % base)
		self.stderr = os.readlink("%s/fd/2" % base)

		if self.cwd == "/" and self.stdin == "/dev/null":
			self.systemd = True
		else:
			self.systemd = False

		self.process = None

	@classmethod
	def get(cls):
		pid = process_exists(EXE_NAME)
		if pid:
			return cls(pid)
		return None

	def start(self, expect_fail=False):
		if self.systemd:
			subprocess.run(["/usr/bin/systemctl", "start", "lvm2-lvmdbusd"], check=True)
		else:
			stdin_stream = None
			stdout_stream = None
			stderr_stream = None
			try:
				stdout_stream = open(self.stdout, "ab")
				stdin_stream = open(self.stdin, "rb")
				stderr_stream = open(self.stderr, "ab")

				self.process = Popen(self.cmdline, cwd=self.cwd, stdin=stdin_stream,
					stdout=stdout_stream, stderr=stderr_stream, env=self.env)

				if expect_fail:
					# Let's wait a bit to see if this process dies as expected and return the exit code
					try:
						self.process.wait(10)
						return self.process.returncode
					except subprocess.TimeoutExpired as e:
						# Process did not fail as expected, lets kill it
						os.kill(self.process.pid, signal.SIGKILL)
						self.process.wait(20)
						raise e
				else:
					# This is a hack to set the returncode.  When the Popen object goes
					# out of scope during the unit test the __del__ method gets called.
					# As we leave the daemon running the process.returncode hasn't been
					# set, so it incorrectly raises an exception that the process is
					# still running, which in our case is correct and expected.
					self.process.returncode = 0
			finally:
				# Close these in the parent
				if stdin_stream:
					stdin_stream.close()
				if stderr_stream:
					stderr_stream.close()
				if stdout_stream:
					stdout_stream.close()

		# Make sure daemon is responding to dbus events before returning
		DaemonInfo._ensure_daemon("Daemon is not responding on dbus within 20 seconds of starting!")

		# During local testing it usually takes ~0.25 seconds for daemon to be ready
		return None

	@staticmethod
	def _ensure_no_daemon():
		start = time.time()
		pid = process_exists(EXE_NAME)
		while pid is not None and (time.time() - start) <= 20:
			time.sleep(0.1)
			pid = process_exists(EXE_NAME)

		if pid:
			raise Exception(
				"lvmdbusd daemon did not exit within 20 seconds, pid = %s" % pid)

	@staticmethod
	def _ensure_daemon(msg):
		start = time.time()
		running = False
		while (time.time() - start) < 20:
			try:
				get_objects()
				running = True
				break
			except dbus.exceptions.DBusException:
				time.sleep(0.1)
		if not running:
			raise RuntimeError(msg)

	def term_signal(self, sig_number):
		# Used for signals that we expect will terminate the daemon, e.g. SIGINT, SIGKILL
		if self.process:
			os.kill(self.process.pid, sig_number)
			# Note: The following should work, but doesn't!
			# self.process.send_signal(sig_number)
			try:
				self.process.wait(10)
			except subprocess.TimeoutExpired:
				std_err_print("Daemon hasn't exited within 10 seconds")
			if self.process.poll() is None:
				std_err_print("Daemon still running...")
			else:
				self.process = None
		else:
			pid = process_exists(EXE_NAME)
			os.kill(pid, sig_number)

		# Make sure there is no daemon present before we return for things to be "good"
		DaemonInfo._ensure_no_daemon()

	def non_term_signal(self, sig_number):
		if sig_number not in [signal.SIGUSR1, signal.SIGUSR2]:
			raise ValueError("Incorrect signal number! %d" % sig_number)
		if self.process:
			os.kill(self.process.pid, sig_number)
		else:
			pid = process_exists(EXE_NAME)
			os.kill(pid, sig_number)
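
# A usage sketch for DaemonInfo, showing how the daemon-restart style tests
# are expected to drive it (the choice of SIGINT here is only illustrative):
#   di = DaemonInfo.get()
#   if di:
#       di.term_signal(signal.SIGINT)  # stop the running daemon
#       di.start()                     # bring it back with the same setup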


# noinspection PyUnresolvedReferences
class TestDbusService(unittest.TestCase):
	def setUp(self):
		self.pvs = []

		# Because of the sensitive nature of running LVM tests we will only
		# run if we have PVs and nothing else, so that we can be confident that
		# we are not mucking with someone's data on their system
		self.objs, self.bus = get_objects()
		if len(self.objs[PV_INT]) == 0:
			std_err_print('No PVs present exiting!')
			sys.exit(1)

		for p in self.objs[PV_INT]:
			self.pvs.append(p.Pv.Name)

		if len(self.objs[MANAGER_INT]) != 1:
			std_err_print('Expecting a manager object!')
			sys.exit(1)

		if len(self.objs[VG_INT]) != 0:
			std_err_print('Expecting no VGs to exist!')
			sys.exit(1)

		self.addCleanup(self.clean_up)

		self.vdo = supports_vdo()
		remove_lvm_debug()

	def _recurse_vg_delete(self, vg_proxy, pv_proxy, nested_pv_hash):
		vg_name = str(vg_proxy.Vg.Name)
		if not vg_name.endswith(VG_TEST_SUFFIX):
			std_err_print("Refusing to remove VG: %s" % vg_name)
			return

		for pv_device_name, t in nested_pv_hash.items():
			if vg_name in pv_device_name:
				self._recurse_vg_delete(t[0], t[1], nested_pv_hash)
				break

		vg_proxy.update()

		self.handle_return(vg_proxy.Vg.Remove(dbus.Int32(g_tmo), EOD))
		if is_nested_pv(pv_proxy.Pv.Name):
			rc = self._pv_remove(pv_proxy)
			self.assertTrue(rc == '/', "We expected a '/', but got %s when removing a PV" % str(rc))

	def clean_up(self):
		self.objs, self.bus = get_objects()

		# The self.objs[PV_INT] list only contains those which we should be
		# mucking with, lets remove any embedded/nested PVs first, then proceed
		# to walk the base PVs and remove the VGs
		nested_pvs = {}
		non_nested = []

		for p in self.objs[PV_INT]:
			if is_nested_pv(p.Pv.Name):
				if p.Pv.Vg != '/':
					v = ClientProxy(self.bus, p.Pv.Vg, interfaces=(VG_INT,))
					nested_pvs[p.Pv.Name] = (v, p)
				else:
					# Nested PV with no VG, so just simply remove it!
					self._pv_remove(p)
			else:
				non_nested.append(p)

		for p in non_nested:
			# When we remove a VG for a PV it could ripple across multiple
			# PVs, so update each PV while removing each VG, to ensure
			# the properties are current and correct.
			p.update()
			if p.Pv.Vg != '/':
				v = ClientProxy(self.bus, p.Pv.Vg, interfaces=(VG_INT,))
				self._recurse_vg_delete(v, p, nested_pvs)

		# Check to make sure the PVs we had to start exist, else re-create
		# them
		self.objs, self.bus = get_objects()
		if len(self.pvs) != len(self.objs[PV_INT]):
			for p in self.pvs:
				found = False
				for pc in self.objs[PV_INT]:
					if pc.Pv.Name == p:
						found = True
						break
				if not found:
					# print('Re-creating PV=', p)
					self._pv_create(p)

		remove_lvm_debug()

	def _check_consistency(self):
		# Only do consistency checks if we aren't running the unit tests
		# concurrently
		if pv_device_list is None:
			self.assertEqual(self._refresh(), 0)

	def handle_return(self, rc):
		if isinstance(rc, (tuple, list)):
			# We have a tuple returned
			if rc[0] != '/':
				return rc[0]
			else:
				return self._wait_for_job(rc[1])
		else:
			if rc == '/':
				return rc
			else:
				return self._wait_for_job(rc)
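
	# Note on the return convention handle_return() normalizes, as exercised
	# throughout these tests: creation methods return (object_path, job_path),
	# where object_path is '/' if the work was queued as a job instead of
	# finishing within the timeout; other methods return either '/' (done) or
	# a job object path to wait on.  A typical call therefore looks like:
	#   path = self.handle_return(vg.Remove(dbus.Int32(g_tmo), EOD))
	#   self.assertEqual(path, '/')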

	def _pv_create(self, device):
		pv_path = self.handle_return(
			self.objs[MANAGER_INT][0].Manager.PvCreate(
				dbus.String(device), dbus.Int32(g_tmo), EOD)
		)

		self._validate_lookup(device, pv_path)

		self.assertTrue(pv_path is not None and len(pv_path) > 0,
			"When creating a PV we expected the returned path to be valid")
		return pv_path

	def _manager(self):
		return self.objs[MANAGER_INT][0]

	def _refresh(self):
		return self._manager().Manager.Refresh()

	def test_refresh(self):
		self._check_consistency()

	def test_version(self):
		rc = self.objs[MANAGER_INT][0].Manager.Version
		self.assertTrue(rc is not None and len(rc) > 0, "Manager.Version is invalid")
		self._check_consistency()

	def _vg_create(self, pv_paths=None, vg_prefix=None, options=None):
		if not pv_paths:
			pv_paths = self._all_pv_object_paths()

		if options is None:
			options = EOD

		vg_name = vg_n(prefix=vg_prefix)

		vg_path = self.handle_return(
			self.objs[MANAGER_INT][0].Manager.VgCreate(
				dbus.String(vg_name),
				dbus.Array(pv_paths, signature=dbus.Signature('o')),
				dbus.Int32(g_tmo),
				options))

		self._validate_lookup(vg_name, vg_path)
		self.assertTrue(vg_path is not None and len(vg_path) > 0, "During VG creation, returned path is empty")

		intf = [VG_INT, ]
		if self.vdo:
			intf.append(VG_VDO_INT)

		return ClientProxy(self.bus, vg_path, interfaces=intf)

	def test_vg_create(self):
		self._vg_create()
		self._check_consistency()

	def test_vg_delete(self):
		vg = self._vg_create().Vg

		self.handle_return(
			vg.Remove(dbus.Int32(g_tmo), EOD))
		self._check_consistency()

	def _pv_remove(self, pv):
		rc = self.handle_return(
			pv.Pv.Remove(dbus.Int32(g_tmo), EOD))
		return rc

	def test_pv_remove_add(self):
		target = self.objs[PV_INT][0]

		# Remove the PV
		rc = self._pv_remove(target)
		self.assertTrue(rc == '/')
		self._check_consistency()

		# Add it back
		rc = self._pv_create(target.Pv.Name)[0]
		self.assertTrue(rc == '/')
		self._check_consistency()

	def _create_raid5_thin_pool(self, vg=None):

		meta_name = "meta_r5"
		data_name = "data_r5"

		if not vg:
			vg = self._vg_create(self._all_pv_object_paths()).Vg

		lv_meta_path = self.handle_return(
			vg.LvCreateRaid(
				dbus.String(meta_name),
				dbus.String("raid5"),
				dbus.UInt64(mib(4)),
				dbus.UInt32(0),
				dbus.UInt32(0),
				dbus.Int32(g_tmo),
				EOD)
		)
		self._validate_lookup("%s/%s" % (vg.Name, meta_name), lv_meta_path)

		lv_data_path = self.handle_return(
			vg.LvCreateRaid(
				dbus.String(data_name),
				dbus.String("raid5"),
				dbus.UInt64(mib(16)),
				dbus.UInt32(0),
				dbus.UInt32(0),
				dbus.Int32(g_tmo),
				EOD)
		)

		self._validate_lookup("%s/%s" % (vg.Name, data_name), lv_data_path)

		thin_pool_path = self.handle_return(
			vg.CreateThinPool(
				dbus.ObjectPath(lv_meta_path),
				dbus.ObjectPath(lv_data_path),
				dbus.Int32(g_tmo), EOD)
		)

		# Get thin pool client proxy
		intf = (LV_COMMON_INT, LV_INT, THINPOOL_INT)
		thin_pool = ClientProxy(self.bus, thin_pool_path, interfaces=intf)

		return vg, thin_pool

	def test_meta_lv_data_lv_props(self):
		# Ensure that metadata lv and data lv for thin pools and cache pools
		# point to a valid LV
		(vg, thin_pool) = self._create_raid5_thin_pool()

		# Check properties on thin pool
		self.assertTrue(thin_pool.ThinPool.DataLv != '/')
		self.assertTrue(thin_pool.ThinPool.MetaDataLv != '/')

		(vg, cache_pool) = self._create_cache_pool(vg)

		self.assertTrue(cache_pool.CachePool.DataLv != '/')
		self.assertTrue(cache_pool.CachePool.MetaDataLv != '/')

		# Cache the thin pool
		cached_thin_pool_path = self.handle_return(
			cache_pool.CachePool.CacheLv(
				dbus.ObjectPath(thin_pool.object_path),
				dbus.Int32(g_tmo), EOD)
		)

		# Get object proxy for cached thin pool
		intf = (LV_COMMON_INT, LV_INT, THINPOOL_INT)
		cached_thin_pool_object = ClientProxy(
			self.bus, cached_thin_pool_path, interfaces=intf)

		# Check properties on cache pool
		self.assertTrue(cached_thin_pool_object.ThinPool.DataLv != '/')
		self.assertTrue(cached_thin_pool_object.ThinPool.MetaDataLv != '/')

	def _lookup(self, lvm_id):
		return self.objs[MANAGER_INT][0].\
			Manager.LookUpByLvmId(dbus.String(lvm_id))

	def _validate_lookup(self, lvm_name, object_path):
		t = self._lookup(lvm_name)
		self.assertTrue(
			object_path == t, "%s != %s for %s" % (object_path, t, lvm_name))

	def test_lookup_by_lvm_id(self):
		# For the moment lets just lookup what we know about which is PVs
		# When we start testing VGs and LVs we will test lookups for those
		# during those unit tests
		for p in self.objs[PV_INT]:
			rc = self._lookup(p.Pv.Name)
			self.assertTrue(rc is not None and rc != '/')

		# Search for something which doesn't exist
		rc = self._lookup('/dev/null')
		self.assertTrue(rc == '/')

	def test_vg_extend(self):
		# Create a VG
		self.assertTrue(len(self.objs[PV_INT]) >= 2)

		if len(self.objs[PV_INT]) >= 2:
			pv_initial = self.objs[PV_INT][0]
			pv_next = self.objs[PV_INT][1]

			vg = self._vg_create([pv_initial.object_path]).Vg
			path = self.handle_return(
				vg.Extend(
					dbus.Array([pv_next.object_path], signature="o"),
					dbus.Int32(g_tmo), EOD)
			)
			self.assertTrue(path == '/')
			self._check_consistency()

	# noinspection PyUnresolvedReferences
	def test_vg_reduce(self):
		self.assertTrue(len(self.objs[PV_INT]) >= 2)

		if len(self.objs[PV_INT]) >= 2:
			vg = self._vg_create(
				[self.objs[PV_INT][0].object_path,
					self.objs[PV_INT][1].object_path]).Vg

			path = self.handle_return(
				vg.Reduce(
					dbus.Boolean(False), dbus.Array([vg.Pvs[0]], signature='o'),
					dbus.Int32(g_tmo), EOD)
			)
			self.assertTrue(path == '/')
			self._check_consistency()

	def _verify_lv_paths(self, vg, new_name):
		"""
		Go through each LV and make sure it has the correct path back to the
		VG
		:return:
		"""
		lv_paths = vg.Lvs

		for l in lv_paths:
			lv_proxy = ClientProxy(
				self.bus, l, interfaces=(LV_COMMON_INT,)).LvCommon
			self.assertTrue(
				lv_proxy.Vg == vg.object_path, "%s != %s" %
				(lv_proxy.Vg, vg.object_path))
			full_name = "%s/%s" % (new_name, lv_proxy.Name)
			lv_path = self._lookup(full_name)
			self.assertTrue(
				lv_path == lv_proxy.object_path, "%s != %s" %
				(lv_path, lv_proxy.object_path))

	# noinspection PyUnresolvedReferences
	def test_vg_rename(self):
		vg = self._vg_create().Vg

		# Do a vg lookup
		path = self._lookup(vg.Name)
		vg_name_start = vg.Name

		prev_path = path
		self.assertTrue(path != '/', "%s" % (path))

		# Create some LVs in the VG
		for i in range(0, 5):
			lv_t = self._create_lv(size=mib(4), vg=vg)
			full_name = "%s/%s" % (vg_name_start, lv_t.LvCommon.Name)
			lv_path = self._lookup(full_name)
			self.assertTrue(lv_path == lv_t.object_path)

		new_name = 'renamed_' + vg.Name

		path = self.handle_return(
			vg.Rename(dbus.String(new_name), dbus.Int32(g_tmo), EOD))
		self.assertTrue(path == '/')
		self._check_consistency()

		# Do a vg lookup
		path = self._lookup(new_name)
		self.assertTrue(path != '/', "%s" % (path))
		self.assertTrue(prev_path == path, "%s != %s" % (prev_path, path))

		# Go through each LV and make sure it has the correct path back to the
		# VG
		vg.update()
		self.assertTrue(len(vg.Lvs) == 5)
		self._verify_lv_paths(vg, new_name)

	def _verify_hidden_lookups(self, lv_common_object, vgname):
		hidden_lv_paths = lv_common_object.HiddenLvs

		for h in hidden_lv_paths:
			h_lv = ClientProxy(
				self.bus, h, interfaces=(LV_COMMON_INT,)).LvCommon

			if len(h_lv.HiddenLvs) > 0:
				self._verify_hidden_lookups(h_lv, vgname)

			full_name = "%s/%s" % (vgname, h_lv.Name)
			# print("Hidden check %s" % (full_name))
			lookup_path = self._lookup(full_name)
			self.assertTrue(lookup_path != '/')
			self.assertTrue(lookup_path == h_lv.object_path)

			# Let's strip off the '[ ]' and make sure we can still find it
			full_name = "%s/%s" % (vgname, h_lv.Name[1:-1])
			# print("Hidden check %s" % (full_name))
			lookup_path = self._lookup(full_name)
			self.assertTrue(lookup_path != '/')
			self.assertTrue(lookup_path == h_lv.object_path)

	def test_vg_rename_with_thin_pool(self):

		(vg, thin_pool) = self._create_raid5_thin_pool()

		vg_name_start = vg.Name

		# noinspection PyTypeChecker
		self._verify_hidden_lookups(thin_pool.LvCommon, vg_name_start)

		for i in range(0, 5):
			lv_name = lv_n()

			thin_lv_path = self.handle_return(
				thin_pool.ThinPool.LvCreate(
					dbus.String(lv_name),
					dbus.UInt64(mib(16)),
					dbus.Int32(g_tmo),
					EOD))

			self._validate_lookup(
				"%s/%s" % (vg_name_start, lv_name), thin_lv_path)

			self.assertTrue(thin_lv_path != '/')

			full_name = "%s/%s" % (vg_name_start, lv_name)

			lookup_lv_path = self._lookup(full_name)
			self.assertTrue(
				thin_lv_path == lookup_lv_path,
				"%s != %s" % (thin_lv_path, lookup_lv_path))

		# Rename the VG
		new_name = 'renamed_' + vg.Name

		path = self.handle_return(
			vg.Rename(dbus.String(new_name), dbus.Int32(g_tmo), EOD))
		self.assertTrue(path == '/')
		self._check_consistency()

		vg.update()
		thin_pool.update()

		self._verify_lv_paths(vg, new_name)
		# noinspection PyTypeChecker
		self._verify_hidden_lookups(thin_pool.LvCommon, new_name)

	def _test_lv_create(self, method, params, vg, proxy_interfaces=None):
		lv = None
		path = self.handle_return(method(*params))

		self.assertTrue(vg)

		if path:
			lv = ClientProxy(self.bus, path, interfaces=proxy_interfaces)

		# We are quick enough now that we can get VolumeType changes from
		# 'I' to 'i' between the time it takes to create a RAID and it returns
		# and when we refresh state here.  Not sure how we can handle this as
		# we cannot just sit and poll all the time for changes...
		# self._check_consistency()
		return lv

	def test_lv_create(self):
		lv_name = lv_n()
		vg = self._vg_create().Vg
		lv = self._test_lv_create(
			vg.LvCreate,
			(dbus.String(lv_name), dbus.UInt64(mib(4)),
			dbus.Array([], signature='(ott)'), dbus.Int32(g_tmo),
			EOD), vg, LV_BASE_INT)
		self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)

	def test_prop_get(self):
		lv_name = lv_n()
		vg = self._vg_create().Vg
		lv = self._test_lv_create(
			vg.LvCreate,
			(dbus.String(lv_name), dbus.UInt64(mib(4)),
			dbus.Array([], signature='(ott)'), dbus.Int32(g_tmo),
			EOD), vg, LV_BASE_INT)
		ri = RemoteInterface(lv.dbus_object, interface=LV_COMMON_INT, introspect=False)
		ri.update()
		for prop_name in ri.get_property_names():
			self.assertEqual(ri.get_property_value(prop_name), getattr(ri, prop_name))

	def test_lv_create_job(self):
		lv_name = lv_n()
		vg = self._vg_create().Vg
		(object_path, job_path) = vg.LvCreate(
			dbus.String(lv_name), dbus.UInt64(mib(4)),
			dbus.Array([], signature='(ott)'), dbus.Int32(0),
			EOD)

		self.assertTrue(object_path == '/')
		self.assertTrue(job_path != '/')
		object_path = self._wait_for_job(job_path)

		self._validate_lookup("%s/%s" % (vg.Name, lv_name), object_path)
		self.assertTrue(object_path != '/')

	def test_lv_create_linear(self):

		lv_name = lv_n()
		vg = self._vg_create().Vg
		lv = self._test_lv_create(
			vg.LvCreateLinear,
			(dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.Boolean(False),
			dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT)
		self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)

	def _all_pv_object_paths(self):
		return [pp.object_path for pp in self.objs[PV_INT]]

	def test_lv_create_striped(self):
		lv_name = lv_n()
		vg = self._vg_create(self._all_pv_object_paths()).Vg
		lv = self._test_lv_create(
			vg.LvCreateStriped,
			(dbus.String(lv_name), dbus.UInt64(mib(4)),
			dbus.UInt32(2), dbus.UInt32(8), dbus.Boolean(False),
			dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT)
		self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)

	def test_lv_create_mirror(self):
		lv_name = lv_n()
		vg = self._vg_create(self._all_pv_object_paths()).Vg
		lv = self._test_lv_create(
			vg.LvCreateMirror,
			(dbus.String(lv_name), dbus.UInt64(mib(4)), dbus.UInt32(2),
			dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT)
		self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)

	def test_lv_create_raid(self):
		lv_name = lv_n()
		vg = self._vg_create(self._all_pv_object_paths()).Vg
		lv = self._test_lv_create(
			vg.LvCreateRaid,
			(dbus.String(lv_name), dbus.String('raid5'), dbus.UInt64(mib(16)),
			dbus.UInt32(2), dbus.UInt32(8), dbus.Int32(g_tmo), EOD),
			vg, LV_BASE_INT)
		self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)

	def _create_lv(self, thinpool=False, size=None, vg=None, suffix=None):

		lv_name = lv_n(suffix=suffix)

		interfaces = list(LV_BASE_INT)
		if thinpool:
			interfaces.append(THINPOOL_INT)

		if not vg:
			vg = self._vg_create(self._all_pv_object_paths()).Vg

		if size is None:
			size = mib(8)

		lv = self._test_lv_create(
			vg.LvCreateLinear,
			(dbus.String(lv_name), dbus.UInt64(size),
			dbus.Boolean(thinpool), dbus.Int32(g_tmo), EOD),
			vg, interfaces)

		self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)
		return lv

	def _create_thin_pool_lv(self):
		return self._create_lv(True)

	def test_lv_create_rounding(self):
		self._create_lv(size=(mib(2) + 13))

	def test_lv_create_thin_pool(self):
		self._create_thin_pool_lv()

	def _rename_lv_test(self, lv):
		path = self._lookup(lv.LvCommon.Name)
		prev_path = path

		new_name = 'renamed_' + lv.LvCommon.Name

		self.handle_return(
			lv.Lv.Rename(dbus.String(new_name), dbus.Int32(g_tmo), EOD))

		path = self._lookup(new_name)

		self._check_consistency()
		self.assertTrue(prev_path == path, "%s != %s" % (prev_path, path))

		lv.update()
		self.assertTrue(
			lv.LvCommon.Name == new_name,
			"%s != %s" % (lv.LvCommon.Name, new_name))

	def test_lv_rename(self):
		# Rename a regular LV
		lv = self._create_lv()
		self._rename_lv_test(lv)

	def test_lv_thinpool_rename(self):
		# Rename a thin pool
		tp = self._create_lv(True)
		self.assertTrue(
			THINPOOL_LV_PATH in tp.object_path,
			"%s" % (tp.object_path))

		new_name = 'renamed_' + tp.LvCommon.Name
		self.handle_return(tp.Lv.Rename(
			dbus.String(new_name), dbus.Int32(g_tmo), EOD))

		tp.update()
		self._check_consistency()
		self.assertEqual(new_name, tp.LvCommon.Name)

	def _create_thin_lv(self):
		vg = self._vg_create().Vg
		tp = self._create_lv(thinpool=True, vg=vg)

		lv_name = lv_n('_thin_lv')

		thin_path = self.handle_return(
			tp.ThinPool.LvCreate(
				dbus.String(lv_name),
				dbus.UInt64(mib(10)),
				dbus.Int32(g_tmo),
				EOD)
		)
		self._validate_lookup("%s/%s" % (vg.Name, lv_name), thin_path)

		lv = ClientProxy(
			self.bus, thin_path, interfaces=(LV_COMMON_INT, LV_INT))
		return vg, thin_path, lv

	# noinspection PyUnresolvedReferences
	def test_lv_on_thin_pool_rename(self):
		# Rename a LV on a thin Pool
		vg, thin_path, lv = self._create_thin_lv()

		re_named = 'rename_test' + lv.LvCommon.Name
		rc = self.handle_return(
			lv.Lv.Rename(
				dbus.String(re_named),
				dbus.Int32(g_tmo),
				EOD)
		)

		self._validate_lookup("%s/%s" % (vg.Name, re_named), thin_path)
		self.assertTrue(rc == '/')
		self._check_consistency()

	def _lv_remove(self, lv):
		rc = self.handle_return(
			lv.Lv.Remove(
				dbus.Int32(g_tmo),
				EOD))
		self.assertTrue(rc == '/')
		self._check_consistency()

	def test_lv_remove(self):
		lv = self._create_lv()
		self._lv_remove(lv)

	def _take_lv_snapshot(self, lv_p):
		ss_name = 'ss_' + lv_p.LvCommon.Name

		ss_obj_path = self.handle_return(lv_p.Lv.Snapshot(
			dbus.String(ss_name),
			dbus.UInt64(0),
			dbus.Int32(g_tmo),
			EOD))

		self.assertTrue(ss_obj_path != '/')
		return ClientProxy(
			self.bus, ss_obj_path, interfaces=(LV_COMMON_INT, LV_INT))

	def test_lv_snapshot(self):
		lv_p = self._create_lv()
		self._take_lv_snapshot(lv_p)

	# noinspection PyUnresolvedReferences,PyUnusedLocal
	def _wait_for_job(self, j_path):
		rc = None
		j = ClientProxy(self.bus, j_path, interfaces=(JOB_INT,)).Job

		while True:
			j.update()
			if j.Complete:
				(ec, error_msg) = j.GetError
				self.assertTrue(ec == 0, "%d:%s" % (ec, error_msg))

				if ec == 0:
					self.assertTrue(j.Percent == 100, "P=%f" % j.Percent)

				rc = j.Result
				j.Remove()

				break

			if j.Wait(1):
				self.assertTrue(j.Wait(0))
				j.update()
				self.assertTrue(j.Complete)

		return rc

	def test_lv_create_pv_specific(self):
		vg = self._vg_create().Vg
		lv_name = lv_n()
		pv = vg.Pvs
		pvp = ClientProxy(self.bus, pv[0], interfaces=(PV_INT,))

		lv = self._test_lv_create(
			vg.LvCreate, (
				dbus.String(lv_name),
				dbus.UInt64(mib(4)),
				dbus.Array(
					[[pvp.object_path, 0, (pvp.Pv.PeCount - 1)]],
					signature='(ott)'),
				dbus.Int32(g_tmo), EOD), vg, LV_BASE_INT)
		self._validate_lookup("%s/%s" % (vg.Name, lv_name), lv.object_path)

	def _test_lv_resize(self, lv):
		# Can't resize cache or thin pool volumes or vdo pool lv
		if lv.LvCommon.Attr[0] == 'C' or lv.LvCommon.Attr[0] == 't' or \
				lv.LvCommon.Attr[0] == 'd':
			return

		vg = ClientProxy(self.bus, lv.LvCommon.Vg, interfaces=(VG_INT,)).Vg

		start_size = lv.LvCommon.SizeBytes
		# Vdo are fairly big and need large re-size amounts.
		if start_size > mib(4) * 3:
			delta = mib(4)
		else:
			delta = 16384

		for size in [start_size + delta, start_size - delta]:

			# Select a PV in the VG that isn't in use
			pv_empty = []
			for p in vg.Pvs:
				pobj = ClientProxy(self.bus, p, interfaces=(PV_INT,))
				if len(pobj.Pv.Lv) == 0:
					pv_empty.append(p)

			prev = lv.LvCommon.SizeBytes

			if len(pv_empty):
				p = ClientProxy(self.bus, pv_empty[0], interfaces=(PV_INT,))

				rc = self.handle_return(
					lv.Lv.Resize(
						dbus.UInt64(size),
						dbus.Array(
							[[p.object_path, 0, p.Pv.PeCount - 1]], '(oii)'),
						dbus.Int32(g_tmo), EOD))
			else:
				rc = self.handle_return(
					lv.Lv.Resize(
						dbus.UInt64(size),
						dbus.Array([], '(oii)'),
						dbus.Int32(g_tmo), EOD))

			self.assertEqual(rc, '/')
			self._check_consistency()

			lv.update()

			if prev < size:
				self.assertTrue(lv.LvCommon.SizeBytes > prev)
			else:
				# We are testing re-sizing to same size too...
				self.assertTrue(lv.LvCommon.SizeBytes <= prev)

	def test_lv_resize(self):
		pv_paths = [
			self.objs[PV_INT][0].object_path, self.objs[PV_INT][1].object_path]
		vg = self._vg_create(pv_paths).Vg
		lv = self._create_lv(vg=vg, size=mib(16))
		self._test_lv_resize(lv)

	def test_lv_resize_same(self):
		vg = self._vg_create(self._all_pv_object_paths()).Vg
		lv = self._create_lv(vg=vg)

		with self.assertRaises(dbus.exceptions.DBusException):
			lv.Lv.Resize(
				dbus.UInt64(lv.LvCommon.SizeBytes),
				dbus.Array([], '(oii)'),
				dbus.Int32(-1), EOD)

	def test_lv_move(self):
		lv = self._create_lv()

		pv_path_move = str(lv.LvCommon.Devices[0][0])

		# Test moving a specific LV
		rc = self.handle_return(
			lv.Lv.Move(
				dbus.ObjectPath(pv_path_move),
				dbus.Struct((0, 0), signature='(tt)'),
				dbus.Array([], '(ott)'), dbus.Int32(g_tmo),
				EOD))
		self.assertTrue(rc == '/')
		self._check_consistency()

		lv.update()
		new_pv = str(lv.LvCommon.Devices[0][0])
		self.assertTrue(
			pv_path_move != new_pv, "%s == %s" % (pv_path_move, new_pv))

	def _test_activate_deactivate(self, lv_p):
		self.handle_return(lv_p.Lv.Deactivate(
			dbus.UInt64(0), dbus.Int32(g_tmo), EOD))
		lv_p.update()
		self.assertFalse(lv_p.LvCommon.Active)
		self._check_consistency()

		self.handle_return(lv_p.Lv.Activate(
			dbus.UInt64(0), dbus.Int32(g_tmo), EOD))

		lv_p.update()
		self.assertTrue(lv_p.LvCommon.Active)

		# Vdo property "IndexState" when getting activated goes from
		# "opening" -> "online" after we have returned from the activate call
		# thus when we try to check the consistency we fail as the property
		# is changing on it's own and not because the lvmdbusd failed to
		# refresh it's own state.  One solution is to not expose IndexState as
		# a property.
		# TODO Expose method to determine if Lv is partaking in VDO.
		vg = ClientProxy(self.bus, lv_p.LvCommon.Vg, interfaces=(VG_INT,))
		if "vdo" not in vg.Vg.Name:
			self._check_consistency()

		# Try control flags
		for i in range(0, 6):

			self.handle_return(lv_p.Lv.Activate(
				dbus.UInt64(1 << i),
				dbus.Int32(g_tmo),
				EOD))

			self.assertTrue(lv_p.LvCommon.Active)
			self._check_consistency()

	def test_lv_activate_deactivate(self):
		lv_p = self._create_lv()
		self._test_activate_deactivate(lv_p)

	def test_move(self):
		lv = self._create_lv()

		# Test moving without being LV specific
		vg = ClientProxy(self.bus, lv.LvCommon.Vg, interfaces=(VG_INT,)).Vg
		pv_to_move = str(lv.LvCommon.Devices[0][0])

		rc = self.handle_return(
			vg.Move(
				dbus.ObjectPath(pv_to_move),
				dbus.Struct((0, 0), signature='tt'),
				dbus.Array([], '(ott)'),
				dbus.Int32(0),
				EOD))
		self.assertEqual(rc, '/')
		self._check_consistency()

		vg.update()
		lv.update()

		location = lv.LvCommon.Devices[0][0]

		dst = None
		for p in vg.Pvs:
			if p != location:
				dst = p

		# Fetch the destination
		pv = ClientProxy(self.bus, dst, interfaces=(PV_INT,)).Pv

		# Test range, move it to the middle of the new destination
		job = self.handle_return(
			vg.Move(
				dbus.ObjectPath(location),
				dbus.Struct((0, 0), signature='tt'),
				dbus.Array([(dst, pv.PeCount // 2, 0), ], '(ott)'),
				dbus.Int32(g_tmo),
				EOD))
		self.assertEqual(job, '/')
		self._check_consistency()

	def test_job_handling(self):
		pv_paths = self._all_pv_object_paths()
		vg_name = vg_n()

		# Test getting a job right away
		vg_path, vg_job = self.objs[MANAGER_INT][0].Manager.VgCreate(
			dbus.String(vg_name),
			dbus.Array(pv_paths, 'o'),
			dbus.Int32(0),
			EOD)

		self.assertTrue(vg_path == '/')
		self.assertTrue(vg_job and len(vg_job) > 0)

		vg_path = self._wait_for_job(vg_job)
		self._validate_lookup(vg_name, vg_path)

	def _create_num_lvs(self, num_lvs, no_wait=False):
		vg_proxy = self._vg_create(self._all_pv_object_paths())
		if no_wait:
			tmo = 0
		else:
			tmo = g_tmo

		for i in range(0, num_lvs):
			lv_name = lv_n()
			vg_proxy.update()
			if vg_proxy.Vg.FreeCount > 0:
				create_result = vg_proxy.Vg.LvCreateLinear(
					dbus.String(lv_name),
					dbus.UInt64(mib(4)),
					dbus.Boolean(False),
					dbus.Int32(tmo),
					EOD)

				if not no_wait:
					lv_path = self.handle_return(create_result)
					self.assertTrue(lv_path != '/')
					self._validate_lookup("%s/%s" % (vg_proxy.Vg.Name, lv_name), lv_path)
			else:
				# We ran out of space, test(s) may fail
				break
		return vg_proxy

	def _test_expired_timer(self, num_lvs):
		rc = False

		# In small configurations lvm is pretty snappy, so let's create a VG
		# add a number of LVs and then remove the VG and all the contained
		# LVs which appears to consistently run a little slow.
		vg_proxy = self._create_num_lvs(num_lvs)

		# Make sure that we are honoring the timeout
		start = time.time()

		remove_job = vg_proxy.Vg.Remove(dbus.Int32(1), EOD)

		end = time.time()

		tt_remove = float(end) - float(start)

		self.assertTrue(tt_remove < 2.0, "remove time %s" % (str(tt_remove)))

		# Depending on how long it took we could finish either way
		if remove_job != '/':
			# We got a job
			result = self._wait_for_job(remove_job)
			self.assertTrue(result == '/')
			rc = True
		else:
			# It completed before timer popped
			pass

		return rc

	# noinspection PyUnusedLocal
	def test_job_handling_timer(self):
		yes = False

		for pp in self.objs[PV_INT]:
			if '/dev/sd' not in pp.Pv.Name:
				std_err_print("Skipping test_job_handling_timer on loopback")
				return

		# This may not pass
		for i in [128, 256]:
			yes = self._test_expired_timer(i)
			if yes:
				break
			std_err_print('Attempt (%d) failed, trying again...' % (i))

		self.assertTrue(yes)

	def test_pv_tags(self):
		pvs = []

		vg = self._vg_create(self._all_pv_object_paths()).Vg

		# Get the PVs
		for p in vg.Pvs:
			pvs.append(ClientProxy(self.bus, p, interfaces=(PV_INT,)).Pv)

		for tags_value in [['hello'], ['foo', 'bar']]:

			rc = self.handle_return(
				vg.PvTagsAdd(
					dbus.Array(vg.Pvs, 'o'),
					dbus.Array(tags_value, 's'),
					dbus.Int32(g_tmo),
					EOD))
			self.assertTrue(rc == '/')

			for p in pvs:
				p.update()
				self.assertTrue(sorted(tags_value) == p.Tags)

			rc = self.handle_return(
				vg.PvTagsDel(
					dbus.Array(vg.Pvs, 'o'),
					dbus.Array(tags_value, 's'),
					dbus.Int32(g_tmo),
					EOD))
			self.assertEqual(rc, '/')

			for p in pvs:
				p.update()
				self.assertTrue([] == p.Tags)

	def test_vg_tags(self):
		vg = self._vg_create().Vg

		t = ['Testing', 'tags']

		self.handle_return(
			vg.TagsAdd(
				dbus.Array(t, 's'),
				dbus.Int32(g_tmo),
				EOD))

		vg.update()
		self.assertTrue(t == vg.Tags)

		self.handle_return(
			vg.TagsDel(
				dbus.Array(t, 's'),
				dbus.Int32(g_tmo),
				EOD))
		vg.update()
		self.assertTrue([] == vg.Tags)

	def _test_lv_tags(self, lv):
		t = ['Testing', 'tags']

		self.handle_return(
			lv.Lv.TagsAdd(
				dbus.Array(t, 's'), dbus.Int32(g_tmo), EOD))
		self._check_consistency()

		lv.update()
		self.assertTrue(t == lv.LvCommon.Tags)

		self.handle_return(
			lv.Lv.TagsDel(
				dbus.Array(t, 's'),
				dbus.Int32(g_tmo),
				EOD))
		self._check_consistency()

		lv.update()
		self.assertTrue([] == lv.LvCommon.Tags)

	def test_lv_tags(self):
		vg = self._vg_create().Vg
		lv = self._create_lv(vg=vg)
		self._test_lv_tags(lv)

	def test_vg_allocation_policy_set(self):
		vg = self._vg_create().Vg

		for p in ['anywhere', 'contiguous', 'cling', 'normal']:
			rc = self.handle_return(
				vg.AllocationPolicySet(
					dbus.String(p), dbus.Int32(g_tmo), EOD))

			self.assertEqual(rc, '/')
			vg.update()

			prop = getattr(vg, 'Alloc' + p.title())
			self.assertTrue(prop)

	def test_vg_max_pv(self):
		vg = self._vg_create([self.objs[PV_INT][0].object_path]).Vg
		for p in [0, 1, 10, 100, 100, 1024, 2 ** 32 - 1]:
			rc = self.handle_return(
				vg.MaxPvSet(
					dbus.UInt64(p), dbus.Int32(g_tmo), EOD))
			self.assertEqual(rc, '/')
			vg.update()
			self.assertTrue(
				vg.MaxPv == p,
				"Expected %s != Actual %s" % (str(p), str(vg.MaxPv)))
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_vg_max_lv ( self ) :
vg = self . _vg_create ( ) . Vg
for p in [ 0 , 1 , 10 , 100 , 100 , 1024 , 2 * * 32 - 1 ] :
2016-11-16 20:39:57 +03:00
rc = self . handle_return (
vg . MaxLvSet (
dbus . UInt64 ( p ) , dbus . Int32 ( g_tmo ) , EOD ) )
2016-02-20 00:16:05 +03:00
self . assertEqual ( rc , ' / ' )
vg . update ( )
2016-10-11 20:22:31 +03:00
self . assertTrue (
vg . MaxLv == p ,
" Expected %s != Actual %s " % ( str ( p ) , str ( vg . MaxLv ) ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_vg_uuid_gen ( self ) :
2016-11-30 23:16:59 +03:00
vg = self . _vg_create ( ) . Vg
prev_uuid = vg . Uuid
rc = self . handle_return (
vg . UuidGenerate (
dbus . Int32 ( g_tmo ) ,
EOD ) )
self . assertEqual ( rc , ' / ' )
vg . update ( )
self . assertTrue (
vg . Uuid != prev_uuid ,
" Expected %s != Actual %s " % ( vg . Uuid , prev_uuid ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_vg_activate_deactivate ( self ) :
vg = self . _vg_create ( ) . Vg
2019-10-03 23:56:45 +03:00
self . _create_lv ( vg = vg )
2016-02-20 00:16:05 +03:00
vg . update ( )
2016-02-18 02:53:35 +03:00
2016-11-16 20:39:57 +03:00
rc = self . handle_return (
vg . Deactivate (
dbus . UInt64 ( 0 ) , dbus . Int32 ( g_tmo ) , EOD ) )
2016-09-17 08:11:59 +03:00
self . assertEqual ( rc , ' / ' )
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2016-02-18 02:53:35 +03:00
2016-11-16 20:39:57 +03:00
rc = self . handle_return (
vg . Activate (
dbus . UInt64 ( 0 ) , dbus . Int32 ( g_tmo ) , EOD ) )
2016-09-17 08:11:59 +03:00
self . assertEqual ( rc , ' / ' )
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
# Try control flags
for i in range ( 0 , 5 ) :
2016-11-16 20:39:57 +03:00
self . handle_return (
vg . Activate (
dbus . UInt64 ( 1 << i ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_pv_resize ( self ) :
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
self . assertTrue ( len ( self . objs [ PV_INT ] ) > 0 )
if len ( self . objs [ PV_INT ] ) > 0 :
2019-10-09 19:45:41 +03:00
pv = ClientProxy (
self . bus , self . objs [ PV_INT ] [ 0 ] . object_path ,
interfaces = ( PV_INT , ) ) . Pv
2016-02-20 00:16:05 +03:00
original_size = pv . SizeBytes
2018-05-18 17:23:10 +03:00
new_size = original_size / / 2
2016-02-18 02:53:35 +03:00
2016-11-16 20:39:57 +03:00
self . handle_return (
pv . ReSize (
dbus . UInt64 ( new_size ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-09-17 08:11:59 +03:00
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2016-02-20 00:16:05 +03:00
pv . update ( )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
self . assertTrue ( pv . SizeBytes != original_size )
2016-11-16 20:39:57 +03:00
self . handle_return (
pv . ReSize (
dbus . UInt64 ( 0 ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2016-02-20 00:16:05 +03:00
pv . update ( )
self . assertTrue ( pv . SizeBytes == original_size )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_pv_allocation ( self ) :
2019-10-03 23:25:17 +03:00
vg = self . _vg_create ( self . _all_pv_object_paths ( ) ) . Vg
2016-02-18 02:53:35 +03:00
2016-11-16 20:39:57 +03:00
pv = ClientProxy ( self . bus , vg . Pvs [ 0 ] , interfaces = ( PV_INT , ) ) . Pv
2016-02-18 02:53:35 +03:00
2016-11-16 20:39:57 +03:00
self . handle_return (
pv . AllocationEnabled (
dbus . Boolean ( False ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-09-17 08:11:59 +03:00
2016-02-20 00:16:05 +03:00
pv . update ( )
self . assertFalse ( pv . Allocatable )
2016-02-18 02:53:35 +03:00
2016-11-16 20:39:57 +03:00
self . handle_return (
pv . AllocationEnabled (
dbus . Boolean ( True ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-09-17 08:11:59 +03:00
2016-11-16 20:39:57 +03:00
self . handle_return (
pv . AllocationEnabled (
dbus . Boolean ( True ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-20 00:16:05 +03:00
pv . update ( )
self . assertTrue ( pv . Allocatable )
2016-02-18 02:53:35 +03:00
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2016-02-18 02:53:35 +03:00
2016-09-16 22:01:18 +03:00
@staticmethod
def _get_devices ( ) :
2016-02-20 00:16:05 +03:00
context = pyudev . Context ( )
2022-08-08 18:03:52 +03:00
bd = context . list_devices ( subsystem = ' block ' )
# Handle block extended major too (259)
return [ b for b in bd if b . properties . get ( ' MAJOR ' ) == ' 8 ' or
b . properties . get ( ' MAJOR ' ) == ' 259 ' ]
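	# The filter above keeps devices by udev MAJOR number (8 = sd*, 259 =
	# block extended majors).  For cross-checking, the same (major, minor)
	# pair can be derived straight from the device node with the stdlib, as
	# _create_nested() does further down (illustrative sketch only):
	@staticmethod
	def _major_minor_from_node(device_path):
		st = os.stat(device_path)
		return os.major(st.st_rdev), os.minor(st.st_rdev)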
2016-02-18 02:53:35 +03:00
2019-10-04 00:11:18 +03:00
def _pv_scan ( self , activate , cache , device_paths , major_minors ) :
2016-02-20 00:16:05 +03:00
mgr = self . _manager ( ) . Manager
2019-10-04 00:11:18 +03:00
return self . handle_return (
mgr . PvScan (
dbus . Boolean ( activate ) ,
dbus . Boolean ( cache ) ,
dbus . Array ( device_paths , ' s ' ) ,
dbus . Array ( major_minors , ' (ii) ' ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2019-10-04 00:11:18 +03:00
def test_pv_scan ( self ) :
2019-10-09 19:48:00 +03:00
def major_minor ( d ) :
2022-08-08 18:02:37 +03:00
return ( int ( d . properties . get ( ' MAJOR ' ) ) , int ( d . properties . get ( ' MINOR ' ) ) )
2019-10-09 19:48:00 +03:00
2019-10-04 00:11:18 +03:00
devices = TestDbusService . _get_devices ( )
2016-09-17 08:11:59 +03:00
2019-10-04 00:11:18 +03:00
self . assertEqual ( self . _pv_scan ( False , True , [ ] , [ ] ) , ' / ' )
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2019-10-04 00:11:18 +03:00
self . assertEqual ( self . _pv_scan ( False , False , [ ] , [ ] ) , ' / ' )
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2016-02-18 02:53:35 +03:00
2022-08-08 18:02:37 +03:00
block_path = [ d . properties . get ( ' DEVNAME ' ) for d in devices ]
2019-10-04 00:11:18 +03:00
self . assertEqual ( self . _pv_scan ( False , True , block_path , [ ] ) , ' / ' )
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2016-02-18 02:53:35 +03:00
2019-10-09 19:48:00 +03:00
mm = [ major_minor ( d ) for d in devices ]
2016-02-18 02:53:35 +03:00
2019-10-04 00:11:18 +03:00
self . assertEqual ( self . _pv_scan ( False , True , block_path , mm ) , ' / ' )
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
2016-02-18 02:53:35 +03:00
2019-10-04 00:11:18 +03:00
self . assertEqual ( self . _pv_scan ( False , True , [ ] , mm ) , ' / ' )
2016-11-11 21:34:38 +03:00
self . _check_consistency ( )
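	# _pv_scan() above drives PvScan through the testlib proxy.  The same
	# call can be made with a plain dbus.Interface, mirroring how
	# test_external_event() talks to the Manager object below (illustrative
	# sketch; BUS_NAME comes from testlib).  As with the proxy-based helper,
	# the call returns '/' when it finishes within the timeout, otherwise a
	# Job object path:
	def _pv_scan_raw_interface(self, activate, cache, device_paths, major_minors):
		mgr = dbus.Interface(bus.get_object(
			BUS_NAME, "/com/redhat/lvmdbus1/Manager", introspect=False),
			"com.redhat.lvmdbus1.Manager")
		return mgr.PvScan(
			dbus.Boolean(activate),
			dbus.Boolean(cache),
			dbus.Array(device_paths, 's'),
			dbus.Array(major_minors, '(ii)'),
			dbus.Int32(g_tmo),
			EOD)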
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
@staticmethod
def _write_some_data ( device_path , size ) :
2018-05-18 17:23:10 +03:00
blocks = int ( size / / 512 )
2016-02-20 00:16:05 +03:00
block = bytearray ( 512 )
for i in range ( 0 , 512 ) :
block [ i ] = i % 255
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
with open ( device_path , mode = ' wb ' ) as lv :
for i in range ( 0 , blocks ) :
lv . write ( block )
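	# _write_some_data() fills the device with 512-byte blocks of a repeating
	# byte pattern.  A matching read-back check, should a test ever need to
	# confirm the data survived an operation, could look like this
	# (illustrative sketch, not used by the tests):
	@staticmethod
	def _verify_some_data(device_path, size):
		blocks = int(size // 512)
		expected = bytes(bytearray(i % 255 for i in range(0, 512)))
		with open(device_path, mode='rb') as lv:
			for _ in range(0, blocks):
				if lv.read(512) != expected:
					return False
		return True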
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_snapshot_merge ( self ) :
# Create a non-thin LV and merge it
2016-02-22 23:03:31 +03:00
ss_size = mib ( 8 )
2016-02-18 02:53:35 +03:00
2016-02-22 23:03:31 +03:00
lv_p = self . _create_lv ( size = mib ( 16 ) )
2016-02-20 00:16:05 +03:00
ss_name = lv_p . LvCommon . Name + ' _snap '
2016-09-17 08:11:59 +03:00
snapshot_path = self . handle_return (
2016-11-16 20:39:57 +03:00
lv_p . Lv . Snapshot (
dbus . String ( ss_name ) ,
dbus . UInt64 ( ss_size ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-09-17 08:11:59 +03:00
2019-10-09 19:45:41 +03:00
intf = ( LV_COMMON_INT , LV_INT , SNAPSHOT_INT , )
ss = ClientProxy ( self . bus , snapshot_path , interfaces = intf )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
# Write some data to snapshot so merge takes some time
2018-05-18 17:23:10 +03:00
TestDbusService . _write_some_data ( ss . LvCommon . Path , ss_size / / 2 )
2016-02-18 02:53:35 +03:00
2016-11-16 20:39:57 +03:00
job_path = self . handle_return (
ss . Snapshot . Merge (
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-09-17 08:11:59 +03:00
self . assertEqual ( job_path , ' / ' )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_snapshot_merge_thin ( self ) :
# Create a thin LV, snapshot it and merge it
2019-10-03 23:50:08 +03:00
_vg , _thin_path , lv_p = self . _create_thin_lv ( )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
ss_name = lv_p . LvCommon . Name + ' _snap '
2016-09-17 08:11:59 +03:00
snapshot_path = self . handle_return (
2016-11-16 20:39:57 +03:00
lv_p . Lv . Snapshot (
dbus . String ( ss_name ) ,
dbus . UInt64 ( 0 ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2019-10-09 19:45:41 +03:00
intf = ( LV_INT , LV_COMMON_INT , SNAPSHOT_INT )
ss = ClientProxy ( self . bus , snapshot_path , interfaces = intf )
2016-09-17 08:11:59 +03:00
job_path = self . handle_return (
2016-11-16 20:39:57 +03:00
ss . Snapshot . Merge (
dbus . Int32 ( g_tmo ) , EOD )
2016-09-17 08:11:59 +03:00
)
self . assertTrue ( job_path == ' / ' )
2016-02-18 02:53:35 +03:00
2016-06-10 21:36:53 +03:00
def _create_cache_pool ( self , vg = None ) :
if not vg :
vg = self . _vg_create ( ) . Vg
2016-02-18 02:53:35 +03:00
2016-02-22 23:03:31 +03:00
md = self . _create_lv ( size = ( mib ( 8 ) ) , vg = vg )
data = self . _create_lv ( size = ( mib ( 8 ) ) , vg = vg )
2016-02-18 02:53:35 +03:00
2016-09-17 08:11:59 +03:00
cache_pool_path = self . handle_return (
vg . CreateCachePool (
2016-11-16 20:39:57 +03:00
dbus . ObjectPath ( md . object_path ) ,
dbus . ObjectPath ( data . object_path ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2019-10-09 19:45:41 +03:00
intf = ( CACHE_POOL_INT , )
cp = ClientProxy ( self . bus , cache_pool_path , interfaces = intf )
2016-02-18 02:53:35 +03:00
2016-06-10 21:36:53 +03:00
return vg , cp
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_cache_pool_create ( self ) :
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
vg , cache_pool = self . _create_cache_pool ( )
2016-02-18 02:53:35 +03:00
2016-10-11 20:22:31 +03:00
self . assertTrue (
' /com/redhat/lvmdbus1/CachePool ' in cache_pool . object_path )
2016-02-18 02:53:35 +03:00
2019-10-11 18:49:10 +03:00
def _create_cache_lv ( self , return_all = False ) :
2019-10-09 20:29:52 +03:00
vg , cache_pool = self . _create_cache_pool ( )
2016-02-18 02:53:35 +03:00
2019-10-11 18:49:10 +03:00
lv_to_cache = self . _create_lv ( size = mib ( 32 ) , vg = vg )
2016-02-18 02:53:35 +03:00
2019-10-09 20:29:52 +03:00
c_lv_path = self . handle_return (
cache_pool . CachePool . CacheLv (
dbus . ObjectPath ( lv_to_cache . object_path ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2019-10-09 20:29:52 +03:00
intf = ( LV_COMMON_INT , LV_INT , CACHE_LV_INT )
cached_lv = ClientProxy ( self . bus , c_lv_path , interfaces = intf )
2019-10-11 18:49:10 +03:00
if return_all :
return vg , cache_pool , cached_lv
return cached_lv
2016-02-18 02:53:35 +03:00
2019-10-09 20:29:52 +03:00
def test_cache_lv_create ( self ) :
2016-02-18 02:53:35 +03:00
2019-10-09 20:29:52 +03:00
for destroy_cache in [ True , False ] :
2019-10-11 18:49:10 +03:00
vg , _ , cached_lv = self . _create_cache_lv ( True )
2016-09-17 08:11:59 +03:00
uncached_lv_path = self . handle_return (
2016-11-16 20:39:57 +03:00
cached_lv . CachedLv . DetachCachePool (
dbus . Boolean ( destroy_cache ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-10-11 20:22:31 +03:00
self . assertTrue (
' /com/redhat/lvmdbus1/Lv ' in uncached_lv_path )
2016-02-18 02:53:35 +03:00
2016-11-16 20:39:57 +03:00
rc = self . handle_return (
vg . Remove ( dbus . Int32 ( g_tmo ) , EOD ) )
2016-09-17 08:11:59 +03:00
self . assertTrue ( rc == ' / ' )
2016-02-18 02:53:35 +03:00
2019-10-08 16:03:50 +03:00
def test_cache_lv_rename ( self ) :
"""
Make sure that if we rename a cache lv that we correctly handle the
internal state update .
: return :
"""
2019-10-09 20:29:52 +03:00
def verify_cache_lv_count ( ) :
cur_objs , _ = get_objects ( )
self . assertEqual ( len ( cur_objs [ CACHE_LV_INT ] ) , 2 )
self . _check_consistency ( )
2019-10-08 16:03:50 +03:00
2019-10-11 18:49:10 +03:00
cached_lv = self . _create_cache_lv ( )
2019-10-08 16:03:50 +03:00
2019-10-09 20:29:52 +03:00
verify_cache_lv_count ( )
2019-10-08 16:03:50 +03:00
new_name = ' renamed_ ' + cached_lv . LvCommon . Name
2019-10-09 19:45:41 +03:00
self . handle_return (
cached_lv . Lv . Rename ( dbus . String ( new_name ) , dbus . Int32 ( g_tmo ) , EOD ) )
2019-10-09 20:29:52 +03:00
verify_cache_lv_count ( )
2019-10-08 16:03:50 +03:00
2020-07-01 14:27:46 +03:00
def test_writecache_lv ( self ) :
vg = self . _vg_create ( ) . Vg
data_lv = self . _create_lv ( size = mib ( 16 ) , vg = vg )
cache_lv = self . _create_lv ( size = mib ( 16 ) , vg = vg )
# both LVs need to be inactive
self . handle_return ( data_lv . Lv . Deactivate (
dbus . UInt64 ( 0 ) , dbus . Int32 ( g_tmo ) , EOD ) )
data_lv . update ( )
self . handle_return ( cache_lv . Lv . Deactivate (
dbus . UInt64 ( 0 ) , dbus . Int32 ( g_tmo ) , EOD ) )
cache_lv . update ( )
cached_lv_path = self . handle_return (
cache_lv . Lv . WriteCacheLv (
dbus . ObjectPath ( data_lv . object_path ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
intf = ( LV_COMMON_INT , LV_INT , CACHE_LV_INT )
cached_lv = ClientProxy ( self . bus , cached_lv_path , interfaces = intf )
self . assertEqual ( cached_lv . LvCommon . SegType , [ " writecache " ] )
uncached_lv_path = self . handle_return (
cached_lv . CachedLv . DetachCachePool (
dbus . Boolean ( True ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
self . assertTrue ( ' /com/redhat/lvmdbus1/Lv ' in uncached_lv_path )
2016-02-20 00:16:05 +03:00
def test_vg_change ( self ) :
vg_proxy = self . _vg_create ( )
2016-09-17 08:11:59 +03:00
result = self . handle_return ( vg_proxy . Vg . Change (
2016-11-16 20:39:57 +03:00
dbus . Int32 ( g_tmo ) ,
dbus . Dictionary ( { ' -a ' : ' ay ' } , ' sv ' ) ) )
2016-02-20 00:16:05 +03:00
self . assertTrue ( result == ' / ' )
2016-09-17 08:11:59 +03:00
result = self . handle_return (
2016-11-16 20:39:57 +03:00
vg_proxy . Vg . Change (
dbus . Int32 ( g_tmo ) ,
dbus . Dictionary ( { ' -a ' : ' n ' } , ' sv ' ) ) )
2016-02-20 00:16:05 +03:00
self . assertTrue ( result == ' / ' )
2016-02-18 02:53:35 +03:00
2016-09-16 22:01:18 +03:00
@staticmethod
def _invalid_vg_lv_name_characters ( ) :
2016-02-20 00:16:05 +03:00
bad_vg_lv_set = set ( string . printable ) - \
set ( string . ascii_letters + string . digits + ' .-_+ ' )
return ' ' . join ( bad_vg_lv_set )
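	# The character set above mirrors what lvm accepts for VG/LV names
	# (letters, digits and ".-_+").  A simple predicate built on the same
	# assumption (illustrative sketch; length and reserved-name rules are
	# exercised separately in test_invalid_names below):
	@staticmethod
	def _name_has_only_allowed_characters(name):
		allowed = set(string.ascii_letters + string.digits + '.-_+')
		return len(name) > 0 and all(c in allowed for c in name)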
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_invalid_names ( self ) :
mgr = self . objs [ MANAGER_INT ] [ 0 ] . Manager
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
# Pv device path
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
2016-11-16 20:39:57 +03:00
mgr . PvCreate (
dbus . String ( " /dev/space in name " ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
# VG Name testing...
# Go through all bad characters
pv_paths = [ self . objs [ PV_INT ] [ 0 ] . object_path ]
2016-09-16 22:01:18 +03:00
bad_chars = TestDbusService . _invalid_vg_lv_name_characters ( )
2016-02-20 00:16:05 +03:00
for c in bad_chars :
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
2016-11-16 20:39:57 +03:00
mgr . VgCreate (
dbus . String ( " name %s " % ( c ) ) ,
dbus . Array ( pv_paths , ' o ' ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
# Bad names
for bad in [ " . " , " .. " ] :
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
2016-11-16 20:39:57 +03:00
mgr . VgCreate (
dbus . String ( bad ) ,
dbus . Array ( pv_paths , ' o ' ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
# Exceed name length
for i in [ 128 , 1024 , 4096 ] :
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
2016-11-16 20:39:57 +03:00
mgr . VgCreate (
dbus . String ( ' T ' * i ) ,
dbus . Array ( pv_paths , ' o ' ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
# Create a VG and try to create LVs with different bad names
2017-03-01 20:09:51 +03:00
vg_name = vg_n ( )
2016-09-17 08:11:59 +03:00
vg_path = self . handle_return (
2016-11-16 20:39:57 +03:00
mgr . VgCreate (
2017-03-01 20:09:51 +03:00
dbus . String ( vg_name ) ,
2016-11-16 20:39:57 +03:00
dbus . Array ( pv_paths , ' o ' ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2017-03-02 02:22:32 +03:00
self . _validate_lookup ( vg_name , vg_path )
2016-09-17 08:11:59 +03:00
2016-11-16 20:39:57 +03:00
vg_proxy = ClientProxy ( self . bus , vg_path , interfaces = ( VG_INT , ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
for c in bad_chars :
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
vg_proxy . Vg . LvCreateLinear (
2016-11-16 20:39:57 +03:00
dbus . String ( lv_n ( ) + c ) ,
dbus . UInt64 ( mib ( 4 ) ) ,
dbus . Boolean ( False ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-10-11 20:22:31 +03:00
for reserved in (
" _cdata " , " _cmeta " , " _corig " , " _mimage " , " _mlog " ,
" _pmspare " , " _rimage " , " _rmeta " , " _tdata " , " _tmeta " ,
2019-10-03 20:19:07 +03:00
" _vorigin " , " _vdata " ) :
2016-02-20 00:16:05 +03:00
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
vg_proxy . Vg . LvCreateLinear (
2016-11-16 20:39:57 +03:00
dbus . String ( lv_n ( ) + reserved ) ,
dbus . UInt64 ( mib ( 4 ) ) ,
dbus . Boolean ( False ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-10-11 20:22:31 +03:00
for reserved in ( " snapshot " , " pvmove " ) :
2016-02-20 00:16:05 +03:00
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
vg_proxy . Vg . LvCreateLinear (
2016-11-16 20:39:57 +03:00
dbus . String ( reserved + lv_n ( ) ) ,
dbus . UInt64 ( mib ( 4 ) ) ,
dbus . Boolean ( False ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-08-30 01:59:42 +03:00
_ALLOWABLE_TAG_CH = string . ascii_letters + string . digits + " ._-+/=!:&# "
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def _invalid_tag_characters ( self ) :
2016-08-30 01:59:42 +03:00
bad_tag_ch_set = set ( string . printable ) - set ( self . _ALLOWABLE_TAG_CH )
2016-02-20 00:16:05 +03:00
return ' ' . join ( bad_tag_ch_set )
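	# Tags allow a wider character set than VG/LV names (_ALLOWABLE_TAG_CH
	# above).  The corresponding predicate, for reference (illustrative
	# sketch only):
	def _tag_has_only_allowed_characters(self, tag):
		return len(tag) > 0 and all(c in self._ALLOWABLE_TAG_CH for c in tag)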
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
def test_invalid_tags ( self ) :
2019-10-03 22:55:51 +03:00
vg_proxy = self . _vg_create ( )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
for c in self . _invalid_tag_characters ( ) :
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
vg_proxy . Vg . TagsAdd (
2016-11-16 20:39:57 +03:00
dbus . Array ( [ c ] , ' s ' ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
for c in self . _invalid_tag_characters ( ) :
with self . assertRaises ( dbus . exceptions . DBusException ) :
2016-09-17 08:11:59 +03:00
self . handle_return (
vg_proxy . Vg . TagsAdd (
2016-11-16 20:39:57 +03:00
dbus . Array ( [ " a %s b " % ( c ) ] , ' s ' ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
2016-02-18 02:53:35 +03:00
2019-10-03 23:05:37 +03:00
def _tag_add_common ( self , vg_proxy , tag ) :
tmp = self . handle_return (
vg_proxy . Vg . TagsAdd (
dbus . Array ( [ tag ] , ' s ' ) ,
dbus . Int32 ( g_tmo ) ,
EOD ) )
self . assertTrue ( tmp == ' / ' )
vg_proxy . update ( )
self . assertTrue (
tag in vg_proxy . Vg . Tags ,
" %s not in %s " % ( tag , str ( vg_proxy . Vg . Tags ) ) )
2016-02-20 00:16:05 +03:00
def test_tag_names ( self ) :
2019-10-03 22:55:51 +03:00
vg_proxy = self . _vg_create ( )
2016-02-18 02:53:35 +03:00
2016-02-20 00:16:05 +03:00
for i in range ( 1 , 64 ) :
tag = rs ( i , " " , self . _ALLOWABLE_TAG_CH )
2019-10-03 23:05:37 +03:00
self . _tag_add_common ( vg_proxy , tag )
2016-02-18 02:53:35 +03:00
2016-10-11 20:22:31 +03:00
self . assertEqual (
i , len ( vg_proxy . Vg . Tags ) ,
" %d != %d " % ( i , len ( vg_proxy . Vg . Tags ) ) )
2016-02-18 02:53:35 +03:00
2016-08-30 01:59:42 +03:00
def test_tag_regression ( self ) :
2019-10-03 22:55:51 +03:00
vg_proxy = self . _vg_create ( )
2016-08-30 01:59:42 +03:00
tag = ' --h/K.6g0A4FOEatf3+k_nI/Yp&L_u2oy-=j649x:+dUcYWPEo6.IWT0c '
2019-10-03 23:05:37 +03:00
self . _tag_add_common ( vg_proxy , tag )
2016-08-30 01:59:42 +03:00
2017-03-09 00:50:46 +03:00
def _verify_existence ( self , cmd , operation , resource_name ) :
ec , stdout , stderr = call_lvm ( cmd )
if ec == 0 :
path = self . _lookup ( resource_name )
self . assertTrue ( path != ' / ' )
else :
std_err_print (
" %s failed with stdout= %s , stderr= %s " %
( operation , stdout , stderr ) )
2017-03-10 01:07:14 +03:00
self . assertTrue ( ec == 0 , " %s exit code = %d " % ( operation , ec ) )
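	# _verify_existence() relies on testlib's call_lvm() to run the external
	# command.  Conceptually that is just a Popen of LVM_EXECUTABLE, the same
	# approach _vg_create_specify_devices() uses further down (illustrative
	# sketch, not a replacement for call_lvm):
	@staticmethod
	def _run_external_lvm(args):
		cmd = [LVM_EXECUTABLE]
		cmd.extend(args)
		process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)
		stdout_txt, stderr_txt = process.communicate()
		return process.returncode, stdout_txt.decode("utf-8"), stderr_txt.decode("utf-8")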
2017-03-09 00:50:46 +03:00
def test_external_vg_create ( self ) :
		# We need to ensure that if a user creates something outside of the
		# lvm dbus service, things are sequenced correctly so that a dbus
		# client calling into the service will find the same information.
vg_name = vg_n ( )
# Get all the PV device paths
2019-10-03 23:25:17 +03:00
pv_device_paths = [ p . Pv . Name for p in self . objs [ PV_INT ] ]
2017-03-09 00:50:46 +03:00
cmd = [ ' vgcreate ' , vg_name ]
2019-10-03 23:25:17 +03:00
cmd . extend ( pv_device_paths )
2017-03-09 00:50:46 +03:00
self . _verify_existence ( cmd , cmd [ 0 ] , vg_name )
2016-10-07 23:30:18 +03:00
2017-03-09 00:52:30 +03:00
def test_external_lv_create ( self ) :
		# Create an LV outside of the service and verify that we correctly
		# handle its inclusion.
vg = self . _vg_create ( ) . Vg
lv_name = lv_n ( )
full_name = " %s / %s " % ( vg . Name , lv_name )
cmd = [ ' lvcreate ' , ' -L4M ' , ' -n ' , lv_name , vg . Name ]
self . _verify_existence ( cmd , cmd [ 0 ] , full_name )
2017-03-09 00:52:48 +03:00
def test_external_pv_create ( self ) :
		# Create a PV outside of the service and verify that we correctly
		# handle its inclusion.
target = self . objs [ PV_INT ] [ 0 ]
# Remove the PV
rc = self . _pv_remove ( target )
self . assertTrue ( rc == ' / ' )
self . _check_consistency ( )
# Make sure the PV we removed no longer exists
self . assertTrue ( self . _lookup ( target . Pv . Name ) == ' / ' )
# Add it back with external command line
cmd = [ ' pvcreate ' , target . Pv . Name ]
self . _verify_existence ( cmd , cmd [ 0 ] , target . Pv . Name )
2022-08-10 21:59:11 +03:00
def _create_nested ( self , pv_object_path , vg_suffix ) :
vg = self . _vg_create ( [ pv_object_path ] , vg_suffix )
2017-04-22 08:06:59 +03:00
pv = ClientProxy ( self . bus , pv_object_path , interfaces = ( PV_INT , ) )
self . assertEqual ( pv . Pv . Vg , vg . object_path )
2019-10-09 19:45:41 +03:00
self . assertIn (
pv_object_path , vg . Vg . Pvs , " Expecting PV object path in Vg.Pvs " )
2017-04-22 08:06:59 +03:00
2019-10-09 19:45:41 +03:00
lv = self . _create_lv (
2021-03-26 22:21:57 +03:00
vg = vg . Vg , size = vg . Vg . FreeBytes , suffix = " _pv0 " )
2017-04-22 08:06:59 +03:00
device_path = ' /dev/ %s / %s ' % ( vg . Vg . Name , lv . LvCommon . Name )
2022-08-10 21:59:11 +03:00
dev_info = os . stat ( device_path )
major = os . major ( dev_info . st_rdev )
minor = os . minor ( dev_info . st_rdev )
sysfs = " /sys/dev/block/ %d : %d " % ( major , minor )
self . assertTrue ( os . path . exists ( sysfs ) )
2017-04-22 08:06:59 +03:00
new_pv_object_path = self . _pv_create ( device_path )
vg . update ( )
self . assertEqual ( lv . LvCommon . Vg , vg . object_path )
2019-10-09 19:45:41 +03:00
self . assertIn (
lv . object_path , vg . Vg . Lvs , " Expecting LV object path in Vg.Lvs " )
2017-04-22 08:06:59 +03:00
2019-10-09 19:45:41 +03:00
new_pv_proxy = ClientProxy (
self . bus , new_pv_object_path , interfaces = ( PV_INT , ) )
2017-04-22 08:06:59 +03:00
self . assertEqual ( new_pv_proxy . Pv . Name , device_path )
return new_pv_object_path
2022-08-30 20:47:14 +03:00
@staticmethod
def _scan_lvs_enabled ( ) :
cmd = [ ' lvmconfig ' , ' --typeconfig ' , ' full ' , ' devices/scan_lvs ' ]
config = Popen ( cmd , stdout = PIPE , stderr = PIPE , close_fds = True , env = os . environ )
out = config . communicate ( )
if config . returncode != 0 :
return False
if " scan_lvs=1 " == out [ 0 ] . decode ( " utf-8 " ) . strip ( ) :
return True
return False
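	# _scan_lvs_enabled() is a special case of querying a single lvm config
	# key.  A generalized helper under the same assumption (lvmconfig prints
	# "key=value" for a full-type query) might look like this (sketch only):
	@staticmethod
	def _lvm_config_value(key):
		cmd = ['lvmconfig', '--typeconfig', 'full', key]
		config = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)
		out = config.communicate()
		if config.returncode != 0:
			return None
		# e.g. "devices/scan_lvs" yields "scan_lvs=0" or "scan_lvs=1"
		return out[0].decode("utf-8").strip()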
2017-04-22 08:06:59 +03:00
def test_nesting ( self ) :
		# Check to see if we handle an LV becoming a PV which has its own LV.
		#
		# NOTE: This needs an equivalent of aux extend_filter_LVMTEST
		# when run from the lvm2 testsuite.  See dbustest.sh.
		# Also, if developing locally with actual devices one can achieve this
		# by editing lvm.conf with "devices/scan_lvs = 1".  As testing
		# typically utilizes loopback, this test is skipped in those
		# environments.
2021-03-26 22:21:57 +03:00
if dm_dev_dir != ' /dev ' :
raise unittest . SkipTest ( ' test not running in real /dev ' )
2022-08-30 20:47:14 +03:00
if not TestDbusService . _scan_lvs_enabled ( ) :
raise unittest . SkipTest ( ' scan_lvs=0 in config, unit test requires scan_lvs=1 ' )
2017-04-22 08:06:59 +03:00
pv_object_path = self . objs [ PV_INT ] [ 0 ] . object_path
2018-12-18 21:43:57 +03:00
if not self . objs [ PV_INT ] [ 0 ] . Pv . Name . startswith ( " /dev " ) :
raise unittest . SkipTest ( ' test not running in /dev ' )
2018-05-19 13:08:23 +03:00
2017-04-22 08:06:59 +03:00
for i in range ( 0 , 5 ) :
2022-08-10 21:59:11 +03:00
pv_object_path = self . _create_nested ( pv_object_path , " nest_ %d _ " % i )
2017-04-22 08:06:59 +03:00
2018-12-18 21:43:57 +03:00
def test_pv_symlinks ( self ) :
2022-06-06 17:59:59 +03:00
# Let's take one of our test PVs, pvremove it, find a symlink to it
2017-05-03 22:07:13 +03:00
# and re-create using the symlink to ensure we return an object
# path to it. Additionally, we will take the symlink and do a lookup
# (Manager.LookUpByLvmId) using it and the original device path to
# ensure that we can find the PV.
symlink = None
pv = self . objs [ PV_INT ] [ 0 ]
pv_device_path = pv . Pv . Name
2021-03-28 02:18:38 +03:00
if dm_dev_dir != ' /dev ' :
raise unittest . SkipTest ( ' test not running in real /dev ' )
2018-12-18 21:43:57 +03:00
if not pv_device_path . startswith ( " /dev " ) :
raise unittest . SkipTest ( ' test not running in /dev ' )
2017-05-03 22:07:13 +03:00
self . _pv_remove ( pv )
# Make sure we no longer find the pv
rc = self . _lookup ( pv_device_path )
self . assertEqual ( rc , ' / ' )
2022-06-06 17:59:59 +03:00
# Let's locate a symlink for it
2017-05-03 22:07:13 +03:00
devices = glob ( ' /dev/disk/*/* ' )
2021-03-26 15:13:26 +03:00
rp_pv_device_path = os . path . realpath ( pv_device_path )
2017-05-03 22:07:13 +03:00
for d in devices :
2021-03-26 15:13:26 +03:00
if rp_pv_device_path == os . path . realpath ( d ) :
2017-05-03 22:07:13 +03:00
symlink = d
break
self . assertIsNotNone ( symlink , " We expected to find at least 1 symlink! " )
# Make sure symlink look up fails too
rc = self . _lookup ( symlink )
self . assertEqual ( rc , ' / ' )
2021-03-26 15:13:26 +03:00
### pv_object_path = self._pv_create(symlink)
### Test is limited by filter rules and must use /dev/mapper/LVMTEST path
pv_object_path = self . _pv_create ( pv_device_path )
2017-05-03 22:07:13 +03:00
self . assertNotEqual ( pv_object_path , ' / ' )
pv_proxy = ClientProxy ( self . bus , pv_object_path , interfaces = ( PV_INT , ) )
self . assertEqual ( pv_proxy . Pv . Name , pv_device_path )
# Lets check symlink lookup
self . assertEqual ( pv_object_path , self . _lookup ( pv_device_path ) )
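	# The symlink hunt above stops at the first /dev/disk/*/* entry that
	# resolves to the PV.  Collecting every alias instead is a small
	# variation on the same realpath comparison (illustrative sketch):
	@staticmethod
	def _all_symlinks_for(device_path):
		rp = os.path.realpath(device_path)
		return [d for d in glob('/dev/disk/*/*') if os.path.realpath(d) == rp]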
2019-12-27 17:29:15 +03:00
def _create_vdo_pool_and_lv ( self , vg_prefix = " vdo_ " ) :
2019-10-10 00:55:39 +03:00
pool_name = lv_n ( " _vdo_pool " )
lv_name = lv_n ( )
2019-12-27 17:29:15 +03:00
vg_proxy = self . _vg_create ( vg_prefix = vg_prefix )
2019-10-10 00:55:39 +03:00
vdo_pool_object_path = self . handle_return (
vg_proxy . VgVdo . CreateVdoPoolandLv (
pool_name , lv_name ,
2022-05-26 00:03:27 +03:00
dbus . UInt64 ( VDO_MIN_SIZE ) ,
dbus . UInt64 ( VDO_MIN_SIZE * 2 ) ,
2019-10-10 00:55:39 +03:00
dbus . Int32 ( g_tmo ) ,
EOD ) )
self . assertNotEqual ( vdo_pool_object_path , " / " )
self . assertEqual (
vdo_pool_object_path ,
self . _lookup ( " %s / %s " % ( vg_proxy . Vg . Name , pool_name ) ) )
2019-10-11 18:49:10 +03:00
vdo_pool_path = self . _lookup ( " %s / %s " % ( vg_proxy . Vg . Name , pool_name ) )
self . assertNotEqual ( vdo_pool_path , " / " )
intf = [ LV_COMMON_INT , LV_INT ]
vdo_lv_obj_path = self . _lookup ( " %s / %s " % ( vg_proxy . Vg . Name , lv_name ) )
vdo_lv = ClientProxy ( self . bus , vdo_lv_obj_path , interfaces = intf )
intf . append ( VDOPOOL_INT )
vdo_pool_lv = ClientProxy ( self . bus , vdo_pool_path , interfaces = intf )
return vg_proxy , vdo_pool_lv , vdo_lv
def _create_vdo_lv ( self ) :
return self . _create_vdo_pool_and_lv ( ) [ 2 ]
def _vdo_pool_lv ( self ) :
return self . _create_vdo_pool_and_lv ( ) [ 1 ]
2019-10-10 00:55:39 +03:00
def test_vdo_pool_create ( self ) :
# Basic vdo sanity testing
if not self . vdo :
raise unittest . SkipTest ( ' vdo not supported ' )
# Do this twice to ensure we are providing the correct flags to force
2022-06-06 17:59:59 +03:00
# the operation when it finds an existing vdo signature, which likely
2019-10-10 00:55:39 +03:00
# shouldn't exist.
for _ in range ( 0 , 2 ) :
2019-10-11 18:49:10 +03:00
vg , _ , _ = self . _create_vdo_pool_and_lv ( )
2019-10-10 00:55:39 +03:00
self . handle_return ( vg . Vg . Remove ( dbus . Int32 ( g_tmo ) , EOD ) )
2020-01-06 13:28:41 +03:00
def _create_vdo_pool ( self ) :
pool_name = lv_n ( ' _vdo_pool ' )
lv_name = lv_n ( ' _vdo_data ' )
vg_proxy = self . _vg_create ( vg_prefix = " vdo_conv_ " )
lv = self . _test_lv_create (
vg_proxy . Vg . LvCreate ,
2022-05-26 00:03:27 +03:00
( dbus . String ( pool_name ) , dbus . UInt64 ( VDO_MIN_SIZE ) ,
2020-01-06 13:28:41 +03:00
dbus . Array ( [ ] , signature = ' (ott) ' ) , dbus . Int32 ( g_tmo ) ,
EOD ) , vg_proxy . Vg , LV_BASE_INT )
lv_obj_path = self . _lookup ( " %s / %s " % ( vg_proxy . Vg . Name , pool_name ) )
self . assertNotEqual ( lv_obj_path , " / " )
vdo_pool_path = self . handle_return (
vg_proxy . VgVdo . CreateVdoPool (
dbus . ObjectPath ( lv . object_path ) , lv_name ,
2022-05-26 00:03:27 +03:00
dbus . UInt64 ( VDO_MIN_SIZE ) ,
2020-01-06 13:28:41 +03:00
dbus . Int32 ( g_tmo ) ,
EOD ) )
self . assertNotEqual ( vdo_pool_path , " / " )
self . assertEqual (
vdo_pool_path ,
self . _lookup ( " %s / %s " % ( vg_proxy . Vg . Name , pool_name ) ) )
intf = [ LV_COMMON_INT , LV_INT ]
vdo_lv_obj_path = self . _lookup ( " %s / %s " % ( vg_proxy . Vg . Name , lv_name ) )
vdo_lv = ClientProxy ( self . bus , vdo_lv_obj_path , interfaces = intf )
intf . append ( VDOPOOL_INT )
vdo_pool_lv = ClientProxy ( self . bus , vdo_pool_path , interfaces = intf )
return vg_proxy , vdo_pool_lv , vdo_lv
def test_vdo_pool_convert ( self ) :
# Basic vdo sanity testing
if not self . vdo :
raise unittest . SkipTest ( ' vdo not supported ' )
vg , _pool , _lv = self . _create_vdo_pool ( )
self . handle_return ( vg . Vg . Remove ( dbus . Int32 ( g_tmo ) , EOD ) )
2019-12-27 17:29:15 +03:00
def test_vdo_pool_compression_deduplication ( self ) :
if not self . vdo :
raise unittest . SkipTest ( ' vdo not supported ' )
vg , pool , _lv = self . _create_vdo_pool_and_lv ( vg_prefix = " vdo2_ " )
# compression and deduplication should be enabled by default
self . assertEqual ( pool . VdoPool . Compression , " enabled " )
self . assertEqual ( pool . VdoPool . Deduplication , " enabled " )
self . handle_return (
pool . VdoPool . DisableCompression ( dbus . Int32 ( g_tmo ) , EOD ) )
self . handle_return (
pool . VdoPool . DisableDeduplication ( dbus . Int32 ( g_tmo ) , EOD ) )
pool . update ( )
self . assertEqual ( pool . VdoPool . Compression , " " )
self . assertEqual ( pool . VdoPool . Deduplication , " " )
self . handle_return (
pool . VdoPool . EnableCompression ( dbus . Int32 ( g_tmo ) , EOD ) )
self . handle_return (
pool . VdoPool . EnableDeduplication ( dbus . Int32 ( g_tmo ) , EOD ) )
pool . update ( )
self . assertEqual ( pool . VdoPool . Compression , " enabled " )
self . assertEqual ( pool . VdoPool . Deduplication , " enabled " )
self . handle_return ( vg . Vg . Remove ( dbus . Int32 ( g_tmo ) , EOD ) )
2019-10-11 18:49:10 +03:00
def _test_lv_method_interface ( self , lv ) :
self . _rename_lv_test ( lv )
self . _test_activate_deactivate ( lv )
self . _test_lv_tags ( lv )
self . _test_lv_resize ( lv )
def _test_lv_method_interface_sequence (
self , lv , test_ss = True , remove_lv = True ) :
self . _test_lv_method_interface ( lv )
# We can't take a snapshot of a pool lv (not yet).
if test_ss :
ss_lv = self . _take_lv_snapshot ( lv )
self . _test_lv_method_interface ( ss_lv )
self . _lv_remove ( ss_lv )
if remove_lv :
self . _lv_remove ( lv )
def test_lv_interface_plain_lv ( self ) :
self . _test_lv_method_interface_sequence ( self . _create_lv ( ) )
def test_lv_interface_vdo_lv ( self ) :
if not self . vdo :
raise unittest . SkipTest ( ' vdo not supported ' )
self . _test_lv_method_interface_sequence ( self . _create_vdo_lv ( ) )
def test_lv_interface_cache_lv ( self ) :
self . _test_lv_method_interface_sequence (
self . _create_cache_lv ( ) , remove_lv = False )
def test_lv_interface_thin_pool_lv ( self ) :
self . _test_lv_method_interface_sequence (
self . _create_thin_pool_lv ( ) , test_ss = False )
def test_lv_interface_vdo_pool_lv ( self ) :
if not self . vdo :
raise unittest . SkipTest ( ' vdo not supported ' )
self . _test_lv_method_interface_sequence (
self . _vdo_pool_lv ( ) , test_ss = False )
2022-08-17 20:11:07 +03:00
def _log_file_option ( self ) :
2023-03-02 00:59:14 +03:00
fn = os . path . join ( tempfile . gettempdir ( ) , rs ( 8 , " _lvm.log " ) )
2022-08-17 20:11:07 +03:00
try :
options = dbus . Dictionary ( { } , signature = dbus . Signature ( ' sv ' ) )
option_str = " log { level=7 file= %s syslog=0 } " % fn
options [ " config " ] = dbus . String ( option_str )
self . _vg_create ( None , None , options )
2023-03-02 00:52:23 +03:00
self . assertTrue ( os . path . exists ( fn ) ,
" We passed the following options %s to lvm while creating a VG and the "
" log file we expected to exist ( %s ) was not found " % ( option_str , fn ) )
2022-08-17 20:11:07 +03:00
finally :
if os . path . exists ( fn ) :
os . unlink ( fn )
def test_log_file_option ( self ) :
self . _log_file_option ( )
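	# The test above passes an lvm config override through the method options
	# dictionary under the "config" key.  Building such an options dictionary
	# is the same for any override string (illustrative sketch; the log
	# settings shown are just the ones exercised above):
	@staticmethod
	def _config_override_options(config_str):
		options = dbus.Dictionary({}, signature=dbus.Signature('sv'))
		options["config"] = dbus.String(config_str)
		return options
		# e.g. _config_override_options("log { level=7 file=/tmp/x.log syslog=0 }")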
2017-03-09 00:52:48 +03:00
2022-08-17 20:12:17 +03:00
def test_external_event ( self ) :
		# Call into the service to register an external event, so that we can
		# exercise the path where we don't send notifications on the command
		# line, in addition to the logging.
lvm_manager = dbus . Interface ( bus . get_object (
BUS_NAME , " /com/redhat/lvmdbus1/Manager " , introspect = False ) ,
" com.redhat.lvmdbus1.Manager " )
rc = lvm_manager . ExternalEvent ( " unit_test " )
self . assertTrue ( rc == 0 )
self . _log_file_option ( )
2022-08-17 20:14:02 +03:00
def test_delete_non_complete_job ( self ) :
# Let's create a vg with a number of lvs and then delete it all
# to hopefully create a long-running job.
vg_proxy = self . _create_num_lvs ( 64 )
job_path = vg_proxy . Vg . Remove ( dbus . Int32 ( 0 ) , EOD )
self . assertNotEqual ( job_path , " / " )
# Try to delete the job expecting an exception
job_proxy = ClientProxy ( self . bus , job_path , interfaces = ( JOB_INT , ) ) . Job
with self . assertRaises ( dbus . exceptions . DBusException ) :
try :
job_proxy . Remove ( )
except dbus . exceptions . DBusException as e :
# Verify we got the expected text in exception
self . assertTrue ( ' Job is not complete! ' in str ( e ) )
raise e
2022-09-08 23:28:52 +03:00
def test_z_sigint ( self ) :
2024-03-27 20:26:19 +03:00
number_of_intervals = 3
number_of_lvs = 10
2022-08-23 18:30:53 +03:00
# Issue SIGINT while daemon is processing work to ensure we shut down.
2023-03-14 16:05:57 +03:00
if bool ( int ( os . getenv ( " LVM_DBUSD_TEST_SKIP_SIGNAL " , " 0 " ) ) ) :
raise unittest . SkipTest ( " Skipping as env. LVM_DBUSD_TEST_SKIP_SIGNAL is ' 1 ' " )
2023-03-10 19:43:02 +03:00
2024-03-27 19:50:24 +03:00
if g_tmo != 0 :
raise unittest . SkipTest ( " Skipping for g_tmo != 0 " )
2022-08-23 18:30:53 +03:00
di = DaemonInfo . get ( )
self . assertTrue ( di is not None )
if di :
			# Find out how long it takes to create a VG and a number of LVs.
			# We will then issue the creation of the LVs asynchronously, wait,
			# then issue a signal, repeating as we step through the entire
			# time range.
start = time . time ( )
2024-03-27 20:26:19 +03:00
vg_proxy = self . _create_num_lvs ( number_of_lvs )
2022-08-23 18:30:53 +03:00
end = time . time ( )
self . handle_return ( vg_proxy . Vg . Remove ( dbus . Int32 ( g_tmo ) , EOD ) )
total = end - start
2024-03-27 20:26:19 +03:00
for i in range ( number_of_intervals ) :
sleep_amt = i * ( total / float ( number_of_intervals ) )
self . _create_num_lvs ( number_of_lvs , True )
2022-08-23 18:30:53 +03:00
time . sleep ( sleep_amt )
exited = False
try :
di . term_signal ( signal . SIGINT )
exited = True
except Exception :
std_err_print ( " Failed to exit on SIGINT, sending SIGKILL... " )
di . term_signal ( signal . SIGKILL )
finally :
di . start ( )
self . clean_up ( )
self . assertTrue ( exited ,
" Failed to exit after sending signal %f seconds after "
" queuing up work for signal %d " % ( sleep_amt , signal . SIGINT ) )
2022-09-23 01:11:45 +03:00
set_exec_mode ( g_lvm_shell )
2022-08-23 18:30:53 +03:00
2022-09-08 23:28:52 +03:00
def test_z_singleton_daemon ( self ) :
2022-08-23 18:31:31 +03:00
		# Ensure we can only have one daemon running at a time; the daemon
		# should exit with 114 if one is already running.
di = DaemonInfo . get ( )
self . assertTrue ( di is not None )
2022-08-24 23:41:03 +03:00
if di . systemd :
			raise unittest.SkipTest('existing daemon running via systemd')
2022-08-23 18:31:31 +03:00
if di :
ec = di . start ( True )
self . assertEqual ( ec , 114 )
2022-09-23 01:11:45 +03:00
def test_z_switching ( self ) :
# Ensure we can switch from forking to shell repeatedly
try :
t_mode = True
for _ in range ( 50 ) :
t_mode = not t_mode
set_exec_mode ( t_mode )
finally :
set_exec_mode ( g_lvm_shell )
2022-10-18 20:26:14 +03:00
@staticmethod
def _wipe_it ( block_device ) :
cmd = [ " /usr/sbin/wipefs " , ' -a ' , block_device ]
config = Popen ( cmd , stdout = PIPE , stderr = PIPE , close_fds = True , env = os . environ )
config . communicate ( )
if config . returncode != 0 :
return False
return True
2022-10-20 20:48:40 +03:00
def _block_present_absent ( self , block_device , present = False ) :
start = time . time ( )
keep_looping = True
2023-10-17 17:45:10 +03:00
max_wait = 5
2023-03-02 00:25:00 +03:00
while keep_looping and time . time ( ) < start + max_wait :
2023-10-17 17:45:10 +03:00
time . sleep ( 0.2 )
2022-10-20 20:48:40 +03:00
if present :
if ( self . _lookup ( block_device ) != " / " ) :
keep_looping = False
else :
if ( self . _lookup ( block_device ) == " / " ) :
keep_looping = False
if keep_looping :
2023-03-02 00:25:00 +03:00
print ( " Daemon failed to update within %d seconds! " % max_wait )
2022-10-20 20:48:40 +03:00
else :
print ( " Note: Time for udev update = %f " % ( time . time ( ) - start ) )
if present :
rc = self . _lookup ( block_device )
2023-03-02 20:44:16 +03:00
self . assertNotEqual ( rc , ' / ' , " Daemon failed to update, missing udev change event? " )
2022-10-20 20:48:40 +03:00
return True
else :
rc = self . _lookup ( block_device )
2023-03-02 20:44:16 +03:00
self . assertEqual ( rc , ' / ' , " Daemon failed to update, missing udev change event? " )
2022-10-20 20:48:40 +03:00
return True
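	# _block_present_absent() is a specialized poll-until-condition loop.
	# The same idea as a generic helper, for reference (illustrative sketch;
	# nothing below depends on it):
	@staticmethod
	def _wait_for(condition, max_wait=5, interval=0.2):
		start = time.time()
		while time.time() < start + max_wait:
			if condition():
				return True
			time.sleep(interval)
		return False
		# e.g. TestDbusService._wait_for(lambda: self._lookup(dev) != "/")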
2022-10-18 20:26:14 +03:00
def test_wipefs ( self ) :
# Ensure we update the status of the daemon if an external process clears a PV
pv = self . objs [ PV_INT ] [ 0 ]
pv_device_path = pv . Pv . Name
wipe_result = TestDbusService . _wipe_it ( pv_device_path )
self . assertTrue ( wipe_result )
if wipe_result :
# Need to wait a bit before the daemon will reflect the change
2022-10-20 20:48:40 +03:00
self . _block_present_absent ( pv_device_path , False )
2022-10-18 20:26:14 +03:00
# Put it back
pv_object_path = self . _pv_create ( pv_device_path )
self . assertNotEqual ( pv_object_path , ' / ' )
2022-10-20 20:48:40 +03:00
@staticmethod
def _write_signature ( device , data = None ) :
fd = os . open ( device , os . O_RDWR | os . O_EXCL | os . O_NONBLOCK )
existing = os . read ( fd , 1024 )
os . lseek ( fd , 0 , os . SEEK_SET )
if data is None :
data_copy = bytearray ( existing )
			# Clear the lvm signature (the 8-byte format text in the label header)
			data_copy[536:536 + 8] = bytearray(8)
os . write ( fd , data_copy )
else :
os . write ( fd , data )
os . sync ( )
os . close ( fd )
return existing
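	# _write_signature() zeroes 8 bytes at offset 536.  Assuming the usual PV
	# label layout (label header in sector 1, 8-byte format text "LVM2 001"
	# at byte offset 536), a quick presence check for the signature could be
	# written as follows (illustrative sketch; the tests rely on the daemon
	# lookup instead):
	@staticmethod
	def _has_lvm_signature(device):
		with open(device, 'rb') as dev:
			dev.seek(536)
			return dev.read(8) == b"LVM2 001"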
def test_copy_signature ( self ) :
# Ensure we update the state of the daemon if an external process copies
# a pv signature onto a block device
pv = self . objs [ PV_INT ] [ 0 ]
pv_device_path = pv . Pv . Name
try :
existing = TestDbusService . _write_signature ( pv_device_path , None )
if self . _block_present_absent ( pv_device_path , False ) :
TestDbusService . _write_signature ( pv_device_path , existing )
self . _block_present_absent ( pv_device_path , True )
finally :
# Ensure we put the PV back for sure.
rc = self . _lookup ( pv_device_path )
if rc == " / " :
self . _pv_create ( pv_device_path )
2022-11-29 18:57:20 +03:00
def test_stderr_collection ( self ) :
lv_name = lv_n ( )
vg = self . _vg_create ( ) . Vg
( object_path , job_path ) = vg . LvCreate (
dbus . String ( lv_name ) , dbus . UInt64 ( vg . SizeBytes * 2 ) ,
dbus . Array ( [ ] , signature = ' (ott) ' ) , dbus . Int32 ( 0 ) ,
EOD )
self . assertTrue ( object_path == ' / ' )
self . assertTrue ( job_path != ' / ' )
j = ClientProxy ( self . bus , job_path , interfaces = ( JOB_INT , ) ) . Job
while True :
j . update ( )
if j . Complete :
( ec , error_msg ) = j . GetError
2023-03-02 20:44:16 +03:00
self . assertTrue ( " insufficient free space " in error_msg ,
" We ' re expecting ' insufficient free space ' in \n \" %s \" \n , stderr missing? " % error_msg )
2022-11-29 18:57:20 +03:00
break
else :
time . sleep ( 0.1 )
2023-03-03 20:17:30 +03:00
@staticmethod
def _is_vg_devices_supported ( ) :
rc , stdout_txt , stderr_txt = call_lvm ( [ " vgcreate " , " --help " ] )
if rc == 0 :
for line in stdout_txt . split ( " \n " ) :
if " --devices " in line :
return True
return False
@staticmethod
def _vg_create_specify_devices ( name , device ) :
cmd = [ LVM_EXECUTABLE , " vgcreate " , " --devices " , device , name , device ]
outcome = Popen ( cmd , stdout = PIPE , stderr = PIPE , close_fds = True , env = os . environ )
		stdout_txt, stderr_txt = outcome.communicate()
		if outcome.returncode == 0:
			return True
		else:
			print("Failed to create vg %s, stdout=%s, stderr=%s" %
				(name, stdout_txt, stderr_txt))
			return False
def test_duplicate_vg_name ( self ) :
# LVM allows duplicate VG names, test handling renames for now
if not TestDbusService . _is_vg_devices_supported ( ) :
raise unittest . SkipTest ( " lvm does not support vgcreate with --device syntax " )
if len ( self . objs [ PV_INT ] ) < 2 :
raise unittest . SkipTest ( " we need at least 2 PVs to run test " )
vg_name = vg_n ( )
if TestDbusService . _vg_create_specify_devices ( vg_name , self . objs [ PV_INT ] [ 0 ] . Pv . Name ) and \
TestDbusService . _vg_create_specify_devices ( vg_name , self . objs [ PV_INT ] [ 1 ] . Pv . Name ) :
objects , _ = get_objects ( )
self . assertEqual ( len ( objects [ VG_INT ] ) , 2 )
if len ( objects [ VG_INT ] ) == 2 :
for vg in objects [ VG_INT ] :
new_name = vg_n ( )
vg . Vg . Rename ( dbus . String ( new_name ) , dbus . Int32 ( g_tmo ) , EOD )
# Ensure we find the renamed VG
self . assertNotEqual ( " / " , self . _lookup ( new_name ) , " Expecting to find VG= ' %s ' " % new_name )
else :
self . assertFalse ( True , " We failed to create 2 VGs with same name! " )
class AggregateResults ( object ) :
def __init__ ( self ) :
self . no_errors = True
def register_result ( self , result ) :
if not result . result . wasSuccessful ( ) :
self . no_errors = False
2016-10-07 23:30:18 +03:00
def register_fail ( self ) :
self . no_errors = False
2016-10-07 22:55:36 +03:00
def exit_run ( self ) :
if self . no_errors :
sys . exit ( 0 )
sys . exit ( 1 )
2016-02-18 02:53:35 +03:00
if __name__ == ' __main__ ' :
2016-10-07 22:55:36 +03:00
r = AggregateResults ( )
2016-11-11 21:34:38 +03:00
mode = int ( test_shell )
2016-02-20 00:16:05 +03:00
2023-03-10 19:44:55 +03:00
	# To test with error injection, set the env. variable LVM_BINARY to the
	# error injection script and the LVM_MAN_IN_MIDDLE variable to the lvm
	# binary under test (defaults to "/usr/sbin/lvm").  An example:
	# export LVM_BINARY=/home/tasleson/projects/lvm2/test/dbus/lvm_error_inject.py
	# export LVM_MAN_IN_MIDDLE=/home/tasleson/projects/lvm2/tools/lvm
2016-11-11 21:34:38 +03:00
if mode == 0 :
std_err_print ( ' \n *** Testing only lvm fork & exec test mode *** \n ' )
elif mode == 1 :
2022-05-26 00:21:14 +03:00
std_err_print ( ' \n *** Testing only lvm shell mode *** \n ' )
elif mode == 2 :
2016-11-11 21:34:38 +03:00
std_err_print ( ' \n *** Testing fork & exec & lvm shell mode *** \n ' )
2016-02-20 00:16:05 +03:00
else :
2022-05-26 00:21:14 +03:00
std_err_print ( " Unsupported \" LVM_DBUSD_TEST_MODE \" = %d , [0-2] valid " % mode )
sys . exit ( 1 )
2016-11-11 21:34:38 +03:00
for g_tmo in [ 0 , 15 ] :
2022-05-26 00:03:27 +03:00
std_err_print ( ' Testing TMO= %d \n ' % g_tmo )
2016-11-11 21:34:38 +03:00
if mode == 0 :
if set_execution ( False , r ) :
r . register_result ( unittest . main ( exit = False ) )
2022-05-26 00:21:14 +03:00
elif mode == 1 :
2016-11-11 21:34:38 +03:00
if set_execution ( True , r ) :
r . register_result ( unittest . main ( exit = False ) )
2016-08-29 22:26:16 +03:00
else :
2016-11-11 21:34:38 +03:00
if set_execution ( False , r ) :
r . register_result ( unittest . main ( exit = False ) )
# Test lvm shell
if set_execution ( True , r ) :
r . register_result ( unittest . main ( exit = False ) )
2016-10-07 22:55:36 +03:00
2016-11-16 20:39:57 +03:00
if not r . no_errors :
break
2016-10-07 22:55:36 +03:00
r . exit_run ( )