diff --git a/server/src/uds/core/ui/user_interface.py b/server/src/uds/core/ui/user_interface.py index 7a8303bec..0175e6701 100644 --- a/server/src/uds/core/ui/user_interface.py +++ b/server/src/uds/core/ui/user_interface.py @@ -124,7 +124,7 @@ class gui: Helper method to create a single choice item. """ return {'id': str(id_), 'text': str(text)} - + @staticmethod def choice_image(id_: typing.Union[str, int], text: str, img: str) -> types.ui.ChoiceItem: """ @@ -178,7 +178,9 @@ class gui: raise ValueError(f'Invalid type for convertToChoices: {vals}') @staticmethod - def sorted_choices(choices: collections.abc.Iterable[types.ui.ChoiceItem], *, by_id: bool = False, reverse: bool = False) -> list[types.ui.ChoiceItem]: + def sorted_choices( + choices: collections.abc.Iterable[types.ui.ChoiceItem], *, by_id: bool = False, reverse: bool = False + ) -> list[types.ui.ChoiceItem]: if by_id: return sorted(choices, key=lambda item: item['id'], reverse=reverse) return sorted(choices, key=lambda item: item['text'].lower(), reverse=reverse) @@ -210,7 +212,7 @@ class gui: "true" if bol evals to True, "false" if don't. 
""" return consts.TRUE_STR if bol else consts.FALSE_STR - + @staticmethod def as_int(value: typing.Union[str, bytes, bool, int]) -> int: """ @@ -227,7 +229,7 @@ class gui: return int(value) except Exception: return 0 - + @staticmethod def as_str(value: typing.Any) -> str: """ @@ -661,7 +663,7 @@ class gui: return int(self.value) except Exception: return 0 - + def _set_value(self, value: typing.Any) -> None: """ To ensure value is an int @@ -1457,7 +1459,9 @@ class UserInterface(metaclass=UserInterfaceAbstract): types.ui.FieldType.CHOICE: lambda x: x.value, types.ui.FieldType.MULTICHOICE: lambda x: codecs.encode(serialize(x.value), 'base64').decode(), types.ui.FieldType.EDITABLELIST: lambda x: codecs.encode(serialize(x.value), 'base64').decode(), - types.ui.FieldType.CHECKBOX: lambda x: consts.TRUE_STR if gui.as_bool(x.value) else consts.FALSE_STR, + types.ui.FieldType.CHECKBOX: lambda x: consts.TRUE_STR + if gui.as_bool(x.value) + else consts.FALSE_STR, types.ui.FieldType.IMAGECHOICE: lambda x: x.value, types.ui.FieldType.DATE: lambda x: x.value, types.ui.FieldType.INFO: lambda x: None, @@ -1545,9 +1549,7 @@ class UserInterface(metaclass=UserInterfaceAbstract): for fld_name, fld_type, fld_value in arr: if fld_name in field_names_translations: - fld_name = field_names_translations[ - fld_name - ] # Convert old field name to new one if needed + fld_name = field_names_translations[fld_name] # Convert old field name to new one if needed if fld_name not in self._gui: logger.warning('Field %s not found in form', fld_name) continue @@ -1659,7 +1661,5 @@ class UserInterface(metaclass=UserInterfaceAbstract): fld_old_field_name = fld.old_field_name() if fld_old_field_name and fld_old_field_name != fld_name: field_names_translations[fld_old_field_name] = fld_name - - return field_names_translations - \ No newline at end of file + return field_names_translations diff --git a/server/src/uds/core/util/unique_id_generator.py b/server/src/uds/core/util/unique_id_generator.py index 
da92e1ba9..bbf87c54c 100644 --- a/server/src/uds/core/util/unique_id_generator.py +++ b/server/src/uds/core/util/unique_id_generator.py @@ -69,21 +69,21 @@ class UniqueIDGenerator: self._base_name = newBaseName def __filter( - self, rangeStart: int, rangeEnd: int = MAX_SEQ, forUpdate: bool = False + self, range_start: int, range_end: int = MAX_SEQ, for_update: bool = False ) -> 'models.QuerySet[UniqueId]': # Order is defined on UniqueId model, and is '-seq' by default (so this gets items in sequence order) # if not for update, do not use the clause :) - obj = UniqueId.objects.select_for_update() if forUpdate else UniqueId.objects - return obj.filter(basename=self._base_name, seq__gte=rangeStart, seq__lte=rangeEnd) + obj = UniqueId.objects.select_for_update() if for_update else UniqueId.objects + return obj.filter(basename=self._base_name, seq__gte=range_start, seq__lte=range_end) - def get(self, rangeStart: int = 0, rangeEnd: int = MAX_SEQ) -> int: + def get(self, range_start: int = 0, range_end: int = MAX_SEQ) -> int: """ Tries to generate a new unique id in the range provided. 
This unique id is global to "unique ids' database """ # First look for a name in the range defined stamp = sql_stamp_seconds() - seq = rangeStart + seq = range_start # logger.debug(UniqueId) counter = 0 while True: @@ -91,7 +91,7 @@ class UniqueIDGenerator: try: # logger.debug('Creating new seq in range {}-{}'.format(rangeStart, rangeEnd)) with transaction.atomic(): - flt = self.__filter(rangeStart, rangeEnd, forUpdate=True) + flt = self.__filter(range_start, range_end, for_update=True) item: typing.Optional[UniqueId] = None try: item = flt.filter(assigned=False).order_by('seq')[0] # type: ignore # Slicing is not supported by pylance right now @@ -115,9 +115,9 @@ class UniqueIDGenerator: ] # DB Returns correct order so the 0 item is the last seq = last.seq + 1 except IndexError: # If there is no assigned at database - seq = rangeStart + seq = range_start # logger.debug('Found seq {0}'.format(seq)) - if seq > rangeEnd: + if seq > range_end: return -1 # No ids free in range # May ocurr on some circustance that a concurrency access gives same item twice, in this case, we # will get an "duplicate key error", @@ -144,7 +144,7 @@ class UniqueIDGenerator: return seq def transfer(self, seq: int, toUidGen: 'UniqueIDGenerator') -> bool: - self.__filter(0, forUpdate=True).filter(owner=self._owner, seq=seq).update( + self.__filter(0, for_update=True).filter(owner=self._owner, seq=seq).update( owner=toUidGen._owner, # pylint: disable=protected-access basename=toUidGen._base_name, # pylint: disable=protected-access stamp=sql_stamp_seconds(), @@ -155,7 +155,7 @@ class UniqueIDGenerator: logger.debug('Freeing seq %s from %s (%s)', seq, self._owner, self._base_name) with transaction.atomic(): flt = ( - self.__filter(0, forUpdate=True) + self.__filter(0, for_update=True) .filter(owner=self._owner, seq=seq) .update(owner='', assigned=False, stamp=sql_stamp_seconds()) ) @@ -165,7 +165,7 @@ class UniqueIDGenerator: def _purge(self) -> None: logger.debug('Purging UniqueID database') 
try: - last: UniqueId = self.__filter(0, forUpdate=False).filter(assigned=True)[0] # type: ignore # Slicing is not supported by pylance right now + last: UniqueId = self.__filter(0, for_update=False).filter(assigned=True)[0] # type: ignore # Slicing is not supported by pylance right now logger.debug('Last: %s', last) seq = last.seq + 1 except Exception: diff --git a/server/src/uds/services/OVirt/provider.py b/server/src/uds/services/OVirt/provider.py index e983d6549..1bb45af5f 100644 --- a/server/src/uds/services/OVirt/provider.py +++ b/server/src/uds/services/OVirt/provider.py @@ -51,9 +51,6 @@ if typing.TYPE_CHECKING: logger = logging.getLogger(__name__) -CACHE_TIME_FOR_SERVER = 1800 - - class OVirtProvider( services.ServiceProvider ): # pylint: disable=too-many-public-methods @@ -513,7 +510,7 @@ class OVirtProvider( return self.__getApi().getConsoleConnection(machineId) @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT) - def isAvailable(self) -> bool: + def is_available(self) -> bool: """ Check if aws provider is reachable """ diff --git a/server/src/uds/services/OVirt/service.py b/server/src/uds/services/OVirt/service.py index 6c3ebc56d..b18a195dc 100644 --- a/server/src/uds/services/OVirt/service.py +++ b/server/src/uds/services/OVirt/service.py @@ -466,4 +466,4 @@ class OVirtLinkedService(services.Service): # pylint: disable=too-many-public-m return self.parent().getConsoleConnection(machineId) def is_avaliable(self) -> bool: - return self.parent().isAvailable() + return self.parent().is_available() diff --git a/server/src/uds/services/OpenGnsys/provider.py b/server/src/uds/services/OpenGnsys/provider.py index 7b9fa3f39..2a1d920ca 100644 --- a/server/src/uds/services/OpenGnsys/provider.py +++ b/server/src/uds/services/OpenGnsys/provider.py @@ -293,7 +293,7 @@ class OGProvider(ServiceProvider): return self.api.status(machineId) @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT) - def isAvailable(self) -> bool: + def is_available(self) -> bool: """ 
Check if aws provider is reachable """ diff --git a/server/src/uds/services/OpenGnsys/service.py b/server/src/uds/services/OpenGnsys/service.py index 2bcda09e5..bb9953859 100644 --- a/server/src/uds/services/OpenGnsys/service.py +++ b/server/src/uds/services/OpenGnsys/service.py @@ -226,4 +226,4 @@ class OGService(services.Service): return self.startIfUnavailable.as_bool() def is_avaliable(self) -> bool: - return self.parent().isAvailable() + return self.parent().is_available() diff --git a/server/src/uds/services/OpenNebula/provider.py b/server/src/uds/services/OpenNebula/provider.py index daa2d0d69..f883149c3 100644 --- a/server/src/uds/services/OpenNebula/provider.py +++ b/server/src/uds/services/OpenNebula/provider.py @@ -343,7 +343,7 @@ class OpenNebulaProvider(ServiceProvider): # pylint: disable=too-many-public-me return OpenNebulaProvider(env, data).testConnection() @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT) - def isAvailable(self) -> bool: + def is_available(self) -> bool: """ Check if aws provider is reachable """ diff --git a/server/src/uds/services/OpenNebula/service.py b/server/src/uds/services/OpenNebula/service.py index 759814fd3..eaeba7e5c 100644 --- a/server/src/uds/services/OpenNebula/service.py +++ b/server/src/uds/services/OpenNebula/service.py @@ -321,4 +321,4 @@ class LiveService(services.Service): return self.parent().desktopLogin(machineId, username, password, domain) def is_avaliable(self) -> bool: - return self.parent().isAvailable() + return self.parent().is_available() diff --git a/server/src/uds/services/OpenStack/openstack/openstack_client.py b/server/src/uds/services/OpenStack/openstack/openstack_client.py index 2561102b8..091c6cb8f 100644 --- a/server/src/uds/services/OpenStack/openstack/openstack_client.py +++ b/server/src/uds/services/OpenStack/openstack/openstack_client.py @@ -776,7 +776,7 @@ class Client: # pylint: disable=too-many-public-methods ) ) - def isAvailable(self) -> bool: + def is_available(self) -> bool: 
try: # If we can connect, it is available self._session.get( diff --git a/server/src/uds/services/OpenStack/provider.py b/server/src/uds/services/OpenStack/provider.py index 0c26c6117..be1d45c24 100644 --- a/server/src/uds/services/OpenStack/provider.py +++ b/server/src/uds/services/OpenStack/provider.py @@ -299,9 +299,9 @@ class OpenStackProvider(ServiceProvider): return OpenStackProvider(env, data).testConnection() @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT) - def isAvailable(self) -> bool: + def is_available(self) -> bool: """ Check if aws provider is reachable """ - return self.api().isAvailable() + return self.api().is_available() diff --git a/server/src/uds/services/OpenStack/provider_legacy.py b/server/src/uds/services/OpenStack/provider_legacy.py index 4f2098212..f9c0839bf 100644 --- a/server/src/uds/services/OpenStack/provider_legacy.py +++ b/server/src/uds/services/OpenStack/provider_legacy.py @@ -282,7 +282,7 @@ class ProviderLegacy(ServiceProvider): return ProviderLegacy(env, data).testConnection() @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT) - def isAvailable(self) -> bool: + def is_available(self) -> bool: """ Check if aws provider is reachable """ diff --git a/server/src/uds/services/OpenStack/service.py b/server/src/uds/services/OpenStack/service.py index 9e3ddced6..ce5d6137f 100644 --- a/server/src/uds/services/OpenStack/service.py +++ b/server/src/uds/services/OpenStack/service.py @@ -443,4 +443,4 @@ class LiveService(services.Service): return int(self.lenName.value) def is_avaliable(self) -> bool: - return self.parent().isAvailable() + return self.parent().is_available() diff --git a/server/src/uds/services/Proxmox/client/__init__.py b/server/src/uds/services/Proxmox/client/__init__.py index c222a978f..2df0ad12c 100644 --- a/server/src/uds/services/Proxmox/client/__init__.py +++ b/server/src/uds/services/Proxmox/client/__init__.py @@ -266,7 +266,7 @@ class ProxmoxClient: return int(self._get('cluster/nextid')['data']) 
@ensure_connected - def isVMIdAvailable(self, vmId: int) -> bool: + def is_vmid_available(self, vmId: int) -> bool: try: self._get(f'cluster/nextid?vmid={vmId}') except Exception: # Not available @@ -428,7 +428,7 @@ class ProxmoxClient: return [g['group'] for g in self._get('cluster/ha/groups')['data']] @ensure_connected - def enableVmHA(self, vmId: int, started: bool = False, group: typing.Optional[str] = None) -> None: + def enable_machine_ha(self, vmId: int, started: bool = False, group: typing.Optional[str] = None) -> None: self._post( 'cluster/ha/resources', data=[ @@ -442,7 +442,7 @@ class ProxmoxClient: - def disableVmHA(self, vmId: int) -> None: + def disable_machine_ha(self, vmId: int) -> None: try: self._delete('cluster/ha/resources/vm%3A{}'.format(vmId)) except Exception: @@ -457,12 +457,12 @@ class ProxmoxClient: self._post('nodes/{}/qemu/{}/config'.format(node, vmId), data=params) @ensure_connected - def deleteVm(self, vmId: int, node: typing.Optional[str] = None, purge: bool = True) -> types.UPID: + def remove_machine(self, vmId: int, node: typing.Optional[str] = None, purge: bool = True) -> types.UPID: node = node or self.getVmInfo(vmId).node return types.UPID.from_dict(self._delete('nodes/{}/qemu/{}?purge=1'.format(node, vmId))) @ensure_connected - def getTask(self, node: str, upid: str) -> types.TaskStatus: + def get_task(self, node: str, upid: str) -> types.TaskStatus: return types.TaskStatus.fromJson( self._get('nodes/{}/tasks/{}/status'.format(node, urllib.parse.quote(upid))) ) @@ -552,7 +552,7 @@ class ProxmoxClient: return types.VMConfiguration.from_dict(self._get('nodes/{}/qemu/{}/config'.format(node, vmId))['data']) @ensure_connected - def setVmMac( + def set_machine_mac( self, vmId: int, mac: str, @@ -581,29 +581,29 @@ class ProxmoxClient: ) @ensure_connected - def startVm(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: + def start_machine(self, vmId: int, node: typing.Optional[str] = None) -> 
types.UPID: # if exitstatus is "OK" or contains "already running", all is fine node = node or self.getVmInfo(vmId).node return types.UPID.from_dict(self._post('nodes/{}/qemu/{}/status/start'.format(node, vmId))) @ensure_connected - def stopVm(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: + def stop_machine(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: node = node or self.getVmInfo(vmId).node return types.UPID.from_dict(self._post('nodes/{}/qemu/{}/status/stop'.format(node, vmId))) @ensure_connected - def resetVm(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: + def reset_machine(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: node = node or self.getVmInfo(vmId).node return types.UPID.from_dict(self._post('nodes/{}/qemu/{}/status/reset'.format(node, vmId))) @ensure_connected - def suspendVm(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: + def suspend_machine(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: # if exitstatus is "OK" or contains "already running", all is fine node = node or self.getVmInfo(vmId).node return types.UPID.from_dict(self._post('nodes/{}/qemu/{}/status/suspend'.format(node, vmId))) @ensure_connected - def shutdownVm(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: + def shutdown_machine(self, vmId: int, node: typing.Optional[str] = None) -> types.UPID: # if exitstatus is "OK" or contains "already running", all is fine node = node or self.getVmInfo(vmId).node return types.UPID.from_dict(self._post('nodes/{}/qemu/{}/status/shutdown'.format(node, vmId))) @@ -616,7 +616,7 @@ class ProxmoxClient: self.getVmInfo(vmId, force=True) # proxmox has a "resume", but start works for suspended vm so we use it - resumeVm = startVm + resumeVm = start_machine @ensure_connected @cached( @@ -677,7 +677,7 @@ class ProxmoxClient: return [types.PoolInfo.from_dict(nodeStat) for nodeStat in self._get('pools')['data']] 
@ensure_connected - def getConsoleConnection( + def get_console_connection( self, vmId: int, node: typing.Optional[str] = None ) -> typing.Optional[collections.abc.MutableMapping[str, typing.Any]]: """ diff --git a/server/src/uds/services/Proxmox/jobs.py b/server/src/uds/services/Proxmox/jobs.py index 76a9a3925..a089f602d 100644 --- a/server/src/uds/services/Proxmox/jobs.py +++ b/server/src/uds/services/Proxmox/jobs.py @@ -63,11 +63,11 @@ class ProxmoxDeferredRemoval(jobs.Job): if vmInfo.status == 'running': # If running vm, simply stops it and wait for next ProxmoxDeferredRemoval.waitForTaskFinish( - providerInstance, providerInstance.stopMachine(vmId) + providerInstance, providerInstance.stop_machine(vmId) ) ProxmoxDeferredRemoval.waitForTaskFinish( - providerInstance, providerInstance.removeMachine(vmId) + providerInstance, providerInstance.remove_machine(vmId) ) except client.ProxmoxNotFound: return # Machine does not exists @@ -87,7 +87,7 @@ class ProxmoxDeferredRemoval(jobs.Job): ) -> bool: counter = 0 while ( - providerInstance.getTaskInfo(upid.node, upid.upid).is_running() + providerInstance.get_task_info(upid.node, upid.upid).is_running() and counter < maxWait ): time.sleep(0.3) @@ -118,7 +118,7 @@ class ProxmoxDeferredRemoval(jobs.Job): # tries to remove in sync mode if vmInfo.status == 'running': ProxmoxDeferredRemoval.waitForTaskFinish( - instance, instance.stopMachine(vmId) + instance, instance.stop_machine(vmId) ) return @@ -126,7 +126,7 @@ class ProxmoxDeferredRemoval(jobs.Job): vmInfo.status == 'stopped' ): # Machine exists, try to remove it now ProxmoxDeferredRemoval.waitForTaskFinish( - instance, instance.removeMachine(vmId) + instance, instance.remove_machine(vmId) ) # It this is reached, remove check diff --git a/server/src/uds/services/Proxmox/provider.py b/server/src/uds/services/Proxmox/provider.py index 42fde944f..806ae3514 100644 --- a/server/src/uds/services/Proxmox/provider.py +++ b/server/src/uds/services/Proxmox/provider.py @@ -35,8 
+35,7 @@ from django.utils.translation import gettext_noop as _ from uds.core import services, types, consts from uds.core.ui import gui -from uds.core.util import validators -from uds.core.util.cache import Cache +from uds.core.util import validators, fields from uds.core.util.decorators import cached from uds.core.util.unique_id_generator import UniqueIDGenerator from uds.core.util.unique_mac_generator import UniqueMacGenerator @@ -51,13 +50,10 @@ if typing.TYPE_CHECKING: logger = logging.getLogger(__name__) -CACHE_TIME_FOR_SERVER = 1800 -MAX_VM_ID = 999999999 +MAX_VM_ID: typing.Final[int] = 999999999 -class ProxmoxProvider( - services.ServiceProvider -): # pylint: disable=too-many-public-methods +class ProxmoxProvider(services.ServiceProvider): # pylint: disable=too-many-public-methods offers = [ProxmoxLinkedService] type_name = _('Proxmox Platform Provider') type_type = 'ProxmoxPlatform' @@ -84,9 +80,7 @@ class ProxmoxProvider( length=32, label=_('Username'), order=3, - tooltip=_( - 'User with valid privileges on Proxmox, (use "user@authenticator" form)' - ), + tooltip=_('User with valid privileges on Proxmox, (use "user@authenticator" form)'), required=True, default='root@pam', ) @@ -98,42 +92,11 @@ class ProxmoxProvider( required=True, ) - max_preparing_services = gui.NumericField( - length=3, - label=_('Creation concurrency'), - default=10, - min_value=1, - max_value=65536, - order=50, - tooltip=_('Maximum number of concurrently creating VMs'), - required=True, - tab=types.ui.Tab.ADVANCED, - old_field_name='maxPreparingServices', - ) - max_removing_services = gui.NumericField( - length=3, - label=_('Removal concurrency'), - default=5, - min_value=1, - max_value=65536, - order=51, - tooltip=_('Maximum number of concurrently removing VMs'), - required=True, - tab=types.ui.Tab.ADVANCED, - old_field_name='maxRemovingServices', - ) + max_preparing_services = fields.max_preparing_services_field() + max_removing_services = fields.max_removing_services_field() + 
timeout = fields.timeout_field() - timeout = gui.NumericField( - length=3, - label=_('Timeout'), - default=20, - order=90, - tooltip=_('Timeout in seconds of connection to Proxmox'), - required=True, - tab=types.ui.Tab.ADVANCED, - ) - - startVmId = gui.NumericField( + start_vmid = gui.NumericField( length=3, label=_('Starting VmId'), default=10000, @@ -147,20 +110,7 @@ class ProxmoxProvider( old_field_name='startVmId', ) - macsRange = gui.TextField( - length=36, - label=_('Macs range'), - default='52:54:00:00:00:00-52:54:00:FF:FF:FF', - order=91, - readonly=False, - tooltip=_( - 'Range of valid macs for created machines. Any value accepted by Proxmox is valid here.' - ), - required=True, - tab=types.ui.Tab.ADVANCED, - old_field_name='macsRange', - ) - + macs_range = fields.macs_range_field(default='52:54:00:00:00:00-52:54:00:FF:FF:FF') # Own variables _api: typing.Optional[client.ProxmoxClient] = None @@ -214,9 +164,7 @@ class ProxmoxProvider( def listMachines(self) -> list[client.types.VMInfo]: return self._getApi().listVms() - def getMachineInfo( - self, vmId: int, poolId: typing.Optional[str] = None - ) -> client.types.VMInfo: + def getMachineInfo(self, vmId: int, poolId: typing.Optional[str] = None) -> client.types.VMInfo: return self._getApi().getVMPoolInfo(vmId, poolId, force=True) def getMachineConfiguration(self, vmId: int) -> client.types.VMConfiguration: @@ -225,9 +173,7 @@ class ProxmoxProvider( def getStorageInfo(self, storageId: str, node: str) -> client.types.StorageInfo: return self._getApi().getStorage(storageId, node) - def listStorages( - self, node: typing.Optional[str] - ) -> list[client.types.StorageInfo]: + def listStorages(self, node: typing.Optional[str]) -> list[client.types.StorageInfo]: return self._getApi().listStorages(node=node, content='images') def listPools(self) -> list[client.types.PoolInfo]: @@ -249,7 +195,7 @@ class ProxmoxProvider( ) -> client.types.VmCreationResult: return self._getApi().cloneVm( vmId, - self.getNewVmId(), + 
self.get_new_vmid(), name, description, linkedClone, @@ -258,68 +204,63 @@ class ProxmoxProvider( toPool, mustHaveVGPUS, ) - - def startMachine(self, vmId: int) -> client.types.UPID: - return self._getApi().startVm(vmId) + def start_machine(self, vmId: int) -> client.types.UPID: + return self._getApi().start_machine(vmId) - def stopMachine(self, vmId: int) -> client.types.UPID: - return self._getApi().stopVm(vmId) + def stop_machine(self, vmid: int) -> client.types.UPID: + return self._getApi().stop_machine(vmid) - def resetMachine(self, vmId: int) -> client.types.UPID: - return self._getApi().resetVm(vmId) + def reset_machine(self, vmid: int) -> client.types.UPID: + return self._getApi().reset_machine(vmid) - def suspendMachine(self, vmId: int) -> client.types.UPID: - return self._getApi().suspendVm(vmId) + def suspend_machine(self, vmId: int) -> client.types.UPID: + return self._getApi().suspend_machine(vmId) - def shutdownMachine(self, vmId: int) -> client.types.UPID: - return self._getApi().shutdownVm(vmId) + def shutdown_machine(self, vmId: int) -> client.types.UPID: + return self._getApi().shutdown_machine(vmId) - def removeMachine(self, vmId: int) -> client.types.UPID: - return self._getApi().deleteVm(vmId) + def remove_machine(self, vmid: int) -> client.types.UPID: + return self._getApi().remove_machine(vmid) - def getTaskInfo(self, node: str, upid: str) -> client.types.TaskStatus: - return self._getApi().getTask(node, upid) + def get_task_info(self, node: str, upid: str) -> client.types.TaskStatus: + return self._getApi().get_task(node, upid) - def enableHA( - self, vmId: int, started: bool = False, group: typing.Optional[str] = None - ) -> None: - self._getApi().enableVmHA(vmId, started, group) + def enable_ha(self, vmId: int, started: bool = False, group: typing.Optional[str] = None) -> None: + self._getApi().enable_machine_ha(vmId, started, group) - def set_machine_mac( - self, vmId: int, macAddress: str - ) -> None: - self._getApi().setVmMac(vmId, 
macAddress) + def set_machine_mac(self, vmId: int, macAddress: str) -> None: + self._getApi().set_machine_mac(vmId, macAddress) - def disableHA(self, vmId: int) -> None: - self._getApi().disableVmHA(vmId) + def disable_ha(self, vmid: int) -> None: + self._getApi().disable_machine_ha(vmid) - def set_protection( - self, vmId: int, node: typing.Optional[str] = None, protection: bool = False - ) -> None: + def set_protection(self, vmId: int, node: typing.Optional[str] = None, protection: bool = False) -> None: self._getApi().setProtection(vmId, node, protection) def list_ha_groups(self) -> list[str]: return self._getApi().listHAGroups() - def getConsoleConnection( + def get_console_connection( self, machineId: str ) -> typing.Optional[collections.abc.MutableMapping[str, typing.Any]]: - return self._getApi().getConsoleConnection(machineId) + return self._getApi().get_console_connection(machineId) - def getNewVmId(self) -> int: + def get_new_vmid(self) -> int: while True: # look for an unused VmId - vmId = self._vmid_generator.get(self.startVmId.as_int(), MAX_VM_ID) - if self._getApi().isVMIdAvailable(vmId): - return vmId - # All assigned VMId will be left as unusable on UDS until released by time (3 months) + vmid = self._vmid_generator.get(self.start_vmid.as_int(), MAX_VM_ID) + if self._getApi().is_vmid_available(vmid): + return vmid + # All assigned VMId will be left as unusable on UDS until released by time (3 years) + # This is not a problem at all, in the rare case that a machine id is released from uds db + # if it exists when we try to create a new one, we will simply try to get another one @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT) - def isAvailable(self) -> bool: + def is_available(self) -> bool: return self._getApi().test() - def getMacRange(self) -> str: - return self.macsRange.value + def get_macs_range(self) -> str: + return self.macs_range.value @staticmethod def test(env: 'Environment', data: 'Module.ValuesType') -> list[typing.Any]: diff --git 
a/server/src/uds/services/Proxmox/service.py b/server/src/uds/services/Proxmox/service.py index ccfdbf55a..4e1a47194 100644 --- a/server/src/uds/services/Proxmox/service.py +++ b/server/src/uds/services/Proxmox/service.py @@ -272,22 +272,22 @@ class ProxmoxLinkedService(services.Service): # pylint: disable=too-many-public return config.networks[0].mac.lower() def getTaskInfo(self, node: str, upid: str) -> 'client.types.TaskStatus': - return self.parent().getTaskInfo(node, upid) + return self.parent().get_task_info(node, upid) def startMachine(self, vmId: int) -> 'client.types.UPID': - return self.parent().startMachine(vmId) + return self.parent().start_machine(vmId) def stopMachine(self, vmId: int) -> 'client.types.UPID': - return self.parent().stopMachine(vmId) + return self.parent().stop_machine(vmId) def resetMachine(self, vmId: int) -> 'client.types.UPID': - return self.parent().resetMachine(vmId) + return self.parent().reset_machine(vmId) def suspendMachine(self, vmId: int) -> 'client.types.UPID': - return self.parent().suspendMachine(vmId) + return self.parent().suspend_machine(vmId) def shutdownMachine(self, vmId: int) -> 'client.types.UPID': - return self.parent().shutdownMachine(vmId) + return self.parent().shutdown_machine(vmId) def removeMachine(self, vmId: int) -> 'client.types.UPID': # First, remove from HA if needed @@ -298,17 +298,17 @@ class ProxmoxLinkedService(services.Service): # pylint: disable=too-many-public self.do_log(level=log.LogLevel.WARNING, message=f'Exception disabling HA for vm {vmId}: {e}') # And remove it - return self.parent().removeMachine(vmId) + return self.parent().remove_machine(vmId) def enableHA(self, vmId: int, started: bool = False) -> None: if self.ha.value == '__': return - self.parent().enableHA(vmId, started, self.ha.value or None) + self.parent().enable_ha(vmId, started, self.ha.value or None) def disableHA(self, vmId: int) -> None: if self.ha.value == '__': return - self.parent().disableHA(vmId) + 
self.parent().disable_ha(vmId) def set_protection( self, vmId: int, node: typing.Optional[str] = None, protection: bool = False @@ -328,7 +328,7 @@ class ProxmoxLinkedService(services.Service): # pylint: disable=too-many-public """ Returns de selected mac range """ - return self.parent().getMacRange() + return self.parent().get_macs_range() def isHaEnabled(self) -> bool: return self.ha.value != '__' @@ -339,8 +339,8 @@ class ProxmoxLinkedService(services.Service): # pylint: disable=too-many-public def getConsoleConnection( self, machineId: str ) -> typing.Optional[collections.abc.MutableMapping[str, typing.Any]]: - return self.parent().getConsoleConnection(machineId) + return self.parent().get_console_connection(machineId) @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT) def is_avaliable(self) -> bool: - return self.parent().isAvailable() + return self.parent().is_available() diff --git a/server/src/uds/services/Xen/provider.py b/server/src/uds/services/Xen/provider.py index 4b569f177..d2ce19574 100644 --- a/server/src/uds/services/Xen/provider.py +++ b/server/src/uds/services/Xen/provider.py @@ -56,8 +56,6 @@ if typing.TYPE_CHECKING: from uds.core.environment import Environment from uds.core.module import Module -CACHE_TIME_FOR_SERVER = 1800 - class XenProvider(ServiceProvider): # pylint: disable=too-many-public-methods """ @@ -459,7 +457,7 @@ class XenProvider(ServiceProvider): # pylint: disable=too-many-public-methods return self.macsRange.value @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT) - def isAvailable(self) -> bool: + def is_available(self) -> bool: try: self.testConnection() return True diff --git a/server/tests/services/proxmox/test_proxmox_init.py b/server/tests/services/proxmox/test_proxmox_init.py index 54a57eada..f8f4c6ae4 100644 --- a/server/tests/services/proxmox/test_proxmox_init.py +++ b/server/tests/services/proxmox/test_proxmox_init.py @@ -44,15 +44,31 @@ from django.conf import settings from uds.services.Vmware_enterprise 
import service_linked, provider PROVIDER_SERIALIZE_DATA: typing.Final[str] = ( - 'R1VJWgF2MRKGpo40r0qAyiorr5SEbg/cXmhQPC9zfAFccS20LF2du6+QhrCna7WykmcPW95FHOLWwEBpuYc3Fdh4Id' - '/jIs/hyWb/0f+30JduzD2Bjpgop+wO8sdXpy1/MilpVYKOycbGJ8JxNGov0zU4kw6FWpRD6MiCXaGBvQrzLmMFY78D' - '25y0YtOV6RhP+KKp1AUiEvS9bqGogiFuGrxq/bqI+at1CgLHXn0OK0ZSqLUroOizDu+3PNoMHC2lqbgO8CRIPVf0Cz' - '1/ZEyvJ44PCeOZZKLqzxhgbikL4g8GJptBAIMVedVMdxjpTo5oWS3O9TCtSB51iXkqpOjP7UFmUUQmsYe7/7CkHM8g' - '3y30ZN/lgB5pr5GSrfAwXKsxwNZ9cKAzm3G/xVtYpm69zcmNGWE+md+aGhGDBOVBCyvE9AkFsFdZ' + 'R1VJWgF2Mf5E0Eb/AlXtUzvdsF+YFTi08PsxvNhRm+Hu3Waqa0Gw0WeReoM5XTnmvopa9+Ex99oRhzW7xr6THkQ7vMZvwKlcI77l' + '+Zz3FKXnbZnXZkqY0GIqvUzHjQra2Xx9koxkvtAXl64aldXSCjO4xMqCzsCsxgn2fPYnD76TgSccUftTLr5UpaKxXrOg5qr836Si' + 'Y83F6Ko20viicmczi3NmMTR+ii+lmSCUrnRJc/IcxTrfmturJu0X0TipMX5C3xqMyIa1LtsPyHO3yTkYW9bGqP/B1DbDOHy27gu6' + 'DlJwQpi2SRSYEO9pOCTosuVqOpP7hDwCFYn5D1jcEDKZcOmOMuN9qDD423eXUUoCRx2YHmSS0mt03nWxZScV7Ny4U9gmv/x2jsK3' + '4YL88CPDjh/eMGc7V+LhCSqpEOFmvEz6DVAf' ) +PROVIDER_FIELDS_DATA: typing.Final[dict[str, typing.Any]] = { + 'host': 'proxmox_host', + 'port': 8666, + 'username': 'proxmox_username', + 'password': 'proxmox_passwd', + 'max_preparing_services': 31, + 'max_removing_services': 32, + 'timeout': 9999, + 'start_vmid': 99999, + 'macs_range': '52:54:01:02:03:04-52:54:05:06:07:08', +} -class ProxmoxUserInterface(UDSTestCase): - def test_provider_userinterface(self) -> None: + +class TestProxmoxProvider(UDSTestCase): + def test_provider_serialization(self) -> None: provider = ProxmoxProvider(environment=Environment.get_temporary_environment()) provider.deserialize(PROVIDER_SERIALIZE_DATA) + + # Ensure values are ok + for field in PROVIDER_FIELDS_DATA: + self.assertEqual(getattr(provider, field).value, PROVIDER_FIELDS_DATA[field])