Working on clean Proxmox
parent ea5730c857
commit 3c7ec1067f
@@ -398,7 +398,7 @@ CLIENT_METHODS_INFO: list[AutoSpecMethodInfo] = [
     AutoSpecMethodInfo(uds.services.Proxmox.proxmox.client.ProxmoxClient.restore_snapshot, returns=UPID),
     # get_task
     AutoSpecMethodInfo(
-        uds.services.Proxmox.proxmox.client.ProxmoxClient.get_task,
+        uds.services.Proxmox.proxmox.client.ProxmoxClient.get_task_info,
         returns=lambda *args, **kwargs: TASK_STATUS, # pyright: ignore
     ),
     # list_machines
@@ -461,9 +461,9 @@ CLIENT_METHODS_INFO: list[AutoSpecMethodInfo] = [
     # list_storages
     AutoSpecMethodInfo(
         uds.services.Proxmox.proxmox.client.ProxmoxClient.list_storages,
-        returns=lambda node, **kwargs: ( # pyright: ignore
-            (list(filter(lambda s: s.node == node, STORAGES))) # pyright: ignore
-            if node is not None
+        returns=lambda **kwargs: ( # pyright: ignore[reportUnknownLambdaType]
+            (list(filter(lambda s: s.node == kwargs.get('node'), STORAGES))) # pyright: ignore
+            if kwargs.get('node') is not None # pyright: ignore
             else STORAGES # pyright: ignore
         ),
     ),
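Aside: a minimal runnable sketch of the kwargs-based filtering pattern the fixture lambda above uses; StorageItem is a hypothetical stand-in for the fixture's storage type, not the project's actual class.

import typing

class StorageItem(typing.NamedTuple):
    node: str
    storage: str

STORAGES = [StorageItem('node1', 'local'), StorageItem('node2', 'local-zfs')]

# Same shape as the mock: filter by 'node' only when it is passed as a keyword
list_storages = lambda **kwargs: (
    [s for s in STORAGES if s.node == kwargs.get('node')]
    if kwargs.get('node') is not None
    else STORAGES
)

assert list_storages(node='node1') == [STORAGES[0]]
assert list_storages() == STORAGES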
@@ -70,7 +70,7 @@ class TestProxmoxHelpers(UDSTransactionTestCase):
             self.assertIsInstance(choice['id'], str)
             self.assertIsInstance(choice['text'], str)
 
-        api.get_vm_pool_info.assert_called_once()
+        api.get_vm_info.assert_called_once()
         api.list_storages.assert_called_once()
 
     def test_get_machines(self) -> None:
@@ -42,6 +42,10 @@ from ...utils.test import UDSTransactionTestCase
 
 
 class TestProxmoxProvider(UDSTransactionTestCase):
+    def setUp(self) -> None:
+        super().setUp()
+        fixtures.clear()
+
     def test_provider_data(self) -> None:
         """
         Test the provider
@@ -133,57 +137,39 @@ class TestProxmoxProvider(UDSTransactionTestCase):
             self.assertEqual(provider.test_connection(), True)
             api.test.assert_called_once_with()
 
-            self.assertEqual(provider.list_vms(force=True), fixtures.VMS_INFO)
-            api.list_vms.assert_called_once_with(force=True)
-            api.list_vms.reset_mock()
-            self.assertEqual(provider.list_vms(), fixtures.VMS_INFO)
-            api.list_vms.assert_called_once_with(force=False)
+            self.assertEqual(provider.api.list_vms(force=True), fixtures.VMS_INFO)
+            self.assertEqual(provider.api.list_vms(), fixtures.VMS_INFO)
 
-            self.assertEqual(provider.get_vm_info(1), fixtures.VMS_INFO[0])
-            api.get_vm_pool_info.assert_called_once_with(1, None, force=True)
+            self.assertEqual(provider.api.get_vm_info(1), fixtures.VMS_INFO[0])
 
-            self.assertEqual(provider.get_vm_config(1), fixtures.VMS_CONFIGURATION[0])
-            api.get_vm_config.assert_called_once_with(1, force=True)
+            self.assertEqual(provider.api.get_vm_config(1), fixtures.VMS_CONFIGURATION[0])
 
             self.assertEqual(
-                provider.get_storage_info(fixtures.STORAGES[2].storage, fixtures.STORAGES[2].node, force=True),
+                provider.api.get_storage_info(
+                    fixtures.STORAGES[2].storage, fixtures.STORAGES[2].node, force=True
+                ),
                 fixtures.STORAGES[2],
             )
-            api.get_storage_info.assert_called_once_with(
-                fixtures.STORAGES[2].storage, fixtures.STORAGES[2].node, force=True
-            )
 
     def test_provider_methods_2(self) -> None:
         """
         Test the provider methods
         """
         with fixtures.patched_provider() as provider:
             api = typing.cast(mock.MagicMock, provider.api)
 
             self.assertEqual(
-                provider.get_storage_info(fixtures.STORAGES[2].storage, fixtures.STORAGES[2].node),
+                provider.api.get_storage_info(fixtures.STORAGES[2].storage, fixtures.STORAGES[2].node),
                 fixtures.STORAGES[2],
             )
-            api.get_storage_info.assert_called_once_with(
-                fixtures.STORAGES[2].storage, fixtures.STORAGES[2].node, force=False
-            )
 
             self.assertEqual(
-                provider.list_storages(fixtures.STORAGES[2].node),
+                provider.api.list_storages(node=fixtures.STORAGES[2].node),
                 list(filter(lambda x: x.node == fixtures.STORAGES[2].node, fixtures.STORAGES)),
             )
-            api.list_storages.assert_called_once_with(
-                node=fixtures.STORAGES[2].node, content='images', force=False
-            )
-            api.list_storages.reset_mock()
-            self.assertEqual(provider.list_storages(), fixtures.STORAGES)
-            api.list_storages.assert_called_once_with(node=None, content='images', force=False)
+            self.assertEqual(provider.api.list_storages(), fixtures.STORAGES)
 
-            self.assertEqual(provider.list_pools(force=True), fixtures.POOLS)
-            api.list_pools.assert_called_once_with(force=True)
-            api.list_pools.reset_mock()
-            self.assertEqual(provider.list_pools(), fixtures.POOLS)
-            api.list_pools.assert_called_once_with(force=False)
+            self.assertEqual(provider.api.list_pools(force=True), fixtures.POOLS)
+            self.assertEqual(provider.api.list_pools(), fixtures.POOLS)
 
     def test_provider_methods3(self) -> None:
         """
@@ -193,20 +179,12 @@ class TestProxmoxProvider(UDSTransactionTestCase):
             api = typing.cast(mock.MagicMock, provider.api)
 
             self.assertEqual(
-                provider.get_pool_info(fixtures.POOLS[2].id, retrieve_vm_names=True, force=True),
+                provider.api.get_pool_info(fixtures.POOLS[2].id, retrieve_vm_names=True, force=True),
                 fixtures.POOLS[2],
             )
-            api.get_pool_info.assert_called_once_with(
-                fixtures.POOLS[2].id, retrieve_vm_names=True, force=True
-            )
-            api.get_pool_info.reset_mock()
-            self.assertEqual(provider.get_pool_info(fixtures.POOLS[2].id), fixtures.POOLS[2])
-            api.get_pool_info.assert_called_once_with(
-                fixtures.POOLS[2].id, retrieve_vm_names=False, force=False
-            )
+            self.assertEqual(provider.api.get_pool_info(fixtures.POOLS[2].id), fixtures.POOLS[2])
 
-            provider.create_template(1)
-            api.convert_vm_to_template.assert_called_once_with(1)
+            provider.api.convert_vm_to_template(1)
 
             self.assertEqual(
                 provider.clone_vm(1, 'name', 'description', True, 'node', 'storage', 'pool', True),
@@ -216,48 +194,34 @@ class TestProxmoxProvider(UDSTransactionTestCase):
                 1, mock.ANY, 'name', 'description', True, 'node', 'storage', 'pool', True
             )
 
-            self.assertEqual(provider.start_machine(1), fixtures.UPID)
-            api.start_vm.assert_called_once_with(1)
+            self.assertEqual(provider.api.start_vm(1), fixtures.UPID)
 
-            self.assertEqual(provider.stop_machine(1), fixtures.UPID)
-            api.stop_vm.assert_called_once_with(1)
+            self.assertEqual(provider.api.stop_vm(1), fixtures.UPID)
 
-            self.assertEqual(provider.reset_machine(1), fixtures.UPID)
-            api.reset_vm.assert_called_once_with(1)
+            self.assertEqual(provider.api.reset_vm(1), fixtures.UPID)
 
-            self.assertEqual(provider.suspend_machine(1), fixtures.UPID)
-            api.suspend_vm.assert_called_once_with(1)
+            self.assertEqual(provider.api.suspend_vm(1), fixtures.UPID)
 
     def test_provider_methods_4(self) -> None:
         """
         Test the provider methods
        """
         with fixtures.patched_provider() as provider:
             api = typing.cast(mock.MagicMock, provider.api)
 
-            self.assertEqual(provider.shutdown_machine(1), fixtures.UPID)
-            api.shutdown_vm.assert_called_once_with(1)
+            self.assertEqual(provider.api.shutdown_vm(1), fixtures.UPID)
 
-            self.assertEqual(provider.delete_vm(1), fixtures.UPID)
-            api.delete_vm.assert_called_once_with(1)
+            self.assertEqual(provider.api.delete_vm(1), fixtures.UPID)
 
-            self.assertEqual(provider.get_task_info('node', 'upid'), fixtures.TASK_STATUS)
-            api.get_task.assert_called_once_with('node', 'upid')
+            self.assertEqual(provider.api.get_task_info('node', 'upid'), fixtures.TASK_STATUS)
 
-            provider.enable_machine_ha(1, True, 'group')
-            api.enable_vm_ha.assert_called_once_with(1, True, 'group')
+            provider.api.enable_vm_ha(1, True, 'group')
 
-            provider.set_machine_mac(1, 'mac')
-            api.set_vm_net_mac.assert_called_once_with(1, 'mac')
+            provider.api.set_vm_net_mac(1, 'mac')
 
-            provider.disable_machine_ha(1)
-            api.disable_vm_ha.assert_called_once_with(1)
+            provider.api.disable_vm_ha(1)
 
-            provider.set_protection(1, 'node', True)
-            api.set_vm_protection.assert_called_once_with(1, 'node', True)
+            provider.api.set_vm_protection(1, node='node', protection=True)
 
-            self.assertEqual(provider.list_ha_groups(), fixtures.HA_GROUPS)
-            api.list_ha_groups.assert_called_once_with()
+            self.assertEqual(provider.api.list_ha_groups(), fixtures.HA_GROUPS)
 
     def test_provider_methods_5(self) -> None:
         """
@@ -272,7 +236,7 @@ class TestProxmoxProvider(UDSTransactionTestCase):
             for i in range(1, 128):
                 self.assertEqual(provider.get_new_vmid(), vmid + i)
 
-            self.assertEqual(provider.get_guest_ip_address(1), fixtures.GUEST_IP_ADDRESS)
+            self.assertEqual(provider.api.get_guest_ip_address(1), fixtures.GUEST_IP_ADDRESS)
 
             self.assertEqual(provider.api.supports_snapshot(1), True)
 
@@ -110,13 +110,13 @@ class TestProxmovLinkedService(UDSTestCase):
 
         # Get machine info
         self.assertEqual(service.get_vm_info(1), fixtures.VMS_INFO[0])
-        api.get_vm_pool_info.assert_called_with(1, service.pool.value, force=True)
+        api.get_vm_pool_info.assert_called_with(1, service.pool.value)
 
         # Get nic mac
         self.assertEqual(service.get_nic_mac(1), '00:01:02:03:04:05')
 
         # remove machine, but this is from provider
-        self.assertEqual(service.provider().delete_vm(1), fixtures.UPID)
+        self.assertEqual(service.provider().api.delete_vm(1), fixtures.UPID)
 
         # Enable HA
         service.enable_vm_ha(1, True)
@@ -87,13 +87,13 @@ class TestProxmoxLinkedUserService(UDSTransactionTestCase):
             None,
         )
 
-        # api.get_task should have been invoked at least once
-        self.assertTrue(api.get_task.called)
+        # api.get_task_info should have been invoked at least once
+        self.assertTrue(api.get_task_info.called)
 
         api.enable_vm_ha.assert_called()
 
         api.set_vm_net_mac.assert_called_with(vmid, userservice._mac)
-        api.get_vm_pool_info.assert_called_with(vmid, service.pool.value, force=True)
+        api.get_vm_pool_info.assert_called_with(vmid, service.pool.value)
         api.start_vm.assert_called_with(vmid)
 
     def test_userservice_linked_cache_l2_no_ha(self) -> None:
@@ -139,14 +139,14 @@ class TestProxmoxLinkedUserService(UDSTransactionTestCase):
             None,
         )
 
-        # api.get_task should have been invoked at least once
-        self.assertTrue(api.get_task.called)
+        # api.get_task_info should have been invoked at least once
+        self.assertTrue(api.get_task_info.called)
 
         # Should not have been called since HA is disabled
         api.enable_vm_ha.assert_not_called()
 
         api.set_vm_net_mac.assert_called_with(vmid, userservice._mac)
-        api.get_vm_pool_info.assert_called_with(vmid, service.pool.value, force=True)
+        api.get_vm_pool_info.assert_called_with(vmid, service.pool.value)
         # Now, start should have been called
         api.start_vm.assert_called_with(vmid)
         # Stop machine should have been called
@@ -194,13 +194,13 @@ class TestProxmoxLinkedUserService(UDSTransactionTestCase):
             None,
         )
 
-        # api.get_task should have been invoked at least once
-        self.assertTrue(api.get_task.called)
+        # api.get_task_info should have been invoked at least once
+        self.assertTrue(api.get_task_info.called)
 
         api.enable_vm_ha.assert_called()
 
         api.set_vm_net_mac.assert_called_with(vmid, userservice._mac)
-        api.get_vm_pool_info.assert_called_with(vmid, service.pool.value, force=True)
+        api.get_vm_pool_info.assert_called_with(vmid, service.pool.value)
         api.start_vm.assert_called_with(vmid)
 
         # Ensure vm is stopped, because deployment should have started it (as api.start_machine was called)
@@ -96,7 +96,7 @@ class ProxmoxUserServiceFixed(FixedUserService, autoserializable.AutoSerializabl
         """
         if self._vmid != '':
             try:
-                self.service().provider().reset_machine(int(self._vmid))
+                self.service().provider().api.reset_vm(int(self._vmid))
             except Exception: # nosec: if cannot reset, ignore it
                 pass # If could not reset, ignore it...
 
@@ -112,7 +112,7 @@ class ProxmoxUserServiceFixed(FixedUserService, autoserializable.AutoSerializabl
             raise Exception('Machine not found on start machine') from e
 
         if vminfo.status == 'stopped':
-            self._store_task(self.service().provider().start_machine(int(self._vmid)))
+            self._store_task(self.service().provider().api.start_vm(int(self._vmid)))
 
     # Check methods
     def _check_task_finished(self) -> types.states.TaskState:
@@ -122,7 +122,7 @@ class ProxmoxUserServiceFixed(FixedUserService, autoserializable.AutoSerializabl
         node, upid = self._retrieve_task()
 
         try:
-            task = self.service().provider().get_task_info(node, upid)
+            task = self.service().provider().api.get_task_info(node, upid)
         except prox_exceptions.ProxmoxConnectionError:
             return types.states.TaskState.RUNNING # Try again later
 
@@ -132,7 +132,7 @@ class ProxmoxUserserviceLinked(DynamicUserService):
         node, upid = self._retrieve_task()
 
         try:
-            task = self.service().provider().get_task_info(node, upid)
+            task = self.service().provider().api.get_task_info(node, upid)
         except uds.services.Proxmox.proxmox.exceptions.ProxmoxConnectionError:
             return types.states.TaskState.RUNNING # Try again later
 
@@ -177,7 +177,7 @@ class ProxmoxUserserviceLinked(DynamicUserService):
 
     def op_reset(self) -> None:
         if self._vmid:
-            self.service().provider().reset_machine(int(self._vmid))
+            self.service().provider().api.reset_vm(int(self._vmid))
 
     # No need for op_reset_checker
 
@@ -204,7 +204,7 @@ class ProxmoxUserserviceLinked(DynamicUserService):
             self.service().enable_vm_ha(int(self._vmid), True) # Enable HA before continuing here
 
             # Set vm mac address now on first interface
-            self.service().provider().set_machine_mac(int(self._vmid), self.get_unique_id())
+            self.service().provider().api.set_vm_net_mac(int(self._vmid), self.get_unique_id())
         except uds.services.Proxmox.proxmox.exceptions.ProxmoxConnectionError:
             self.retry_later() # Push nop to front of queue, so it is consumed instead of this one
             return
@@ -52,13 +52,13 @@ def get_storage(parameters: typing.Any) -> types.ui.CallbackResultType:
 
     # Obtain machine info to get the node and its storages
     try:
-        vm_info = provider.get_vm_info(int(parameters['machine']))
+        vm_info = provider.api.get_vm_info(int(parameters['machine']))
     except Exception:
         return []
 
     res: list[types.ui.ChoiceItem] = []
     # Get storages for that datacenter
-    for storage in sorted(provider.list_storages(vm_info.node), key=lambda x: int(not x.shared)):
+    for storage in sorted(provider.api.list_storages(node=vm_info.node), key=lambda x: int(not x.shared)):
         if storage.type in ('lvm', 'iscsi', 'iscsidirect'):
             continue
         space, free = (
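Aside: the key=lambda x: int(not x.shared) above sorts shared storages ahead of node-local ones, since not True is False (0) and not False is True (1). A tiny runnable sketch, with Disk as a hypothetical stand-in for the storage type:

import typing

class Disk(typing.NamedTuple):
    name: str
    shared: bool

disks = [Disk('local', False), Disk('nfs-share', True)]
# int(not shared): shared -> 0, local -> 1, so shared storage sorts first
assert [d.name for d in sorted(disks, key=lambda x: int(not x.shared))] == ['nfs-share', 'local']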
@@ -82,7 +82,7 @@ def get_machines(parameters: typing.Any) -> types.ui.CallbackResultType:
 
     # Obtains datacenter from cluster
     try:
-        pool_info = provider.get_pool_info(parameters['pool'], retrieve_vm_names=True)
+        pool_info = provider.api.get_pool_info(parameters['pool'], retrieve_vm_names=True)
     except Exception:
         return []
 
@@ -104,12 +104,12 @@ class ProxmoxDeferredRemoval(jobs.Job):
 
     @staticmethod
     def waitForTaskFinish(
-        providerInstance: 'provider.ProxmoxProvider',
+        provider_instance: 'provider.ProxmoxProvider',
        upid: 'prox_types.UPID',
         maxWait: int = 30, # 30 * 0.3 = 9 seconds
     ) -> bool:
         counter = 0
-        while providerInstance.get_task_info(upid.node, upid.upid).is_running() and counter < maxWait:
+        while provider_instance.api.get_task_info(upid.node, upid.upid).is_running() and counter < maxWait:
             time.sleep(0.3)
             counter += 1
 
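Aside: waitForTaskFinish is a bounded polling loop; it re-checks the task roughly every 0.3 s for at most maxWait iterations (30 * 0.3, about 9 seconds). A self-contained sketch of the same pattern, with is_running as a hypothetical callable standing in for the real task lookup:

import time
import typing

def wait_for_finish(is_running: typing.Callable[[], bool], max_wait: int = 30) -> bool:
    # Poll every 0.3s, up to max_wait iterations (30 * 0.3 ~= 9 seconds)
    counter = 0
    while is_running() and counter < max_wait:
        time.sleep(0.3)
        counter += 1
    return not is_running()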
@@ -134,16 +134,16 @@ class ProxmoxDeferredRemoval(jobs.Job):
             # The soft shutdown has already been initiated by the remove method
 
             try:
-                vmInfo = instance.get_vm_info(vmid)
+                vm_info = instance.api.get_vm_info(vmid)
                 logger.debug('Found %s for removal %s', vmid, data)
                 # If machine is powered on, tries to stop it
                 # tries to remove in sync mode
-                if vmInfo.status == 'running':
-                    ProxmoxDeferredRemoval.waitForTaskFinish(instance, instance.stop_machine(vmid))
+                if vm_info.status.is_running():
+                    ProxmoxDeferredRemoval.waitForTaskFinish(instance, instance.api.stop_vm(vmid))
                     return
 
-                if vmInfo.status == 'stopped': # Machine exists, try to remove it now
-                    ProxmoxDeferredRemoval.waitForTaskFinish(instance, instance.delete_vm(vmid))
+                if not vm_info.status.is_running(): # Machine exists, try to remove it now
+                    ProxmoxDeferredRemoval.waitForTaskFinish(instance, instance.api.delete_vm(vmid))
 
                 # If this is reached, remove check
                 storage.remove('tr' + str(vmid))
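Aside: the removal job now asks the status object (vm_info.status.is_running()) instead of comparing raw strings. A minimal runnable sketch of that pattern; VMStatus here is a hypothetical illustration, not the project's actual type:

import enum

class VMStatus(enum.Enum):
    RUNNING = 'running'
    STOPPED = 'stopped'

    def is_running(self) -> bool:
        # Encapsulates the state check so callers never compare strings
        return self is VMStatus.RUNNING

assert VMStatus('running').is_running()
assert not VMStatus('stopped').is_running()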
@@ -167,34 +167,6 @@ class ProxmoxProvider(services.ServiceProvider):
 
         return self.api.test()
 
-    def list_vms(self, force: bool = False) -> list[prox_types.VMInfo]:
-        return self.api.list_vms(force=force)
-
-    def get_vm_info(self, vmid: int, poolid: typing.Optional[str] = None) -> prox_types.VMInfo:
-        return self.api.get_vm_pool_info(vmid, poolid, force=True)
-
-    def get_vm_config(self, vmid: int) -> prox_types.VMConfiguration:
-        return self.api.get_vm_config(vmid, force=True)
-
-    def get_storage_info(self, storageid: str, node: str, force: bool = False) -> prox_types.StorageInfo:
-        return self.api.get_storage_info(storageid, node, force=force)
-
-    def list_storages(
-        self, node: typing.Optional[str] = None, force: bool = False
-    ) -> list[prox_types.StorageInfo]:
-        return self.api.list_storages(node=node, content='images', force=force)
-
-    def list_pools(self, force: bool = False) -> list[prox_types.PoolInfo]:
-        return self.api.list_pools(force=force)
-
-    def get_pool_info(
-        self, pool_id: str, retrieve_vm_names: bool = False, force: bool = False
-    ) -> prox_types.PoolInfo:
-        return self.api.get_pool_info(pool_id, retrieve_vm_names=retrieve_vm_names, force=force)
-
-    def create_template(self, vmid: int) -> None:
-        self.api.convert_vm_to_template(vmid)
-
     def clone_vm(
         self,
         vmid: int,
@@ -218,42 +190,6 @@ class ProxmoxProvider(services.ServiceProvider):
             must_have_vgpus,
         )
 
-    def start_machine(self, vmid: int) -> prox_types.UPID:
-        return self.api.start_vm(vmid)
-
-    def stop_machine(self, vmid: int) -> prox_types.UPID:
-        return self.api.stop_vm(vmid)
-
-    def reset_machine(self, vmid: int) -> prox_types.UPID:
-        return self.api.reset_vm(vmid)
-
-    def suspend_machine(self, vmId: int) -> prox_types.UPID:
-        return self.api.suspend_vm(vmId)
-
-    def shutdown_machine(self, vmid: int) -> prox_types.UPID:
-        return self.api.shutdown_vm(vmid)
-
-    def delete_vm(self, vmid: int) -> prox_types.UPID:
-        return self.api.delete_vm(vmid)
-
-    def get_task_info(self, node: str, upid: str) -> prox_types.TaskStatus:
-        return self.api.get_task(node, upid)
-
-    def enable_machine_ha(self, vmid: int, started: bool = False, group: typing.Optional[str] = None) -> None:
-        self.api.enable_vm_ha(vmid, started, group)
-
-    def set_machine_mac(self, vmid: int, macAddress: str) -> None:
-        self.api.set_vm_net_mac(vmid, macAddress)
-
-    def disable_machine_ha(self, vmid: int) -> None:
-        self.api.disable_vm_ha(vmid)
-
-    def set_protection(self, vmid: int, node: typing.Optional[str] = None, protection: bool = False) -> None:
-        self.api.set_vm_protection(vmid, node, protection)
-
-    def list_ha_groups(self) -> list[str]:
-        return self.api.list_ha_groups()
-
     def get_new_vmid(self) -> int:
         MAX_RETRIES: typing.Final[int] = 512 # So we don't loop forever, just in case...
         vmid = 0
@@ -266,9 +202,6 @@ class ProxmoxProvider(services.ServiceProvider):
         # if it exists when we try to create a new one, we will simply try to get another one
         raise prox_exceptions.ProxmoxError(f'Could not get a new vmid!!: last tried {vmid}')
 
-    def get_guest_ip_address(self, vmid: int, node: typing.Optional[str] = None, ip_version: typing.Literal['4', '6', ''] = '') -> str:
-        return self.api.get_guest_ip_address(vmid, node, ip_version)
-
     @cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT, key_helper=cache_key_helper)
     def is_available(self) -> bool:
         return self.api.test()
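Aside: the net effect of the hunks above is that ProxmoxProvider's thin wrapper layer disappears and call sites go through provider.api directly. An illustrative before/after snippet (not runnable on its own; all names are confirmed by this diff):

# Before: wrapper methods on ProxmoxProvider
upid = provider.start_machine(vmid)
task = provider.get_task_info(upid.node, upid.upid)

# After: direct client access; note the client's get_task was also renamed to get_task_info
upid = provider.api.start_vm(vmid)
task = provider.api.get_task_info(upid.node, upid.upid)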
@@ -427,7 +427,7 @@ class ProxmoxClient:
         except Exception:
             logger.exception('removeFromHA')
 
-    def set_vm_protection(self, vmid: int, node: typing.Optional[str] = None, protection: bool = False) -> None:
+    def set_vm_protection(self, vmid: int, *, node: typing.Optional[str] = None, protection: bool = False) -> None:
         params: list[tuple[str, str]] = [
             ('protection', str(int(protection))),
         ]
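Aside: the bare * makes node and protection keyword-only, so call sites must name them (as the publication code elsewhere in this commit does with set_vm_protection(int(self._vmid), protection=False)). A minimal runnable sketch of the mechanism:

import typing

def set_vm_protection(vmid: int, *, node: typing.Optional[str] = None, protection: bool = False) -> None:
    # Parameters after the bare * can only be passed by keyword
    print(vmid, node, protection)

set_vm_protection(1, node='pve1', protection=True)   # OK
# set_vm_protection(1, 'pve1', True)                 # TypeError: takes 1 positional argument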
@@ -435,7 +435,7 @@ class ProxmoxClient:
         self.do_post(f'nodes/{node}/qemu/{vmid}/config', data=params, node=node)
 
     def get_guest_ip_address(
-        self, vmid: int, node: typing.Optional[str], ip_version: typing.Literal['4', '6', ''] = ''
+        self, vmid: int, *, node: typing.Optional[str] = None, ip_version: typing.Literal['4', '6', ''] = ''
     ) -> str:
         """Returns the guest ip address of the specified machine"""
         try:
@@ -539,7 +539,7 @@ class ProxmoxClient:
             self.do_post(f'nodes/{node}/qemu/{vmid}/snapshot/{name}/rollback', node=node)
         )
 
-    def get_task(self, node: str, upid: str) -> types.TaskStatus:
+    def get_task_info(self, node: str, upid: str) -> types.TaskStatus:
         return types.TaskStatus.from_dict(
             self.do_get(f'nodes/{node}/tasks/{urllib.parse.quote(upid)}/status', node=node)
         )
@@ -564,11 +564,11 @@ class ProxmoxClient:
 
         return sorted(result, key=lambda x: '{}{}'.format(x.node, x.name))
 
     @cached('vmip', consts.CACHE_INFO_DURATION, key_helper=caching_key_helper)
     def get_vm_pool_info(self, vmid: int, poolid: typing.Optional[str], **kwargs: typing.Any) -> types.VMInfo:
         # try to locate machine in pool
         node = None
         if poolid:
             # If for a specific pool, try to locate the node where the machine is
             try:
                 for i in self.do_get(f'pools/{poolid}', node=node)['data']['members']:
                     try:
@@ -582,7 +582,6 @@ class ProxmoxClient:
 
         return self.get_vm_info(vmid, node, **kwargs)
 
-    @cached('vmin', consts.CACHE_INFO_DURATION, key_helper=caching_key_helper)
     def get_vm_info(self, vmid: int, node: typing.Optional[str] = None, **kwargs: typing.Any) -> types.VMInfo:
         nodes = [types.Node(node, False, False, 0, '', '', '')] if node else self.get_cluster_info().nodes
         any_node_is_down = False
@@ -604,7 +603,7 @@ class ProxmoxClient:
         raise exceptions.ProxmoxNotFound(f'VM {vmid} not found')
 
     def get_vm_config(
-        self, vmid: int, node: typing.Optional[str] = None, **kwargs: typing.Any
+        self, vmid: int, node: typing.Optional[str] = None
     ) -> types.VMConfiguration:
         node = node or self.get_vm_info(vmid).node
         return types.VMConfiguration.from_dict(
@@ -683,6 +682,7 @@ class ProxmoxClient:
     @cached('storages', consts.CACHE_DURATION, key_helper=caching_key_helper)
     def list_storages(
         self,
+        *,
         node: typing.Union[None, str, collections.abc.Iterable[str]] = None,
         content: typing.Optional[str] = None,
         **kwargs: typing.Any,
@@ -38,5 +38,6 @@ import typing
 
 CACHE_DURATION: typing.Final[int] = consts.cache.DEFAULT_CACHE_TIMEOUT
 CACHE_INFO_DURATION: typing.Final[int] = consts.cache.SHORT_CACHE_TIMEOUT
+CACHE_VM_INFO_DURATION: typing.Final[int] = consts.cache.SHORTEST_CACHE_TIMEOUT
 # Cache duration is 3 minutes, so this is 60 mins * 24 = 1 day (default)
 CACHE_DURATION_LONG: typing.Final[int] = consts.cache.EXTREME_CACHE_TIMEOUT
@@ -104,7 +104,7 @@ class ProxmoxPublication(DynamicPublication, autoserializable.AutoSerializable):
 
     def op_create_checker(self) -> types.states.TaskState:
         node, upid = self._task.split(',')
-        task = self.service().provider().get_task_info(node, upid)
+        task = self.service().provider().api.get_task_info(node, upid)
         if task.is_running():
             return types.states.TaskState.RUNNING
 
@@ -115,14 +115,14 @@ class ProxmoxPublication(DynamicPublication, autoserializable.AutoSerializable):
 
     def op_create_completed(self) -> None:
         # Complete the creation, disabling ha protection and adding to HA and marking as template
-        self.service().provider().set_protection(int(self._vmid), protection=False)
+        self.service().provider().api.set_vm_protection(int(self._vmid), protection=False)
         time.sleep(0.5) # Give some time to proxmox. We have observed some concurrency issues
         # And add it to HA if needed (decided by service configuration)
         self.service().enable_vm_ha(int(self._vmid))
         # Wait a bit, if too fast, proxmox fails.. (Have not tested on 8.x, but previous versions failed if too fast..)
         time.sleep(0.5)
         # Mark vm as template
-        self.service().provider().create_template(int(self._vmid))
+        self.service().provider().api.convert_vm_to_template(int(self._vmid))
 
     def op_delete(self) -> None:
         self.service().delete(self, self._vmid)
@@ -107,7 +107,7 @@ class ProxmoxServiceFixed(FixedService): # pylint: disable=too-many-public-meth
 
         self.pool.set_choices(
             [gui.choice_item('', _('None'))]
-            + [gui.choice_item(p.id, p.id) for p in self.provider().list_pools()]
+            + [gui.choice_item(p.id, p.id) for p in self.provider().api.list_pools()]
         )
 
     def provider(self) -> 'ProxmoxProvider':
@@ -127,7 +127,7 @@ class ProxmoxServiceFixed(FixedService): # pylint: disable=too-many-public-meth
         # Only machines that already exist on proxmox and are not already assigned
         vms: dict[int, str] = {}
 
-        for member in self.provider().get_pool_info(self.pool.value.strip(), retrieve_vm_names=True).members:
+        for member in self.provider().api.get_pool_info(self.pool.value.strip(), retrieve_vm_names=True).members:
             vms[member.vmid] = member.vmname
 
         with self._assigned_access() as assigned_vms:
@@ -188,7 +188,7 @@ class ProxmoxServiceFixed(FixedService): # pylint: disable=too-many-public-meth
             if checking_vmid not in assigned_vms: # Not already assigned
                 try:
                     # Invoke to check it exists, do not need to store the result
-                    self.provider().get_vm_info(int(checking_vmid), self.pool.value.strip())
+                    self.provider().api.get_vm_pool_info(int(checking_vmid), self.pool.value.strip())
                     found_vmid = checking_vmid
                     break
                 except Exception: # Notifies on log, but skip it
@@ -212,11 +212,11 @@ class ProxmoxServiceFixed(FixedService): # pylint: disable=too-many-public-meth
         return str(found_vmid)
 
     def get_mac(self, vmid: str) -> str:
-        config = self.provider().get_vm_config(int(vmid))
+        config = self.provider().api.get_vm_config(int(vmid))
         return config.networks[0].mac.lower()
 
     def get_ip(self, vmid: str) -> str:
-        return self.provider().get_guest_ip_address(int(vmid))
+        return self.provider().api.get_guest_ip_address(int(vmid))
 
     def get_name(self, vmid: str) -> str:
-        return self.provider().get_vm_info(int(vmid)).name or ''
+        return self.provider().api.get_vm_info(int(vmid)).name or ''
@@ -185,17 +185,17 @@ class ProxmoxServiceLinked(DynamicService):
         self.machine.set_choices(
             [
                 gui.choice_item(str(m.id), f'{m.node}\\{m.name or m.id} ({m.id})')
-                for m in self.provider().list_vms()
+                for m in self.provider().api.list_vms()
                 if m.name and m.name[:3] != 'UDS'
             ]
         )
         self.pool.set_choices(
             [gui.choice_item('', _('None'))]
-            + [gui.choice_item(p.id, p.id) for p in self.provider().list_pools()]
+            + [gui.choice_item(p.id, p.id) for p in self.provider().api.list_pools()]
         )
         self.ha.set_choices(
             [gui.choice_item('', _('Enabled')), gui.choice_item('__', _('Disabled'))]
-            + [gui.choice_item(group, group) for group in self.provider().list_ha_groups()]
+            + [gui.choice_item(group, group) for group in self.provider().api.list_ha_groups()]
         )
 
     def provider(self) -> 'ProxmoxProvider':
@@ -231,10 +231,10 @@ class ProxmoxServiceLinked(DynamicService):
         )
 
     def get_vm_info(self, vmid: int) -> 'prox_types.VMInfo':
-        return self.provider().get_vm_info(vmid, self.pool.value.strip())
+        return self.provider().api.get_vm_pool_info(vmid, self.pool.value.strip())
 
     def get_nic_mac(self, vmid: int) -> str:
-        config = self.provider().get_vm_config(vmid)
+        config = self.provider().api.get_vm_config(vmid)
         return config.networks[0].mac.lower()
 
     # TODO: Remove this method, kept for reference of old code
@@ -247,17 +247,17 @@ class ProxmoxServiceLinked(DynamicService):
             self.do_log(level=types.log.LogLevel.WARNING, message=f'Exception disabling HA for vm {vmid}: {e}')
 
         # And remove it
-        return self.provider().delete_vm(vmid)
+        return self.provider().api.delete_vm(vmid)
 
     def enable_vm_ha(self, vmid: int, started: bool = False) -> None:
         if self.ha.value == '__':
             return
-        self.provider().enable_machine_ha(vmid, started, self.ha.value or None)
+        self.provider().api.enable_vm_ha(vmid, started, self.ha.value or None)
 
     def disable_vm_ha(self, vmid: int) -> None:
         if self.ha.value == '__':
             return
-        self.provider().disable_machine_ha(vmid)
+        self.provider().api.disable_vm_ha(vmid)
 
     def get_macs_range(self) -> str:
         """
@@ -275,7 +275,7 @@ class ProxmoxServiceLinked(DynamicService):
         return self.provider().is_available()
 
     def get_ip(self, caller_instance: typing.Optional['DynamicUserService | DynamicPublication'], vmid: str) -> str:
-        return self.provider().get_guest_ip_address(int(vmid))
+        return self.provider().api.get_guest_ip_address(int(vmid))
 
     def get_mac(self, caller_instance: typing.Optional['DynamicUserService | DynamicPublication'], vmid: str) -> str:
         # If vmid is empty, we are requesting a new mac
@@ -288,14 +288,14 @@ class ProxmoxServiceLinked(DynamicService):
             if self.is_running(caller_instance, vmid): # If running, skip
                 caller_instance._task = ''
             else:
-                caller_instance._store_task(self.provider().start_machine(int(vmid)))
+                caller_instance._store_task(self.provider().api.start_vm(int(vmid)))
         else:
             raise Exception('Invalid caller instance (publication) for start_machine()')
 
     def stop(self, caller_instance: typing.Optional['DynamicUserService | DynamicPublication'], vmid: str) -> None:
         if isinstance(caller_instance, ProxmoxUserserviceLinked):
             if self.is_running(caller_instance, vmid):
-                caller_instance._store_task(self.provider().stop_machine(int(vmid)))
+                caller_instance._store_task(self.provider().api.stop_vm(int(vmid)))
             else:
                 caller_instance._task = ''
         else:
@@ -304,7 +304,7 @@ class ProxmoxServiceLinked(DynamicService):
     def shutdown(self, caller_instance: typing.Optional['DynamicUserService | DynamicPublication'], vmid: str) -> None:
         if isinstance(caller_instance, ProxmoxUserserviceLinked):
             if self.is_running(caller_instance, vmid):
-                caller_instance._store_task(self.provider().shutdown_machine(int(vmid)))
+                caller_instance._store_task(self.provider().api.shutdown_vm(int(vmid)))
             else:
                 caller_instance._task = ''
         else:
@@ -319,4 +319,4 @@ class ProxmoxServiceLinked(DynamicService):
     def execute_delete(self, vmid: str) -> None:
         # All removals are deferred, so we can do it async
         # Try to stop it if already running... Hard stop
-        self.provider().delete_vm(int(vmid))
+        self.provider().api.delete_vm(int(vmid))