Mirror of https://github.com/dkmstr/openuds.git (synced 2025-03-11 00:58:39 +03:00)

Final refactoring, i hope :)

commit 9641776ce1 (parent 83521cfd7a)
@@ -206,7 +206,7 @@ class Tickets(Handler):
time = 60
realname: str = self.get_param('realname', 'username') or ''
pool_uuid = self.get_param('servicePool')
pool_uuid = self.get_param('servicepool', 'servicePool')
if pool_uuid:
# Check if is pool or metapool
pool_uuid = process_uuid(pool_uuid)

@@ -95,8 +95,8 @@ class TunnelTicket(Handler):
# Ensures extra exists...
extra = extra or {}
now = sql_stamp_seconds()
totalTime = now - extra.get('b', now - 1)
msg = f'User {user.name} stopped tunnel {extra.get("t", "")[:8]}... to {host}:{port}: u:{sent}/d:{recv}/t:{totalTime}.'
total_time = now - extra.get('b', now - 1)
msg = f'User {user.name} stopped tunnel {extra.get("t", "")[:8]}... to {host}:{port}: u:{sent}/d:{recv}/t:{total_time}.'
log.log(user.manager, types.log.LogLevel.INFO, msg)
log.log(user_service, types.log.LogLevel.INFO, msg)

@@ -106,7 +106,7 @@ class TunnelTicket(Handler):
events.add_event(
user_service.deployed_service,
events.types.stats.EventType.TUNNEL_CLOSE,
duration=totalTime,
duration=total_time,
sent=sent,
received=recv,
tunnel=extra.get('t', 'unknown'),
@@ -283,8 +283,8 @@ class CachedService(AssignedService):
.all()
.prefetch_related('deployed_service', 'publication')
]
cachedService: models.UserService = parent.cached_users_services().get(uuid=process_uuid(item))
return AssignedService.item_as_dict(cachedService, is_cache=True)
cached_userservice: models.UserService = parent.cached_users_services().get(uuid=process_uuid(item))
return AssignedService.item_as_dict(cached_userservice, is_cache=True)
except Exception as e:
logger.exception('get_items')
raise self.invalid_item_response() from e

@@ -476,7 +476,7 @@ class Publications(DetailHandler):
:param parent: Parent service pool
"""
parent = ensure.is_instance(parent, models.ServicePool)
changeLog = self._params['changelog'] if 'changelog' in self._params else None
change_log = self._params['changelog'] if 'changelog' in self._params else None
if (
permissions.has_access(self._user, parent, uds.core.types.permissions.PermissionType.MANAGEMENT)

@@ -486,7 +486,7 @@ class Publications(DetailHandler):
raise self.access_denied_response()
logger.debug('Custom "publish" invoked for %s', parent)
parent.publish(changeLog) # Can raise exceptions that will be processed on response
parent.publish(change_log) # Can raise exceptions that will be processed on response
log.log(
parent,
@@ -368,11 +368,11 @@ class UserService(Environmentable, Serializable, abc.ABC):
Things to take care with this method are:
* cacheLevel can be L1 or L2 (class constants)
* level can be L1 or L2 (class constants)
* If a deploy for cache is asked for a L1 cache, the generated
element is expected to be all-done for user consume. L1 cache items
will get directly assigned to users whenever needed, and are expected
to be fast. (You always have setReady method to do anything else needed
to be fast. (You always have set_ready method to do anything else needed
to assign the cache element to an user, but generally L1 cached items
must be ready to use.
* An L2 cache is expected to be an cached element that is "almost ready".

@@ -219,7 +219,7 @@ class UserService(UUIDModel, properties.PropertiesMixin):
# The publication to which this item points to, does not exists
self.publication = None
logger.exception(
'Got exception at get_instance of an userService %s (seems that publication does not exists!)',
'Got exception at get_instance of an userservice %s (seems that publication does not exists!)',
self,
)
if service_instance.user_service_type is None:
@@ -109,7 +109,7 @@ class LinuxOsManager(osmanagers.OSManager):
def handle_unused(self, userservice: 'UserService') -> None:
"""
This will be invoked for every assigned and unused user service that has been in this state at least 1/2 of Globalconfig.CHECK_UNUSED_TIME
This function can update userService values. Normal operation will be remove machines if this state is not valid
This function can update userservice values. Normal operation will be remove machines if this state is not valid
"""
if self.is_removable_on_logout(userservice):
log.log(

@@ -113,7 +113,7 @@ class TestOSManager(osmanagers.OSManager):
def handle_unused(self, userservice: 'UserService') -> None:
"""
This will be invoked for every assigned and unused user service that has been in this state at least 1/2 of Globalconfig.CHECK_UNUSED_TIME
This function can update userService values. Normal operation will be remove machines if this state is not valid
This function can update userservice values. Normal operation will be remove machines if this state is not valid
"""
if self.is_removable_on_logout(userservice):
log.log(
@@ -72,7 +72,7 @@ def _key_helper(obj: 'Client') -> str:
class Client:
"""
Module to manage oVirt connections using ovirtsdk.
Module to manage OVirt connections using ovirtsdk.
Due to the fact that we can't create two proxy connections at same time, we serialize all access to ovirt platform.
Only one request and one live connection can exists at a time.

@@ -391,7 +391,7 @@ class Client:
# Create initally the machine without usb support, will be added later
usb = ovirtsdk4.types.Usb(enabled=False)
memoryPolicy = ovirtsdk4.types.MemoryPolicy(guaranteed=guaranteed_mb * 1024 * 1024)
memory_policy = ovirtsdk4.types.MemoryPolicy(guaranteed=guaranteed_mb * 1024 * 1024)
par = ovirtsdk4.types.Vm(
name=name,
cluster=cluster,

@@ -399,7 +399,7 @@ class Client:
description=comments,
type=ovirtsdk4.types.VmType.DESKTOP,
memory=memory_mb * 1024 * 1024,
memory_policy=memoryPolicy,
memory_policy=memory_policy,
usb=usb,
) # display=display,

@@ -543,12 +543,12 @@ class Client:
Returns:
"""
with _access_lock():
vmService: typing.Any = self.api.system_service().vms_service().service(vmid)
vm_service: typing.Any = self.api.system_service().vms_service().service(vmid)
if vmService.get() is None:
if vm_service.get() is None:
raise Exception('Machine not found')
vmService.suspend()
vm_service.suspend()
def remove_machine(self, vmid: str) -> None:
"""

@@ -580,8 +580,8 @@ class Client:
nic = vm_service.nics_service().list()[0] # If has no nic, will raise an exception (IndexError)
nic.mac.address = mac
nicService = vm_service.nics_service().service(nic.id)
nicService.update(nic)
nic_service = vm_service.nics_service().service(nic.id)
nic_service.update(nic)
except IndexError:
raise Exception('Machine do not have network interfaces!!')
@@ -204,7 +204,7 @@ class OpenGnsysUserService(services.UserService, autoserializable.AutoSerializab
try:
status = self.service().status(self._machine_id)
except Exception as e:
logger.exception('Exception at checkMachineReady')
logger.exception('Exception at _check_machine_ready')
return self._error(f'Error checking machine: {e}')
# possible status are ("off", "oglive", "busy", "linux", "windows", "macos" o "unknown").

@@ -264,12 +264,12 @@ class OpenGnsysUserService(services.UserService, autoserializable.AutoSerializab
}
try:
execFnc: typing.Optional[collections.abc.Callable[[], str]] = fncs.get(op)
exec_fnc: typing.Optional[collections.abc.Callable[[], str]] = fncs.get(op)
if execFnc is None:
if exec_fnc is None:
return self._error(f'Unknown operation found at execution queue ({op})')
execFnc()
exec_fnc()
return types.states.TaskState.RUNNING
except Exception as e:

@@ -283,7 +283,7 @@ class OpenGnsysUserService(services.UserService, autoserializable.AutoSerializab
In fact, this will not be never invoked, unless we push it twice, because
check_state method will "pop" first item when a check operation returns types.states.DeployState.FINISHED
At executeQueue this return value will be ignored, and it will only be used at check_state
At execute_queue this return value will be ignored, and it will only be used at check_state
"""
return types.states.TaskState.FINISHED

@@ -299,7 +299,7 @@ class OpenGnsysUserService(services.UserService, autoserializable.AutoSerializab
except Exception as e:
# logger.exception('Creating machine')
if r: # Reservation was done, unreserve it!!!
logger.error('Error on notifyEvent (machine was reserved): %s', e)
logger.error('Error on notify_endpoints (machine was reserved): %s', e)
try:
self.service().unreserve(self._machine_id)
except Exception as ei:

@@ -383,12 +383,12 @@ class OpenGnsysUserService(services.UserService, autoserializable.AutoSerializab
}
try:
chkFnc: typing.Optional[typing.Optional[collections.abc.Callable[[], types.states.TaskState]]] = fncs.get(op)
check_fnc: typing.Optional[typing.Optional[collections.abc.Callable[[], types.states.TaskState]]] = fncs.get(op)
if chkFnc is None:
if check_fnc is None:
return self._error(f'Unknown operation found at check queue ({op})')
state = chkFnc()
state = check_fnc()
if state == types.states.TaskState.FINISHED:
self._pop_current_op() # Remove runing op
return self._execute_queue()
@@ -60,7 +60,7 @@ class OpenGnsysMaintainer(jobs.Job):
for provider in models.Provider.objects.filter(
maintenance_mode=False, data_type=OGProvider.type_type
):
logger.debug('Provider %s is type openGnsys', provider)
logger.debug('Provider %s is type OpenGnsys', provider)
# Locate all services inside the provider
service: models.Service

@@ -165,8 +165,8 @@ class OpenGnsysClient:
if self.auth:
return
cacheKey = 'auth{}{}'.format(self.endpoint, self.username)
self.auth = self.cache.get(cacheKey)
cache_key = 'auth{}{}'.format(self.endpoint, self.username)
self.auth = self.cache.get(cache_key)
if self.auth:
return

@@ -177,7 +177,7 @@ class OpenGnsysClient:
)
self.auth = auth['apikey']
self.cache.set(cacheKey, self.auth, CACHE_VALIDITY)
self.cache.set(cache_key, self.auth, CACHE_VALIDITY)
@property
def version(self) -> str:
@@ -132,9 +132,9 @@ STATUS_READY_WINDOWS = {
# FAKE post
def post(
path: str, data: typing.Any, errMsg: typing.Optional[str] = None
path: str, data: typing.Any, error_message: typing.Optional[str] = None
) -> typing.Any:
logger.info('FAKE POST request to %s with %s data. (%s)', path, data, errMsg)
logger.info('FAKE POST request to %s with %s data. (%s)', path, data, error_message)
if path == urls.LOGIN:
return AUTH

@@ -153,9 +153,9 @@ def post(
# FAKE get
def get(
path: str, errMsg: typing.Optional[str]
path: str, error_message: typing.Optional[str]
) -> typing.Any: # pylint: disable=too-many-return-statements
logger.info('FAKE GET request to %s. (%s)', path, errMsg)
logger.info('FAKE GET request to %s. (%s)', path, error_message)
if path == urls.INFO:
return INFO
if path == urls.OUS:

@@ -179,8 +179,8 @@ def get(
raise Exception('Unknown FAKE URL on GET: {}'.format(path))
def delete(path: str, errMsg: typing.Optional[str]) -> typing.Any:
logger.info('FAKE DELETE request to %s. (%s)', path, errMsg)
def delete(path: str, error_message: typing.Optional[str]) -> typing.Any:
logger.info('FAKE DELETE request to %s. (%s)', path, error_message)
# Right now, only "unreserve" uses delete, so simply return
return UNRESERVE
# raise Exception('Unknown FAKE URL on DELETE: {}'.format(path))
@@ -247,11 +247,11 @@ class OGProvider(ServiceProvider):
def notify_endpoints(self, vmid: str, login_url: str, logout_url: str, release_url: str) -> None:
self.api.notify_endpoints(vmid, login_url, logout_url, release_url)
def notify_deadline(self, machineId: str, deadLine: typing.Optional[int]) -> None:
self.api.notify_deadline(machineId, deadLine)
def notify_deadline(self, machine_id: str, deadline: typing.Optional[int]) -> None:
self.api.notify_deadline(machine_id, deadline)
def status(self, machineId: str) -> typing.Any:
return self.api.status(machineId)
def status(self, machineid: str) -> typing.Any:
return self.api.status(machineid)
@cached('reachable', consts.cache.SHORT_CACHE_TIMEOUT)
def is_available(self) -> bool:

@@ -181,8 +181,8 @@ class OGService(services.Service):
self.get_relase_url(uuid, token),
)
def notify_deadline(self, vmid: str, deadLine: typing.Optional[int]) -> None:
self.provider().notify_deadline(vmid, deadLine)
def notify_deadline(self, vmid: str, deadline: typing.Optional[int]) -> None:
self.provider().notify_deadline(vmid, deadline)
def power_on(self, vmid: str) -> None:
self.provider().power_on(vmid, self.image.value)
@@ -117,7 +117,7 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
if self._name == '':
try:
self._name = self.name_generator().get(
self.service().get_basename(), self.service().getLenName()
self.service().get_basename(), self.service().get_lenname()
)
except KeyError:
return consts.NO_MORE_NAMES

@@ -138,23 +138,23 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
return types.states.TaskState.FINISHED
try:
state = self.service().getMachineState(self._vmid)
state = self.service().get_machine_state(self._vmid)
if state == on.types.VmState.UNKNOWN: # @UndefinedVariable
return self._error('Machine is not available anymore')
self.service().startMachine(self._vmid)
self.service().start_machine(self._vmid)
self.cache.set('ready', '1')
except Exception as e:
self.do_log(types.log.LogLevel.ERROR, 'Error on setReady: {}'.format(e))
self.do_log(types.log.LogLevel.ERROR, 'Error on set_ready: {}'.format(e))
# Treat as operation done, maybe the machine is ready and we can continue
return types.states.TaskState.FINISHED
def reset(self) -> types.states.TaskState:
if self._vmid != '':
self.service().resetMachine(self._vmid)
self.service().reset_machine(self._vmid)
return types.states.TaskState.FINISHED

@@ -208,7 +208,7 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
self._name,
state,
)
state = self.service().getMachineState(self._vmid)
state = self.service().get_machine_state(self._vmid)
# If we want to check an state and machine does not exists (except in case that we whant to check this)
if state in [

@@ -256,7 +256,7 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
if self._vmid: # Powers off & delete it
try:
self.service().removeMachine(self._vmid)
self.service().remove_machine(self._vmid)
except Exception:
logger.warning('Can\'t set remove errored machine: %s', self._vmid)
@@ -303,13 +303,13 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
In fact, this will not be never invoked, unless we push it twice, because
check_state method will "pop" first item when a check operation returns types.states.DeployState.FINISHED
At executeQueue this return value will be ignored, and it will only be used at check_state
At execute_queue this return value will be ignored, and it will only be used at check_state
"""
return types.states.TaskState.FINISHED
def _wait(self) -> types.states.TaskState:
"""
Executes opWait, it simply waits something "external" to end
Executes op_wait, it simply waits something "external" to end
"""
return types.states.TaskState.RUNNING

@@ -317,7 +317,7 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
"""
Deploys a machine from template for user/cache
"""
templateId = self.publication().getTemplateId()
template_id = self.publication().get_template_id()
name = self.get_name()
if name == consts.NO_MORE_NAMES:
raise Exception(

@@ -328,7 +328,7 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
name
) # OpenNebula don't let us to create machines with more than 15 chars!!!
self._vmid = self.service().deploy_from_template(name, templateId)
self._vmid = self.service().deploy_from_template(name, template_id)
if not self._vmid:
raise Exception('Can\'t create machine')

@@ -341,19 +341,19 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
"""
Removes a machine from system
"""
state = self.service().getMachineState(self._vmid)
state = self.service().get_machine_state(self._vmid)
if state == on.types.VmState.UNKNOWN: # @UndefinedVariable
raise Exception('Machine not found')
if state == on.types.VmState.ACTIVE: # @UndefinedVariable
subState = self.service().getMachineSubstate(self._vmid)
if subState < 3: # Less than running
logger.info('Must wait before remove: %s', subState)
sub_state = self.service().get_machine_substate(self._vmid)
if sub_state < 3: # Less than running
logger.info('Must wait before remove: %s', sub_state)
self._push_front_op(Operation.RETRY)
return types.states.TaskState.RUNNING
self.service().removeMachine(self._vmid)
self.service().remove_machine(self._vmid)
return types.states.TaskState.RUNNING

@@ -361,10 +361,10 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
"""
Powers on the machine
"""
self.service().startMachine(self._vmid)
self.service().start_machine(self._vmid)
# Get IP & MAC (later stage, after "powering on")
self._mac, self._ip = self.service().getNetInfo(self._vmid)
self._mac, self._ip = self.service().get_network_info(self._vmid)
return types.states.TaskState.RUNNING

@@ -372,7 +372,7 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
"""
Suspends the machine
"""
self.service().shutdownMachine(self._vmid)
self.service().shutdown_machine(self._vmid)
return types.states.TaskState.RUNNING
# Check methods

@@ -423,12 +423,12 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
}
try:
chkFnc: typing.Optional[collections.abc.Callable[[], types.states.TaskState]] = fncs.get(op, None)
check_fnc: typing.Optional[collections.abc.Callable[[], types.states.TaskState]] = fncs.get(op, None)
if chkFnc is None:
if check_fnc is None:
return self._error('Unknown operation found at check queue ({0})'.format(op))
state = chkFnc()
state = check_fnc()
if state == types.states.TaskState.FINISHED:
self._get_and_pop_current_op() # Remove runing op
return self._execute_queue()
@@ -110,12 +110,12 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
self.connection = xmlrpc.client.ServerProxy(self.endpoint)
@ensure_connected
def enum_storage(self, storageType: int = 0) -> collections.abc.Iterable[types.StorageType]:
sstorageType = str(storageType) # Ensure it is an string
def enum_storage(self, storage_type: int = 0) -> collections.abc.Iterable[types.StorageType]:
sstorage_type = str(storage_type) # Ensure it is an string
# Invoke datastore pools info, no parameters except connection string
result, _ = check_result(self.connection.one.datastorepool.info(self.session_string))
for ds in as_iterable(result['DATASTORE_POOL']['DATASTORE']):
if ds['TYPE'] == sstorageType:
if ds['TYPE'] == sstorage_type:
yield types.StorageType(ds['ID'], ds['NAME'], int(ds['TOTAL_MB']), int(ds['FREE_MB']), None)
@ensure_connected

@@ -151,18 +151,18 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
int(ds.get('SIZE', -1)),
ds.get('PERSISTENT', '0') != '0',
int(ds.get('RUNNING_VMS', '0')),
types.ImageState.fromState(ds['STATE']),
types.ImageState.from_str(ds['STATE']),
None,
)
@ensure_connected
def template_info(self, templateId: str, extraInfo: bool = False) -> types.TemplateType:
def template_info(self, template_id: str, extra_info: bool = False) -> types.TemplateType:
"""
Returns a list
first element is a dictionary (built from XML)
second is original XML
"""
result = self.connection.one.template.info(self.session_string, int(templateId), extraInfo)
result = self.connection.one.template.info(self.session_string, int(template_id), extra_info)
ds, xml = check_result(result)
return types.TemplateType(
ds['VMTEMPLATE']['ID'],

@@ -174,11 +174,11 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
@ensure_connected
def instantiate_template(
self,
templateId: str,
vmName: str,
createHold: bool = False,
templateToMerge: str = '',
privatePersistent: bool = False,
template_id: str,
vm_name: str,
create_hold: bool = False,
template_to_merge: str = '',
private_persistent: bool = False,
) -> str:
"""
Instantiates a template (compatible with open nebula 4 & 5)

@@ -192,22 +192,22 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
"""
if self.version[0] == '4':
result = self.connection.one.template.instantiate(
self.session_string, int(templateId), vmName, createHold, templateToMerge
self.session_string, int(template_id), vm_name, create_hold, template_to_merge
)
else:
result = self.connection.one.template.instantiate(
self.session_string,
int(templateId),
vmName,
createHold,
templateToMerge,
privatePersistent,
int(template_id),
vm_name,
create_hold,
template_to_merge,
private_persistent,
)
return check_result_raw(result)
@ensure_connected
def update_template(self, templateId: str, templateData: str, updateType: int = 0) -> str:
def update_template(self, template_id: str, template_data: str, update_type: int = 0) -> str:
"""
Updates the template with the templateXml
1.- Session string
@@ -216,54 +216,54 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
4.- Update type. 0 replace the whole template, 1 merge with the existing one
"""
result = self.connection.one.template.update(
self.session_string, int(templateId), templateData, int(updateType)
self.session_string, int(template_id), template_data, int(update_type)
)
return check_result_raw(result)
@ensure_connected
def clone_template(self, templateId: str, name: str) -> str:
def clone_template(self, template_id: str, name: str) -> str:
"""
Clones the template
"""
if self.version[0] == '4':
result = self.connection.one.template.clone(self.session_string, int(templateId), name)
result = self.connection.one.template.clone(self.session_string, int(template_id), name)
else:
result = self.connection.one.template.clone(
self.session_string, int(templateId), name, False
self.session_string, int(template_id), name, False
) # This works as previous version clone
return check_result_raw(result)
@ensure_connected
def delete_template(self, templateId: str) -> str:
def delete_template(self, template_id: str) -> str:
"""
Deletes the template (not images)
"""
result = self.connection.one.template.delete(self.session_string, int(templateId))
result = self.connection.one.template.delete(self.session_string, int(template_id))
return check_result_raw(result)
@ensure_connected
def clone_image(self, srcId: str, name: str, datastoreId: typing.Union[str, int] = -1) -> str:
def clone_image(self, src_id: str, name: str, datastore_id: typing.Union[str, int] = -1) -> str:
"""
Clones the image.
"""
result = self.connection.one.image.clone(self.session_string, int(srcId), name, int(datastoreId))
result = self.connection.one.image.clone(self.session_string, int(src_id), name, int(datastore_id))
return check_result_raw(result)
@ensure_connected
def make_persistent_image(self, imageId: str, persistent: bool = False) -> str:
def make_persistent_image(self, image_id: str, persistent: bool = False) -> str:
"""
Clones the image.
"""
result = self.connection.one.image.persistent(self.session_string, int(imageId), persistent)
result = self.connection.one.image.persistent(self.session_string, int(image_id), persistent)
return check_result_raw(result)
@ensure_connected
def delete_image(self, imageId: str) -> str:
def delete_image(self, image_id: str) -> str:
"""
Deletes an image
"""
result = self.connection.one.image.delete(self.session_string, int(imageId))
result = self.connection.one.image.delete(self.session_string, int(image_id))
return check_result_raw(result)
@ensure_connected
@@ -281,7 +281,7 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
int(ds.get('SIZE', -1)),
ds.get('PERSISTENT', '0') != '0',
int(ds.get('RUNNING_VMS', '0')),
types.ImageState.fromState(ds['STATE']),
types.ImageState.from_str(ds['STATE']),
xml,
)

@@ -302,24 +302,24 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
ds['ID'],
ds['NAME'],
int(ds.get('MEMORY', '0')),
types.VmState.fromState(ds['STATE']),
types.VmState.from_str(ds['STATE']),
None,
)
@ensure_connected
def vm_info(self, vmId: str) -> types.VirtualMachineType:
def vm_info(self, vmid: str) -> types.VirtualMachineType:
"""
Returns a list
first element is a dictionary (built from XML)
second is original XML
"""
result, xml = check_result(self.connection.one.vm.info(self.session_string, int(vmId)))
result, xml = check_result(self.connection.one.vm.info(self.session_string, int(vmid)))
ds = result['VM']
return types.VirtualMachineType(
ds['ID'],
ds['NAME'],
int(ds.get('MEMORY', '0')),
types.VmState.fromState(ds['STATE']),
types.VmState.from_str(ds['STATE']),
xml,
)

@@ -335,18 +335,18 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
return self.set_machine_state(vmid, 'terminate-hard')
@ensure_connected
def get_machine_state(self, vmId: str) -> types.VmState:
def get_machine_state(self, vmid: str) -> types.VmState:
"""
Returns the VM State
"""
return self.vm_info(vmId).state
return self.vm_info(vmid).state
@ensure_connected
def get_machine_substate(self, vmId: str) -> int:
def get_machine_substate(self, vmid: str) -> int:
"""
Returns the VM State
"""
result = self.connection.one.vm.info(self.session_string, int(vmId))
result = self.connection.one.vm.info(self.session_string, int(vmid))
r, _ = check_result(result)
try:
if int(r['VM']['STATE']) == types.VmState.ACTIVE.value:

@@ -358,6 +358,6 @@ class OpenNebulaClient: # pylint: disable=too-many-public-methods
return -1
@ensure_connected
def set_machine_state(self, vmId: str, action: str) -> str:
result = self.connection.one.vm.action(self.session_string, action, int(vmId))
def set_machine_state(self, vmid: str, action: str) -> str:
result = self.connection.one.vm.action(self.session_string, action, int(vmid))
return check_result_raw(result)
@@ -41,7 +41,7 @@ if typing.TYPE_CHECKING:
logger = logging.getLogger(__name__)
def enumerateDatastores(
def enumerate_datastores(
api: 'client.OpenNebulaClient', datastoreType: int = 0
) -> collections.abc.Iterable['types.StorageType']:
"""

@@ -48,7 +48,7 @@ class VmState(enum.Enum):
UNKNOWN = 99
@staticmethod
def fromState(state: str) -> 'VmState':
def from_str(state: str) -> 'VmState':
try:
return VmState(int(state))
except Exception:

@@ -71,7 +71,7 @@ class ImageState(enum.Enum): # pylint: disable=too-few-public-methods
UNKNOWN = 99
@staticmethod
def fromState(state: str) -> 'ImageState':
def from_str(state: str) -> 'ImageState':
try:
return ImageState(int(state))
except Exception:
@@ -53,7 +53,7 @@ def get_machine_state(api: 'client.OpenNebulaClient', vmid: str) -> types.VmStat
This method do not uses cache at all (it always tries to get machine state from OpenNebula server)
Args:
machineId: Id of the machine to get state
vmid: Id of the machine to get state
Returns:
one of the on.VmState Values

@@ -66,14 +66,14 @@ def get_machine_state(api: 'client.OpenNebulaClient', vmid: str) -> types.VmStat
return types.VmState.UNKNOWN
def get_machine_substate(api: 'client.OpenNebulaClient', machineId: str) -> int:
def get_machine_substate(api: 'client.OpenNebulaClient', vmid: str) -> int:
'''
Returns the lcm_state
'''
try:
return api.get_machine_substate(machineId)
return api.get_machine_substate(vmid)
except Exception as e:
logger.error('Error obtaining machine substate for %s on OpenNebula: %s', machineId, e)
logger.error('Error obtaining machine substate for %s on OpenNebula: %s', vmid, e)
return types.VmState.UNKNOWN.value

@@ -85,7 +85,7 @@ def start_machine(api: 'client.OpenNebulaClient', vmid: str) -> None:
This start also "resume" suspended/paused machines
Args:
machineId: Id of the machine
vmid: Id of the machine
Returns:
'''

@@ -101,7 +101,7 @@ def stop_machine(api: 'client.OpenNebulaClient', vmid: str) -> None:
Tries to start a machine. No check is done, it is simply requested to OpenNebula
Args:
machineId: Id of the machine
vmid: Id of the machine
Returns:
'''

@@ -116,7 +116,7 @@ def suspend_machine(api: 'client.OpenNebulaClient', vmid: str) -> None:
Tries to suspend a machine. No check is done, it is simply requested to OpenNebula
Args:
machineId: Id of the machine
vmid: Id of the machine
Returns:
'''

@@ -131,7 +131,7 @@ def shutdown_machine(api: 'client.OpenNebulaClient', vmid: str) -> None:
Tries to "gracefully" shutdown a machine. No check is done, it is simply requested to OpenNebula
Args:
machineId: Id of the machine
vmid: Id of the machine
Returns:
'''

@@ -146,7 +146,7 @@ def reset_machine(api: 'client.OpenNebulaClient', vmid: str) -> None:
Tries to suspend a machine. No check is done, it is simply requested to OpenNebula
Args:
machineId: Id of the machine
vmid: Id of the machine
Returns:
'''

@@ -161,12 +161,12 @@ def remove_machine(api: 'client.OpenNebulaClient', vmid: str) -> None:
Tries to delete a machine. No check is done, it is simply requested to OpenNebula
Args:
machineId: Id of the machine
vmid: Id of the machine
Returns:
'''
try:
# vm = oca.VirtualMachine.new_with_id(api, int(machineId))
# vm = oca.VirtualMachine.new_with_id(api, int(vmid))
# vm.delete()
api.remove_machine(vmid)
except Exception as e:

@@ -202,7 +202,7 @@ def get_network_info(
'''
Get the MAC and the IP for the network and machine. If network is None, for the first network
'''
# md = minidom.parseString(api.call('vm.info', int(machineId)))
# md = minidom.parseString(api.call('vm.info', int(vmid)))
md: typing.Any = minidom.parseString(api.vm_info(vmid).xml or '') # pyright: ignore[reportUnknownMemberType]
node = md
@ -138,7 +138,7 @@ class OpenNebulaProvider(ServiceProvider): # pylint: disable=too-many-public-me
|
||||
|
||||
return self._api
|
||||
|
||||
def resetApi(self) -> None:
|
||||
def reset_api(self) -> None:
|
||||
self._api = None
|
||||
|
||||
def sanitized_name(self, name: str) -> str:
|
||||
@ -164,111 +164,111 @@ class OpenNebulaProvider(ServiceProvider): # pylint: disable=too-many-public-me
|
||||
|
||||
return types.core.TestResult(True, _('Opennebula test connection passed'))
|
||||
|
||||
def getDatastores(self, datastoreType: int = 0) -> collections.abc.Iterable[on.types.StorageType]:
|
||||
yield from on.storage.enumerateDatastores(self.api, datastoreType)
|
||||
def get_datastores(self, datastore_type: int = 0) -> collections.abc.Iterable[on.types.StorageType]:
|
||||
yield from on.storage.enumerate_datastores(self.api, datastore_type)
|
||||
|
||||
def getTemplates(self, force: bool = False) -> collections.abc.Iterable[on.types.TemplateType]:
|
||||
def get_templates(self, force: bool = False) -> collections.abc.Iterable[on.types.TemplateType]:
|
||||
yield from on.template.enumerate_templates(self.api, force)
|
||||
|
||||
def make_template(self, from_template_id: str, name: str, dest_storage: str) -> str:
|
||||
return on.template.create(self.api, from_template_id, name, dest_storage)
|
||||
|
||||
def check_template_published(self, templateId: str) -> bool:
|
||||
return on.template.check_published(self.api, templateId)
|
||||
def check_template_published(self, template_id: str) -> bool:
|
||||
return on.template.check_published(self.api, template_id)
|
||||
|
||||
def removeTemplate(self, templateId: str) -> None:
|
||||
on.template.remove(self.api, templateId)
|
||||
def remove_template(self, template_id: str) -> None:
|
||||
on.template.remove(self.api, template_id)
|
||||
|
||||
def deply_from_template(self, name: str, templateId: str) -> str:
|
||||
return on.template.deploy_from(self.api, templateId, name)
|
||||
def deply_from_template(self, name: str, template_id: str) -> str:
|
||||
return on.template.deploy_from(self.api, template_id, name)
|
||||
|
||||
def getMachineState(self, machineId: str) -> on.types.VmState:
|
||||
def get_machine_state(self, machine_id: str) -> on.types.VmState:
|
||||
'''
|
||||
Returns the state of the machine
|
||||
This method do not uses cache at all (it always tries to get machine state from OpenNebula server)
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine to get state
|
||||
machine_id: Id of the machine to get state
|
||||
|
||||
Returns:
|
||||
one of the on.VmState Values
|
||||
'''
|
||||
return on.vm.get_machine_state(self.api, machineId)
|
||||
return on.vm.get_machine_state(self.api, machine_id)
|
||||
|
||||
def getMachineSubstate(self, machineId: str) -> int:
|
||||
def get_machine_substate(self, machine_id: str) -> int:
|
||||
'''
|
||||
Returns the LCM_STATE of a machine (STATE must be ready or this will return -1)
|
||||
'''
|
||||
return on.vm.get_machine_substate(self.api, machineId)
|
||||
return on.vm.get_machine_substate(self.api, machine_id)
|
||||
|
||||
def startMachine(self, machineId: str) -> None:
|
||||
def start_machine(self, machine_id: str) -> None:
|
||||
'''
|
||||
Tries to start a machine. No check is done, it is simply requested to OpenNebula.
|
||||
|
||||
This start also "resume" suspended/paused machines
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machineid: Id of the machine
|
||||
|
||||
Returns:
|
||||
'''
|
||||
on.vm.start_machine(self.api, machineId)
|
||||
on.vm.start_machine(self.api, machine_id)
|
||||
|
||||
def stopMachine(self, machineId: str) -> None:
|
||||
def stop_machine(self, machine_id: str) -> None:
|
||||
'''
|
||||
Tries to stop a machine. No check is done, it is simply requested to OpenNebula
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
'''
|
||||
on.vm.stop_machine(self.api, machineId)
|
||||
on.vm.stop_machine(self.api, machine_id)
|
||||
|
||||
def suspendMachine(self, machineId: str) -> None:
|
||||
def suspend_machine(self, machine_id: str) -> None:
|
||||
'''
|
||||
Tries to suspend a machine. No check is done, it is simply requested to OpenNebula
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
'''
|
||||
on.vm.suspend_machine(self.api, machineId)
|
||||
on.vm.suspend_machine(self.api, machine_id)
|
||||
|
||||
def shutdownMachine(self, machineId: str) -> None:
|
||||
def shutdown_machine(self, machine_id: str) -> None:
|
||||
'''
|
||||
Tries to shutdown "gracefully" a machine. No check is done, it is simply requested to OpenNebula
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
'''
|
||||
on.vm.shutdown_machine(self.api, machineId)
|
||||
on.vm.shutdown_machine(self.api, machine_id)
|
||||
|
||||
def resetMachine(self, machineId: str) -> None:
|
||||
def reset_machine(self, machine_id: str) -> None:
|
||||
'''
|
||||
Resets a machine (hard-reboot)
|
||||
'''
|
||||
on.vm.reset_machine(self.api, machineId)
|
||||
on.vm.reset_machine(self.api, machine_id)
|
||||
|
||||
def removeMachine(self, machineId: str) -> None:
|
||||
def remove_machine(self, machine_id: str) -> None:
|
||||
'''
|
||||
Tries to delete a machine. No check is done, it is simply requested to OpenNebula
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
'''
|
||||
on.vm.remove_machine(self.api, machineId)
|
||||
on.vm.remove_machine(self.api, machine_id)
|
||||
|
||||
def getNetInfo(self, machineId: str, networkId: typing.Optional[str] = None) -> tuple[str, str]:
|
||||
def get_network_info(self, machine_id: str, network_id: typing.Optional[str] = None) -> tuple[str, str]:
|
||||
'''
|
||||
Changes the mac address of first nic of the machine to the one specified
|
||||
'''
|
||||
return on.vm.get_network_info(self.api, machineId, networkId)
|
||||
return on.vm.get_network_info(self.api, machine_id, network_id)
|
||||
|
||||
def get_console_connection(self, vmid: str) -> typing.Optional[types.services.ConsoleConnectionInfo]:
|
||||
console_connection_info = on.vm.get_console_connection(self.api, vmid)
|
||||
|
@ -46,7 +46,7 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class OpenNebulaLivePublication(Publication, autoserializable.AutoSerializable):
|
||||
"""
|
||||
This class provides the publication of a oVirtLinkedService
|
||||
This class provides the publication of a service for OpenNebula
|
||||
"""
|
||||
|
||||
suggested_delay = 2 # : Suggested recheck time if publication is unfinished in seconds
|
||||
@ -173,7 +173,7 @@ class OpenNebulaLivePublication(Publication, autoserializable.AutoSerializable):
|
||||
# Methods provided below are specific for this publication
|
||||
# and will be used by user deployments that uses this kind of publication
|
||||
|
||||
def getTemplateId(self) -> str:
|
||||
def get_template_id(self) -> str:
|
||||
"""
|
||||
Returns the template id associated with the publication
|
||||
"""
|
||||
|
@ -157,11 +157,11 @@ class OpenNebulaLiveService(services.Service):
|
||||
"""
|
||||
|
||||
self.template.set_choices(
|
||||
[gui.choice_item(t.id, t.name) for t in self.provider().getTemplates()]
|
||||
[gui.choice_item(t.id, t.name) for t in self.provider().get_templates()]
|
||||
)
|
||||
|
||||
self.datastore.set_choices(
|
||||
[gui.choice_item(d.id, d.name) for d in self.provider().getDatastores()]
|
||||
[gui.choice_item(d.id, d.name) for d in self.provider().get_datastores()]
|
||||
)
|
||||
|
||||
def sanitized_name(self, name: str) -> str:
|
||||
@ -175,7 +175,7 @@ class OpenNebulaLiveService(services.Service):
|
||||
def check_template_published(self, template_id: str) -> bool:
|
||||
return self.provider().check_template_published(template_id)
|
||||
|
||||
def deploy_from_template(self, name: str, templateId: str) -> str:
|
||||
def deploy_from_template(self, name: str, template_id: str) -> str:
|
||||
"""
|
||||
Deploys a virtual machine on selected cluster from selected template
|
||||
|
||||
@ -187,23 +187,23 @@ class OpenNebulaLiveService(services.Service):
|
||||
Returns:
|
||||
Id of the machine being created form template
|
||||
"""
|
||||
logger.debug('Deploying from template %s machine %s', templateId, name)
|
||||
logger.debug('Deploying from template %s machine %s', template_id, name)
|
||||
# self.datastoreHasSpace()
|
||||
return self.provider().deply_from_template(name, templateId)
|
||||
return self.provider().deply_from_template(name, template_id)
|
||||
|
||||
def remove_template(self, templateId: str) -> None:
|
||||
def remove_template(self, template_id: str) -> None:
|
||||
"""
|
||||
invokes removeTemplate from parent provider
|
||||
invokes template_id from parent provider
|
||||
"""
|
||||
self.provider().removeTemplate(templateId)
|
||||
self.provider().remove_template(template_id)
|
||||
|
||||
def getMachineState(self, machineId: str) -> 'on.types.VmState':
|
||||
def get_machine_state(self, machine_id: str) -> 'on.types.VmState':
|
||||
"""
|
||||
Invokes getMachineState from parent provider
|
||||
(returns if machine is "active" or "inactive"
|
||||
|
||||
Args:
|
||||
machineId: If of the machine to get state
|
||||
machine_id: If of the machine to get state
|
||||
|
||||
Returns:
|
||||
one of this values:
|
||||
@ -213,82 +213,82 @@ class OpenNebulaLiveService(services.Service):
|
||||
suspended, image_illegal, image_locked or powering_down
|
||||
Also can return'unknown' if Machine is not known
|
||||
"""
|
||||
return self.provider().getMachineState(machineId)
|
||||
return self.provider().get_machine_state(machine_id)
|
||||
|
||||
def getMachineSubstate(self, machineId: str) -> int:
|
||||
def get_machine_substate(self, machine_id: str) -> int:
|
||||
"""
|
||||
On OpenNebula, the machine can be "active" but not "running".
|
||||
Any active machine will have a LCM_STATE, that is what we get here
|
||||
"""
|
||||
return self.provider().getMachineSubstate(machineId)
|
||||
return self.provider().get_machine_substate(machine_id)
|
||||
|
||||
def startMachine(self, machineId: str) -> None:
|
||||
def start_machine(self, machine_id: str) -> None:
|
||||
"""
|
||||
Tries to start a machine. No check is done, it is simply requested to OpenNebula.
|
||||
|
||||
This start also "resume" suspended/paused machines
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
"""
|
||||
self.provider().startMachine(machineId)
|
||||
self.provider().start_machine(machine_id)
|
||||
|
||||
def stopMachine(self, machineId: str) -> None:
|
||||
def stop_machine(self, machine_id: str) -> None:
|
||||
"""
|
||||
Tries to stop a machine. No check is done, it is simply requested to OpenNebula
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
"""
|
||||
self.provider().stopMachine(machineId)
|
||||
self.provider().stop_machine(machine_id)
|
||||
|
||||
def suspendMachine(self, machineId: str) -> None:
|
||||
def suspend_machine(self, machine_id: str) -> None:
|
||||
"""
|
||||
Tries to suspend machine. No check is done, it is simply requested to OpenNebula
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
"""
|
||||
self.provider().suspendMachine(machineId)
|
||||
self.provider().suspend_machine(machine_id)
|
||||
|
||||
def shutdownMachine(self, machineId: str) -> None:
|
||||
def shutdown_machine(self, machine_id: str) -> None:
|
||||
"""
|
||||
Tries to "gracefully" shutdown machine. No check is done, it is simply requested to OpenNebula
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
"""
|
||||
self.provider().shutdownMachine(machineId)
|
||||
self.provider().shutdown_machine(machine_id)
|
||||
|
||||
def resetMachine(self, machineId: str) -> None:
|
||||
self.provider().resetMachine(machineId)
|
||||
def reset_machine(self, machine_id: str) -> None:
|
||||
self.provider().reset_machine(machine_id)
|
||||
|
||||
def removeMachine(self, machineId: str) -> None:
|
||||
def remove_machine(self, machine_id: str) -> None:
|
||||
"""
|
||||
Tries to delete a machine. No check is done, it is simply requested to OpenNebula
|
||||
|
||||
Args:
|
||||
machineId: Id of the machine
|
||||
machine_id: Id of the machine
|
||||
|
||||
Returns:
|
||||
"""
|
||||
self.provider().removeMachine(machineId)
|
||||
self.provider().remove_machine(machine_id)
|
||||
|
||||
def getNetInfo(
|
||||
self, machineId: str, networkId: typing.Optional[str] = None
|
||||
def get_network_info(
|
||||
self, machine_id: str, network_id: typing.Optional[str] = None
|
||||
) -> tuple[str, str]:
|
||||
"""
|
||||
Changes the mac address of first nic of the machine to the one specified
|
||||
Gets the network info for a machine
|
||||
"""
|
||||
return self.provider().getNetInfo(machineId, networkId=None)
|
||||
return self.provider().get_network_info(machine_id, network_id=None)
|
||||
|
||||
def get_basename(self) -> str:
|
||||
"""
|
||||
@ -296,14 +296,14 @@ class OpenNebulaLiveService(services.Service):
|
||||
"""
|
||||
return self.baseName.value
|
||||
|
||||
def getLenName(self) -> int:
|
||||
def get_lenname(self) -> int:
|
||||
"""
|
||||
Returns the length of numbers part
|
||||
"""
|
||||
return self.lenName.as_int()
|
||||
|
||||
def get_console_connection(self, machineId: str) -> typing.Optional[types.services.ConsoleConnectionInfo]:
|
||||
return self.provider().get_console_connection(machineId)
|
||||
def get_console_connection(self, vmid: str) -> typing.Optional[types.services.ConsoleConnectionInfo]:
|
||||
return self.provider().get_console_connection(vmid)
|
||||
|
||||
def desktop_login(
|
||||
self, vmid: str, username: str, password: str, domain: str
|
||||
|
@ -93,9 +93,9 @@ class IPMachineUserService(services.UserService, autoserializable.AutoSerializab
|
||||
|
||||
def _deploy(self) -> types.states.TaskState:
|
||||
# If not to be managed by a token, autologin user
|
||||
userService = self.db_obj()
|
||||
if userService:
|
||||
userService.set_in_use(True)
|
||||
user_service = self.db_obj()
|
||||
if user_service:
|
||||
user_service.set_in_use(True)
|
||||
|
||||
return types.states.TaskState.FINISHED
|
||||
|
||||
|
@ -103,29 +103,29 @@ class ProxmoxDeferredRemoval(jobs.Job):
|
||||
# )
|
||||
|
||||
@staticmethod
|
||||
def waitForTaskFinish(
|
||||
def wait_for_task_to_finish(
|
||||
provider_instance: 'provider.ProxmoxProvider',
|
||||
upid: 'prox_types.ExecResult',
|
||||
maxWait: int = 30, # 30 * 0.3 = 9 seconds
|
||||
timeout: int = 30, # 30 * 0.3 = 9 seconds
|
||||
) -> bool:
|
||||
counter = 0
|
||||
while provider_instance.api.get_task_info(upid.node, upid.upid).is_running() and counter < maxWait:
|
||||
while provider_instance.api.get_task_info(upid.node, upid.upid).is_running() and counter < timeout:
|
||||
time.sleep(0.3)
|
||||
counter += 1
|
||||
|
||||
return counter < maxWait
|
||||
return counter < timeout
|
||||
|
||||
def run(self) -> None:
|
||||
dbProvider: Provider
|
||||
db_provider: Provider
|
||||
# Look for Providers of type proxmox
|
||||
for dbProvider in Provider.objects.filter(
|
||||
for db_provider in Provider.objects.filter(
|
||||
maintenance_mode=False, data_type=provider.ProxmoxProvider.type_type
|
||||
):
|
||||
logger.debug('Provider %s if os type proxmox', dbProvider)
|
||||
logger.debug('Provider %s if os type proxmox', db_provider)
|
||||
|
||||
storage = dbProvider.get_environment().storage
|
||||
storage = db_provider.get_environment().storage
|
||||
instance: provider.ProxmoxProvider = typing.cast(
|
||||
provider.ProxmoxProvider, dbProvider.get_instance()
|
||||
provider.ProxmoxProvider, db_provider.get_instance()
|
||||
)
|
||||
|
||||
for data in storage.filter('tRm'):
|
||||
@ -139,11 +139,11 @@ class ProxmoxDeferredRemoval(jobs.Job):
|
||||
# If machine is powered on, tries to stop it
|
||||
# tries to remove in sync mode
|
||||
if vm_info.status.is_running():
|
||||
ProxmoxDeferredRemoval.waitForTaskFinish(instance, instance.api.stop_vm(vmid))
|
||||
ProxmoxDeferredRemoval.wait_for_task_to_finish(instance, instance.api.stop_vm(vmid))
|
||||
return
|
||||
|
||||
if not vm_info.status.is_running(): # Machine exists, try to remove it now
|
||||
ProxmoxDeferredRemoval.waitForTaskFinish(instance, instance.api.delete_vm(vmid))
|
||||
ProxmoxDeferredRemoval.wait_for_task_to_finish(instance, instance.api.delete_vm(vmid))
|
||||
|
||||
# It this is reached, remove check
|
||||
storage.remove('tr' + str(vmid))
|
||||
|
@ -815,7 +815,7 @@ class ProxmoxClient:
|
||||
@cached('nost', consts.CACHE_INFO_DURATION, key_helper=caching_key_helper)
|
||||
def get_nodes_stats(self, **kwargs: typing.Any) -> list[types.NodeStats]:
|
||||
# vm | storage | node | sdn are valid types for cluster/resources
|
||||
return [types.NodeStats.from_dict(nodeStat) for nodeStat in self.get_cluster_resources('node')]
|
||||
return [types.NodeStats.from_dict(node_stat) for node_stat in self.get_cluster_resources('node')]
|
||||
|
||||
@cached('pools', consts.CACHE_DURATION // 6, key_helper=caching_key_helper)
|
||||
def list_pools(self, **kwargs: typing.Any) -> list[types.PoolInfo]:
|
||||
|
@ -116,7 +116,7 @@ class SampleUserServiceOne(services.UserService):
|
||||
name: str = typing.cast(str, self.storage.read_from_db('name'))
|
||||
if not name:
|
||||
name = self.name_generator().get(
|
||||
self.service().get_basename() + '-' + self.service().getColour(), 3
|
||||
self.service().get_basename() + '-' + self.service().get_colour(), 3
|
||||
)
|
||||
# Store value for persistence
|
||||
self.storage.save_to_db('name', name)
|
||||
@ -269,12 +269,12 @@ class SampleUserServiceOne(services.UserService):
|
||||
"""
|
||||
import random
|
||||
|
||||
countStr: typing.Optional[str] = typing.cast(
|
||||
counter_str: typing.Optional[str] = typing.cast(
|
||||
str, self.storage.read_from_db('count')
|
||||
)
|
||||
count: int = 0
|
||||
if countStr:
|
||||
count = int(countStr) + 1
|
||||
if counter_str:
|
||||
count = int(counter_str) + 1
|
||||
# Count is always a valid value, because this method will never get
|
||||
# called before deployForUser, deployForCache, destroy or cancel.
|
||||
# In our sample, we only use check_state in case of deployForUser,
|
||||
|

@ -93,7 +93,7 @@ class Provider(services.ServiceProvider):
# : Remote host. Here core will translate label and tooltip, remember to
# : mark them as _ using gettext_noop.
remoteHost = gui.TextField(
remote_host = gui.TextField(
order=1,
length=64,
label=_('Remote host'),

@ -102,7 +102,7 @@ class Provider(services.ServiceProvider):
)

# simple password field
passwdField = gui.PasswordField(
passwd_field = gui.PasswordField(
order=2,
length=32,
label=_('Password'),

@ -111,7 +111,7 @@ class Provider(services.ServiceProvider):
)

# : Name of your pet (sample, not really needed :-) )
petName = gui.TextField(
pet_name = gui.TextField(
order=3,
length=32,
label=_('Your pet\'s name'),

@ -122,7 +122,7 @@ class Provider(services.ServiceProvider):
# : Age of Methuselah (matusalén in spanish)
# : in Spain there is a well-known to say that something is very old,
# : "Tiene mas años que matusalén"(is older than Methuselah)
methAge = gui.NumericField(
meth_age = gui.NumericField(
order=4,
length=4, # That is, max allowed value is 9999
label=_('Age of Methuselah'),

@ -132,7 +132,7 @@ class Provider(services.ServiceProvider):
)

# : Is Methuselah istill alive?
methAlive = gui.CheckBoxField(
meth_alive = gui.CheckBoxField(
order=5,
label=_('Is Methuselah still alive?'),
tooltip=_('If you fail, this will not get saved :-)'),

@ -140,7 +140,7 @@ class Provider(services.ServiceProvider):
)

# : Is Methuselah istill alive?
methAlive2 = gui.CheckBoxField(
meth_alive2 = gui.CheckBoxField(
order=5,
label=_('Is Methuselah still alive BBBB?'),
tooltip=_('If you fail, this will not get saved BBBB'),

@ -148,14 +148,14 @@ class Provider(services.ServiceProvider):
)

# : Is Methuselah istill alive?
methAlive3 = gui.CheckBoxField(
meth_alive3 = gui.CheckBoxField(
order=5,
label=_('Is Methuselah still alive CCCC?'),
tooltip=_('If you fail, this will not get saved CCCC'),
default=True, # : By default, at new item, check this
)

methText = gui.TextField(
meth_text = gui.TextField(
order=6,
length=512,
lines=5,

@ -179,7 +179,7 @@ class Provider(services.ServiceProvider):
# If you say meth is alive, you are wrong!!! (i guess..)
# values are only passed from administration client. Internals
# instantiations are always empty.
if values and self.methAlive.as_bool():
if values and self.meth_alive.as_bool():
raise exceptions.ui.ValidationError(_('Methuselah is not alive!!! :-)'))

# Marshal and unmarshal are defaults ones, also enought

@ -214,8 +214,8 @@ class Provider(services.ServiceProvider):
instance = Provider(env, data)
logger.debug(
'Methuselah has %s years and is %s :-)',
instance.methAge.value,
instance.methAlive.value,
instance.meth_age.value,
instance.meth_alive.value,
)
except exceptions.ui.ValidationError as e:
# If we say that meth is alive, instantiation will

@ -236,10 +236,10 @@ class Provider(services.ServiceProvider):
Sample method, in fact in this we just return
the value of host field, that is an string
"""
return self.remoteHost.value
return self.remote_host.value

def methYears(self) -> int:
def meth_years(self) -> int:
"""
Another sample return, it will in fact return the Methuselah years
"""
return self.methAge.as_int()
return self.meth_age.as_int()
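
The hunks above only rename the sample provider's fields and helpers to snake_case; the gui field API itself (order, length, label, tooltip, default and the .value / .as_int() / .as_bool() accessors) is unchanged. A minimal sketch of the resulting pattern, using a hypothetical SampleProvider and omitting the type metadata a real provider declares (illustrative only, not part of this commit):

from django.utils.translation import gettext_noop as _
from uds.core import exceptions, services
from uds.core.ui import gui


class SampleProvider(services.ServiceProvider):
    # snake_case field names; labels and tooltips still go through gettext_noop
    remote_host = gui.TextField(order=1, length=64, label=_('Remote host'))
    meth_age = gui.NumericField(order=4, length=4, label=_('Age of Methuselah'))
    meth_alive = gui.CheckBoxField(order=5, label=_('Is Methuselah still alive?'), default=True)

    def initialize(self, values) -> None:
        # 'values' is only filled when invoked from the administration client
        if values and self.meth_alive.as_bool():
            raise exceptions.ui.ValidationError(_('Methuselah is not alive!!! :-)'))

    def host(self) -> str:
        # mirrors the sample method above that returns the stored host field value
        return self.remote_host.value

    def meth_years(self) -> int:
        return self.meth_age.as_int()
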

@ -138,7 +138,7 @@ class ServiceOne(services.Service):
default='1234', # : Default password are nonsense?? :-)
)

baseName = gui.TextField(
basename = gui.TextField(
order=3,
label=_('Services names'),
tooltip=_('Base name for this user services'),

@ -173,7 +173,7 @@ class ServiceOne(services.Service):
# From now onwards, we implement our own methods, that will be used by,
# for example, services derived from this provider

def getColour(self) -> str:
def get_colour(self) -> str:
"""
Simply returns colour, for deployed user services.

@ -181,14 +181,14 @@ class ServiceOne(services.Service):
"""
return self.colour.value

def getPassw(self) -> str:
def get_passwd(self) -> str:
"""
Simply returns passwd, for deloyed user services
"""
return self.passw.value

def get_basename(self) -> str:
return self.baseName.value
return self.basename.value


class ServiceTwo(services.Service):

@ -58,7 +58,7 @@ class XenFailure(XenAPI.Failure, XenFault):
details = [] if details is None else details
super(XenFailure, self).__init__(details)

def isHandleInvalid(self) -> bool:
def is_valid_handle(self) -> bool:
return typing.cast(typing.Any, self.details[0]) == XenFailure.ex_handle_invalid

def needs_xen_tools(self) -> bool:

@ -361,10 +361,10 @@ class HTML5RDPTransport(transports.Transport):
domain = ''
username = proc[0]

azureAd = False
for_azure = False
if self.forced_domain.value != '':
if self.forced_domain.value.lower() == 'azuread':
azureAd = True
for_azure = True
else:
domain = self.forced_domain.value

@ -379,7 +379,7 @@ class HTML5RDPTransport(transports.Transport):
domain = ''

# If AzureAD, include it on username
if azureAd:
if for_azure:
username = 'AzureAD\\' + username

# Fix username/password acording to os manager

@ -403,15 +403,15 @@ class HTML5RDPTransport(transports.Transport):
password: str,
request: 'ExtendedHttpRequestWithUser', # pylint: disable=unused-argument
) -> str:
credsInfo = self.get_connection_info(userservice, user, password)
creds_info = self.get_connection_info(userservice, user, password)
username, password, domain = (
credsInfo.username,
credsInfo.password,
credsInfo.domain,
creds_info.username,
creds_info.password,
creds_info.domain,
)

scrambler = CryptoManager().random_string(32)
passwordCrypted = CryptoManager().symmetric_encrypt(password, scrambler)
crypted_password = CryptoManager().symmetric_encrypt(password, scrambler)

def as_txt(txt: typing.Any) -> str:
return 'true' if txt else 'false'

@ -422,7 +422,7 @@ class HTML5RDPTransport(transports.Transport):
'hostname': ip,
'port': self.rdp_port.as_int(),
'username': username,
'password': passwordCrypted,
'password': crypted_password,
'resize-method': 'display-update',
'ignore-cert': 'true',
'security': self.security.value,
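
A condensed, illustrative sketch of the flow in the hunks above, joining the renamed for_azure flag and crypted_password variable in one place. build_guacamole_params and the crypto argument (standing in for the CryptoManager() calls shown above) are hypothetical names used only for this example:

import typing


def build_guacamole_params(
    username: str,
    password: str,
    ip: str,
    rdp_port: int,
    forced_domain: str,
    crypto: typing.Any,
) -> dict[str, typing.Any]:
    domain = ''
    for_azure = False  # renamed from azureAd
    if forced_domain:
        if forced_domain.lower() == 'azuread':
            for_azure = True
        else:
            domain = forced_domain

    # If AzureAD, include it on username
    if for_azure:
        username = 'AzureAD\\' + username

    # The password is scrambled with a one-shot random key before being handed over
    scrambler = crypto.random_string(32)
    crypted_password = crypto.symmetric_encrypt(password, scrambler)  # renamed from passwordCrypted

    return {
        'hostname': ip,
        'port': str(rdp_port),
        'username': username,
        'password': crypted_password,
        'domain': domain,
    }
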

@ -41,7 +41,7 @@ from uds.core import types


class RDPFile:
fullScreen: bool = False
fullscreen: bool = False
width: str = '800'
height: str = '600'
bpp: str = '32'

@ -84,7 +84,7 @@ class RDPFile:
self.width = str(width)
self.height = str(height)
self.bpp = str(bpp)
self.fullScreen = fullscreen
self.fullscreen = fullscreen
self.target = target

def get(self) -> str:

@ -156,7 +156,7 @@ class RDPFile:
if self.multimon:
params.append('/multimon')

if self.fullScreen:
if self.fullscreen:
if self.target != types.os.KnownOS.MAC_OS:
params.append('/f')
else: # On mac, will fix this later...

@ -173,19 +173,19 @@ class RDPFile:

# RDP Security is A MUST if no username nor password is provided
# NLA requires USERNAME&PASSWORD previously
forceRDPSecurity = False
force_rdp_security = False
if self.username != '':
params.append('/u:{}'.format(self.username))
else:
forceRDPSecurity = True
force_rdp_security = True
if self.password:
params.append('/p:{}'.format(self.password))
else:
forceRDPSecurity = True
force_rdp_security = True
if self.domain != '':
params.append('/d:{}'.format(self.domain))

if forceRDPSecurity:
if force_rdp_security:
params.append('/sec:rdp')

if self.custom_parameters and self.custom_parameters.strip() != '':

@ -202,34 +202,34 @@ class RDPFile:
@property
def as_mstsc_file(self) -> str: # pylint: disable=too-many-statements
password = '{password}' # nosec: placeholder
screenMode = '2' if self.fullScreen else '1'
audioMode = '0' if self.redir_audio else '2'
screen_mode = '2' if self.fullscreen else '1'
audio_mode = '0' if self.redir_audio else '2'
serials = '1' if self.redir_serials else '0'
scards = '1' if self.redir_smartcards else '0'
printers = '1' if self.redir_printers else '0'
compression = '1' if self.compression else '0'
connectionBar = '1' if self.pin_bar else '0'
disableWallpaper = '0' if self.show_wallpaper else '1'
useMultimon = '1' if self.multimon else '0'
enableClipboard = '1' if self.enable_clipboard else '0'
connection_bar = '1' if self.pin_bar else '0'
disable_wallpaper = '0' if self.show_wallpaper else '1'
use_multimon = '1' if self.multimon else '0'
enable_clipboard = '1' if self.enable_clipboard else '0'

res = ''
res += 'screen mode id:i:' + screenMode + '\n'
res += 'screen mode id:i:' + screen_mode + '\n'
if self.width[0] != '-' and self.height[0] != '-':
res += 'desktopwidth:i:' + self.width + '\n'
res += 'desktopheight:i:' + self.height + '\n'
res += 'session bpp:i:' + self.bpp + '\n'
res += 'use multimon:i:' + useMultimon + '\n'
res += 'use multimon:i:' + use_multimon + '\n'
res += 'auto connect:i:1' + '\n'
res += 'full address:s:' + self.address + '\n'
res += 'compression:i:' + compression + '\n'
res += 'keyboardhook:i:2' + '\n'
res += 'audiomode:i:' + audioMode + '\n'
res += 'audiomode:i:' + audio_mode + '\n'
res += 'redirectprinters:i:' + printers + '\n'
res += 'redirectcomports:i:' + serials + '\n'
res += 'redirectsmartcards:i:' + scards + '\n'
res += 'redirectclipboard:i:' + enableClipboard + '\n'
res += 'displayconnectionbar:i:' + connectionBar + '\n'
res += 'redirectclipboard:i:' + enable_clipboard + '\n'
res += 'displayconnectionbar:i:' + connection_bar + '\n'
if self.username:
res += 'username:s:' + self.username + '\n'
res += 'domain:s:' + self.domain + '\n'

@ -238,10 +238,10 @@ class RDPFile:

res += 'alternate shell:s:' + '\n'
res += 'shell working directory:s:' + '\n'
res += 'disable wallpaper:i:' + disableWallpaper + '\n'
res += 'disable wallpaper:i:' + disable_wallpaper + '\n'
res += 'disable full window drag:i:1' + '\n'
res += 'disable menu anims:i:' + disableWallpaper + '\n'
res += 'disable themes:i:' + disableWallpaper + '\n'
res += 'disable menu anims:i:' + disable_wallpaper + '\n'
res += 'disable themes:i:' + disable_wallpaper + '\n'
res += 'bitmapcachepersistenable:i:1' + '\n'
res += 'authentication level:i:0' + '\n'
res += 'prompt for credentials:i:0' + '\n'

@ -265,7 +265,7 @@ class RDPFile:
if self.redir_webcam:
res += 'camerastoredirect:s:*\n'

enforcedSharesStr = (
enforced_shares_str = (
';'.join(self.enforced_shares.replace(' ', '').upper().split(',')) + ';'
if self.enforced_shares
else ''

@ -273,9 +273,9 @@ class RDPFile:

if self.redir_drives != 'false':
if self.redir_drives == 'true':
res += 'drivestoredirect:s:{}\n'.format(enforcedSharesStr or '*')
res += 'drivestoredirect:s:{}\n'.format(enforced_shares_str or '*')
else: # Dynamic
res += 'drivestoredirect:s:{}DynamicDrives\n'.format(enforcedSharesStr)
res += 'drivestoredirect:s:{}DynamicDrives\n'.format(enforced_shares_str)
res += 'devicestoredirect:s:*\n'

if self.redir_usb != 'false':

@ -306,24 +306,24 @@ class RDPFile:
@property
def as_rdp_url(self) -> str:
# Some parameters
screenMode = '2' if self.fullScreen else '1'
audioMode = '0' if self.redir_audio else '2'
useMultimon = '1' if self.multimon else '0'
disableWallpaper = '0' if self.show_wallpaper else '1'
screen_mode = '2' if self.fullscreen else '1'
audio_mode = '0' if self.redir_audio else '2'
use_multimon = '1' if self.multimon else '0'
disable_wallpaper = '0' if self.show_wallpaper else '1'
printers = '1' if self.redir_printers else '0'
credsspsupport = '1' if self.enable_credssp_support else '0'

parameters: list[tuple[str, str]] = [
('full address', f's:{self.address}'),
('audiomode', f'i:{audioMode}'),
('screen mode id', f'i:{screenMode}'),
('use multimon', f'i:{useMultimon}'),
('audiomode', f'i:{audio_mode}'),
('screen mode id', f'i:{screen_mode}'),
('use multimon', f'i:{use_multimon}'),
('desktopwidth', f'i:{self.width}'),
('desktopheight', f':{self.height}'),
('session bpp', f'i:{self.bpp}'),
('disable menu anims', f'i:{disableWallpaper}'),
('disable themes', f'i:{disableWallpaper}'),
('disable wallpaper', f'i:{disableWallpaper}'),
('disable menu anims', f'i:{disable_wallpaper}'),
('disable themes', f'i:{disable_wallpaper}'),
('disable wallpaper', f'i:{disable_wallpaper}'),
('redirectprinters', f'i:{printers}'),
('disable full window drag', 'i:1'),
('authentication level', f'i:0'),
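
As a reading aid (illustrative, not part of the commit): with the class defaults shown above (fullscreen=False, width='800', height='600', bpp='32') and multimon left off, the as_mstsc_file property emits a settings block that starts roughly like this:

screen mode id:i:1
desktopwidth:i:800
desktopheight:i:600
session bpp:i:32
use multimon:i:0
auto connect:i:1
full address:s:<address>
...
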

@ -18,7 +18,7 @@ if 'sp' not in globals():
globals()['sp'] = sp # type: ignore # pylint: disable=undefined-variable


def fixResolution() -> typing.List[str]:
def fix_resolution() -> typing.List[str]:
import re
import subprocess

@ -112,7 +112,7 @@ if executable in msrdc_list:
elif executable == xfreerdp:
# Fix resolution...
try:
xfparms = fixResolution()
xfparms = fix_resolution()
except Exception as e:
xfparms = list(map(lambda x: x.replace('#WIDTH#', '1400').replace('#HEIGHT#', '800'), sp['as_new_xfreerdp_params'])) # type: ignore

@ -25,7 +25,7 @@ if 'sp' not in globals():
globals()['sp'] = sp # type: ignore # pylint: disable=undefined-variable


def fixResolution() -> typing.List[str]:
def fix_resolution() -> typing.List[str]:
import re
import subprocess

@ -129,7 +129,7 @@ if executable in msrdc_list:
elif executable == xfreerdp:
# Fix resolution...
try:
xfparms = fixResolution()
xfparms = fix_resolution()
except Exception as e:
xfparms = list(map(lambda x: x.replace('#WIDTH#', '1400').replace('#HEIGHT#', '800'), sp['as_new_xfreerdp_params'])) # type: ignore

@ -53,7 +53,7 @@ secure-attention=ctrl+alt+end


class RemoteViewerFile:
connectionType: str = 'spice'
connection_type: str = 'spice'
host: str = ''
port: typing.Optional[str] = None
tls_port: typing.Optional[str] = None

@ -112,7 +112,7 @@ class RemoteViewerFile:
# If proxy is set

return TEMPLATE.format(
type=self.connectionType,
type=self.connection_type,
host=self.host,
port=self.port,
tls_port=self.tls_port,

@ -147,8 +147,8 @@ class BaseSpiceTransport(transports.Transport):
"""
ready = self.cache.get(ip)
if ready is None:
userServiceInstance = userservice.get_instance()
con = userServiceInstance.get_console_connection()
userservice_instance = userservice.get_instance()
con = userservice_instance.get_console_connection()

logger.debug('Connection data: %s', con)

@ -193,7 +193,7 @@ class BaseSpiceTransport(transports.Transport):

def process_user_password(
self,
userService: typing.Union['models.UserService', 'models.ServicePool'],
userservice: typing.Union['models.UserService', 'models.ServicePool'],
user: 'models.User',
password: str,
) -> types.connections.ConnectionData:

@ -209,7 +209,7 @@ class BaseSpiceTransport(transports.Transport):
username, password = '', ''

# Fix username/password acording to os manager
username, password = userService.process_user_password(username, password)
username, password = userservice.process_user_password(username, password)

return types.connections.ConnectionData(
protocol=self.protocol,

@ -98,8 +98,8 @@ class TSPICETransport(BaseSpiceTransport):
request: 'ExtendedHttpRequestWithUser',
) -> types.transports.TransportScript:
try:
userServiceInstance = userservice.get_instance()
con = userServiceInstance.get_console_connection()
userservice_instance = userservice.get_instance()
con = userservice_instance.get_console_connection()
except Exception:
logger.exception('Error getting console connection data')
raise

@ -109,8 +109,8 @@ class TSPICETransport(BaseSpiceTransport):
_('No console connection data received'),
)

tunnelFields = fields.get_tunnel_from_field(self.tunnel)
tunHost, tunPort = tunnelFields.host, tunnelFields.port
tunnel_field = fields.get_tunnel_from_field(self.tunnel)
tunnel_host, tunnel_port = tunnel_field.host, tunnel_field.port

# We MAY need two tickets, one for 'insecure' port an one for secure
ticket = ''

@ -161,8 +161,8 @@ class TSPICETransport(BaseSpiceTransport):
sp = {
'as_file': r.as_file,
'as_file_ns': r.as_file_ns,
'tunHost': tunHost,
'tunPort': tunPort,
'tunHost': tunnel_host,
'tunPort': tunnel_port,
'tunWait': self.tunnel_wait.as_int(),
'tunChk': self.verify_certificate.as_bool(),
'ticket': ticket,
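
Note that only the local variables are renamed in the hunks above; the sp dictionary keys (tunHost, tunPort, tunWait, tunChk) keep their original spelling, presumably because they are substituted into the client-side transport script. A self-contained sketch of that shape, with a hypothetical build_tunnel_params helper (illustrative only):

def build_tunnel_params(tunnel_host: str, tunnel_port: int, wait: int, check_cert: bool) -> dict:
    # Locals use snake_case; dict keys keep the spelling the client script expects
    return {
        'tunHost': tunnel_host,
        'tunPort': tunnel_port,
        'tunWait': wait,
        'tunChk': check_cert,
    }
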

@ -14,7 +14,7 @@ def log_error(err, username: str = None):
print(err)


def update_authorized_keys(username, pubKey):
def update_authorized_keys(username, public_key):
# No X2Go server on windows
if 'win' in sys.platform:
log_error('Not a linux platform')

@ -58,7 +58,7 @@ def update_authorized_keys(username, pubKey):
)
)
# Append pubkey
f.write('ssh-rsa {} UDS@X2GOCLIENT\n'.format(pubKey))
f.write('ssh-rsa {} UDS@X2GOCLIENT\n'.format(public_key))

# Ensure access is correct
os.chown(authorized_keys, uid, -1)

@ -92,13 +92,13 @@ class X2GOTransport(BaseX2GOTransport):
desktop = "/usr/bin/udsvapp " + self.custom_cmd.value
rootless = True

xf = x2go_file.getTemplate(
xf = x2go_file.get_template(
speed=self.speed.value,
pack=self.pack.value,
quality=self.quality.value,
sound=self.sound.as_bool(),
soundSystem=self.sound.value,
windowManager=desktop,
sound_system=self.sound.value,
window_manager=desktop,
exports=self.exports.as_bool(),
rootless=rootless,
width=width,

@ -276,11 +276,11 @@ class BaseX2GOTransport(transports.Transport):
"""
return security.generate_ssh_keypair_for_ssh(SSH_KEY_LENGTH)

def get_authorization_script(self, user: str, pubKey: str) -> str:
def get_authorization_script(self, user: str, public_key: str) -> str:
with open(os.path.join(os.path.dirname(__file__), 'scripts/authorize.py'), encoding='utf8') as f:
data = f.read()

return data.replace('__USER__', user).replace('__KEY__', pubKey)
return data.replace('__USER__', user).replace('__KEY__', public_key)

def get_and_push_key(self, username: str, userservice: 'models.UserService') -> tuple[str, str]:
private_ssh_key, public_ssh_key = self.gen_keypair_for_ssh()
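
A hedged usage sketch of the renamed public_key parameter; push_ssh_key is a hypothetical helper, while gen_keypair_for_ssh and get_authorization_script are the methods shown in the hunk above (illustrative only):

def push_ssh_key(transport: 'BaseX2GOTransport', username: str) -> str:
    # Generate a keypair and build the authorization script that installs the public key
    _private_ssh_key, public_ssh_key = transport.gen_keypair_for_ssh()
    # The returned script is scripts/authorize.py with __USER__ and __KEY__ replaced
    return transport.get_authorization_script(username, public_ssh_key)
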

@ -97,20 +97,20 @@ sshproxykrblogin=false
'''


def getTemplate(
def get_template(
speed: typing.Any,
pack: typing.Any,
quality: typing.Any,
sound: typing.Any,
soundSystem: typing.Any,
windowManager: typing.Any,
sound_system: typing.Any,
window_manager: typing.Any,
exports: typing.Any,
rootless: bool,
width: typing.Any,
height: typing.Any,
user: typing.Any,
) -> str:
trueFalse: collections.abc.Callable[[bool], str] = lambda x: 'true' if x else 'false'
true_false: collections.abc.Callable[[bool], str] = lambda x: 'true' if x else 'false'
export = 'export="{export}"' if exports else ''
if width == -1 or height == -1:
width = 800

@ -122,9 +122,9 @@ def getTemplate(
speed=speed,
pack=pack,
quality=quality,
sound=trueFalse(sound),
soundSystem=soundSystem,
windowManager=windowManager,
sound=true_false(sound),
soundSystem=sound_system,
windowManager=window_manager,
export=export,
rootless=rootless and 'true' or 'false',
width=width,

@ -115,13 +115,13 @@ class TX2GOTransport(BaseX2GOTransport):
desktop = "/usr/bin/udsvapp " + self.custom_cmd.value
rootless = True

xf = x2go_file.getTemplate(
xf = x2go_file.get_template(
speed=self.speed.value,
pack=self.pack.value,
quality=self.quality.value,
sound=self.sound.as_bool(),
soundSystem=self.sound.value,
windowManager=desktop,
sound_system=self.sound.value,
window_manager=desktop,
exports=self.exports.as_bool(),
rootless=rootless,
width=width,

@ -137,12 +137,12 @@ class TX2GOTransport(BaseX2GOTransport):
key=key,
)

tunnelFields = fields.get_tunnel_from_field(self.tunnel)
tunHost, tunPort = tunnelFields.host, tunnelFields.port
tunnel_field = fields.get_tunnel_from_field(self.tunnel)
tunnel_host, tunnel_port = tunnel_field.host, tunnel_field.port

sp = {
'tunHost': tunHost,
'tunPort': tunPort,
'tunHost': tunnel_host,
'tunPort': tunnel_port,
'tunWait': self.tunnel_wait.as_int(),
'tunChk': self.verify_certificate.as_bool(),
'ticket': ticket,

@ -125,8 +125,8 @@ def auth_callback_stage2(request: 'ExtendedHttpRequestWithUser', ticket_id: str)
# If MFA is provided, we need to redirect to MFA page
request.authorized = True
if authenticator.get_type().provides_mfa() and authenticator.mfa:
authInstance = authenticator.get_instance()
if authInstance.mfa_identifier(result.user.name):
auth_instance = authenticator.get_instance()
if auth_instance.mfa_identifier(result.user.name):
request.authorized = False # We can ask for MFA so first disauthorize user
response = HttpResponseRedirect(reverse('page.mfa'))

@ -160,19 +160,19 @@ def auth_info(request: 'HttpRequest', authenticator_name: str) -> HttpResponse:
)
if not authenticator:
raise Exception('Authenticator not found')
authInstance = authenticator.get_instance()
if typing.cast(typing.Any, authInstance.get_info) == auths.Authenticator.get_info:
auth_instance = authenticator.get_instance()
if typing.cast(typing.Any, auth_instance.get_info) == auths.Authenticator.get_info:
raise Exception() # This authenticator do not provides info

info = authInstance.get_info(request.GET)
info = auth_instance.get_info(request.GET)

if info is None:
raise Exception() # This auth do not provides info

infoContent = info[0]
infoType = info[1] or 'text/html'
info_content = info[0]
info_type = info[1] or 'text/html'

return HttpResponse(infoContent, content_type=infoType)
return HttpResponse(info_content, content_type=info_type)
except Exception:
logger.exception('got')
return HttpResponse(_('Authenticator does not provide information'))

@ -355,7 +355,7 @@ def logout(request: types.requests.ExtendedHttpRequestWithUser) -> HttpResponse:
auth.log_logout(request)
request.session['restricted'] = False # Remove restricted
request.authorized = False
logoutResponse = request.user.logout(request)
url = logoutResponse.url if logoutResponse.success == types.auth.AuthenticationState.REDIRECT else None
logout_response = request.user.logout(request)
url = logout_response.url if logout_response.success == types.auth.AuthenticationState.REDIRECT else None

return auth.web_logout(request, url or request.session.get('logouturl', None))

@ -70,15 +70,15 @@ def transport_icon(request: 'ExtendedHttpRequest', transport_id: str) -> HttpRes


@cache_page(3600, key_prefix='img', cache='memory')
def service_image(request: 'ExtendedHttpRequest', idImage: str) -> HttpResponse:
def service_image(request: 'ExtendedHttpRequest', image_id: str) -> HttpResponse:
try:
icon = Image.objects.get(uuid=process_uuid(idImage))
icon = Image.objects.get(uuid=process_uuid(image_id))
return icon.image_as_response()
except Image.DoesNotExist:
pass # Tries to get image from transport

try:
transport: Transport = Transport.objects.get(uuid=process_uuid(idImage))
transport: Transport = Transport.objects.get(uuid=process_uuid(image_id))
return HttpResponse(transport.get_instance().icon(), content_type='image/png')
except Exception:
return HttpResponse(DEFAULT_IMAGE, content_type='image/png')

@ -258,11 +258,11 @@ def update_transport_ticket(

if username:
try:
userService = models.UserService.objects.get(
userservice = models.UserService.objects.get(
uuid=data['ticket-info'].get('userService', None)
)
UserServiceManager.manager().notify_preconnect(
userService,
userservice,
types.connections.ConnectionData(
username=username,
protocol=data.get('protocol', ''),

@ -86,12 +86,12 @@ class HangedCleaner(Job):
)

# Type
servicePool: ServicePool
servicepool: ServicePool

for servicePool in servicepools_with_hanged:
logger.debug('Searching for hanged services for %s', servicePool)
for servicepool in servicepools_with_hanged:
logger.debug('Searching for hanged services for %s', servicepool)
us: UserService
for us in servicePool.userServices.filter(flt):
for us in servicepool.userServices.filter(flt):
if us.destroy_after: # It's waiting for removal, skip this very specific case
continue
logger.debug('Found hanged service %s', us)

@ -105,7 +105,7 @@ class HangedCleaner(Job):
types.log.LogSource.INTERNAL,
)
log.log(
servicePool,
servicepool,
types.log.LogLevel.ERROR,
f'User service {us.friendly_name} hanged on removal. Restarting removal.',
)

@ -118,7 +118,7 @@ class HangedCleaner(Job):
types.log.LogSource.INTERNAL,
)
log.log(
servicePool,
servicepool,
types.log.LogLevel.ERROR,
f'Removing user service {us.friendly_name} because it seems to be hanged'
)

@ -58,13 +58,13 @@ class LogMaintenance(Job):
):
# First, ensure we do not have more than requested logs, and we can put one more log item
try:
ownerType = log.LogObjectType(owner_type)
owner_type = log.LogObjectType(owner_type)
except ValueError:
# If we do not know the owner type, we will delete all logs for this owner
models.Log.objects.filter(owner_id=owner_id, owner_type=owner_type).delete()
continue

max_elements = ownerType.get_max_elements()
max_elements = owner_type.get_max_elements()
if 0 < max_elements < count: # Negative max elements means "unlimited"
# We will delete the oldest ones
for record in models.Log.objects.filter(owner_id=owner_id, owner_type=owner_type).order_by(

@ -45,22 +45,22 @@ class ScheduledAction(Job):
friendly_name = 'Scheduled action runner'

def run(self) -> None:
configuredAction: CalendarAction
for configuredAction in CalendarAction.objects.filter(
configured_action: CalendarAction
for configured_action in CalendarAction.objects.filter(
service_pool__service__provider__maintenance_mode=False, # Avoid maintenance
service_pool__state=types.states.State.ACTIVE, # Avoid Non active pools
next_execution__lt=sql_now(),
).order_by('next_execution'):
logger.info(
'Executing calendar action %s.%s (%s)',
configuredAction.service_pool.name,
configuredAction.calendar.name,
configuredAction.action,
configured_action.service_pool.name,
configured_action.calendar.name,
configured_action.action,
)
try:
configuredAction.execute()
configured_action.execute()
except Exception:
logger.exception(
'Got an exception executing calendar access action: %s',
configuredAction,
configured_action,
)

@ -83,12 +83,12 @@ class DeployedServiceRemover(Job):
for pub in publishing:
pub.cancel()
# Now all publishments are canceling, let's try to cancel cache and assigned
uServices: collections.abc.Iterable[UserService] = service_pool.userServices.filter(
userservices: collections.abc.Iterable[UserService] = service_pool.userServices.filter(
state=State.PREPARING
)
for userService in uServices:
logger.debug('Canceling %s', userService)
userService.cancel()
for userservice in userservices:
logger.debug('Canceling %s', userservice)
userservice.cancel()
# Nice start of removal, maybe we need to do some limitation later, but there should not be too much services nor publications cancelable at once
service_pool.state = State.REMOVING
service_pool.state_date = sql_now() # Now

@ -212,7 +212,7 @@ class ServiceCacheUpdater(Job):
if servicepool_stats.servicepool is None:
return

cacheItems: list[UserService] = [
cache_items: list[UserService] = [
i
for i in servicepool_stats.servicepool.cached_users_services()
.filter(

@ -225,7 +225,7 @@ class ServiceCacheUpdater(Job):
if not i.destroy_after
]

if not cacheItems:
if not cache_items:
logger.debug(
'There is more services than max configured, but could not reduce cache L1 cause its already empty'
)

@ -233,7 +233,7 @@ class ServiceCacheUpdater(Job):

if servicepool_stats.l2_cache_count < servicepool_stats.servicepool.cache_l2_srvs:
valid = None
for n in cacheItems:
for n in cache_items:
if n.needs_osmanager():
if State.from_str(n.state).is_usable() is False or State.from_str(n.os_state).is_usable():
valid = n

@ -246,7 +246,7 @@ class ServiceCacheUpdater(Job):
valid.move_to_level(types.services.CacheLevel.L2)
return

cache = cacheItems[0]
cache = cache_items[0]
cache.remove_or_cancel()

def reduce_l2_cache(

@ -257,7 +257,7 @@ class ServiceCacheUpdater(Job):
return
logger.debug("Reducing L2 cache erasing a service in cache for %s", servicepool_stats.servicepool.name)
if servicepool_stats.l2_cache_count > 0:
cacheItems = (
cache_items = (
servicepool_stats.servicepool.cached_users_services()
.filter(
UserServiceManager.manager().get_cache_state_filter(

@ -267,7 +267,7 @@ class ServiceCacheUpdater(Job):
.order_by('creation_date')
)
# TODO: Look first for non finished cache items and cancel them?
cache: UserService = cacheItems[0]
cache: UserService = cache_items[0]
cache.remove_or_cancel()

def run(self) -> None:

@ -61,7 +61,7 @@ class StuckCleaner(Job):
# Locate service pools with pending assigned service in use
servicepools_with_stucks = (
ServicePool.objects.annotate(
stuckCount=Count(
stuck_count=Count(
'userServices',
filter=Q(userServices__state_date__lt=since_state)
& (

@ -73,12 +73,12 @@ class StuckCleaner(Job):
)
)
.filter(service__provider__maintenance_mode=False, state=types.states.State.ACTIVE)
.exclude(stuckCount=0)
.exclude(stuck_count=0)
)

# Info states are removed on UserServiceCleaner and VALID_STATES are ok, or if "hanged", checked on "HangedCleaner"
def _retrieve_stuck_user_services(servicePool: ServicePool) -> collections.abc.Iterable[UserService]:
q = servicePool.userServices.filter(state_date__lt=since_state)
def _retrieve_stuck_user_services(servicepool: ServicePool) -> collections.abc.Iterable[UserService]:
q = servicepool.userServices.filter(state_date__lt=since_state)
# Get all that are not in valid or info states, AND the ones that are "PREPARING" with
# "destroy_after" property set (exists) (that means that are waiting to be destroyed after initializations)
yield from q.exclude(state__in=types.states.State.INFO_STATES + types.states.State.VALID_STATES)

@ -80,12 +80,12 @@ class UserServiceRemover(Job):
manager = UserServiceManager.manager()

with transaction.atomic():
removeFrom = sql_now() - timedelta(
remove_since = sql_now() - timedelta(
seconds=10
) # We keep at least 10 seconds the machine before removing it, so we avoid connections errors
candidates: collections.abc.Iterable[UserService] = UserService.objects.filter(
state=State.REMOVABLE,
state_date__lt=removeFrom,
state_date__lt=remove_since,
deployed_service__service__provider__maintenance_mode=False,
).iterator(chunk_size=max_to_remove)