mirror of https://github.com/dkmstr/openuds.git synced 2024-12-22 13:34:04 +03:00

Enough linting for today... :)

Author: Adolfo Gómez García
Date: 2023-04-15 21:42:57 +02:00
parent 8adc3ca40d
commit fa8e77c750
10 changed files with 53 additions and 44 deletions

```diff
@@ -29,6 +29,8 @@
 """
 @author: Adolfo Gómez, dkmaster at dkmon dot com
 """
+# pylint: disable=unused-import
 from .unique_gid_generator import UniqueGIDGenerator
 from .unique_mac_generator import UniqueMacGenerator
 from .unique_name_generator import UniqueNameGenerator
```
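The imports above exist only to re-export the generators at package level, which is why pylint's unused-import warning is silenced. For reference, a hypothetical alternative that records the same intent without the pragma is an explicit `__all__` (names listed there are treated as used by most linters, pylint included):

```python
# Hypothetical variant of the same __init__.py: an explicit __all__
# marks the re-exports as the package's public API.
from .unique_gid_generator import UniqueGIDGenerator
from .unique_mac_generator import UniqueMacGenerator
from .unique_name_generator import UniqueNameGenerator

__all__ = ['UniqueGIDGenerator', 'UniqueMacGenerator', 'UniqueNameGenerator']
```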

```diff
@@ -35,31 +35,31 @@ from collections import defaultdict
 import defusedxml.ElementTree as ET
 
 if typing.TYPE_CHECKING:
-    from xml.etree.cElementTree import Element  # nosec: Only type checking
+    from xml.etree.ElementTree import Element  # nosec: Only type checking
 
-def etree_to_dict(t: 'Element') -> typing.Mapping[str, typing.Any]:
-    d: typing.MutableMapping[str, typing.Any] = {}
-    if t.attrib:
-        d.update({t.tag: {}})
+def etree_to_dict(tree: 'Element') -> typing.Mapping[str, typing.Any]:
+    dct: typing.MutableMapping[str, typing.Any] = {}
+    if tree.attrib:
+        dct.update({tree.tag: {}})
 
-    children = list(t)
+    children = list(tree)
     if children:
         dd = defaultdict(list)
         for dc in map(etree_to_dict, children):
             for k, v in dc.items():
                 dd[k].append(v)
-        d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
-    if t.attrib:
-        d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())
-    if t.text:
-        text = t.text.strip()
-        if children or t.attrib:
+        dct = {tree.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
+    if tree.attrib:
+        dct[tree.tag].update(('@' + k, v) for k, v in tree.attrib.items())
+    if tree.text:
+        text = tree.text.strip()
+        if children or tree.attrib:
             if text:
-                d[t.tag]['#text'] = text
+                dct[tree.tag]['#text'] = text
         else:
-            d[t.tag] = text
-    return d
+            dct[tree.tag] = text
+    return dct
 
 def parse(xml_string: str) -> typing.Mapping[str, typing.Any]:
```
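Besides moving the type-only import off the long-removed `cElementTree` alias, this hunk just renames `t`/`d` to `tree`/`dct`. Behavior is unchanged: the function folds an element into a nested mapping, collecting repeated child tags into lists, prefixing attributes with `@`, and storing mixed text under `#text`. A quick usage sketch, assuming the module's own imports (the sample XML is made up):

```python
import defusedxml.ElementTree as ET  # safe drop-in for xml.etree parsing

root = ET.fromstring('<user id="1"><name>ana</name><name>bob</name></user>')
print(etree_to_dict(root))
# {'user': {'name': ['ana', 'bob'], '@id': '1'}}
```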

```diff
@@ -43,8 +43,8 @@ def initialize() -> None:
     This imports all packages that are descendant of this package, and, after that,
     it register all subclases of service provider as
     """
-    from uds.core import jobs
-    from uds.core.managers import taskManager
+    from uds.core import jobs  # pylint: disable=import-outside-toplevel
+    from uds.core.managers import taskManager  # pylint: disable=import-outside-toplevel
 
     def registerer(cls: typing.Type[jobs.Job]) -> None:
         if cls.__module__.startswith('uds.core.workers'):
```
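These imports live inside `initialize()` deliberately (the usual reason is avoiding an import cycle at module load time); the new trailing pragmas simply record that `import-outside-toplevel` is intentional. A minimal sketch of the pattern, with hypothetical module names:

```python
# registry.py (hypothetical): importing 'workers' at module level would
# complete an import cycle (workers imports registry back), so the
# import is deferred until initialize() actually runs.
_JOBS: list = []

def register(job) -> None:
    _JOBS.append(job)

def initialize() -> None:
    from workers import all_jobs  # pylint: disable=import-outside-toplevel

    for job in all_jobs():
        register(job)
```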

```diff
@@ -105,9 +105,7 @@ class HangedCleaner(Job):
             log.doLog(
                 servicePool,
                 log.ERROR,
-                'User service {} hanged on removal. Restarting removal.'.format(
-                    us.friendly_name
-                ),
+                f'User service {us.friendly_name} hanged on removal. Restarting removal.',
             )
             us.release()  # Mark it again as removable, and let's see
         else:
@@ -120,8 +118,6 @@ class HangedCleaner(Job):
             log.doLog(
                 servicePool,
                 log.ERROR,
-                'Removing user service {} because it seems to be hanged'.format(
-                    us.friendly_name
-                ),
+                f'Removing user service {us.friendly_name} because it seems to be hanged'
             )
             us.releaseOrCancel()
```

```diff
@@ -32,16 +32,14 @@
 import logging
 
 from uds.core.jobs import Job
-from uds.core.util.config import GlobalConfig
 from uds.core.util.state import State
 from uds.models import ServicePool, getSqlDatetime
 
+# from uds.core.util.config import GlobalConfig
 
 logger = logging.getLogger(__name__)
 
 class Notifications(Job):
     frecuency = 60  # Once every minute
-    frecuency_cfg = GlobalConfig.CHECK_UNUSED_DELAY
+    # frecuency_cfg = GlobalConfig.XXXX
     friendly_name = 'Notifications worker'
 
     def run(self) -> None:
```

```diff
@@ -57,14 +57,13 @@ class SchedulerHousekeeping(Job):
         Look for "hanged" scheduler tasks and reschedule them
         """
         since = getSqlDatetime() - timedelta(minutes=MAX_EXECUTION_MINUTES)
-        for i in range(3):  # Retry three times in case of lockout error
+        for _ in range(3):  # Retry three times in case of lockout error
             try:
                 with transaction.atomic():
                     Scheduler.objects.select_for_update(skip_locked=True).filter(
                         last_execution__lt=since, state=State.RUNNING
                     ).update(owner_server='', state=State.FOR_EXECUTE)
                 break
-            except Exception as e:
+            except Exception:
                 logger.info('Retrying Scheduler cleanup transaction')
                 time.sleep(1)
```
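The lint changes here are small (`_` for the unused loop index, dropping the unused `as e` binding), but the surrounding pattern is worth noting: `select_for_update(skip_locked=True)` inside `transaction.atomic()` lets several servers compete for hanged rows without blocking on each other, and the bounded loop absorbs transient lock failures. A generic sketch of the same idea, with a hypothetical queryset and state value:

```python
import time

from django.db import DatabaseError, transaction

def requeue_stale(queryset, retries: int = 3) -> None:
    """Reset stale rows; rows locked by another worker are skipped."""
    for _ in range(retries):
        try:
            with transaction.atomic():
                # skip_locked=True: rows held by another transaction are
                # silently left out instead of blocking this one.
                queryset.select_for_update(skip_locked=True).update(state='pending')
            break  # success, stop retrying
        except DatabaseError:
            time.sleep(1)  # transient lock trouble: back off and retry
```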

```diff
@@ -209,7 +209,11 @@ class ServiceCacheUpdater(Job):
         return servicesPools
 
     def growL1Cache(
-        self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int
+        self,
+        servicePool: ServicePool,
+        cacheL1: int,  # pylint: disable=unused-argument
+        cacheL2: int,
+        assigned: int,  # pylint: disable=unused-argument
     ) -> None:
         """
         This method tries to enlarge L1 cache.
@@ -268,7 +272,11 @@ class ServiceCacheUpdater(Job):
             logger.exception('Exception')
 
     def growL2Cache(
-        self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int
+        self,
+        servicePool: ServicePool,
+        cacheL1: int,  # pylint: disable=unused-argument
+        cacheL2: int,  # pylint: disable=unused-argument
+        assigned: int,  # pylint: disable=unused-argument
     ) -> None:
         """
         Tries to grow L2 cache of service.
@@ -293,7 +301,11 @@ class ServiceCacheUpdater(Job):
         # TODO: When alerts are ready, notify this
 
     def reduceL1Cache(
-        self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int
+        self,
+        servicePool: ServicePool,
+        cacheL1: int,  # pylint: disable=unused-argument
+        cacheL2: int,
+        assigned: int,  # pylint: disable=unused-argument
     ):
         logger.debug("Reducing L1 cache erasing a service in cache for %s", servicePool)
         # We will try to destroy the newest cacheL1 element that is USABLE if the deployer can't cancel a new service creation
@@ -334,7 +346,11 @@ class ServiceCacheUpdater(Job):
             cache.removeOrCancel()
 
     def reduceL2Cache(
-        self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int
+        self,
+        servicePool: ServicePool,
+        cacheL1: int,  # pylint: disable=unused-argument
+        cacheL2: int,
+        assigned: int,  # pylint: disable=unused-argument
     ):
         logger.debug(
             "Reducing L2 cache erasing a service in cache for %s", servicePool.name
@@ -350,7 +366,7 @@ class ServiceCacheUpdater(Job):
             .order_by('creation_date')
         )
         # TODO: Look first for non finished cache items and cancel them?
-        cache: UserService = cacheItems[0]
+        cache: UserService = cacheItems[0]  # type: ignore  # Slicing is not supported by pylance right now
         cache.removeOrCancel()
 
     def run(self) -> None:
```
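All four cache methods get the same treatment: each signature is expanded to one parameter per line so that individual arguments can carry an `unused-argument` pragma (the methods share a common call signature even though each uses only some of the arguments). A hypothetical alternative, where renaming is acceptable, is pylint's default ignored-argument-names pattern, which skips names starting with `_`:

```python
class CacheUpdater:  # hypothetical sketch, not the UDS class
    def grow_l2_cache(self, service_pool, _cache_l1, _cache_l2, _assigned) -> None:
        # Leading underscores mark the arguments as intentionally unused,
        # so no inline pylint disables are needed.
        ...
```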

```diff
@@ -32,8 +32,6 @@
 import logging
 import typing
 
-from django.utils.translation import gettext_lazy as _
-
 from uds import models
 from uds.core.util.state import State
 from uds.core.util.stats import counters
@@ -172,7 +170,7 @@ class StatsAccumulator(Job):
     def run(self):
         try:
             StatsManager.manager().acummulate(config.GlobalConfig.STATS_ACCUM_MAX_CHUNK_TIME.getInt())
-        except Exception as e:
+        except Exception:
             logger.exception('Compressing counters')
         logger.debug('Done statistics compression')
```
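Dropping the `as e` binding is safe here because `logger.exception()` already attaches the active exception's traceback to the log record; nothing else referenced `e`. A minimal illustration of that logging behavior:

```python
import logging

logging.basicConfig()
logger = logging.getLogger(__name__)

try:
    1 / 0
except Exception:
    # Logs at ERROR level and appends the ZeroDivisionError traceback,
    # without the exception needing to be bound to a name.
    logger.exception('Compressing counters')
```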

```diff
@@ -96,9 +96,7 @@ class StuckCleaner(Job):
             log.doLog(
                 servicePool,
                 log.ERROR,
-                'User service {} has been hard removed because it\'s stuck'.format(
-                    stuck.name
-                ),
+                f'User service {stuck.name} has been hard removed because it\'s stuck',
             )
             # stuck.setState(State.ERROR)
             stuck.delete()
```

```diff
@@ -89,7 +89,9 @@ class UserServiceRemover(Job):
             state=State.REMOVABLE,
             state_date__lt=removeFrom,
             deployed_service__service__provider__maintenance_mode=False,
-        ).iterator(chunk_size=removeAtOnce)
+        ).iterator(
+            chunk_size=removeAtOnce
+        )
         # We remove at once, but we limit the number of items to remove
```
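This final hunk only rewraps the call, but `.iterator(chunk_size=...)` is the load-bearing part: it streams rows from the database in batches instead of materializing (and caching) the whole queryset, which suits a cleanup job that may walk many user services. A generic sketch with a hypothetical queryset and limit:

```python
def remove_batch(candidates, limit: int) -> int:
    """Delete up to 'limit' rows, streaming them in DB-side batches."""
    removed = 0
    # chunk_size controls how many rows each fetch pulls from the DB;
    # the queryset's result cache is bypassed entirely.
    for item in candidates.iterator(chunk_size=limit):
        item.delete()
        removed += 1
        if removed >= limit:
            break  # remove at most 'limit' items per run
    return removed
```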