From fa8e77c7505cb5dddfeba1a437324b450099a2f2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adolfo=20G=C3=B3mez=20Garc=C3=ADa?=
Date: Sat, 15 Apr 2023 21:42:57 +0200
Subject: [PATCH] Enough linting for today... :)

---
 server/src/uds/core/util/unique.py           |  2 ++
 server/src/uds/core/util/xml2dict.py         | 30 +++++++++----------
 server/src/uds/core/workers/__init__.py      |  4 +--
 .../workers/hanged_userservice_cleaner.py    |  8 ++---
 server/src/uds/core/workers/notifications.py |  6 ++--
 .../core/workers/scheduler_housekeeping.py   |  5 ++--
 .../workers/servicepools_cache_updater.py    | 26 ++++++++++++----
 .../src/uds/core/workers/stats_collector.py  |  4 +--
 server/src/uds/core/workers/stuck_cleaner.py |  6 ++--
 .../uds/core/workers/userservice_cleaner.py  |  6 ++--
 10 files changed, 53 insertions(+), 44 deletions(-)

diff --git a/server/src/uds/core/util/unique.py b/server/src/uds/core/util/unique.py
index bd420cfb4..21a18c29d 100644
--- a/server/src/uds/core/util/unique.py
+++ b/server/src/uds/core/util/unique.py
@@ -29,6 +29,8 @@
 """
 @author: Adolfo Gómez, dkmaster at dkmon dot com
 """
+
+# pylint: disable=unused-import
 from .unique_gid_generator import UniqueGIDGenerator
 from .unique_mac_generator import UniqueMacGenerator
 from .unique_name_generator import UniqueNameGenerator
diff --git a/server/src/uds/core/util/xml2dict.py b/server/src/uds/core/util/xml2dict.py
index bb0785e3f..3e32d5a42 100644
--- a/server/src/uds/core/util/xml2dict.py
+++ b/server/src/uds/core/util/xml2dict.py
@@ -35,31 +35,31 @@ from collections import defaultdict
 import defusedxml.ElementTree as ET
 
 if typing.TYPE_CHECKING:
-    from xml.etree.cElementTree import Element  # nosec: Only type checking
+    from xml.etree.ElementTree import Element  # nosec: Only type checking
 
 
-def etree_to_dict(t: 'Element') -> typing.Mapping[str, typing.Any]:
-    d: typing.MutableMapping[str, typing.Any] = {}
-    if t.attrib:
-        d.update({t.tag: {}})
+def etree_to_dict(tree: 'Element') -> typing.Mapping[str, typing.Any]:
+    dct: typing.MutableMapping[str, typing.Any] = {}
+    if tree.attrib:
+        dct.update({tree.tag: {}})
 
-    children = list(t)
+    children = list(tree)
     if children:
         dd = defaultdict(list)
         for dc in map(etree_to_dict, children):
             for k, v in dc.items():
                 dd[k].append(v)
-        d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
-    if t.attrib:
-        d[t.tag].update(('@' + k, v) for k, v in t.attrib.items())
-    if t.text:
-        text = t.text.strip()
-        if children or t.attrib:
+        dct = {tree.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
+    if tree.attrib:
+        dct[tree.tag].update(('@' + k, v) for k, v in tree.attrib.items())
+    if tree.text:
+        text = tree.text.strip()
+        if children or tree.attrib:
             if text:
-                d[t.tag]['#text'] = text
+                dct[tree.tag]['#text'] = text
         else:
-            d[t.tag] = text
-    return d
+            dct[tree.tag] = text
+    return dct
 
 
 def parse(xml_string: str) -> typing.Mapping[str, typing.Any]:
diff --git a/server/src/uds/core/workers/__init__.py b/server/src/uds/core/workers/__init__.py
index 7faafb328..6f91381f8 100644
--- a/server/src/uds/core/workers/__init__.py
+++ b/server/src/uds/core/workers/__init__.py
@@ -43,8 +43,8 @@ def initialize() -> None:
     This imports all packages that are descendant of this package, and, after that,
     it register all subclases of service provider as
     """
-    from uds.core import jobs
-    from uds.core.managers import taskManager
+    from uds.core import jobs  # pylint: disable=import-outside-toplevel
+    from uds.core.managers import taskManager  # pylint: disable=import-outside-toplevel
 
     def registerer(cls: typing.Type[jobs.Job]) -> None:
         if cls.__module__.startswith('uds.core.workers'):
diff --git a/server/src/uds/core/workers/hanged_userservice_cleaner.py b/server/src/uds/core/workers/hanged_userservice_cleaner.py
index 928377429..98c485a40 100644
--- a/server/src/uds/core/workers/hanged_userservice_cleaner.py
+++ b/server/src/uds/core/workers/hanged_userservice_cleaner.py
@@ -105,9 +105,7 @@ class HangedCleaner(Job):
                 log.doLog(
                     servicePool,
                     log.ERROR,
-                    'User service {} hanged on removal. Restarting removal.'.format(
-                        us.friendly_name
-                    ),
+                    f'User service {us.friendly_name} hanged on removal. Restarting removal.',
                 )
                 us.release()  # Mark it again as removable, and let's see
             else:
@@ -120,8 +118,6 @@ class HangedCleaner(Job):
                 log.doLog(
                     servicePool,
                     log.ERROR,
-                    'Removing user service {} because it seems to be hanged'.format(
-                        us.friendly_name
-                    ),
+                    f'Removing user service {us.friendly_name} because it seems to be hanged'
                 )
                 us.releaseOrCancel()
diff --git a/server/src/uds/core/workers/notifications.py b/server/src/uds/core/workers/notifications.py
index 7c808aa47..c57560280 100644
--- a/server/src/uds/core/workers/notifications.py
+++ b/server/src/uds/core/workers/notifications.py
@@ -32,16 +32,14 @@
 import logging
 
 from uds.core.jobs import Job
-from uds.core.util.config import GlobalConfig
-from uds.core.util.state import State
-from uds.models import ServicePool, getSqlDatetime
+# from uds.core.util.config import GlobalConfig
 
 logger = logging.getLogger(__name__)
 
 
 class Notifications(Job):
     frecuency = 60  # Once every minute
-    frecuency_cfg = GlobalConfig.CHECK_UNUSED_DELAY
+    # frecuency_cfg = GlobalConfig.XXXX
     friendly_name = 'Notifications worker'
 
     def run(self) -> None:
diff --git a/server/src/uds/core/workers/scheduler_housekeeping.py b/server/src/uds/core/workers/scheduler_housekeeping.py
index 4d9ca7978..8b74471b5 100644
--- a/server/src/uds/core/workers/scheduler_housekeeping.py
+++ b/server/src/uds/core/workers/scheduler_housekeeping.py
@@ -57,14 +57,13 @@ class SchedulerHousekeeping(Job):
         Look for "hanged" scheduler tasks and reschedule them
         """
         since = getSqlDatetime() - timedelta(minutes=MAX_EXECUTION_MINUTES)
-        for i in range(3):  # Retry three times in case of lockout error
+        for _ in range(3):  # Retry three times in case of lockout error
             try:
                 with transaction.atomic():
                     Scheduler.objects.select_for_update(skip_locked=True).filter(
                         last_execution__lt=since, state=State.RUNNING
                     ).update(owner_server='', state=State.FOR_EXECUTE)
                 break
-            except Exception as e:
+            except Exception:
                 logger.info('Retrying Scheduler cleanup transaction')
                 time.sleep(1)
-
diff --git a/server/src/uds/core/workers/servicepools_cache_updater.py b/server/src/uds/core/workers/servicepools_cache_updater.py
index 234c0150d..bfadc21a1 100644
--- a/server/src/uds/core/workers/servicepools_cache_updater.py
+++ b/server/src/uds/core/workers/servicepools_cache_updater.py
@@ -209,7 +209,11 @@ class ServiceCacheUpdater(Job):
         return servicesPools
 
     def growL1Cache(
-        self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int
+        self,
+        servicePool: ServicePool,
+        cacheL1: int,  # pylint: disable=unused-argument
+        cacheL2: int,
+        assigned: int,  # pylint: disable=unused-argument
     ) -> None:
         """
         This method tries to enlarge L1 cache.
@@ -268,7 +272,11 @@ class ServiceCacheUpdater(Job):
             logger.exception('Exception')
 
     def growL2Cache(
-        self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int
+        self,
+        servicePool: ServicePool,
+        cacheL1: int,  # pylint: disable=unused-argument
+        cacheL2: int,  # pylint: disable=unused-argument
+        assigned: int,  # pylint: disable=unused-argument
     ) -> None:
         """
         Tries to grow L2 cache of service.
@@ -293,7 +301,11 @@ class ServiceCacheUpdater(Job):
             # TODO: When alerts are ready, notify this
 
     def reduceL1Cache(
-        self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int
+        self,
+        servicePool: ServicePool,
+        cacheL1: int,  # pylint: disable=unused-argument
+        cacheL2: int,
+        assigned: int,  # pylint: disable=unused-argument
     ):
         logger.debug("Reducing L1 cache erasing a service in cache for %s", servicePool)
         # We will try to destroy the newest cacheL1 element that is USABLE if the deployer can't cancel a new service creation
@@ -334,7 +346,11 @@ class ServiceCacheUpdater(Job):
            cache.removeOrCancel()
 
     def reduceL2Cache(
-        self, servicePool: ServicePool, cacheL1: int, cacheL2: int, assigned: int
+        self,
+        servicePool: ServicePool,
+        cacheL1: int,  # pylint: disable=unused-argument
+        cacheL2: int,
+        assigned: int,  # pylint: disable=unused-argument
     ):
         logger.debug(
             "Reducing L2 cache erasing a service in cache for %s", servicePool.name
@@ -350,7 +366,7 @@ class ServiceCacheUpdater(Job):
             .order_by('creation_date')
         )
         # TODO: Look first for non finished cache items and cancel them?
-        cache: UserService = cacheItems[0]  # type: ignore # Slicing is not supported by pylance right now
+        cache: UserService = cacheItems[0]
         cache.removeOrCancel()
 
     def run(self) -> None:
diff --git a/server/src/uds/core/workers/stats_collector.py b/server/src/uds/core/workers/stats_collector.py
index c5de4f408..4c8207c75 100644
--- a/server/src/uds/core/workers/stats_collector.py
+++ b/server/src/uds/core/workers/stats_collector.py
@@ -32,8 +32,6 @@
 import logging
 import typing
 
-from django.utils.translation import gettext_lazy as _
-
 from uds import models
 from uds.core.util.state import State
 from uds.core.util.stats import counters
@@ -172,7 +170,7 @@ class StatsAccumulator(Job):
     def run(self):
         try:
             StatsManager.manager().acummulate(config.GlobalConfig.STATS_ACCUM_MAX_CHUNK_TIME.getInt())
-        except Exception as e:
+        except Exception:
             logger.exception('Compressing counters')
 
         logger.debug('Done statistics compression')
diff --git a/server/src/uds/core/workers/stuck_cleaner.py b/server/src/uds/core/workers/stuck_cleaner.py
index ed5168dac..1983a3152 100644
--- a/server/src/uds/core/workers/stuck_cleaner.py
+++ b/server/src/uds/core/workers/stuck_cleaner.py
@@ -84,7 +84,7 @@ class StuckCleaner(Job):
         # Info states are removed on UserServiceCleaner and VALID_STATES are ok, or if "hanged", checked on "HangedCleaner"
         def stuckUserServices(servicePool: ServicePool) -> typing.Iterable[UserService]:
             q = servicePool.userServices.filter(state_date__lt=since_state)
-            # Get all that are not in valid or info states, AND the ones that are "PREPARING" with 
+            # Get all that are not in valid or info states, AND the ones that are "PREPARING" with
             # "destroy_after" property set (exists) (that means that are waiting to be destroyed after initializations)
             yield from q.exclude(state__in=State.INFO_STATES + State.VALID_STATES)
             yield from q.filter(state=State.PREPARING, properties__name='destroy_after')
@@ -96,9 +96,7 @@ class StuckCleaner(Job):
             log.doLog(
                 servicePool,
                 log.ERROR,
-                'User service {} has been hard removed because it\'s stuck'.format(
-                    stuck.name
-                ),
+                f'User service {stuck.name} has been hard removed because it\'s stuck',
             )
             # stuck.setState(State.ERROR)
             stuck.delete()
diff --git a/server/src/uds/core/workers/userservice_cleaner.py b/server/src/uds/core/workers/userservice_cleaner.py
index 9d3805bfd..e99f13c62 100644
--- a/server/src/uds/core/workers/userservice_cleaner.py
+++ b/server/src/uds/core/workers/userservice_cleaner.py
@@ -78,7 +78,7 @@ class UserServiceRemover(Job):
         # This configuration value is cached at startup, so it is not updated until next reload
         removeAtOnce: int = GlobalConfig.USER_SERVICE_CLEAN_NUMBER.getInt()
         manager = managers.userServiceManager()
-        
+
         with transaction.atomic():
             removeFrom = getSqlDatetime() - timedelta(
                 seconds=10
@@ -89,7 +89,9 @@ class UserServiceRemover(Job):
                 state=State.REMOVABLE,
                 state_date__lt=removeFrom,
                 deployed_service__service__provider__maintenance_mode=False,
-            ).iterator(chunk_size=removeAtOnce)
+            ).iterator(
+                chunk_size=removeAtOnce
+            )  # We remove at once, but we limit the number of items to remove