forked from shaba/openuds
Removed last sources of possible slowness when showing services
This commit is contained in:
parent 6ff997b054
commit 786e419b27
@@ -41,7 +41,7 @@ logger = logging.getLogger(__name__)
 # Pair of section/value removed from current UDS version
 REMOVED = {
-    'UDS': ('allowPreferencesAccess', 'customHtmlLogin', 'UDS Theme', 'UDS Theme Enhaced', 'css'),
+    'UDS': ('allowPreferencesAccess', 'customHtmlLogin', 'UDS Theme', 'UDS Theme Enhaced', 'css', 'allowPreferencesAccess'),
     'Cluster': ('Destination CPU Load', 'Migration CPU Load', 'Migration Free Memory'),
     'IPAUTH': ('autoLogin',),
     'VMWare': ('minUsableDatastoreGB', 'maxRetriesOnError'),

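Note: this REMOVED map pairs a config section with the value names that no longer exist in current UDS. Below is a minimal, hypothetical sketch of how such a map is typically consumed during an upgrade cleanup; a plain dict stands in for the real config table, and removeStaleEntries is an illustrative name, not UDS API:

import typing

REMOVED: typing.Dict[str, typing.Tuple[str, ...]] = {
    'UDS': ('allowPreferencesAccess', 'customHtmlLogin', 'css'),
    'IPAUTH': ('autoLogin',),
}

def removeStaleEntries(store: typing.Dict[str, typing.Dict[str, str]]) -> None:
    # Delete every (section, value) pair listed in REMOVED; absent keys are ignored
    for section, keys in REMOVED.items():
        sectionStore = store.get(section, {})
        for key in keys:
            sectionStore.pop(key, None)

cfg = {'UDS': {'css': 'old-style', 'keepMe': '1'}}
removeStaleEntries(cfg)
print(cfg)  # {'UDS': {'keepMe': '1'}}
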
@@ -36,6 +36,8 @@ import logging
 
 import bitarray
 
+from django.core.cache import caches
+
 from uds.models.util import NEVER
 from uds.models.util import getSqlDatetime

@@ -135,11 +137,15 @@ class CalendarChecker:
         if dtime is None:
             dtime = getSqlDatetime()
 
+        # memcached access
+        memCache = caches['memory']
+
         # First, try to get data from cache if it is valid
         cacheKey = str(hash(self.calendar.modified)) + str(dtime.date().toordinal()) + self.calendar.uuid + 'checker'
-        cached = CalendarChecker.cache.get(cacheKey, None)
+        # First, check "local memory cache", and if not found, from DB cache
+        cached = memCache.get(cacheKey) or CalendarChecker.cache.get(cacheKey, None)
 
-        if cached is not None:
+        if cached:
             data = bitarray.bitarray()  # Empty bitarray
             data.frombytes(cached)
             CalendarChecker.cache_hit += 1

@@ -149,6 +155,7 @@ class CalendarChecker:
         # Now data can be accessed as an array of booleans.
         # Store data on persistent cache
         CalendarChecker.cache.put(cacheKey, data.tobytes(), 3600 * 24)
+        memCache.set(cacheKey, data.tobytes(), 3600 * 24)
 
         return data[dtime.hour * 60 + dtime.minute]

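Note: the two CalendarChecker hunks above put a process-local memory cache in front of the persistent DB cache and repopulate both levels on a miss. The switch from "if cached is not None:" to "if cached:" additionally treats an empty byte string as a miss, which is safe here because the stored bitarray covers a full day and is never empty. A minimal standalone sketch of the read-through two-level pattern, with plain dicts standing in for caches['memory'] and CalendarChecker.cache:

import typing

localCache: typing.Dict[str, bytes] = {}       # stands in for caches['memory']
persistentCache: typing.Dict[str, bytes] = {}  # stands in for CalendarChecker.cache

def getOrCompute(key: str, compute: typing.Callable[[], bytes]) -> bytes:
    # Fast path: in-process hit; slower path: persistent hit; both skip compute()
    cached = localCache.get(key) or persistentCache.get(key)
    if cached:
        return cached
    data = compute()              # Miss on both levels: do the expensive work once
    persistentCache[key] = data   # Survives process restarts (DB-backed in UDS)
    localCache[key] = data        # Fastest level for later calls in this process
    return data
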
@@ -295,7 +295,7 @@ class GlobalConfig:
     DISALLOW_GLOBAL_LOGIN: Config.Value = Config.section(GLOBAL_SECTION).value('disallowGlobalLogin', '0', type=Config.BOOLEAN_FIELD)
 
     # Allows preferences access to users
-    PREFERENCES_ALLOWED: Config.Value = Config.section(GLOBAL_SECTION).value('allowPreferencesAccess', '1', type=Config.BOOLEAN_FIELD)
+    NOTIFY_REMOVAL_BY_PUB: Config.Value = Config.section(GLOBAL_SECTION).value('Notify on new publication', '0', type=Config.BOOLEAN_FIELD)
 
     # Allowed "trusted sources" for request
     TRUSTED_SOURCES: Config.Value = Config.section(SECURITY_SECTION).value('Trusted Hosts', '*', type=Config.TEXT_FIELD)

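Note: NOTIFY_REMOVAL_BY_PUB is the new switch that gates the toBeReplaced work in the getServicesData hunk at the end of this diff; it defaults to '0' (off). A usage sketch mirroring that later call (import path assumed for illustration):

from uds.core.util.config import GlobalConfig  # path assumed

if GlobalConfig.NOTIFY_REMOVAL_BY_PUB.getBool(False):
    pass  # only then compute the scheduled-removal date for each service
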
@@ -440,14 +440,16 @@ class ServicePool(UUIDModel, TaggingMixin):  # type: ignore
         List of accessible deployed services
         """
         from uds.core import services
+        servicesNotNeedingPub = [t.type() for t in services.factory().servicesThatDoNotNeedPublication()]
         # Get services that HAVE publications
-        list1 = (
+        query = (
             ServicePool.objects.filter(
                 assignedGroups__in=groups,
                 assignedGroups__state=states.group.ACTIVE,
                 state=states.servicePool.ACTIVE,
                 visible=True
-            ).distinct().annotate(cuenta=models.Count('publications')).exclude(cuenta=0)
+            )
+            .annotate(pubs_active=models.Count('publications', filter=models.Q(publications__state=states.publication.USABLE)))
             .prefetch_related(
                 'transports',
                 'transports__networks',

@@ -464,32 +466,9 @@ class ServicePool(UUIDModel, TaggingMixin):  # type: ignore
                 'image'
             )
         )
-        # Now get deployed services that DO NOT NEED publication
-        doNotNeedPublishing = [t.type() for t in services.factory().servicesThatDoNotNeedPublication()]
-        list2 = (
-            ServicePool.objects.filter(
-                assignedGroups__in=groups,
-                assignedGroups__state=states.group.ACTIVE,
-                service__data_type__in=doNotNeedPublishing,
-                state=states.servicePool.ACTIVE,
-                visible=True
-            )
-            .prefetch_related(
-                'transports',
-                'transports__networks',
-                'memberOfMeta',
-                'servicesPoolGroup',
-                'servicesPoolGroup__image',
-                'service',
-                'service__provider',
-                'calendarAccess',
-                'calendarAccess__calendar',
-                'calendarAccess__calendar__rules',
-                'image'
-            )
-        )
 
         if user:  # Optimize loading if there is some assigned service..
-            list1 = list1.annotate(
+            query = query.annotate(
                 number_in_use=models.Count(
                     'userServices', filter=models.Q(
                         userServices__user=user,

@@ -498,17 +477,8 @@ class ServicePool(UUIDModel, TaggingMixin):  # type: ignore
                     )
                 )
             )
-            list2 = list2.annotate(
-                number_in_use=models.Count(
-                    'userServices', filter=models.Q(
-                        userServices__user=user,
-                        userServices__in_use=True,
-                        userServices__state__in=states.userService.USABLE
-                    )
-                )
-            )
-        # And generate a single list without duplicates
-        return list(set([r for r in list1] + [r for r in list2]))
+        servicePool: 'ServicePool'
+        return [servicePool for servicePool in query if servicePool.pubs_active or servicePool.service.data_type in servicesNotNeedingPub]
 
     def publish(self, changeLog: typing.Optional[str] = None) -> None:
         """

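Note: the three ServicePool hunks above replace two querysets (pools that have publications, plus pools whose service type needs none) and a deduplicating set() union with a single annotated queryset filtered in one pass. A runnable plain-Python sketch of the same idea; a NamedTuple stands in for the ORM model and all names are illustrative:

import typing

class Pool(typing.NamedTuple):
    name: str
    pubs_active: int   # mirrors the pubs_active annotation
    data_type: str     # mirrors service.data_type

servicesNotNeedingPub = {'ip-machines'}  # hypothetical service type

pools = [
    Pool('withPub', pubs_active=1, data_type='vmware'),
    Pool('noPubNeeded', pubs_active=0, data_type='ip-machines'),
    Pool('notReady', pubs_active=0, data_type='vmware'),
]

# One traversal replaces the old list1 + list2 + set() dance; duplicates are
# impossible because each pool is inspected exactly once.
visible = [p for p in pools if p.pubs_active or p.data_type in servicesNotNeedingPub]
print([p.name for p in visible])  # ['withPub', 'noPubNeeded']
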
@@ -188,7 +188,8 @@ def getServicesData(request: 'HttpRequest') -> typing.Dict[str, typing.Any]:  #
 
         group = svr.servicesPoolGroup.as_dict if svr.servicesPoolGroup else ServicePoolGroup.default().as_dict
 
-        toBeReplaced = svr.toBeReplaced(request.user)
+        # Only add toBeReplaced info in case we allow it. This will generate some "overload" on the services
+        toBeReplaced = svr.toBeReplaced(request.user) if svr.pubs_active > 0 and GlobalConfig.NOTIFY_REMOVAL_BY_PUB.getBool(False) else None
         # tbr = False
         if toBeReplaced:
             toBeReplaced = formats.date_format(toBeReplaced, "SHORT_DATETIME_FORMAT")

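Note: this last hunk is where the pubs_active annotation and the new config switch pay off: the potentially slow toBeReplaced() lookup now runs only for pools with an active publication, and only when the administrator has enabled the notification. A runnable sketch of the guard, with a stub class and illustrative names:

import datetime
import typing

class PoolStub:
    pubs_active = 1
    def toBeReplaced(self, user: str) -> typing.Optional[datetime.datetime]:
        return datetime.datetime(2030, 1, 1)  # stands in for a slow DB lookup

def replacementInfo(svr: PoolStub, user: str, notifyEnabled: bool) -> typing.Optional[datetime.datetime]:
    # Short-circuit: with the flag off (the new default) toBeReplaced() never runs
    return svr.toBeReplaced(user) if svr.pubs_active > 0 and notifyEnabled else None

print(replacementInfo(PoolStub(), 'admin', notifyEnabled=False))  # None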