
Improved a bit stats charts

Adolfo Gómez García 2024-07-04 18:55:52 +02:00
parent 43389248c8
commit 4163b5aae2
No known key found for this signature in database
GPG Key ID: DD1ABF20724CDA23
4 changed files with 128 additions and 83 deletions

View File

@@ -31,7 +31,6 @@
Author: Adolfo Gómez, dkmaster at dkmon dot com
"""
import codecs
import collections.abc
import datetime
import logging
import pickle # nosec: pickle is used to cache data, not to load it
@@ -61,43 +60,53 @@ CACHE_TIME = SINCE * 24 * 3600 // POINTS
def get_servicepools_counters(
servicePool: typing.Optional[models.ServicePool],
servicepool: typing.Optional[models.ServicePool],
counter_type: types.stats.CounterType,
since_days: int = SINCE,
) -> list[collections.abc.Mapping[str, typing.Any]]:
) -> list[dict[str, typing.Any]]:
val: list[dict[str, typing.Any]] = []
try:
cacheKey = (
(servicePool and str(servicePool.id) or 'all') + str(counter_type) + str(POINTS) + str(since_days)
cache_key = (
(servicepool and str(servicepool.id) or 'all') + str(counter_type) + str(POINTS) + str(since_days)
)
to = sql_now()
since: datetime.datetime = to - datetime.timedelta(days=since_days)
cachedValue: typing.Optional[bytes] = cache.get(cacheKey)
if not cachedValue:
if not servicePool:
cached_value: typing.Optional[bytes] = cache.get(cache_key)
if not cached_value:
if not servicepool:
us = models.ServicePool()
us.id = -1 # Global stats
else:
us = servicePool
val: list[collections.abc.Mapping[str, typing.Any]] = []
for x in counters.enumerate_counters(
us,
counter_type,
since=since,
to=to,
max_intervals=POINTS,
use_max=USE_MAX,
all=us.id == -1,
):
val.append({'stamp': x[0], 'value': int(x[1])})
logger.debug('val: %s', val)
us = servicepool
start = datetime.datetime.now()
logger.error(' - Getting counters at %s', start)
val = [
{
'stamp': x[0],
'value': int(x[1]),
}
for x in counters.enumerate_counters(
us,
counter_type,
since=since,
to=to,
max_intervals=POINTS,
use_max=USE_MAX,
all=us.id == -1,
)
]
logger.error(' - Getting counters took %s', datetime.datetime.now() - start)
# logger.debug('val: %s', val)
if len(val) >= 2:
cache.put(cacheKey, codecs.encode(pickle.dumps(val), 'zip'), CACHE_TIME * 2)
cache.put(cache_key, codecs.encode(pickle.dumps(val), 'zip'), CACHE_TIME * 2)
else:
val = [{'stamp': since, 'value': 0}, {'stamp': to, 'value': 0}]
else:
val = pickle.loads(
codecs.decode(cachedValue, 'zip')
codecs.decode(cached_value, 'zip')
) # nosec: pickle is used to cache data, not to load it
# return [{'stamp': since + datetime.timedelta(hours=i*10), 'value': i*i*counter_type//4} for i in range(300)]
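Aside: the cache round trip above (pickle the point list, compress it with the 'zip' codec, decode it again on a hit) can be tried in isolation. A minimal sketch follows; the SimpleCache class is a made-up stand-in for the UDS cache object, and only the codecs/pickle calls mirror the code in this hunk.

import codecs
import pickle
import time
import typing

class SimpleCache:
    """Hypothetical in-memory stand-in for the UDS cache object."""
    def __init__(self) -> None:
        self._store: dict[str, tuple[float, bytes]] = {}

    def put(self, key: str, value: bytes, ttl: int) -> None:
        self._store[key] = (time.time() + ttl, value)

    def get(self, key: str) -> typing.Optional[bytes]:
        item = self._store.get(key)
        if item is None or item[0] < time.time():
            return None
        return item[1]

cache = SimpleCache()
points = [{'stamp': 1720000000 + i * 600, 'value': i} for i in range(10)]

# Store: pickle the points and compress them with the 'zip' (zlib) codec
cache.put('pool-1:assigned', codecs.encode(pickle.dumps(points), 'zip'), ttl=3600)

# Load: decompress and unpickle on a cache hit
raw = cache.get('pool-1:assigned')
if raw is not None:
    cached_points = pickle.loads(codecs.decode(raw, 'zip'))  # nosec: our own cached data
    assert cached_points == points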
@@ -194,16 +203,22 @@ class System(Handler):
elif self.args[1] == 'cached':
return get_servicepools_counters(pool, counters.types.stats.CounterType.CACHED)
elif self.args[1] == 'complete':
start = datetime.datetime.now()
logger.error('** Getting assigned services at %s', start)
assigned = get_servicepools_counters(pool, counters.types.stats.CounterType.ASSIGNED)
logger.error(' * Assigned services took %s', datetime.datetime.now() - start)
start = datetime.datetime.now()
logger.error('** Getting inuse services at %s', start)
inuse = get_servicepools_counters(pool, counters.types.stats.CounterType.INUSE)
logger.error(' * Inuse services took %s', datetime.datetime.now() - start)
start = datetime.datetime.now()
logger.error('** Getting cached services at %s', start)
cached = get_servicepools_counters(pool, counters.types.stats.CounterType.CACHED)
logger.error(' * Cached services took %s', datetime.datetime.now() - start)
return {
'assigned': get_servicepools_counters(
pool, counters.types.stats.CounterType.ASSIGNED, since_days=7
),
'inuse': get_servicepools_counters(
pool, counters.types.stats.CounterType.INUSE, since_days=7
),
'cached': get_servicepools_counters(
pool, counters.types.stats.CounterType.CACHED, since_days=7
),
'assigned': assigned,
'inuse': inuse,
'cached': cached,
}
raise exceptions.rest.RequestError('invalid request')
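Aside: the three timing blocks above repeat the same start/elapsed pattern around each get_servicepools_counters call. Purely as an illustration (not part of this commit), a small context manager can express that pattern once; the label and the placeholder assignment below are made up.

import contextlib
import datetime
import logging
import typing

logger = logging.getLogger(__name__)

@contextlib.contextmanager
def timed(label: str) -> typing.Iterator[None]:
    # Log how long the wrapped block took; debug level is probably more
    # appropriate than the temporary logger.error calls in the diff.
    start = datetime.datetime.now()
    logger.debug('** %s started at %s', label, start)
    try:
        yield
    finally:
        logger.debug(' * %s took %s', label, datetime.datetime.now() - start)

# Usage (the assignment is a placeholder for a get_servicepools_counters call):
with timed('assigned services'):
    assigned = [{'stamp': 0, 'value': 0}]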

View File

@@ -161,15 +161,15 @@ class StatsManager(metaclass=singleton.Singleton):
Iterator, containing (date, counter) each element
"""
# To Unix epoch
sinceInt = int(time.mktime(since.timetuple()))
toInt = int(time.mktime(to.timetuple()))
since_stamp = int(time.mktime(since.timetuple()))
to_stamp = int(time.mktime(to.timetuple()))
return StatsCounters.get_grouped(
ownerType,
counterType,
owner_id=ownerIds,
since=sinceInt,
to=toInt,
since=since_stamp,
to=to_stamp,
interval=interval,
max_intervals=max_intervals,
limit=limit,
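Aside: the stamps handed to get_grouped are plain Unix epoch seconds. A tiny, self-contained illustration of the conversion used here (time.mktime over the naive datetime's timetuple); the dates are arbitrary.

import datetime
import time

since = datetime.datetime(2024, 7, 1, 0, 0, 0)   # naive local datetime, as used by the manager
to = since + datetime.timedelta(days=7)

# Same conversion as above: struct_time -> local-time epoch seconds
since_stamp = int(time.mktime(since.timetuple()))
to_stamp = int(time.mktime(to.timetuple()))
print(since_stamp, to_stamp, to_stamp - since_stamp)  # 7 days = 604800 seconds, barring a DST change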

View File

@@ -170,13 +170,13 @@ def enumerate_counters(
use_max = kwargs.get('use_max', False)
type_ = type(obj)
readFncTbl = _id_retriever.get(type_)
read_fnc_tbl = _id_retriever.get(type_)
if not readFncTbl:
if not read_fnc_tbl:
logger.error('Type %s has no registered stats', type_)
return
fnc = readFncTbl.get(counterType)
fnc = read_fnc_tbl.get(counterType)
if not fnc:
logger.error('Type %s has no registered stats of type %s', type_, counterType)
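Aside: enumerate_counters resolves its reader function through a two-level table keyed first by model type and then by counter type. The sketch below shows only that lookup-and-log-on-miss flow; the table contents, counter codes and ServicePool stub are hypothetical, not the real _id_retriever.

import logging
import typing

logger = logging.getLogger(__name__)

class ServicePool:  # hypothetical stand-in for the real model class
    id = -1

def _servicepool_ids(obj: 'ServicePool') -> list[int]:
    return [obj.id]

ASSIGNED, INUSE = 1, 2  # hypothetical counter type codes

# type -> (counter_type -> id-retriever function)
_id_retriever: dict[type, dict[int, typing.Callable[..., list[int]]]] = {
    ServicePool: {ASSIGNED: _servicepool_ids, INUSE: _servicepool_ids},
}

def ids_for(obj: typing.Any, counter_type: int) -> typing.Optional[list[int]]:
    read_fnc_tbl = _id_retriever.get(type(obj))
    if not read_fnc_tbl:
        logger.error('Type %s has no registered stats', type(obj))
        return None
    fnc = read_fnc_tbl.get(counter_type)
    if not fnc:
        logger.error('Type %s has no registered stats of type %s', type(obj), counter_type)
        return None
    return fnc(obj)

print(ids_for(ServicePool(), ASSIGNED))  # [-1]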

View File

@@ -30,6 +30,7 @@
"""
Author: Adolfo Gómez, dkmaster at dkmon dot com
"""
from collections import defaultdict
import typing
import collections.abc
import datetime
@@ -64,30 +65,19 @@ class StatsCounters(models.Model):
@staticmethod
def get_grouped(
owner_type: typing.Union[int, collections.abc.Iterable[int]], counter_type: int, **kwargs: typing.Any
owner_type: typing.Union[int, collections.abc.Iterable[int]],
counter_type: int,
since: typing.Union[None, int, datetime.datetime] = None,
to: typing.Union[None, int, datetime.datetime] = None,
owner_id: typing.Union[None, int, collections.abc.Iterable[int]] = None,
interval: typing.Optional[int] = None,
max_intervals: typing.Optional[int] = None,
use_max: bool = False,
limit: typing.Optional[int] = None,
) -> typing.Generator[tuple[int, int], None, None]:
"""
Yields (stamp, value) tuples grouped by interval for the given owner_type and counter_type
"""
if isinstance(owner_type, int):
owner_type = [owner_type]
q = StatsCounters.objects.filter(
owner_type__in=owner_type,
counter_type=counter_type,
)
if kwargs.get('owner_id'):
# If owner_id is a int, we add it to the list
if isinstance(kwargs['owner_id'], int):
kwargs['owner_id'] = [kwargs['owner_id']]
q = q.filter(owner_id__in=kwargs['owner_id'])
if q.count() == 0:
return
since = typing.cast('int', kwargs.get('since'))
if isinstance(since, datetime.datetime):
# Convert to unix timestamp
since = int(since.timestamp())
@@ -97,7 +87,6 @@ class StatsCounters(models.Model):
if first is None:
return # No data
since = first.stamp
to = typing.cast('int', kwargs.get('to'))
if isinstance(to, datetime.datetime):
# Convert to unix timestamp
to = int(to.timestamp())
@@ -108,41 +97,82 @@
return
to = last.stamp
q = q.filter(stamp__gte=since, stamp__lte=to)
q = StatsCounters.objects.filter(counter_type=counter_type, stamp__gte=since, stamp__lte=to)
if isinstance(owner_type, int):
q = q.filter(owner_type=owner_type)
else:
q = q.filter(owner_type__in=owner_type)
if owner_id:
# owner_id may be a single int or an iterable of ints
if isinstance(owner_id, int):
q = q.filter(owner_id=owner_id)
else:
q = q.filter(owner_id__in=owner_id)
if q.count() == 0:
return
interval = kwargs.get('interval') or 600
interval = interval or 600
# max_intervals, if present, adjusts the interval (which is in seconds)
max_intervals = kwargs.get('max_intervals') or 0
max_intervals = max_intervals or 0
start = datetime.datetime.now()
logger.error('Getting values at %s', start)
values = q.values_list('stamp', 'value')
logger.error('Elapsed time: %s', datetime.datetime.now() - start)
if max_intervals > 0:
count = q.count()
count = len(values)
max_intervals = max(min(max_intervals, count), 2)
interval = int(to - since) / max_intervals
interval = int(to - since) // max_intervals
if interval > 0:
q = q.extra( # type: ignore # nosec: SQL injection is not possible here
select={
'group_by_stamp': f'stamp - (stamp %% {interval})', # f'{floor}(stamp / {interval}) * {interval}',
},
)
# If interval is 0, we return the values as they are
if interval == 0:
yield from values
return
fnc = models.Avg('value') if not kwargs.get('use_max') else models.Max('value')
# If interval is greater than 0, we group by interval using average or max as requested
start = datetime.datetime.now()
logger.error('Grouping values at %s', start)
result: dict[int, int] = defaultdict(int)
for counter, i in enumerate(values, 1):
group_by_stamp = i[0] - (i[0] % interval)
if use_max:
result[group_by_stamp] = max(result[group_by_stamp], i[1])
else:
result[group_by_stamp] = (result[group_by_stamp] * (counter - 1) + i[1]) // counter
q = (
q.order_by('group_by_stamp') # type: ignore
.values('group_by_stamp')
.annotate(
value=fnc,
)
)
if kwargs.get('limit'):
q = q[: kwargs['limit']]
logger.error('Elapsed time: %s', datetime.datetime.now() - start)
for i in q.values('group_by_stamp', 'value'):
yield (int(i['group_by_stamp']), i['value'])
start = datetime.datetime.now()
logger.error('Yielding values at %s', start)
for k, v in result.items():
yield (k, v)
logger.error('Elapsed time: %s', datetime.datetime.now() - start)
# if interval > 0:
# q = q.extra( # type: ignore # nosec: SQL injection is not possible here
# select={
# 'group_by_stamp': f'stamp - (stamp %% {interval})', # f'{floor}(stamp / {interval}) * {interval}',
# },
# )
# fnc = models.Avg('value') if not use_max else models.Max('value')
# q = (
# q.order_by('group_by_stamp') # type: ignore
# .values('group_by_stamp')
# .annotate(
# value=fnc,
# )
# )
# if limit:
# q = q[: limit]
# for i in q.values('group_by_stamp', 'value'):
# yield (int(i['group_by_stamp']), i['value'])
def __str__(self) -> str:
return f'{datetime.datetime.fromtimestamp(self.stamp)} - {self.owner_id}:{self.owner_type}:{self.counter_type} {self.value}'
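Aside: the new grouping step replaces the SQL 'extra' grouping with a Python pass over (stamp, value) pairs, snapping each stamp to the start of its interval and keeping either the maximum or an average per bucket. The standalone sketch below shows that idea; keeping an explicit per-bucket count for the average is my assumption, not a transcription of the loop above.

from collections import defaultdict
import typing

def group_by_interval(
    values: typing.Iterable[tuple[int, int]],
    interval: int,
    use_max: bool = False,
) -> typing.Iterator[tuple[int, int]]:
    """Yield (bucket_stamp, value) pairs, one per interval bucket."""
    if interval <= 0:
        # Mirrors the interval == 0 case above: pass the values through untouched
        yield from values
        return
    totals: dict[int, int] = defaultdict(int)
    counts: dict[int, int] = defaultdict(int)
    maxima: dict[int, int] = defaultdict(int)  # values are non-negative counters here
    for stamp, value in values:
        bucket = stamp - (stamp % interval)  # snap to the start of the interval
        counts[bucket] += 1
        totals[bucket] += value
        maxima[bucket] = max(maxima[bucket], value)
    for bucket in sorted(counts):
        yield (bucket, maxima[bucket] if use_max else totals[bucket] // counts[bucket])

# Example: 10 samples spaced 120 seconds apart, grouped into 600-second buckets
samples = [(1720000000 + i * 120, i) for i in range(10)]
print(list(group_by_interval(samples, 600)))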