1
0
mirror of https://github.com/dkmstr/openuds.git synced 2025-01-18 06:03:54 +03:00

Changed stats graphs on admin to use days instead of hours

This commit is contained in:
Adolfo Gómez García 2024-07-04 20:16:27 +02:00
parent 4163b5aae2
commit e5035efafb
No known key found for this signature in database
GPG Key ID: DD1ABF20724CDA23
4 changed files with 39 additions and 45 deletions

View File

@ -54,9 +54,9 @@ cache = Cache('StatsDispatcher')
# Enclosed methods under /stats path
POINTS = 70
SINCE = 7 # Days, if higher values used, ensure mysql/mariadb has a bigger sort buffer
SINCE = 180 # Days, if higher values used, ensure mysql/mariadb has a bigger sort buffer
USE_MAX = True
CACHE_TIME = SINCE * 24 * 3600 // POINTS
CACHE_TIME = 60 * 60 # 1 hour
def get_servicepools_counters(
@ -80,24 +80,37 @@ def get_servicepools_counters(
else:
us = servicepool
start = datetime.datetime.now()
logger.error(' - Getting counters at %s', start)
stats = counters.get_accumulated_counters(
interval_type=models.StatsCountersAccum.IntervalType.DAY,
counter_type=counter_type,
owner_type=types.stats.CounterOwnerType.SERVICEPOOL,
owner_id=us.id if us.id != -1 else None,
since=since,
        points=since_days,  # One point per day
)
val = [
{
'stamp': x[0],
'value': int(x[1]),
'stamp': x.stamp,
'value': (x.sum // x.count if x.count > 0 else 0) if not USE_MAX else x.max,
}
for x in counters.enumerate_counters(
us,
counter_type,
since=since,
to=to,
max_intervals=POINTS,
use_max=USE_MAX,
all=us.id == -1,
)
for x in stats
]
logger.error(' - Getting counters took %s', datetime.datetime.now() - start)
# val = [
# {
# 'stamp': x[0],
# 'value': int(x[1]),
# }
# for x in counters.enumerate_counters(
# us,
# counter_type,
# since=since,
# to=to,
# max_intervals=POINTS,
# use_max=USE_MAX,
# all=us.id == -1,
# )
# ]
# logger.debug('val: %s', val)
if len(val) >= 2:
@ -203,18 +216,9 @@ class System(Handler):
elif self.args[1] == 'cached':
return get_servicepools_counters(pool, counters.types.stats.CounterType.CACHED)
elif self.args[1] == 'complete':
start = datetime.datetime.now()
logger.error('** Getting assigned services at %s', start)
assigned = get_servicepools_counters(pool, counters.types.stats.CounterType.ASSIGNED)
logger.error(' * Assigned services took %s', datetime.datetime.now() - start)
start = datetime.datetime.now()
logger.error('** Getting inuse services at %s', start)
inuse = get_servicepools_counters(pool, counters.types.stats.CounterType.INUSE)
logger.error(' * Inuse services took %s', datetime.datetime.now() - start)
start = datetime.datetime.now()
logger.error('** Getting cached services at %s', start)
cached = get_servicepools_counters(pool, counters.types.stats.CounterType.CACHED)
logger.error(' * Cached services took %s', datetime.datetime.now() - start)
return {
'assigned': assigned,
'inuse': inuse,

View File

@ -62,7 +62,7 @@ _REVERSE_FLDS_EQUIV: typing.Final[collections.abc.Mapping[str, str]] = {
@dataclasses.dataclass
class AccumStat:
stamp: int
n: int # Number of elements in this interval
count: int # Number of elements in this interval
sum: int # Sum of elements in this interval
max: int # Max of elements in this interval
min: int # Min of elements in this interval
@ -198,11 +198,12 @@ class StatsManager(metaclass=singleton.Singleton):
interval_type=intervalType,
counter_type=counterType,
stamp__gte=since,
).order_by('stamp')[0:points]
).order_by('stamp')
if owner_type is not None:
query = query.filter(owner_type=owner_type)
if owner_id is not None:
query = query.filter(owner_id=owner_id)
query = query[:points]
# Yields all data, stamp, n, sum, max, min (stamp, v_count,v_sum,v_max,v_min)
# Now, get exactly the points we need

View File

@ -202,17 +202,17 @@ def enumerate_counters(
def get_accumulated_counters(
intervalType: StatsCountersAccum.IntervalType,
counterType: types.stats.CounterType,
onwer_type: typing.Optional[types.stats.CounterOwnerType] = None,
interval_type: StatsCountersAccum.IntervalType,
counter_type: types.stats.CounterType,
owner_type: typing.Optional[types.stats.CounterOwnerType] = None,
owner_id: typing.Optional[int] = None,
since: typing.Optional[typing.Union[datetime.datetime, int]] = None,
points: typing.Optional[int] = None,
) -> typing.Generator[AccumStat, None, None]:
yield from StatsManager.manager().get_accumulated_counters(
intervalType=intervalType,
counterType=counterType,
owner_type=onwer_type,
intervalType=interval_type,
counterType=counter_type,
owner_type=owner_type,
owner_id=owner_id,
since=since,
points=points,

View File

@ -118,10 +118,7 @@ class StatsCounters(models.Model):
# Max intervals, if present, will adjust interval (that are seconds)
max_intervals = max_intervals or 0
start = datetime.datetime.now()
logger.error('Getting values at %s', start)
values = q.values_list('stamp', 'value')
logger.error('Elapsed time: %s', datetime.datetime.now() - start)
if max_intervals > 0:
count = len(values)
max_intervals = max(min(max_intervals, count), 2)
@ -133,8 +130,6 @@ class StatsCounters(models.Model):
return
# If interval is greater than 0, we group by interval using average or max as requested
start = datetime.datetime.now()
logger.error('Grouping values at %s', start)
result: dict[int, int] = defaultdict(int)
for counter, i in enumerate(values, 1):
group_by_stamp = i[0] - (i[0] % interval)
@ -143,15 +138,9 @@ class StatsCounters(models.Model):
else:
result[group_by_stamp] = (result[group_by_stamp] * (counter - 1) + i[1]) // counter
logger.error('Elapsed time: %s', datetime.datetime.now() - start)
start = datetime.datetime.now()
logger.error('Yielding values at %s', start)
for k, v in result.items():
yield (k, v)
logger.error('Elapsed time: %s', datetime.datetime.now() - start)
# if interval > 0:
# q = q.extra( # type: ignore # nosec: SQL injection is not possible here
# select={