mirror of https://github.com/dkmstr/openuds.git synced 2025-01-03 01:17:56 +03:00

Changed stats graphs on admin to use days instead of hours

This commit is contained in:
Adolfo Gómez García 2024-07-04 20:16:27 +02:00
parent 4163b5aae2
commit e5035efafb
4 changed files with 39 additions and 45 deletions

View File

@@ -54,9 +54,9 @@ cache = Cache('StatsDispatcher')

 # Enclosed methods under /stats path
 POINTS = 70
-SINCE = 7  # Days, if higher values used, ensure mysql/mariadb has a bigger sort buffer
+SINCE = 180  # Days, if higher values used, ensure mysql/mariadb has a bigger sort buffer
 USE_MAX = True
-CACHE_TIME = SINCE * 24 * 3600 // POINTS
+CACHE_TIME = 60 * 60  # 1 hour


 def get_servicepools_counters(
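A quick sanity check on the new constants (a back-of-the-envelope sketch, values taken from the hunk above): the old formula tied the cache TTL to the graph resolution, which would have ballooned once the window grew from 7 to 180 days, so the new code pins it to a fixed hour instead.

    POINTS = 70

    old_ttl = 7 * 24 * 3600 // POINTS          # old formula with SINCE = 7   -> 8640 s (~2.4 h)
    kept_formula = 180 * 24 * 3600 // POINTS   # same formula with SINCE = 180 -> 222171 s (~2.6 days)
    new_ttl = 60 * 60                          # new fixed value               -> 3600 s (1 h)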
@@ -80,25 +80,38 @@ def get_servicepools_counters(
     else:
         us = servicepool
-    start = datetime.datetime.now()
-    logger.error(' - Getting counters at %s', start)
+    stats = counters.get_accumulated_counters(
+        interval_type=models.StatsCountersAccum.IntervalType.DAY,
+        counter_type=counter_type,
+        owner_type=types.stats.CounterOwnerType.SERVICEPOOL,
+        owner_id=us.id if us.id != -1 else None,
+        since=since,
+        points=since_days,  # One point per day
+    )

     val = [
         {
-            'stamp': x[0],
-            'value': int(x[1]),
+            'stamp': x.stamp,
+            'value': (x.sum // x.count if x.count > 0 else 0) if not USE_MAX else x.max,
         }
-        for x in counters.enumerate_counters(
-            us,
-            counter_type,
-            since=since,
-            to=to,
-            max_intervals=POINTS,
-            use_max=USE_MAX,
-            all=us.id == -1,
-        )
+        for x in stats
     ]
-    logger.error(' - Getting counters took %s', datetime.datetime.now() - start)
+    # val = [
+    #     {
+    #         'stamp': x[0],
+    #         'value': int(x[1]),
+    #     }
+    #     for x in counters.enumerate_counters(
+    #         us,
+    #         counter_type,
+    #         since=since,
+    #         to=to,
+    #         max_intervals=POINTS,
+    #         use_max=USE_MAX,
+    #         all=us.id == -1,
+    #     )
+    # ]

     # logger.debug('val: %s', val)
     if len(val) >= 2:
         cache.put(cache_key, codecs.encode(pickle.dumps(val), 'zip'), CACHE_TIME * 2)
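For reference, the new per-point computation reads as a small helper (a minimal sketch, assuming the AccumStat fields shown in the second file; use_max mirrors the USE_MAX flag above):

    def point_value(x: 'AccumStat', use_max: bool = USE_MAX) -> int:
        # Peak value of the interval, or its average, guarding against empty intervals.
        if use_max:
            return x.max
        return x.sum // x.count if x.count > 0 else 0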
@@ -203,18 +216,9 @@ class System(Handler):
         elif self.args[1] == 'cached':
             return get_servicepools_counters(pool, counters.types.stats.CounterType.CACHED)
         elif self.args[1] == 'complete':
-            start = datetime.datetime.now()
-            logger.error('** Getting assigned services at %s', start)
             assigned = get_servicepools_counters(pool, counters.types.stats.CounterType.ASSIGNED)
-            logger.error(' * Assigned services took %s', datetime.datetime.now() - start)
-            start = datetime.datetime.now()
-            logger.error('** Getting inuse services at %s', start)
             inuse = get_servicepools_counters(pool, counters.types.stats.CounterType.INUSE)
-            logger.error(' * Inuse services took %s', datetime.datetime.now() - start)
-            start = datetime.datetime.now()
-            logger.error('** Getting cached services at %s', start)
             cached = get_servicepools_counters(pool, counters.types.stats.CounterType.CACHED)
-            logger.error(' * Cached services took %s', datetime.datetime.now() - start)
             return {
                 'assigned': assigned,
                 'inuse': inuse,

View File

@@ -62,7 +62,7 @@ _REVERSE_FLDS_EQUIV: typing.Final[collections.abc.Mapping[str, str]] = {
 @dataclasses.dataclass
 class AccumStat:
     stamp: int
-    n: int  # Number of elements in this interval
+    count: int  # Number of elements in this interval
     sum: int  # Sum of elements in this interval
     max: int  # Max of elements in this interval
     min: int  # Min of elements in this interval
@@ -198,11 +198,12 @@ class StatsManager(metaclass=singleton.Singleton):
             interval_type=intervalType,
             counter_type=counterType,
             stamp__gte=since,
-        ).order_by('stamp')[0:points]
+        ).order_by('stamp')
         if owner_type is not None:
             query = query.filter(owner_type=owner_type)
         if owner_id is not None:
             query = query.filter(owner_id=owner_id)
+        query = query[:points]

         # Yields all data, stamp, n, sum, max, min (stamp, v_count,v_sum,v_max,v_min)
         # Now, get exactly the points we need
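The reshuffle here is not cosmetic: Django refuses to filter a queryset once a slice has been taken (filter() then raises TypeError: "Cannot filter a query once a slice has been taken."), and the old code sliced with [0:points] before applying the optional owner filters. A minimal sketch of the general pattern, with illustrative names:

    # Build all filters first, slice last; model and field names are illustrative.
    qs = StatsCountersAccum.objects.filter(stamp__gte=since).order_by('stamp')
    if owner_type is not None:
        qs = qs.filter(owner_type=owner_type)  # still allowed: no slice taken yet
    qs = qs[:points]                           # slicing ends filtering; do it once, at the end
    # any qs.filter(...) past this point would raise TypeError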

View File

@@ -202,17 +202,17 @@ def enumerate_counters(


 def get_accumulated_counters(
-    intervalType: StatsCountersAccum.IntervalType,
-    counterType: types.stats.CounterType,
-    onwer_type: typing.Optional[types.stats.CounterOwnerType] = None,
+    interval_type: StatsCountersAccum.IntervalType,
+    counter_type: types.stats.CounterType,
+    owner_type: typing.Optional[types.stats.CounterOwnerType] = None,
     owner_id: typing.Optional[int] = None,
     since: typing.Optional[typing.Union[datetime.datetime, int]] = None,
     points: typing.Optional[int] = None,
 ) -> typing.Generator[AccumStat, None, None]:
     yield from StatsManager.manager().get_accumulated_counters(
-        intervalType=intervalType,
-        counterType=counterType,
-        owner_type=onwer_type,
+        intervalType=interval_type,
+        counterType=counter_type,
+        owner_type=owner_type,
         owner_id=owner_id,
         since=since,
         points=points,
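With the rename, callers use snake_case while the wrapper keeps forwarding to the manager's camelCase keywords, and the onwer_type typo is gone. A hypothetical call with the new names, mirroring the dispatcher change in the first file (import paths are assumptions; parameter names come from the diff):

    stats = counters.get_accumulated_counters(
        interval_type=models.StatsCountersAccum.IntervalType.DAY,
        counter_type=types.stats.CounterType.ASSIGNED,
        owner_type=types.stats.CounterOwnerType.SERVICEPOOL,
        owner_id=None,  # None -> aggregate over all service pools
        since=since,
        points=180,     # one point per day over the window
    )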

View File

@@ -118,10 +118,7 @@ class StatsCounters(models.Model):
         # Max intervals, if present, will adjust interval (that are seconds)
         max_intervals = max_intervals or 0

-        start = datetime.datetime.now()
-        logger.error('Getting values at %s', start)
         values = q.values_list('stamp', 'value')
-        logger.error('Elapsed time: %s', datetime.datetime.now() - start)
         if max_intervals > 0:
             count = len(values)
             max_intervals = max(min(max_intervals, count), 2)
@@ -133,8 +130,6 @@ class StatsCounters(models.Model):
             return

         # If interval is greater than 0, we group by interval using average or max as requested
-        start = datetime.datetime.now()
-        logger.error('Grouping values at %s', start)
         result: dict[int, int] = defaultdict(int)
         for counter, i in enumerate(values, 1):
             group_by_stamp = i[0] - (i[0] % interval)
@@ -143,15 +138,9 @@ class StatsCounters(models.Model):
             else:
                 result[group_by_stamp] = (result[group_by_stamp] * (counter - 1) + i[1]) // counter

-        logger.error('Elapsed time: %s', datetime.datetime.now() - start)
-        start = datetime.datetime.now()
-        logger.error('Yielding values at %s', start)
         for k, v in result.items():
             yield (k, v)
-        logger.error('Elapsed time: %s', datetime.datetime.now() - start)

        # if interval > 0:
        #     q = q.extra(  # type: ignore  # nosec: SQL injection is not possible here
        #         select={
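Aside from the logging cleanup, the grouping key used in the loop above (group_by_stamp = i[0] - (i[0] % interval)) simply floors each timestamp to its interval boundary. A quick worked example with hour-wide buckets and hypothetical stamps:

    interval = 3600  # one-hour buckets, for illustration
    for stamp in (7200, 7325, 10799, 10800):
        print(stamp, '->', stamp - (stamp % interval))
    # 7200 -> 7200, 7325 -> 7200, 10799 -> 7200, 10800 -> 10800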