mirror of https://github.com/dkmstr/openuds.git synced 2025-03-23 18:50:22 +03:00

Merge remote-tracking branch 'origin/v3.6'

Adolfo Gómez García 2022-11-02 21:45:32 +01:00
commit 54663e756d
No known key found for this signature in database
GPG Key ID: DD1ABF20724CDA23
4 changed files with 22 additions and 145 deletions
server/src/uds
    REST/methods
    core/util/stats
    migrations
    models

@@ -53,7 +53,7 @@ if typing.TYPE_CHECKING:
cache = Cache('StatsDispatcher')
# Enclosed methods under /stats path
-POINTS = 150
+POINTS = 70
SINCE = 7 # Days, if higher values used, ensure mysql/mariadb has a bigger sort buffer
USE_MAX = True
CACHE_TIME = SINCE * 24 * 3600 // POINTS
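Side note (not part of the commit): CACHE_TIME is derived from these constants, so lowering POINTS from 150 to 70 roughly doubles the window each cached point covers. A quick illustrative check:

# Illustrative arithmetic only, reusing the constants shown above
SINCE = 7
for points in (150, 70):
    print(points, SINCE * 24 * 3600 // points)  # 150 -> 4032 s, 70 -> 8640 s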

@@ -214,7 +214,7 @@ def getCounters(
limit,
use_max,
):
-yield (datetime.datetime.fromtimestamp(i.stamp), i.value)
+yield (datetime.datetime.fromtimestamp(i[0]), i[1])
def getCounterTitle(counterType: int) -> str:
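The change above follows the new contract of StatsCounters.get_grouped() later in this commit: it now yields plain (stamp, value) tuples instead of StatsCounters instances, so the REST method indexes into the tuple before converting the unix stamp. A minimal sketch of that consuming side, assuming rows are (unix_stamp, value) integer pairs:

# Hedged sketch, not the actual method body
import datetime
import typing

def to_points(rows: typing.Iterable[typing.Tuple[int, int]]) -> typing.Iterator[typing.Tuple[datetime.datetime, int]]:
    # each row is (unix_stamp, value), as yielded by StatsCounters.get_grouped
    for stamp, value in rows:
        yield (datetime.datetime.fromtimestamp(stamp), value)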

@@ -2,6 +2,7 @@
from django.db import migrations, models
import django.db.models.deletion
+import uds.models.stats_counters
class Migration(migrations.Migration):
@@ -38,6 +39,11 @@ class Migration(migrations.Migration):
name='mfa',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='authenticators', to='uds.mfa'),
),
+migrations.AddField(
+model_name='statscounters',
+name='interval_type',
+field=models.SmallIntegerField(db_index=True, default=uds.models.stats_counters.StatsCounters.CounterIntervalType['NONE']),
+),
migrations.RemoveIndex(
model_name='statscounters',
name='uds_stats_c_owner_t_db894d_idx',
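The default in the AddField above references the CounterIntervalType enum introduced in uds.models.stats_counters by this same commit. As a quick illustrative check, looking an IntEnum member up by name returns the member itself, and Django stores its integer value (0 here) in the new SmallIntegerField:

# Illustrative only; mirrors the enum added to StatsCounters further down
import enum

class CounterIntervalType(enum.IntEnum):
    NONE = 0
    MINUTE = 1
    HOUR = 2
    DAY = 3
    WEEK = 4

assert CounterIntervalType['NONE'] is CounterIntervalType.NONE
assert int(CounterIntervalType['NONE']) == 0  # value persisted by the migration default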

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
#
-# Copyright (c) 2012-2020 Virtual Cable S.L.U.
+# Copyright (c) 2012-2022 Virtual Cable S.L.U.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
@@ -31,7 +31,7 @@
.. moduleauthor:: Adolfo Gómez, dkmaster at dkmon dot com
"""
import typing
-import types
+import enum
import datetime
import logging
@@ -49,15 +49,23 @@ class StatsCounters(models.Model):
"""
Statistics about counters (number of users at a given time, number of services at a time, whatever...)
"""
+# Valid intervals types for counters data
+class CounterIntervalType(enum.IntEnum):
+NONE = 0
+MINUTE = 1
+HOUR = 2
+DAY = 3
+WEEK = 4
owner_id = models.IntegerField(db_index=True, default=0)
owner_type = models.SmallIntegerField(db_index=True, default=0)
counter_type = models.SmallIntegerField(db_index=True, default=0)
stamp = models.IntegerField(db_index=True, default=0)
+interval_type = models.SmallIntegerField(db_index=True, default=CounterIntervalType.NONE)
value = models.IntegerField(db_index=True, default=0)
# "fake" declarations for type checking
-# objects: 'models.manager.Manager[StatsCounters]'
+objects: 'models.manager.Manager[StatsCounters]'
class Meta:
"""
@@ -70,7 +78,7 @@ class StatsCounters(models.Model):
@staticmethod
def get_grouped(
owner_type: typing.Union[int, typing.Iterable[int]], counter_type: int, **kwargs
-) -> typing.Generator['StatsCounters', None, None]:
+) -> typing.Generator[typing.Tuple[int, int], None, None]:
"""
Returns a QuerySet of counters grouped by owner_type and counter_type
"""
@@ -136,145 +144,8 @@
if kwargs.get('limit'):
q = q[:kwargs['limit']]
-for i in q:
-yield StatsCounters(id=-1, owner_type=-1, counter_type=-1, stamp=i['group_by_stamp'], value=i['value'])
-@staticmethod
-def gp(
-owner_type: typing.Union[int, typing.Iterable[int]], counter_type: int, **kwargs
-) -> typing.Any:
-"""
-Returns a QuerySet of counters grouped by owner_type and counter_type
-"""
-if isinstance(owner_type, int):
-owner_type = [owner_type]
-q = StatsCounters.objects.filter(owner_type__in=owner_type, counter_type=counter_type)
-if kwargs.get('owner_id'):
-# If owner_id is a int, we add it to the list
-if isinstance(kwargs['owner_id'], int):
-kwargs['owner_id'] = [kwargs['owner_id']]
-q = q.filter(owner_id__in=kwargs['owner_id'])
-since = kwargs.get('since')
-if isinstance(since, datetime.datetime):
-# Convert to unix timestamp
-since = int(since.timestamp())
-if not since:
-# Get first timestamp from table, we knwo table has at least one record
-since = StatsCounters.objects.order_by('stamp').first().stamp # type: ignore
-to = kwargs.get('to')
-if isinstance(to, datetime.datetime):
-# Convert to unix timestamp
-to = int(to.timestamp())
-if not to:
-# Get last timestamp from table, we know table has at least one record
-to = StatsCounters.objects.order_by('-stamp').first().stamp # type: ignore
-q = q.filter(stamp__gte=since, stamp__lte=to)
-return q
-@staticmethod
-def get_grouped_old(
-owner_type: typing.Union[int, typing.Iterable[int]], counter_type: int, **kwargs
-) -> 'models.QuerySet[StatsCounters]':
-"""
-Returns the average stats grouped by interval for owner_type and owner_id (optional)
-Note: if someone cant get this more optimized, please, contribute it!
-"""
-filt = 'owner_type'
-if isinstance(owner_type, (list, tuple, types.GeneratorType)):
-filt += ' in (' + ','.join((str(x) for x in owner_type)) + ')'
-else:
-filt += '=' + str(owner_type)
-owner_id = kwargs.get('owner_id', None)
-if owner_id:
-filt += ' AND OWNER_ID'
-if isinstance(owner_id, (list, tuple, types.GeneratorType)):
-filt += ' in (' + ','.join(str(x) for x in owner_id) + ')'
-else:
-filt += '=' + str(owner_id)
-filt += ' AND counter_type=' + str(counter_type)
-since = kwargs.get('since', None)
-to = kwargs.get('to', None)
-since = int(since) if since else NEVER_UNIX
-to = int(to) if to else getSqlDatetimeAsUnix()
-interval = int(
-kwargs.get('interval') or '600'
-) # By default, group items in ten minutes interval (600 seconds)
-max_intervals = kwargs.get('max_intervals')
-limit = kwargs.get('limit')
-if max_intervals:
-# Protect against division by "elements-1" a few lines below
-max_intervals = int(max_intervals) if int(max_intervals) > 1 else 2
-if owner_id is None:
-q = StatsCounters.objects.filter(stamp__gte=since, stamp__lte=to)
-else:
-if isinstance(owner_id, (list, tuple, types.GeneratorType)):
-q = StatsCounters.objects.filter(
-owner_id__in=owner_id, stamp__gte=since, stamp__lte=to
-)
-else:
-q = StatsCounters.objects.filter(
-owner_id=owner_id, stamp__gte=since, stamp__lte=to
-)
-if isinstance(owner_type, (list, tuple, types.GeneratorType)):
-q = q.filter(owner_type__in=owner_type)
-else:
-q = q.filter(owner_type=owner_type)
-if q.count() > max_intervals:
-first = q.order_by('stamp')[0].stamp # type: ignore # Slicing is not supported by pylance right now
-last = q.order_by('stamp').reverse()[0].stamp # type: ignore # Slicing is not supported by pylance right now
-interval = int((last - first) / (max_intervals - 1))
-stampValue = '{ceil}(stamp/{interval})'.format(
-ceil=getSqlFnc('CEIL'), interval=interval
-)
-filt += ' AND stamp>={since} AND stamp<={to} GROUP BY {stampValue} ORDER BY stamp'.format(
-since=since, to=to, stampValue=stampValue
-)
-if limit:
-filt += ' LIMIT {}'.format(limit)
-if kwargs.get('use_max', False):
-fnc = getSqlFnc('MAX') + ('(value)')
-else:
-fnc = getSqlFnc('CEIL') + '({}(value))'.format(getSqlFnc('AVG'))
-# fnc = getSqlFnc('MAX' if kwargs.get('use_max', False) else 'AVG')
-query = (
-'SELECT -1 as id,-1 as owner_id,-1 as owner_type,-1 as counter_type, ' # nosec: SQL injection is not possible here, all values are controlled
-+ stampValue
-+ '*{}'.format(interval)
-+ ' AS stamp, '
-+ '{} AS value '
-'FROM {} WHERE {}'
-).format(fnc, StatsCounters._meta.db_table, filt)
-logger.debug('Stats query: %s', query)
-# We use result as an iterator
-return typing.cast(
-'models.QuerySet[StatsCounters]', StatsCounters.objects.raw(query)
-)
+for i in q.values('group_by_stamp', 'value'):
+yield (i['group_by_stamp'], i['value'])
def __str__(self):
return u"Counter of {}({}): {} - {} - {}".format(