1
0
mirror of https://github.com/dkmstr/openuds.git synced 2025-01-03 01:17:56 +03:00

Added some fixes to services_info_dict and finished tests for metapools transports grouping

This commit is contained in:
Adolfo Gómez García 2024-02-10 21:28:15 +01:00
parent 3d02a99b42
commit 9800218df9
No known key found for this signature in database
GPG Key ID: DD1ABF20724CDA23
18 changed files with 403 additions and 298 deletions

View File

@ -66,3 +66,6 @@ DEFAULT_MAX_PREPARING_SERVICES: typing.Final[int] = 15
# Default wait time for rechecks, etc...
DEFAULT_WAIT_TIME: typing.Final[int] = 8 # seconds
# UDS Action url scheme
UDS_ACTION_SCHEME: typing.Final[str] = 'udsa://'

View File

@ -55,6 +55,7 @@ class FixedService(services.Service, abc.ABC): # pylint: disable=too-many-publi
"""
Proxmox fixed machines service.
"""
is_base: typing.ClassVar[bool] = True # This is a base service, not a final one
uses_cache = False # Cache are running machine awaiting to be assigned
@ -73,7 +74,7 @@ class FixedService(services.Service, abc.ABC): # pylint: disable=too-many-publi
# allowed_protocols = types.transports.Protocol.generic_vdi(types.transports.Protocol.SPICE)
# services_type_provided = types.services.ServiceType.VDI
# Gui
# Gui templates, to be "incorporated" by inherited classes if needed
token = gui.TextField(
order=1,
label=_('Service Token'),
@ -86,6 +87,25 @@ class FixedService(services.Service, abc.ABC): # pylint: disable=too-many-publi
readonly=False,
)
use_snapshots = gui.CheckBoxField(
label=_('Use snapshots'),
default=False,
order=22,
tooltip=_('If active, UDS will try to create an snapshot (if one already does not exists) before accessing a machine, and restore it after usage.'),
tab=_('Machines'),
old_field_name='useSnapshots',
)
# Keep name as "machine" so we can use VCHelpers.getMachines
machines = gui.MultiChoiceField(
label=_("Machines"),
order=21,
tooltip=_('Machines for this service'),
required=True,
tab=_('Machines'),
rows=10,
)
def _get_assigned_machines(self) -> typing.Set[str]:
vals = self.storage.get_unpickle('vms')
logger.debug('Got storage VMS: %s', vals)

View File

@ -85,6 +85,7 @@ class HighAvailabilityPolicy(enum.IntEnum):
(HighAvailabilityPolicy.ENABLED, _('Enabled')),
]
@dataclasses.dataclass(frozen=True)
class UsageInfo:
used: int
@ -93,3 +94,31 @@ class UsageInfo:
@property
def percent(self) -> int:
return (self.used * 100 // self.total) if self.total > 0 else 0
class UsageInfoVars:
use_percent: str
use_count: str
left_count: str
max_srvs: str
def __init__(self, pool_usage_info: typing.Optional[UsageInfo] = None):
if pool_usage_info is None:
pool_usage_info = UsageInfo(0, 0)
self.use_percent = str(pool_usage_info.percent) + '%' if pool_usage_info.total > 0 else ''
self.use_count = str(pool_usage_info.used) if pool_usage_info.total > 0 else ''
self.left_count = str(pool_usage_info.total - pool_usage_info.used) if pool_usage_info.total > 0 else ''
self.max_srvs = str(pool_usage_info.total) if pool_usage_info.total > 0 else ''
def replace(self, x: str) -> str:
return (
x.replace('{use}', self.use_percent)
.replace('{total}', self.max_srvs)
.replace('{usec}', self.use_count)
.replace('{left}', self.left_count)
)
@staticmethod
def has_macros(x: str) -> bool:
return any(y in x for y in ('{use}', '{total}', '{usec}', '{left}'))

View File

@ -290,7 +290,8 @@ def concurrent_removal_limit_field(
def remove_duplicates_field(
order: int = 102, tab: 'types.ui.Tab|str|None' = types.ui.Tab.ADVANCED
order: int = 102, tab: 'types.ui.Tab|str|None' = types.ui.Tab.ADVANCED,
old_field_name: typing.Optional[str] = None,
) -> ui.gui.CheckBoxField:
return ui.gui.CheckBoxField(
label=_('Remove found duplicates'),
@ -298,7 +299,7 @@ def remove_duplicates_field(
order=order,
tooltip=_('If active, found duplicates vApps for this service will be removed'),
tab=tab,
old_field_name='removeDuplicates',
old_field_name=old_field_name,
)

View File

@ -37,6 +37,8 @@ import collections.abc
from django.utils.translation import get_language
from django.utils import formats
from uds.core import consts
if typing.TYPE_CHECKING:
from django.http import HttpRequest # pylint: disable=ungrouped-imports
@ -62,7 +64,7 @@ def uds_access_link(
'''
If transportId (uuid) is None, this will be a metaLink
'''
return f'udsa://{serviceId}/{transportId or "meta"}'
return f'{consts.system.UDS_ACTION_SCHEME}{serviceId}/{transportId or "meta"}'
def parse_date(dateToParse) -> datetime.date:

View File

@ -78,6 +78,11 @@ class Transport(ManagedObjectModel, TaggingMixin):
deployedServices: 'models.manager.RelatedManager[ServicePool]'
networks: 'models.manager.RelatedManager[Network]'
@property
# Alias for future renaming (start using alias asap)
def service_pools(self) -> 'models.manager.RelatedManager[ServicePool]':
return self.deployedServices
class Meta(ManagedObjectModel.Meta): # pylint: disable=too-few-public-methods
"""
Meta class to declare default order

View File

@ -32,6 +32,7 @@ import logging
import typing
from django.utils.translation import gettext_noop as _, gettext
from regex import F
from uds.core import services, types, consts, exceptions
from uds.core.services.specializations.fixed_machine.fixed_service import FixedService
from uds.core.services.specializations.fixed_machine.fixed_userservice import FixedUserService
@ -92,24 +93,9 @@ class ProxmoxFixedService(FixedService): # pylint: disable=too-many-public-meth
tab=_('Machines'),
old_field_name='resourcePool',
)
# Keep name as "machine" so we can use VCHelpers.getMachines
machines = gui.MultiChoiceField(
label=_("Machines"),
order=21,
tooltip=_('Machines for this service'),
required=True,
tab=_('Machines'),
rows=10,
)
use_snapshots = gui.CheckBoxField(
label=_('Use snapshots'),
default=False,
order=22,
tooltip=_('If active, UDS will try to create an snapshot on VM use and recover if on exit.'),
tab=_('Machines'),
old_field_name='useSnapshots',
)
machines = FixedService.machines
use_snapshots = FixedService.use_snapshots
prov_uuid = gui.HiddenField(value=None)

View File

@ -55,7 +55,8 @@ logger = logging.getLogger(__name__)
class XenFixedService(FixedService): # pylint: disable=too-many-public-methods
"""
Proxmox fixed machines service.
Represents a Xen service based on fixed machines.
This service requires the qemu agent to be installed on the machines.
"""
type_name = _('Proxmox Fixed Machines')
@ -86,29 +87,13 @@ class XenFixedService(FixedService): # pylint: disable=too-many-public-methods
'function': helpers.get_machines,
'parameters': ['prov_uuid', 'folder'],
},
tooltip=_('Resource Pool containing base machines'),
tooltip=_('Folder containing base machines'),
required=True,
tab=_('Machines'),
old_field_name='resourcePool',
)
# Keep name as "machine" so we can use VCHelpers.getMachines
machines = gui.MultiChoiceField(
label=_("Machines"),
order=21,
tooltip=_('Machines for this service'),
required=True,
tab=_('Machines'),
rows=10,
)
use_snapshots = gui.CheckBoxField(
label=_('Use snapshots'),
default=False,
order=22,
tooltip=_('If active, UDS will try to create an snapshot on VM use and recover if on exit.'),
tab=_('Machines'),
old_field_name='useSnapshots',
)
machines = FixedService.machines
use_snapshots = FixedService.use_snapshots
prov_uuid = gui.HiddenField(value=None)

View File

@ -29,6 +29,7 @@
@author: Adolfo Gómez, dkmaster at dkmon dot com
'''
import collections.abc
from functools import reduce
import logging
import typing
@ -53,7 +54,7 @@ from uds.models import MetaPool, Network, ServicePool, ServicePoolGroup, TicketS
# Not imported at runtime, just for type checking
if typing.TYPE_CHECKING:
from uds.core.types.requests import ExtendedHttpRequestWithUser
from uds.models import Image
from uds.models import Image, MetaPoolMember
logger = logging.getLogger(__name__)
@ -120,54 +121,45 @@ def get_services_info_dict(
"""
# We look for services for this authenticator's groups. The user is logged in to just one authenticator, so their groups must coincide with those assigned to the service pools
groups = list(request.user.get_groups())
availServicePools = list(
available_service_pools = list(
ServicePool.get_pools_for_groups(groups, request.user)
) # Pass in user to get "number_assigned" to optimize
availMetaPools = list(
available_metapools = list(
MetaPool.metapools_for_groups(groups, request.user)
) # Pass in user to get "number_assigned" to optimize
now = sql_datetime()
# Information for administrators
nets = ''
validTrans = ''
valid_transports = ''
osType: 'types.os.KnownOS' = request.os.os
logger.debug('OS: %s', osType)
os_type: 'types.os.KnownOS' = request.os.os
logger.debug('OS: %s', os_type)
if request.user.is_staff():
nets = ','.join([n.name for n in Network.get_networks_for_ip(request.ip)])
tt = []
t: Transport
for t in Transport.objects.all().prefetch_related('networks'):
if t.is_ip_allowed(request.ip):
tt.append(t.name)
validTrans = ','.join(tt)
logger.debug('Checking meta pools: %s', availMetaPools)
services = []
def _is_valid_transport(t: Transport) -> bool:
transport_type = t.get_type()
return (
transport_type is not None
and t.is_ip_allowed(request.ip)
and transport_type.supports_os(os_type)
and t.is_os_allowed(os_type)
)
# Metapool helpers
def transportIterator(member) -> collections.abc.Iterable[Transport]:
def _valid_transports(member: 'MetaPoolMember') -> collections.abc.Iterable[Transport]:
t: Transport
for t in member.pool.transports.all().order_by('priority'):
try:
typeTrans = t.get_type()
if (
typeTrans
and t.is_ip_allowed(request.ip)
and typeTrans.supports_os(osType)
and t.is_os_allowed(osType)
):
if _is_valid_transport(t):
yield t
except Exception as e:
logger.warning('Transport %s of %s not found. Ignoring. (%s)', t, member.pool, e)
def buildMetaTransports(
transports: collections.abc.Iterable[Transport], isLabel: bool, meta: 'MetaPool'
def _build_transports_for_meta(
transports: collections.abc.Iterable[Transport], is_by_label: bool, meta: 'MetaPool'
) -> list[collections.abc.Mapping[str, typing.Any]]:
def idd(i):
return i.uuid if not isLabel else 'LABEL:' + i.label
return i.uuid if not is_by_label else 'LABEL:' + i.label
return [
{
@ -176,110 +168,96 @@ def get_services_info_dict(
'link': html.uds_access_link(request, 'M' + meta.uuid, idd(i)), # type: ignore
'priority': i.priority,
}
for i in transports
for i in sorted(transports, key=lambda x: x.priority) # Sorted by priority
]
if request.user.is_staff():
nets = ','.join([n.name for n in Network.get_networks_for_ip(request.ip)])
tt = []
t: Transport
for t in Transport.objects.all().prefetch_related('networks'):
if t.is_ip_allowed(request.ip):
tt.append(t.name)
valid_transports = ','.join(tt)
logger.debug('Checking meta pools: %s', available_metapools)
services = []
# Preload all assigned user services for this user
# Add meta pools data first
for meta in availMetaPools:
for meta in available_metapools:
# Check that we have access to at least one transport on some of its children
metaTransports: list[collections.abc.Mapping[str, typing.Any]] = []
transports_in_meta: list[collections.abc.Mapping[str, typing.Any]] = []
in_use = meta.number_in_use > 0 # type: ignore # annotated value
inAll: typing.Optional[typing.Set[str]] = None
tmpSet: typing.Set[str]
# If no macro on names, skip calculation (and set to empty)
if '{' in meta.name or '{' in meta.visual_name:
poolUsageInfo = meta.usage()
use_percent = str(poolUsageInfo.percent) + '%'
use_count = str(poolUsageInfo.used)
left_count = str(poolUsageInfo.total - poolUsageInfo.used)
max_srvs = str(poolUsageInfo.total)
else:
max_srvs = ''
use_percent = ''
use_count = ''
left_count = ''
# pylint: disable=cell-var-from-loop
def macro_info(x: str) -> str:
return (
x.replace('{use}', use_percent)
.replace('{total}', max_srvs)
.replace('{usec}', use_count)
.replace('{left}', left_count)
# Calculate info variable macros content if needed
info_vars = (
types.pools.UsageInfoVars(meta.usage())
if (
types.pools.UsageInfoVars.has_macros(meta.name)
or types.pools.UsageInfoVars.has_macros(meta.visual_name)
)
else types.pools.UsageInfoVars()
)
if meta.transport_grouping == types.pools.TransportSelectionPolicy.COMMON:
# only keep transports that are in ALL members
for member in meta.members.all().order_by('priority'):
tmpSet = set()
# if first pool, get all its transports and check that are valid
for t in transportIterator(member):
if inAll is None:
tmpSet.add(t.uuid) # type: ignore
elif t.uuid in inAll: # For subsequent, reduce...
tmpSet.add(t.uuid) # type: ignore
# Keep only transports that are in all pools
# This will be done by getting all transports from all pools and then intersecting them
# using reduce
transports_in_all_pools = typing.cast(
set[Transport],
reduce(
lambda x, y: x & y,
[{t for t in _valid_transports(member)} for member in meta.members.all()],
),
)
inAll = tmpSet
# transports_in_all_pools now holds the transports common to ALL member pools
metaTransports = buildMetaTransports(
Transport.objects.filter(uuid__in=inAll or []), isLabel=False, meta=meta
transports_in_meta = _build_transports_for_meta(
transports_in_all_pools,
is_by_label=False,
meta=meta,
)
elif meta.transport_grouping == types.pools.TransportSelectionPolicy.LABEL:
ltrans: collections.abc.MutableMapping[str, Transport] = {}
for member in meta.members.all().order_by('priority'):
tmpSet = set()
transports_in_all_pools_by_label: typing.Optional[typing.Set[str]] = None
temporary_transport_set_by_label: typing.Set[str]
for member in meta.members.all():
temporary_transport_set_by_label = set()
# if first pool, get all its transports and check that are valid
for t in transportIterator(member):
for t in _valid_transports(member):
if not t.label:
continue
if t.label not in ltrans or ltrans[t.label].priority > t.priority:
ltrans[t.label] = t
if inAll is None:
tmpSet.add(t.label)
elif t.label in inAll: # For subsequent, reduce...
tmpSet.add(t.label)
if transports_in_all_pools_by_label is None:
temporary_transport_set_by_label.add(t.label)
elif t.label in transports_in_all_pools_by_label: # For subsequent, reduce...
temporary_transport_set_by_label.add(t.label)
inAll = tmpSet
transports_in_all_pools_by_label = temporary_transport_set_by_label
# transports_in_all_pools_by_label now holds the labels common to ALL member pools
metaTransports = buildMetaTransports(
(v for k, v in ltrans.items() if k in (inAll or set())), isLabel=True, meta=meta
transports_in_meta = _build_transports_for_meta(
(v for k, v in ltrans.items() if k in (transports_in_all_pools_by_label or set())),
is_by_label=True,
meta=meta,
)
else:
for member in meta.members.all():
# if pool.is_in_maintenance():
# continue
for t in member.pool.transports.all():
typeTrans = t.get_type()
if (
typeTrans
and t.is_ip_allowed(request.ip)
and typeTrans.supports_os(osType)
and t.is_os_allowed(osType)
):
metaTransports = [
# If we have at least one valid transport,
# mark as "meta" transport and add it to the list
transports_in_meta = [
{
'id': 'meta',
'name': 'meta',
'link': html.uds_access_link(request, 'M' + meta.uuid, None), # type: ignore
'priority': 0,
}
if any(_valid_transports(member) for member in meta.members.all())
else {}
]
break
# if not in_use and meta.number_in_use: # Only look for assignation on possible used
# assignedUserService = UserServiceManager().getExistingAssignationForUser(pool, request.user)
# if assignedUserService:
# in_use = assignedUserService.in_use
# Stop when 1 usable pool is found (metaTransports is filled)
if metaTransports:
break
# If no usable pools, this is not visible
if metaTransports:
if transports_in_meta:
group: collections.abc.MutableMapping[str, typing.Any] = (
meta.servicesPoolGroup.as_dict if meta.servicesPoolGroup else ServicePoolGroup.default().as_dict
)
@ -288,13 +266,13 @@ def get_services_info_dict(
_service_info(
uuid=meta.uuid,
is_meta=True,
name=macro_info(meta.name),
visual_name=macro_info(meta.visual_name),
name=info_vars.replace(meta.name),
visual_name=info_vars.replace(meta.visual_name),
description=meta.comments,
group=group,
transports=metaTransports,
transports=transports_in_meta,
image=meta.image,
show_transports=len(metaTransports) > 1,
show_transports=len(transports_in_meta) > 1,
allow_users_remove=meta.allow_users_remove,
allow_users_reset=meta.allow_users_remove,
maintenance=meta.is_in_maintenance(),
@ -307,20 +285,20 @@ def get_services_info_dict(
)
# Now generic user service
for sPool in availServicePools:
for service_pool in available_service_pools:
# Skip pools that are part of meta pools
if sPool.owned_by_meta:
if service_pool.owned_by_meta:
continue
# If no macro on names, skip calculation
if '{' in sPool.name or '{' in sPool.visual_name:
poolUsageInfo = sPool.usage(
sPool.usage_count, # type: ignore # anotated value
if '{' in service_pool.name or '{' in service_pool.visual_name:
pool_usage_info = service_pool.usage(
service_pool.usage_count, # type: ignore # annotated value
)
use_percent = str(poolUsageInfo.percent) + '%'
use_count = str(poolUsageInfo.used)
left_count = str(poolUsageInfo.total - poolUsageInfo.used)
max_srvs = str(poolUsageInfo.total)
use_percent = str(pool_usage_info.percent) + '%'
use_count = str(pool_usage_info.used)
left_count = str(pool_usage_info.total - pool_usage_info.used)
max_srvs = str(pool_usage_info.total)
else:
max_srvs = ''
use_percent = ''
@ -328,7 +306,7 @@ def get_services_info_dict(
left_count = ''
# pylint: disable=cell-var-from-loop
def macro_info(x: str) -> str:
def _replace_macro_vars(x: str) -> str:
return (
x.replace('{use}', use_percent)
.replace('{total}', max_srvs)
@ -338,19 +316,14 @@ def get_services_info_dict(
trans: list[collections.abc.Mapping[str, typing.Any]] = []
for t in sorted(
sPool.transports.all(), key=lambda x: x.priority
service_pool.transports.all(), key=lambda x: x.priority
): # In memory sort, allows reuse prefetched and not too big array
typeTrans = t.get_type()
if (
typeTrans
and t.is_ip_allowed(request.ip)
and typeTrans.supports_os(osType)
and t.is_os_allowed(osType)
):
if typeTrans.own_link:
link = reverse('webapi.transport_own_link', args=('F' + sPool.uuid, t.uuid)) # type: ignore
transport_type = t.get_type()
if _is_valid_transport(t):
if transport_type.own_link:
link = reverse('webapi.transport_own_link', args=('F' + service_pool.uuid, t.uuid)) # type: ignore
else:
link = html.uds_access_link(request, 'F' + sPool.uuid, t.uuid) # type: ignore
link = html.uds_access_link(request, 'F' + service_pool.uuid, t.uuid) # type: ignore
trans.append({'id': t.uuid, 'name': t.name, 'link': link, 'priority': t.priority})
# If empty transports, do not include it on list
@ -358,52 +331,54 @@ def get_services_info_dict(
continue
# Locate if user service has any already assigned user service for this. Use "pre cached" number of assignations in this pool to optimize
in_use = typing.cast(typing.Any, sPool).number_in_use > 0
in_use = typing.cast(typing.Any, service_pool).number_in_use > 0
# if svr.number_in_use: # Annotated value got from getDeployedServicesForGroups(...). If 0, no assignation for this user
# ads = UserServiceManager().getExistingAssignationForUser(svr, request.user)
# if ads:
# in_use = ads.in_use
group = (
sPool.servicesPoolGroup.as_dict if sPool.servicesPoolGroup else ServicePoolGroup.default().as_dict
service_pool.servicesPoolGroup.as_dict
if service_pool.servicesPoolGroup
else ServicePoolGroup.default().as_dict
)
# Only add toBeReplaced info in case we allow it. This will generate some "overload" on the services
toBeReplacedDate = (
sPool.when_will_be_replaced(request.user)
if typing.cast(typing.Any, sPool).pubs_active > 0
when_will_be_replaced = (
service_pool.when_will_be_replaced(request.user)
if typing.cast(typing.Any, service_pool).pubs_active > 0
and GlobalConfig.NOTIFY_REMOVAL_BY_PUB.as_bool(False)
else None
)
# tbr = False
if toBeReplacedDate:
toBeReplaced = formats.date_format(toBeReplacedDate, 'SHORT_DATETIME_FORMAT')
toBeReplacedTxt = gettext(
if when_will_be_replaced:
replace_date_as_str = formats.date_format(when_will_be_replaced, 'SHORT_DATETIME_FORMAT')
replace_date_info_text = gettext(
'This service is about to be replaced by a new version. Please, close the session before {} and save all your work to avoid loosing it.'
).format(toBeReplacedDate)
).format(when_will_be_replaced)
else:
toBeReplaced = None
toBeReplacedTxt = ''
replace_date_as_str = None
replace_date_info_text = ''
services.append(
_service_info(
uuid=sPool.uuid,
uuid=service_pool.uuid,
is_meta=False,
name=macro_info(sPool.name),
visual_name=macro_info(sPool.visual_name),
description=sPool.comments,
name=_replace_macro_vars(service_pool.name),
visual_name=_replace_macro_vars(service_pool.visual_name),
description=service_pool.comments,
group=group,
transports=trans,
image=sPool.image,
show_transports=sPool.show_transports,
allow_users_remove=sPool.allow_users_remove,
allow_users_reset=sPool.allow_users_reset,
maintenance=sPool.is_in_maintenance(),
not_accesible=not sPool.is_access_allowed(now),
image=service_pool.image,
show_transports=service_pool.show_transports,
allow_users_remove=service_pool.allow_users_remove,
allow_users_reset=service_pool.allow_users_reset,
maintenance=service_pool.is_in_maintenance(),
not_accesible=not service_pool.is_access_allowed(now),
in_use=in_use,
to_be_replaced=toBeReplaced,
to_be_replaced_text=toBeReplacedTxt,
custom_calendar_text=sPool.calendar_message,
to_be_replaced=replace_date_as_str,
to_be_replaced_text=replace_date_info_text,
custom_calendar_text=service_pool.calendar_message,
)
)
@ -427,7 +402,7 @@ def get_services_info_dict(
'services': services,
'ip': request.ip,
'nets': nets,
'transports': validTrans,
'transports': valid_transports,
'autorun': autorun,
}

View File

@ -48,18 +48,18 @@ class LoginLogoutTest(test.UDSTestCase):
"""
Test login and logout
"""
auth = fixtures_authenticators.createAuthenticator()
auth = fixtures_authenticators.create_authenticator()
# Create some random users
admins = fixtures_authenticators.createUsers(
admins = fixtures_authenticators.create_users(
auth, number_of_users=8, is_admin=True
)
staffs = fixtures_authenticators.createUsers(
staffs = fixtures_authenticators.create_users(
auth, number_of_users=8, is_staff=True
)
users = fixtures_authenticators.createUsers(auth, number_of_users=8)
users = fixtures_authenticators.create_users(auth, number_of_users=8)
# Create some groups
groups = fixtures_authenticators.createGroups(auth, number_of_groups=32)
groups = fixtures_authenticators.create_groups(auth, number_of_groups=32)
# Add users to some groups, randomly
for user in users + admins + staffs:

View File

@ -47,9 +47,9 @@ class ModelUUIDTest(UDSTestCase):
def setUp(self) -> None:
super().setUp()
self.auth = authenticators_fixtures.createAuthenticator()
self.group = authenticators_fixtures.createGroups(self.auth, 1)[0]
self.user = authenticators_fixtures.createUsers(self.auth, 1, groups=[self.group])[0]
self.auth = authenticators_fixtures.create_authenticator()
self.group = authenticators_fixtures.create_groups(self.auth, 1)[0]
self.user = authenticators_fixtures.create_users(self.auth, 1, groups=[self.group])[0]
def test_uuid_lowercase(self):
"""

View File

@ -60,15 +60,15 @@ class PermissionsTest(UDSTestCase):
network: models.Network
def setUp(self) -> None:
self.authenticator = authenticators_fixtures.createAuthenticator()
self.groups = authenticators_fixtures.createGroups(self.authenticator)
self.users = authenticators_fixtures.createUsers(
self.authenticator = authenticators_fixtures.create_authenticator()
self.groups = authenticators_fixtures.create_groups(self.authenticator)
self.users = authenticators_fixtures.create_users(
self.authenticator, groups=self.groups
)
self.admins = authenticators_fixtures.createUsers(
self.admins = authenticators_fixtures.create_users(
self.authenticator, is_admin=True, groups=self.groups
)
self.staffs = authenticators_fixtures.createUsers(
self.staffs = authenticators_fixtures.create_users(
self.authenticator, is_staff=True, groups=self.groups
)
self.userService = services_fixtures.create_one_cache_testing_userservice(

View File

@ -40,7 +40,7 @@ from uds.core.managers.crypto import CryptoManager
glob = {'user_id': 0, 'group_id': 0}
def createAuthenticator(
def create_authenticator(
authenticator: typing.Optional[models.Authenticator] = None,
) -> models.Authenticator:
"""
@ -61,7 +61,7 @@ def createAuthenticator(
return authenticator
def createUsers(
def create_users(
authenticator: models.Authenticator,
number_of_users: int = 1,
is_staff: bool = False,
@ -96,7 +96,7 @@ def createUsers(
return users
def createGroups(
def create_groups(
authenticator: models.Authenticator, number_of_groups: int = 1
) -> list[models.Group]:
"""

View File

@ -159,7 +159,7 @@ def create_test_publication(
return publication
def create_test_transport() -> models.Transport:
def create_test_transport(**kwargs) -> models.Transport:
from uds.transports.Test import TestTransport
values = TestTransport(
@ -170,6 +170,7 @@ def create_test_transport() -> models.Transport:
comments='Comment for Transport %d' % (glob['transport_id']),
data_type=TestTransport.type_type,
data=TestTransport(environment.Environment(str(glob['transport_id'])), values).serialize(),
**kwargs,
)
glob['transport_id'] += 1
return transport
@ -255,9 +256,9 @@ def create_cache_testing_userservices(
from . import authenticators
if not user or not groups:
auth = authenticators.createAuthenticator()
groups = authenticators.createGroups(auth, 3)
user = authenticators.createUsers(auth, 1, groups=groups)[0]
auth = authenticators.create_authenticator()
groups = authenticators.create_groups(auth, 3)
user = authenticators.create_users(auth, 1, groups=groups)[0]
user_services: list[models.UserService] = []
for _ in range(count):
user_services.append(create_one_cache_testing_userservice(createProvider(), user, groups, type_))

View File

@ -65,23 +65,23 @@ class RESTTestCase(test.UDSTransactionTestCase):
def setUp(self) -> None:
# Set up data for REST Test cases
# First, the authenticator related
self.auth = authenticators_fixtures.createAuthenticator()
self.simple_groups = authenticators_fixtures.createGroups(self.auth, NUMBER_OF_ITEMS_TO_CREATE)
self.auth = authenticators_fixtures.create_authenticator()
self.simple_groups = authenticators_fixtures.create_groups(self.auth, NUMBER_OF_ITEMS_TO_CREATE)
self.meta_groups = authenticators_fixtures.createMetaGroups(self.auth, NUMBER_OF_ITEMS_TO_CREATE)
# Create some users, one admin, one staff and one user
self.admins = authenticators_fixtures.createUsers(
self.admins = authenticators_fixtures.create_users(
self.auth,
number_of_users=NUMBER_OF_ITEMS_TO_CREATE,
is_admin=True,
groups=self.groups,
)
self.staffs = authenticators_fixtures.createUsers(
self.staffs = authenticators_fixtures.create_users(
self.auth,
number_of_users=NUMBER_OF_ITEMS_TO_CREATE,
is_staff=True,
groups=self.groups,
)
self.plain_users = authenticators_fixtures.createUsers(
self.plain_users = authenticators_fixtures.create_users(
self.auth, number_of_users=NUMBER_OF_ITEMS_TO_CREATE, groups=self.groups
)

View File

@ -52,24 +52,24 @@ class WEBTestCase(test.UDSTransactionTestCase):
def setUp(self) -> None:
# Set up data for REST Test cases
# First, the authenticator related
self.auth = authenticators_fixtures.createAuthenticator()
self.groups = authenticators_fixtures.createGroups(
self.auth = authenticators_fixtures.create_authenticator()
self.groups = authenticators_fixtures.create_groups(
self.auth, NUMBER_OF_ITEMS_TO_CREATE
)
# Create some users, one admin, one staff and one user
self.admins = authenticators_fixtures.createUsers(
self.admins = authenticators_fixtures.create_users(
self.auth,
number_of_users=NUMBER_OF_ITEMS_TO_CREATE,
is_admin=True,
groups=self.groups,
)
self.staffs = authenticators_fixtures.createUsers(
self.staffs = authenticators_fixtures.create_users(
self.auth,
number_of_users=NUMBER_OF_ITEMS_TO_CREATE,
is_staff=True,
groups=self.groups,
)
self.plain_users = authenticators_fixtures.createUsers(
self.plain_users = authenticators_fixtures.create_users(
self.auth, number_of_users=NUMBER_OF_ITEMS_TO_CREATE, groups=self.groups
)

View File

@ -60,18 +60,18 @@ class WebLoginLogoutTest(test.WEBTestCase):
"""
Test login and logout
"""
auth = fixtures_authenticators.createAuthenticator()
auth = fixtures_authenticators.create_authenticator()
# Create some random users
admins = fixtures_authenticators.createUsers(
admins = fixtures_authenticators.create_users(
auth, number_of_users=8, is_admin=True
)
stafs = fixtures_authenticators.createUsers(
stafs = fixtures_authenticators.create_users(
auth, number_of_users=8, is_staff=True
)
users = fixtures_authenticators.createUsers(auth, number_of_users=8)
users = fixtures_authenticators.create_users(auth, number_of_users=8)
# Create some groups
groups = fixtures_authenticators.createGroups(auth, number_of_groups=32)
groups = fixtures_authenticators.create_groups(auth, number_of_groups=32)
# Add users to some groups, randomly
for user in users + admins + stafs:
@ -118,8 +118,8 @@ class WebLoginLogoutTest(test.WEBTestCase):
self.assertInvalidLogin(response)
def test_login_valid_user_no_group(self):
user = fixtures_authenticators.createUsers(
fixtures_authenticators.createAuthenticator(),
user = fixtures_authenticators.create_users(
fixtures_authenticators.create_authenticator(),
)[0]
response = self.do_login(user.name, user.name, user.manager.uuid)
@ -127,8 +127,8 @@ class WebLoginLogoutTest(test.WEBTestCase):
self.assertEqual(models.Log.objects.count(), 4)
user = fixtures_authenticators.createUsers(
fixtures_authenticators.createAuthenticator(),
user = fixtures_authenticators.create_users(
fixtures_authenticators.create_authenticator(),
is_staff=True,
)[0]
@ -137,8 +137,8 @@ class WebLoginLogoutTest(test.WEBTestCase):
self.assertEqual(models.Log.objects.count(), 8)
user = fixtures_authenticators.createUsers(
fixtures_authenticators.createAuthenticator(),
user = fixtures_authenticators.create_users(
fixtures_authenticators.create_authenticator(),
is_admin=True,
)[0]
@ -148,8 +148,8 @@ class WebLoginLogoutTest(test.WEBTestCase):
self.assertEqual(models.Log.objects.count(), 12)
def test_login_invalid_user(self):
user = fixtures_authenticators.createUsers(
fixtures_authenticators.createAuthenticator(),
user = fixtures_authenticators.create_users(
fixtures_authenticators.create_authenticator(),
)[0]
response = self.do_login(user.name, 'wrong password', user.manager.uuid)
@ -159,8 +159,8 @@ class WebLoginLogoutTest(test.WEBTestCase):
# + 2 system logs (auth.log), one for each failed login
self.assertEqual(models.Log.objects.count(), 6)
user = fixtures_authenticators.createUsers(
fixtures_authenticators.createAuthenticator(),
user = fixtures_authenticators.create_users(
fixtures_authenticators.create_authenticator(),
is_staff=True,
)[0]
@ -169,8 +169,8 @@ class WebLoginLogoutTest(test.WEBTestCase):
self.assertEqual(models.Log.objects.count(), 12)
user = fixtures_authenticators.createUsers(
fixtures_authenticators.createAuthenticator(),
user = fixtures_authenticators.create_users(
fixtures_authenticators.create_authenticator(),
is_admin=True,
)[0]

View File

@ -31,19 +31,21 @@
"""
@author: Adolfo Gómez, dkmaster at dkmon dot com
"""
# We use commit/rollback
import datetime
import typing
import collections.abc
import datetime
import functools
import itertools
import random
import typing
from unittest import mock
from uds import models
from uds.core import types, consts
from uds.core import consts, types
from uds.web.util import services
from ...utils.test import UDSTransactionTestCase
from ...fixtures import authenticators as fixtures_authenticators
from ...fixtures import services as fixtures_services
from ...utils.test import UDSTransactionTestCase
class TestGetServicesData(UDSTransactionTestCase):
@ -51,14 +53,14 @@ class TestGetServicesData(UDSTransactionTestCase):
auth: models.Authenticator
groups: list[models.Group]
user: models.User
transports: list[models.Transport]
def setUp(self) -> None:
# We need to create a user with some services
self.auth = fixtures_authenticators.createAuthenticator()
self.groups = fixtures_authenticators.createGroups(self.auth, 3)
self.user = fixtures_authenticators.createUsers(
self.auth, 1, groups=self.groups
)[0]
self.auth = fixtures_authenticators.create_authenticator()
self.groups = fixtures_authenticators.create_groups(self.auth, 3)
self.user = fixtures_authenticators.create_users(self.auth, 1, groups=self.groups)[0]
self.transports = [fixtures_services.create_test_transport(priority=counter, label=f'label{counter}') for counter in range(10)]
self.request = mock.Mock()
self.request.user = self.user
@ -73,11 +75,35 @@ class TestGetServicesData(UDSTransactionTestCase):
return super().setUp()
    def _create_metapools(
        self,
        grouping_method: types.pools.TransportSelectionPolicy,
        ha_policy: types.pools.HighAvailabilityPolicy = types.pools.HighAvailabilityPolicy.DISABLED,
    ) -> None:
        """Populate the DB with 110 service pools and 10 meta pools of 10 pools each.

        The last 10 service pools are intentionally left out of every meta pool.
        Nothing is returned; tests read the created objects back via the web API.

        NOTE(review): ``ha_policy`` is accepted but never forwarded to
        ``create_test_metapool`` — confirm whether it should be passed through.
        """
        # Create 110 service pools, each backed by one cached user service for this user
        service_pools: list[models.ServicePool] = []
        for i in range(110):
            service_pools.append(
                fixtures_services.create_cache_testing_userservices(
                    count=1, user=self.user, groups=self.groups
                )[0].deployed_service
            )
        # Create 10 meta pools of 10 member pools each; the last 10 service pools
        # created above stay unassigned to any meta pool on purpose
        meta_services: list[models.MetaPool] = []
        for i in range(10):
            service_pool = fixtures_services.create_test_metapool(
                service_pools=service_pools[i * 10 : (i + 1) * 10],
                groups=self.groups,
                transport_grouping=grouping_method,
            )
            meta_services.append(service_pool)
def test_get_services_data(self) -> None:
# Create 10 services, for this user
user_services: list[models.ServicePool] = []
service_pools: list[models.ServicePool] = []
for i in range(10):
user_services.append(
service_pools.append(
fixtures_services.create_cache_testing_userservices(
count=1, user=self.user, groups=self.groups
)[0].deployed_service
@ -93,9 +119,7 @@ class TestGetServicesData(UDSTransactionTestCase):
# 'transports': validTrans,
# 'autorun': autorun,
# }
result_services: typing.Final[
list[collections.abc.Mapping[str, typing.Any]]
] = data['services']
result_services: typing.Final[list[dict[str, typing.Any]]] = data['services']
self.assertEqual(len(result_services), 10)
self.assertEqual(data['ip'], '127.0.0.1')
self.assertEqual(len(data['nets']), 0)
@ -126,7 +150,7 @@ class TestGetServicesData(UDSTransactionTestCase):
for user_service in result_services:
# Locate user service in user_services
found: models.ServicePool = next(
(x for x in user_services if x.uuid == user_service['id'][1:]),
(x for x in service_pools if x.uuid == user_service['id'][1:]),
models.ServicePool(uuid='x'),
)
if found.uuid == 'x':
@ -136,39 +160,27 @@ class TestGetServicesData(UDSTransactionTestCase):
self.assertEqual(user_service['name'], found.name)
self.assertEqual(user_service['visual_name'], found.visual_name)
self.assertEqual(user_service['description'], found.comments)
self.assertEqual(
user_service['group'], models.ServicePoolGroup.default().as_dict
)
self.assertEqual(user_service['group'], models.ServicePoolGroup.default().as_dict)
self.assertEqual(
[(i['id'], i['name']) for i in user_service['transports']],
[(t.uuid, t.name) for t in found.transports.all()],
)
self.assertEqual(
user_service['imageId'], found.image and found.image.uuid or 'x'
)
self.assertEqual(user_service['imageId'], found.image and found.image.uuid or 'x')
self.assertEqual(user_service['show_transports'], found.show_transports)
self.assertEqual(
user_service['allow_users_remove'], found.allow_users_remove
)
self.assertEqual(user_service['allow_users_remove'], found.allow_users_remove)
self.assertEqual(user_service['allow_users_reset'], found.allow_users_reset)
self.assertEqual(
user_service['maintenance'], found.service.provider.maintenance_mode
)
self.assertEqual(
user_service['not_accesible'], not found.is_access_allowed(now)
)
self.assertEqual(
user_service['in_use'], found.userServices.filter(in_use=True).count()
)
self.assertEqual(user_service['maintenance'], found.service.provider.maintenance_mode)
self.assertEqual(user_service['not_accesible'], not found.is_access_allowed(now))
self.assertEqual(user_service['in_use'], found.userServices.filter(in_use=True).count())
self.assertEqual(user_service['to_be_replaced'], None)
self.assertEqual(user_service['to_be_replaced_text'], '')
self.assertEqual(user_service['custom_calendar_text'], '')
def test_get_meta_services_data(self) -> None:
# Create 10 services, for this user
user_services: list[models.ServicePool] = []
service_pools: list[models.ServicePool] = []
for i in range(100):
user_services.append(
service_pools.append(
fixtures_services.create_cache_testing_userservices(
count=1, user=self.user, groups=self.groups
)[0].deployed_service
@ -179,17 +191,14 @@ class TestGetServicesData(UDSTransactionTestCase):
for i in range(10):
meta_services.append(
fixtures_services.create_test_metapool(
service_pools=user_services[i * 10 : (i + 1) * 10], groups=self.groups
service_pools=service_pools[i * 10 : (i + 1) * 10], groups=self.groups
)
)
data = services.get_services_info_dict(self.request)
now = datetime.datetime.now()
result_services: typing.Final[
list[collections.abc.Mapping[str, typing.Any]]
] = data['services']
result_services: typing.Final[list[dict[str, typing.Any]]] = data['services']
self.assertEqual(len(result_services), 10)
self.assertEqual(data['ip'], '127.0.0.1')
self.assertEqual(len(data['nets']), 0)
@ -209,12 +218,8 @@ class TestGetServicesData(UDSTransactionTestCase):
self.assertEqual(user_service['name'], found.name)
self.assertEqual(user_service['visual_name'], found.visual_name)
self.assertEqual(user_service['description'], found.comments)
self.assertEqual(
user_service['group'], models.ServicePoolGroup.default().as_dict
)
self.assertEqual(
user_service['not_accesible'], not found.is_access_allowed(now)
)
self.assertEqual(user_service['group'], models.ServicePoolGroup.default().as_dict)
self.assertEqual(user_service['not_accesible'], not found.is_access_allowed(now))
self.assertEqual(user_service['to_be_replaced'], None)
self.assertEqual(user_service['to_be_replaced_text'], '')
self.assertEqual(user_service['custom_calendar_text'], '')
@ -238,16 +243,109 @@ class TestGetServicesData(UDSTransactionTestCase):
)
)
data = services.get_services_info_dict(self.request)
now = datetime.datetime.now()
result_services: typing.Final[
list[collections.abc.Mapping[str, typing.Any]]
] = data['services']
result_services: typing.Final[list[dict[str, typing.Any]]] = data['services']
self.assertEqual(len(result_services), 20) # 10 metas and 10 normal pools
# Some checks are ommited, because are already tested in other tests
self.assertEqual(len(list(filter(lambda x: x['is_meta'], result_services))), 10)
self.assertEqual(len(list(filter(lambda x: not x['is_meta'], result_services))), 10)
def _generate_metapool_with_transports(
self, count: int, transport_grouping: types.pools.TransportSelectionPolicy, *,
add_random_transports: bool
) -> tuple[list[models.ServicePool], models.MetaPool]:
service_pools: list[models.ServicePool] = []
for i in range(count):
pool = fixtures_services.create_cache_testing_userservices(
count=1, user=self.user, groups=self.groups
)[0].deployed_service
pool.transports.add(*self.transports[:3]) # Add the first 3 transports to all pools
# add some random transports to each pool after the three common ones
if add_random_transports:
pool.transports.add(*random.sample(self.transports[3:], 3))
service_pools.append(pool)
return service_pools, fixtures_services.create_test_metapool(
service_pools=service_pools,
groups=self.groups,
transport_grouping=transport_grouping,
)
def test_meta_common_grouping(self) -> None:
# For this test, we don't mind returned value, we just want to create the pools on db
self._generate_metapool_with_transports(
10, types.pools.TransportSelectionPolicy.COMMON, # Group by common transports
add_random_transports=True
)
# Now, get the data
data = services.get_services_info_dict(self.request)
# Now, check that the meta pool has the same transports as the common ones
result_services: typing.Final[list[dict[str, typing.Any]]] = data['services']
# We except 1 result only, a meta pool (is_meta = True)
self.assertEqual(len(result_services), 1)
self.assertEqual(result_services[0]['is_meta'], True)
# Transpors for this meta pool should be the common ones, ordered by priority
# First compose a list of the common transports, ordered by priority
common_transports_ids = [t.uuid for t in sorted(self.transports[:3], key=lambda x: x.priority)]
# Now, check that the transports are the same, and ordered by priority
self.assertEqual([t['id'] for t in result_services[0]['transports']], common_transports_ids)
def test_meta_auto_grouping(self) -> None:
self._generate_metapool_with_transports(
10, types.pools.TransportSelectionPolicy.AUTO, # Group by common transports
add_random_transports=True
)
# Now, get the data
data = services.get_services_info_dict(self.request)
result_services: typing.Final[list[dict[str, typing.Any]]] = data['services']
# We except 1 result only, a meta pool (is_meta = True)
self.assertEqual(len(result_services), 1)
self.assertEqual(result_services[0]['is_meta'], True)
# Transport should be {'id': 'meta', 'name: 'meta', 'priority': 0, 'link': (an udsa://... link}, and only one
self.assertEqual(len(result_services[0]['transports']), 1)
transport = result_services[0]['transports'][0]
self.assertEqual(transport['id'], 'meta')
self.assertEqual(transport['name'], 'meta')
self.assertEqual(transport['priority'], 0)
self.assertTrue(transport['link'].startswith(consts.system.UDS_ACTION_SCHEME))
def test_meta_label_grouping(self) -> None:
pools, meta = self._generate_metapool_with_transports(
10, types.pools.TransportSelectionPolicy.LABEL, # Group by common transports
add_random_transports=False
)
# Now we hav to had 2 same labels on the transports, add some ramdon to transports, but ensuring
# that no transport is assigned to more ALL the transports
possible_transports = self.transports[3:]
transport_iterator = itertools.cycle(possible_transports)
for pool in pools:
pool.transports.add(next(transport_iterator))
pool.transports.add(next(transport_iterator))
# Whe know for sure that the only transports valid are the first 3 ones, because the rest are not present
# in ALL the transports
# Now, get the data
data = services.get_services_info_dict(self.request)
result_services: typing.Final[list[dict[str, typing.Any]]] = data['services']
# We except 1 result only, a meta pool (is_meta = True)
self.assertEqual(len(result_services), 1)
self.assertEqual(result_services[0]['is_meta'], True)
# should have 3 transports, the first 3 ones
self.assertEqual(len(result_services[0]['transports']), 3)
# id should be "LABEL:[the label]" for each transport. We added trasnports label "label0", "label1" and "label2", same as priority
self.assertEqual([t['id'] for t in result_services[0]['transports']], ['LABEL:label0', 'LABEL:label1', 'LABEL:label2'])
# And priority should be 0, 1 and 2
self.assertEqual([t['priority'] for t in result_services[0]['transports']], [0, 1, 2])