forked from shaba/openuds
Merge remote-tracking branch 'origin/v3.5'
commit 0662100c30
@@ -62,6 +62,7 @@ from uds.models.meta_pool import MetaPoolMember
 from uds.core import services, transports
 from uds.core.util import singleton
 from uds.core.util.stats import events
+from uds.web.util.errors import MAX_SERVICES_REACHED

 from .userservice import comms
 from .userservice.opchecker import UserServiceOpChecker
@@ -81,12 +82,15 @@ class UserServiceManager(metaclass=singleton.Singleton):
     ) # Singleton pattern will return always the same instance

     @staticmethod
-    def getCacheStateFilter(level: int) -> Q:
-        return Q(cache_level=level) & UserServiceManager.getStateFilter()
+    def getCacheStateFilter(servicePool: ServicePool, level: int) -> Q:
+        return Q(cache_level=level) & UserServiceManager.getStateFilter(servicePool)

     @staticmethod
-    def getStateFilter() -> Q:
-        if GlobalConfig.MAX_SERVICES_COUNT_NEW.getBool() == False:
+    def getStateFilter(servicePool: ServicePool) -> Q:
+        if (
+            servicePool.service.getInstance().maxDeployed == services.Service.UNLIMITED
+            and GlobalConfig.MAX_SERVICES_COUNT_NEW.getBool() is False
+        ):
             states = [State.PREPARING, State.USABLE]
         else:
             states = [State.PREPARING, State.USABLE, State.REMOVING, State.REMOVABLE]
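
The hunk above shows which states the new pool-aware getStateFilter(servicePool) keeps, but the Q object it finally returns sits outside the hunk. A minimal standalone sketch of the equivalent logic, assuming the method ends with Q(state__in=states) and that GlobalConfig and State come from uds.core.util.config and uds.core.util.state (import paths are assumptions, not part of this commit):

from django.db.models import Q

from uds.core import services
from uds.core.util.config import GlobalConfig  # assumed import path
from uds.core.util.state import State          # assumed import path


def pool_state_filter(servicePool) -> Q:
    # Pools whose service caps deployments (maxDeployed != UNLIMITED) keep
    # REMOVING/REMOVABLE instances in the count, so slots are not reused too early.
    unlimited = (
        servicePool.service.getInstance().maxDeployed == services.Service.UNLIMITED
        and GlobalConfig.MAX_SERVICES_COUNT_NEW.getBool() is False
    )
    if unlimited:
        states = [State.PREPARING, State.USABLE]
    else:
        states = [State.PREPARING, State.USABLE, State.REMOVING, State.REMOVABLE]
    return Q(state__in=states)  # assumption: mirrors the return line outside the hunk
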
@@ -530,7 +534,7 @@ class UserServiceManager(metaclass=singleton.Singleton):
         if serviceType.usesCache:
             inAssigned = (
                 servicePool.assignedUserServices()
-                .filter(UserServiceManager.getStateFilter())
+                .filter(UserServiceManager.getStateFilter(servicePool))
                 .count()
             )
             if (
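
A caller-side view of the new signature, mirroring the .filter(...) line changed above; the standalone wrapper and the manager accessor import are illustrative only:

from uds.core.managers import userServiceManager  # assumed accessor import path


def count_live_assigned(servicePool) -> int:
    # Assigned user services that still count toward the pool's capacity,
    # now evaluated per pool instead of with a single global filter.
    return (
        servicePool.assignedUserServices()
        .filter(userServiceManager().getStateFilter(servicePool))
        .count()
    )
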
@@ -930,19 +934,20 @@ class UserServiceManager(metaclass=singleton.Singleton):
         meta: MetaPool = MetaPool.objects.get(uuid=uuidMetapool)
         # Get pool members. Just pools "visible" and "usable"
         pools = [
-            p.pool for p in meta.members.all() if p.pool.isVisible() and p.pool.isUsable()
+            p.pool
+            for p in meta.members.all()
+            if p.pool.isVisible() and p.pool.isUsable()
         ]
         # look for an existing user service in the pool
         try:
             return UserService.objects.filter(
                 deployed_service__in=pools,
                 state__in=State.VALID_STATES,
                 user=user,
                 cache_level=0,
             ).order_by('deployed_service__name')[0]
         except IndexError:
             return None

     def getMeta(
         self,
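
The try/IndexError pattern kept by this hunk is equivalent to Django's QuerySet.first(); a sketch of the alternative, reusing the names already in scope in the hunk (pools, user):

# Equivalent lookup with .first(), which returns None for an empty queryset
# instead of raising IndexError (sketch only; the commit keeps try/except).
existing = (
    UserService.objects.filter(
        deployed_service__in=pools,
        state__in=State.VALID_STATES,
        user=user,
        cache_level=0,
    )
    .order_by('deployed_service__name')
    .first()
)
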
@@ -127,6 +127,7 @@ class ServiceCacheUpdater(Job):
            servicePool.cachedUserServices()
            .filter(
                userServiceManager().getCacheStateFilter(
+                    servicePool,
                    services.UserDeployment.L1_CACHE
                )
            )
@@ -137,6 +138,7 @@ class ServiceCacheUpdater(Job):
            servicePool.cachedUserServices()
            .filter(
                userServiceManager().getCacheStateFilter(
+                    servicePool,
                    services.UserDeployment.L2_CACHE
                )
            )
@@ -144,7 +146,7 @@ class ServiceCacheUpdater(Job):
            )
            inAssigned: int = (
                servicePool.assignedUserServices()
-               .filter(userServiceManager().getStateFilter())
+               .filter(userServiceManager().getStateFilter(servicePool))
                .count()
            )
            # if we bypasses max cache, we will reduce it in first place. This is so because this will free resources on service provider
@@ -235,6 +237,7 @@ class ServiceCacheUpdater(Job):
                .select_for_update()
                .filter(
                    userServiceManager().getCacheStateFilter(
+                        servicePool,
                        services.UserDeployment.L2_CACHE
                    )
                )
@@ -308,6 +311,7 @@ class ServiceCacheUpdater(Job):
            servicePool.cachedUserServices()
            .filter(
                userServiceManager().getCacheStateFilter(
+                    servicePool,
                    services.UserDeployment.L1_CACHE
                )
            )
@@ -351,6 +355,7 @@ class ServiceCacheUpdater(Job):
            servicePool.cachedUserServices()
            .filter(
                userServiceManager().getCacheStateFilter(
+                    servicePool,
                    services.UserDeployment.L2_CACHE
                )
            )
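
Taken together, the ServiceCacheUpdater hunks above thread servicePool into every filter call. A compact sketch of the three counters the updater works with after this change (the wrapper function and variable names are illustrative, the filter calls mirror the hunks):

import typing

from uds.core import services
from uds.core.managers import userServiceManager  # assumed accessor import path


def pool_counters(servicePool) -> typing.Tuple[int, int, int]:
    # L1 cache, L2 cache and assigned services, all filtered per pool.
    mgr = userServiceManager()
    in_cache_l1 = (
        servicePool.cachedUserServices()
        .filter(mgr.getCacheStateFilter(servicePool, services.UserDeployment.L1_CACHE))
        .count()
    )
    in_cache_l2 = (
        servicePool.cachedUserServices()
        .filter(mgr.getCacheStateFilter(servicePool, services.UserDeployment.L2_CACHE))
        .count()
    )
    in_assigned = (
        servicePool.assignedUserServices()
        .filter(mgr.getStateFilter(servicePool))
        .count()
    )
    return in_cache_l1, in_cache_l2, in_assigned
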
@@ -41,7 +41,7 @@ from uds.models import getSqlDatetimeAsUnix
 from uds.core.ui import gui
 from uds.core.util import log
 from uds.core.util import net
-from uds.core.services import types as serviceTypes
+from uds.core import services

 from .deployment import IPMachineDeployed
 from .service_base import IPServiceBase
@@ -50,7 +50,6 @@ from .service_base import IPServiceBase
 if typing.TYPE_CHECKING:
     from uds import models
     from uds.core import Module
-    from uds.core import services

 logger = logging.getLogger(__name__)

@@ -124,7 +123,7 @@ class IPMachinesService(IPServiceBase):

     # Characteristics of service
     maxDeployed = (
-        -1
+        services.Service.UNLIMITED
     ) # If the service provides more than 1 "provided service" (-1 = no limit, 0 = ???? (do not use it!!!), N = max number to deploy
     usesCache = False # Cache are running machine awaiting to be assigned
     usesCache_L2 = False # L2 Cache are running machines in suspended state
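
The named sentinel replaces the magic -1 at every site that checks the deployment cap. A small sketch of such a check; maxDeployed and services.Service.UNLIMITED are taken from the diff, the helper itself is illustrative:

from uds.core import services


def has_free_slot(service: services.Service, currently_deployed: int) -> bool:
    # services.Service.UNLIMITED is the named replacement for the old -1.
    if service.maxDeployed == services.Service.UNLIMITED:
        return True
    return currently_deployed < service.maxDeployed
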
@@ -133,7 +132,7 @@ class IPMachinesService(IPServiceBase):

     deployedType = IPMachineDeployed

-    servicesTypeProvided = (serviceTypes.VDI,)
+    servicesTypeProvided = (services.types.VDI,)

     _ips: typing.List[str] = []
     _token: str = ''
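
This last hunk follows from the import change at the top of the file: serviceTypes was only an alias for uds.core.services.types, so both spellings name the same constant. A tiny sketch of the equivalence:

from uds.core.services import types as serviceTypes  # old import (removed above)
from uds.core import services                        # new import (added above)

assert serviceTypes.VDI is services.types.VDI  # same object, two access paths
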