forked from shaba/openuds
upgrading cache updater to take into account maxDeployed to stop creating cache services
parent e16be78ad5
commit ac62aed420
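
Summary of the change set below: getStateFilter() and getCacheStateFilter() on UserServiceManager stop being static methods keyed on a ServicePool and become instance methods keyed on a Service; two helpers are added, getExistingUserServices() and maximumUserServicesDeployed(), which count deployed user services against the service's maxDeployed limit; __checkMaxDeployedReached() and canInitiateServiceFromDeployedService() are rewritten on top of them; getServicesInStateForProvider() becomes getUserServicesInStatesForProvider(), taking a Provider instance and a list of states; and the ServiceCacheUpdater job is adapted to the new getStateFilter() signature.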
@@ -66,6 +66,9 @@ from uds.web.util.errors import MAX_SERVICES_REACHED
 from .userservice import comms
 from .userservice.opchecker import UserServiceOpChecker
 
+if typing.TYPE_CHECKING:
+    from uds import models
+
 logger = logging.getLogger(__name__)
 traceLogger = logging.getLogger('traceLog')
 
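
The typing.TYPE_CHECKING block added above exists only so the new annotations ('models.Service', 'models.Provider') can be written as strings without importing uds.models at runtime. A minimal illustration of the pattern (not the committed file; the helper name is hypothetical):

# The import only runs under a type checker, so there is no runtime dependency
# on uds.models, and annotations that use it are written as strings.
import typing

if typing.TYPE_CHECKING:
    from uds import models  # resolved by mypy/IDEs only


def describe(service: 'models.Service') -> str:  # hypothetical helper for illustration
    return str(service)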
@@ -80,14 +83,12 @@ class UserServiceManager(metaclass=singleton.Singleton):
             UserServiceManager()
         ) # Singleton pattern will return always the same instance
 
-    @staticmethod
-    def getCacheStateFilter(servicePool: ServicePool, level: int) -> Q:
-        return Q(cache_level=level) & UserServiceManager.getStateFilter(servicePool)
+    def getCacheStateFilter(self, servicePool: ServicePool, level: int) -> Q:
+        return Q(cache_level=level) & self.getStateFilter(servicePool.service)
 
-    @staticmethod
-    def getStateFilter(servicePool: ServicePool) -> Q:
+    def getStateFilter(self, service: 'models.Service') -> Q:
         if (
-            servicePool.service.getInstance().maxDeployed == services.Service.UNLIMITED
+            service.getInstance().maxDeployed == services.Service.UNLIMITED
             and GlobalConfig.MAX_SERVICES_COUNT_NEW.getBool() is False
         ):
             states = [State.PREPARING, State.USABLE]
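
The condition in the reworked getStateFilter() above picks between two state sets: when the service is unlimited (maxDeployed == UNLIMITED) and the MAX_SERVICES_COUNT_NEW flag is off, only PREPARING and USABLE user services are counted; in every other case REMOVING and REMOVABLE ones still occupy a slot and are counted as well. A self-contained sketch of that decision (illustrative only; state names as plain strings rather than uds' State constants):

from typing import List

def countedStates(maxDeployedIsUnlimited: bool, maxServicesCountNew: bool) -> List[str]:
    if maxDeployedIsUnlimited and not maxServicesCountNew:
        # legacy counting: only services being built or already usable
        return ['PREPARING', 'USABLE']
    # otherwise services on their way out still hold a slot and are counted too
    return ['PREPARING', 'USABLE', 'REMOVING', 'REMOVABLE']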
@@ -95,23 +96,38 @@ class UserServiceManager(metaclass=singleton.Singleton):
             states = [State.PREPARING, State.USABLE, State.REMOVING, State.REMOVABLE]
         return Q(state__in=states)
 
+    def getExistingUserServices(self, service: 'models.Service') -> int:
+        """
+        Returns the number of running user services for this service
+        """
+        return UserService.objects.filter(
+            self.getStateFilter(service) & Q(deployed_service__service=service)
+        ).count()
+
+    def maximumUserServicesDeployed(self, service: 'models.Service') -> bool:
+        """
+        Checks if the maximum number of user services for this service has been reached
+        """
+        serviceInstance = service.getInstance()
+        # Early return, so no database count is needed
+        if serviceInstance.maxDeployed == services.Service.UNLIMITED:
+            return False
+
+        if self.getExistingUserServices(service) >= serviceInstance.maxDeployed:
+            return True
+
+        return False
+
     def __checkMaxDeployedReached(self, servicePool: ServicePool) -> None:
         """
         Checks if maxDeployed for the service has been reached, and, if so,
         raises an exception that no more services of this kind can be reached
         """
-        serviceInstance = servicePool.service.getInstance()
-        # Early return, so no database count is needed
-        if serviceInstance.maxDeployed == services.Service.UNLIMITED:
-            return
-
-        numberOfServices = servicePool.userServices.filter(
-            state__in=[State.PREPARING, State.USABLE]
-        ).count()
-
-        if serviceInstance.maxDeployed <= numberOfServices:
+        if self.maximumUserServicesDeployed(servicePool.service):
             raise MaxServicesReachedError(
-                'Max number of allowed deployments for service reached'
+                _('Maximum number of user services reached for this {}').format(
+                    servicePool
+                )
             )
 
     def __createCacheAtDb(
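
Two points about the helpers added above: getExistingUserServices() counts against the whole Service (Q(deployed_service__service=service)), not a single ServicePool, so maxDeployed is enforced across every pool that shares the service; and maximumUserServicesDeployed() short-circuits for unlimited services, so no database query is issued in that case. A hypothetical call site (the method names come from the diff; the accessor import path is an assumption):

# Hypothetical call-site sketch, not part of this commit.
from uds.core.managers import userServiceManager  # assumption: usual accessor path

def roomLeftFor(service) -> bool:
    """True while `service` can still deploy at least one more user service."""
    manager = userServiceManager()
    return not manager.maximumUserServicesDeployed(service)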
@@ -528,7 +544,7 @@ class UserServiceManager(metaclass=singleton.Singleton):
         if serviceType.usesCache:
             inAssigned = (
                 servicePool.assignedUserServices()
-                .filter(UserServiceManager.getStateFilter(servicePool))
+                .filter(self.getStateFilter(servicePool.service))
                 .count()
             )
             if (
@@ -546,12 +562,14 @@ class UserServiceManager(metaclass=singleton.Singleton):
                 events.addEvent(servicePool, events.ET_CACHE_MISS, fld1=0)
         return self.createAssignedFor(servicePool, user)
 
-    def getServicesInStateForProvider(self, provider_id: int, state: str) -> int:
+    def getUserServicesInStatesForProvider(
+        self, provider: 'models.Provider', states: typing.List[str]
+    ) -> int:
         """
         Returns the number of services of a service provider in the state indicated
         """
         return UserService.objects.filter(
-            deployed_service__service__provider__id=provider_id, state=state
+            deployed_service__service__provider=provider, state__in=states
         ).count()
 
     def canRemoveServiceFromDeployedService(self, servicePool: ServicePool) -> bool:
@@ -559,8 +577,8 @@ class UserServiceManager(metaclass=singleton.Singleton):
         checks if we can do a "remove" from a deployed service
         serviceIsntance is just a helper, so if we already have unserialized deployedService
         """
-        removing = self.getServicesInStateForProvider(
-            servicePool.service.provider.id, State.REMOVING
+        removing = self.getUserServicesInStatesForProvider(
+            servicePool.service.provider, [State.REMOVING]
         )
         serviceInstance = servicePool.service.getInstance()
         if (
@@ -574,12 +592,12 @@ class UserServiceManager(metaclass=singleton.Singleton):
         """
         Checks if we can start a new service
        """
-        preparing = self.getServicesInStateForProvider(
-            servicePool.service.provider.id, State.PREPARING
+        preparingForProvider = self.getUserServicesInStatesForProvider(
+            servicePool.service.provider, [State.PREPARING]
         )
         serviceInstance = servicePool.service.getInstance()
-        if (
-            preparing >= serviceInstance.parent().getMaxPreparingServices()
+        if self.maximumUserServicesDeployed(servicePool.service) or (
+            preparingForProvider >= serviceInstance.parent().getMaxPreparingServices()
             and serviceInstance.parent().getIgnoreLimits() is False
         ):
             return False
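
With the hunk just above, canInitiateServiceFromDeployedService() now refuses to start a new user service as soon as the maxDeployed cap is hit, regardless of the provider-level "max preparing services" limit. Paraphrased as a plain boolean function (illustrative only, not the committed code):

def canInitiate(maxDeployedReached: bool, preparingOnProvider: int,
                maxPreparing: int, ignoreLimits: bool) -> bool:
    # mirrors the new condition: the maxDeployed check wins on its own,
    # the provider "preparing" limit still respects the ignore-limits flag
    if maxDeployedReached or (preparingOnProvider >= maxPreparing and not ignoreLimits):
        return False
    return True

The final hunk below is in the ServiceCacheUpdater job and adapts it to the reworked getStateFilter() signature.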
@@ -146,7 +146,7 @@ class ServiceCacheUpdater(Job):
             )
             inAssigned: int = (
                 servicePool.assignedUserServices()
-                .filter(userServiceManager().getStateFilter(servicePool))
+                .filter(userServiceManager().getStateFilter(servicePool.service))
                 .count()
             )
             # if we bypasses max cache, we will reduce it in first place. This is so because this will free resources on service provider
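
Per the commit message, the end goal is that the cache updater stops creating new cached user services once a service sits at its maxDeployed limit. The hunk above only shows the call-site adaptation to the new getStateFilter() signature; the guard itself is not part of this excerpt. A hypothetical sketch of such a guard, using the helper introduced earlier (an assumption for illustration, not code from this commit):

# Hypothetical sketch -- NOT taken from this commit. Shows how a cache growth step
# could consult maximumUserServicesDeployed() before creating another cached element.
def growCacheIfAllowed(manager, servicePool) -> None:
    if manager.maximumUserServicesDeployed(servicePool.service):
        # the service is already at maxDeployed: do not create more cache elements
        return
    ...  # proceed with the normal cache growth logic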