Mirror of https://github.com/dkmstr/openuds.git
Refactor cache.set to cache.put for improved code readability
parent c3d9a34c4a
commit 354ff27e2a
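This commit renames UDS's own `Cache.set` method to `Cache.put` and updates every call site (REST handlers, authenticators, services, transports, and tests) to match. Calls into Django's cache backend (`memcache_storage.set`) keep their name, since `set` is the Django cache API. A minimal before/after sketch, with an illustrative key, value, and validity (the three-argument call shape is taken from the hunks below):

    # Before: store value under key for a validity period, in seconds
    cache.set('ready', '1', 30)

    # After: identical semantics, method renamed for readability
    cache.put('ready', '1', 30)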
@@ -119,7 +119,7 @@ def check_ip_is_blocked(request: 'ExtendedHttpRequest') -> None:
 
 def increase_failed_ip_count(request: 'ExtendedHttpRequest') -> None:
     fails = cache.get(request.ip, 0) + 1
-    cache.set(request.ip, fails, GlobalConfig.LOGIN_BLOCK.as_int())
+    cache.put(request.ip, fails, GlobalConfig.LOGIN_BLOCK.as_int())
 
 
 P = typing.ParamSpec('P')
@@ -177,7 +177,7 @@ class Login(Handler):
             # Sleep a while here to "prottect"
             time.sleep(3)  # Wait 3 seconds if credentials fails for "protection"
             # And store in cache for blocking for a while if fails
-            fail_cache.set(self._request.ip, fails + 1, GlobalConfig.LOGIN_BLOCK.as_int())
+            fail_cache.put(self._request.ip, fails + 1, GlobalConfig.LOGIN_BLOCK.as_int())
 
             return Login.result(error=auth_result.errstr or 'Invalid credentials')
         return Login.result(
@@ -115,7 +115,7 @@ def get_servicepools_counters(
 
         # logger.debug('val: %s', val)
         if len(val) >= 2:
-            cache.set(
+            cache.put(
                 cache_key,
                 codecs.encode(pickletools.optimize(pickle.dumps(val, protocol=-1)), 'zip'),
                 CACHE_TIME * 2,
@@ -305,11 +305,11 @@ class OAuth2Authenticator(auths.Authenticator):
             case oauth2_types.ResponseType.CODE | oauth2_types.ResponseType.TOKEN:
                 # Code or token flow
                 # Simply store state, no code_verifier, store "none" as code_verifier to later restore it
-                self.cache.set(state, 'none', 3600)
+                self.cache.put(state, 'none', 3600)
             case oauth2_types.ResponseType.OPENID_CODE | oauth2_types.ResponseType.OPENID_ID_TOKEN:
                 # OpenID flow
                 nonce = secrets.token_urlsafe(oauth2_consts.STATE_LENGTH)
-                self.cache.set(state, nonce, 3600)  # Store nonce
+                self.cache.put(state, nonce, 3600)  # Store nonce
                 # Fix scope to ensure openid is present
                 if 'openid' not in param_dict['scope']:
                     param_dict['scope'] = 'openid ' + param_dict['scope']
@@ -322,7 +322,7 @@ class OAuth2Authenticator(auths.Authenticator):
                 code_verifier, code_challenge = self.code_verifier_and_challenge()
                 param_dict['code_challenge'] = code_challenge
                 param_dict['code_challenge_method'] = 'S256'
-                self.cache.set(state, code_verifier, 3600)
+                self.cache.put(state, code_verifier, 3600)
 
                 # Nonce only is used
                 if False:
@@ -489,7 +489,7 @@ class SAMLAuthenticator(auths.Authenticator):
                 )
                 val = resp.content.decode()
                 # 10 years, unless edited the metadata will be kept
-                self.cache.set('idpMetadata', val, 86400 * 365 * 10)
+                self.cache.put('idpMetadata', val, 86400 * 365 * 10)
             except Exception as e:
                 logger.error('Error fetching idp metadata: %s', e)
                 raise exceptions.auth.AuthenticatorException(gettext('Can\'t access idp metadata'))
@@ -113,4 +113,4 @@ def weblogin(user: models.User) -> None:
 
     except Exception as e:
         logger.error('Error notifying login to callback URL: %s', e)
-        FAILURE_CACHE.set('notify_failure', fail_count + 1)
+        FAILURE_CACHE.put('notify_failure', fail_count + 1)
@@ -402,7 +402,7 @@ class DynamicUserService(services.UserService, autoserializable.AutoSerializable
         if self.cache.get('ready', '0') == '1':
             self._set_queue([types.services.Operation.FINISH])
         elif self.service().is_running(self, self._vmid):
-            self.cache.set('ready', '1', consts.cache.SHORT_CACHE_TIMEOUT // 2)  # short cache timeout
+            self.cache.put('ready', '1', consts.cache.SHORT_CACHE_TIMEOUT // 2)  # short cache timeout
             self._set_queue([types.services.Operation.FINISH])
         else:
             self._set_queue(
@@ -278,7 +278,7 @@ class FixedUserService(services.UserService, autoserializable.AutoSerializable,
         if self.cache.get('ready', '0') == '1':
             self._queue = [types.services.Operation.FINISH]
         elif self.service().is_ready(self._vmid):
-            self.cache.set('ready', '1', consts.cache.SHORT_CACHE_TIMEOUT // 2)  # short cache timeout
+            self.cache.put('ready', '1', consts.cache.SHORT_CACHE_TIMEOUT // 2)  # short cache timeout
             self._queue = [types.services.Operation.FINISH]
         else:
             self._queue = [types.services.Operation.START, types.services.Operation.FINISH]
@@ -162,7 +162,7 @@ class Cache:
         with transaction.atomic():
             Cache.delete(self._owner)
 
-    def set(
+    def put(
         self,
         skey: typing.Union[str, bytes],
         value: typing.Any,
@@ -206,7 +206,7 @@ class Cache:
         """
         Stores a value in the cache using the [] operator with default validity
         """
-        self.set(key, value)
+        self.put(key, value)
 
     def refresh(self, skey: typing.Union[str, bytes]) -> None:
         # logger.debug('Refreshing key "%s" for cache "%s"' % (skey, self._owner,))
@@ -166,7 +166,7 @@ class CalendarChecker:
 
         # Now data can be accessed as an array of booleans.
         # Store data on persistent cache
-        CalendarChecker.cache.set(cache_key, state_on_minute.tobytes(), ONE_DAY)
+        CalendarChecker.cache.put(cache_key, state_on_minute.tobytes(), ONE_DAY)
         memcache_storage.set(cache_key, state_on_minute.tobytes(), ONE_DAY)
 
         return bool(state_on_minute[dtime.hour * 60 + dtime.minute])
@@ -202,7 +202,7 @@ class CalendarChecker:
             )  # We substract on checkin, so we can take into account for next execution the "offset" on start & end (just the inverse of current, so we substract it)
             if next_event:
                 next_event += offset
-            CalendarChecker.cache.set(cache_key, next_event, 3600)
+            CalendarChecker.cache.put(cache_key, next_event, 3600)
         else:
             CalendarChecker.hits += 1
 
@@ -304,7 +304,7 @@ def cached(
 
             try:
                 # Maybe returned data is not serializable. In that case, cache will fail but no harm is done with this
-                cache.set(cache_key, data, effective_timeout)
+                cache.put(cache_key, data, effective_timeout)
             except Exception as e:
                 logger.debug(
                     'Data for %s is not serializable on call to %s, not cached. %s (%s)',
@@ -387,7 +387,7 @@ def blocker(
             result = f(*args, **kwargs)
         except uds.core.exceptions.rest.BlockAccess:
             # Increment
-            mycache.set(ip, failures_count + 1, GlobalConfig.LOGIN_BLOCK.as_int())
+            mycache.put(ip, failures_count + 1, GlobalConfig.LOGIN_BLOCK.as_int())
             raise exceptions.rest.AccessDenied
         # Any other exception will be raised
         except Exception:
@@ -207,7 +207,7 @@ class StatsFS(types.UDSFSInterface):
         else:
             logger.debug('Cache miss for %s', path[0])
             data = dispatcher(interval, extension, 0, 0)
-            self._cache.set(path[0] + extension, data, cache_time)
+            self._cache.put(path[0] + extension, data, cache_time)
 
         # Calculate the size of the file
         size = len(data)
@@ -239,7 +239,7 @@ class StatsFS(types.UDSFSInterface):
         else:
             logger.debug('Cache miss for %s', path[0])
             data = dispatcher(interval, extension, 0, 0)
-            self._cache.set(path[0] + extension, data, cache_time)
+            self._cache.put(path[0] + extension, data, cache_time)
 
         # Dispatch the read to the dispatcher
         data = dispatcher(interval, extension, size, offset)
@@ -205,7 +205,7 @@ class TOTP_MFA(mfas.MFA):
         ):
             raise exceptions.auth.MFAError(gettext('Invalid code'))
 
-        self.cache.set(userid + code, True, self.valid_window.as_int() * (TOTP_INTERVAL + 1))
+        self.cache.put(userid + code, True, self.valid_window.as_int() * (TOTP_INTERVAL + 1))
 
         if qr_has_been_shown is False:
             self._save_user_data(userid, (secret, True))  # Update user data to show QR code only once
@@ -246,7 +246,7 @@ class OVirtLinkedUserService(services.UserService, autoserializable.AutoSerializ
                 self._queue = [Operation.START, Operation.FINISH]
                 return self._execute_queue()
 
-            self.cache.set('ready', '1')
+            self.cache.put('ready', '1')
         except Exception as e:
             self.do_log(types.log.LogLevel.ERROR, f'Error on setReady: {e}')
             # Treat as operation done, maybe the machine is ready and we can continue
@@ -177,7 +177,7 @@ class OpenGnsysClient:
             )
 
             self.auth = auth['apikey']
-            self.cache.set(cache_key, self.auth, CACHE_VALIDITY)
+            self.cache.put(cache_key, self.auth, CACHE_VALIDITY)
 
     @property
     def version(self) -> str:
@@ -145,7 +145,7 @@ class OpenNebulaLiveDeployment(services.UserService, autoserializable.AutoSerial
 
             self.service().start_machine(self._vmid)
 
-            self.cache.set('ready', '1')
+            self.cache.put('ready', '1')
         except Exception as e:
             self.do_log(types.log.LogLevel.ERROR, 'Error on set_ready: {}'.format(e))
             # Treat as operation done, maybe the machine is ready and we can continue
@@ -81,7 +81,7 @@ class OpenStackUserServiceFixed(FixedUserService, autoserializable.AutoSerializa
             self._queue = [types.services.Operation.START, types.services.Operation.FINISH]
             return self._execute_queue()
 
-        self.cache.set('ready', '1')
+        self.cache.put('ready', '1')
         return types.states.TaskState.FINISHED
 
     def reset(self) -> types.states.TaskState:
@@ -253,7 +253,7 @@ class OpenStackClient:  # pylint: disable=too-many-public-methods
             try:
                 # set first cached endpoint as prefered
                 # Note that if fails, cached endpoint is removed and next one is tried
-                self.cache.set(cache_key, endpoint, consts.cache.EXTREME_CACHE_TIMEOUT)
+                self.cache.put(cache_key, endpoint, consts.cache.EXTREME_CACHE_TIMEOUT)
                 logger.debug(
                     'Requesting from endpoint: %s and path %s using %s: %s', endpoint, path, type, data
                 )
@@ -299,7 +299,7 @@ class OpenStackClient:  # pylint: disable=too-many-public-methods
         for i, endpoint in enumerate(found_endpoints):
             try:
                 # If fails, cached endpoint is removed and next one is tried
-                self.cache.set(
+                self.cache.put(
                     cache_key, endpoint, consts.cache.EXTREME_CACHE_TIMEOUT
                 )  # Cache endpoint for a very long time
                 yield from OpenStackClient._get_recurring_url_json(
@@ -420,7 +420,7 @@ class OpenStackClient:  # pylint: disable=too-many-public-methods
             dateutil.parser.parse(token['expires_at']).replace(tzinfo=None)
             - dateutil.parser.parse(token['issued_at']).replace(tzinfo=None)
         ).seconds - 60
-        self.cache.set(
+        self.cache.put(
             'auth',
             (self._authenticated_projectid, self._projectid, self._tokenid, self._userid, self._catalog),
             validity,
@@ -152,7 +152,7 @@ class ProxmoxClient:
                 csrf = data['CSRFPreventionToken']
 
                 if self.cache:
-                    self.cache.set(self._host + 'conn', (ticket, csrf), validity=1800)  # 30 minutes
+                    self.cache.put(self._host + 'conn', (ticket, csrf), validity=1800)  # 30 minutes
 
                 _update_session(ticket, csrf)
             except requests.RequestException as e:
@@ -324,9 +324,9 @@ class HTML5RDPTransport(transports.Transport):
         if not ready:
             # Check again for readyness
             if self.test_connectivity(userservice, ip, self.rdp_port.as_int()) is True:
-                self.cache.set(ip, 'Y', READY_CACHE_TIMEOUT)
+                self.cache.put(ip, 'Y', READY_CACHE_TIMEOUT)
                 return True
-            self.cache.set(ip, 'N', READY_CACHE_TIMEOUT)
+            self.cache.put(ip, 'N', READY_CACHE_TIMEOUT)
         return ready == 'Y'
 
     def processed_username(self, userservice: 'models.UserService', user: 'models.User') -> str:
|
@ -169,9 +169,9 @@ class HTML5SSHTransport(transports.Transport):
|
|||||||
if not ready:
|
if not ready:
|
||||||
# Check again for readyness
|
# Check again for readyness
|
||||||
if self.test_connectivity(userservice, ip, self.ssh_port.value) is True:
|
if self.test_connectivity(userservice, ip, self.ssh_port.value) is True:
|
||||||
self.cache.set(ip, 'Y', READY_CACHE_TIMEOUT)
|
self.cache.put(ip, 'Y', READY_CACHE_TIMEOUT)
|
||||||
return True
|
return True
|
||||||
self.cache.set(ip, 'N', READY_CACHE_TIMEOUT)
|
self.cache.put(ip, 'N', READY_CACHE_TIMEOUT)
|
||||||
return ready == 'Y'
|
return ready == 'Y'
|
||||||
|
|
||||||
def get_link(
|
def get_link(
|
||||||
|
@ -152,9 +152,9 @@ class HTML5VNCTransport(transports.Transport):
|
|||||||
if not ready:
|
if not ready:
|
||||||
# Check again for readyness
|
# Check again for readyness
|
||||||
if self.test_connectivity(userservice, ip, self.vnc_port.value) is True:
|
if self.test_connectivity(userservice, ip, self.vnc_port.value) is True:
|
||||||
self.cache.set(ip, 'Y', READY_CACHE_TIMEOUT)
|
self.cache.put(ip, 'Y', READY_CACHE_TIMEOUT)
|
||||||
return True
|
return True
|
||||||
self.cache.set(ip, 'N', READY_CACHE_TIMEOUT)
|
self.cache.put(ip, 'N', READY_CACHE_TIMEOUT)
|
||||||
return ready == 'Y'
|
return ready == 'Y'
|
||||||
|
|
||||||
def get_link(
|
def get_link(
|
||||||
|
@@ -372,9 +372,9 @@ class BaseRDPTransport(transports.Transport):
         if ready is None:
             # Check again for ready
             if self.test_connectivity(userservice, ip, self.rdp_port.as_int()) is True:
-                self.cache.set(ip, 'Y', READY_CACHE_TIMEOUT)
+                self.cache.put(ip, 'Y', READY_CACHE_TIMEOUT)
                 return True
-            self.cache.set(ip, 'N', READY_CACHE_TIMEOUT)
+            self.cache.put(ip, 'N', READY_CACHE_TIMEOUT)
         return ready == 'Y'
 
     def processed_username(self, userservice: 'models.UserService', user: 'models.User') -> str:
@@ -161,13 +161,13 @@ class BaseSpiceTransport(transports.Transport):
         # test ANY of the ports
         port_to_test = con.port if con.port != -1 else con.secure_port
         if port_to_test == -1:
-            self.cache.set(
+            self.cache.put(
                 'cached_message', 'Could not find the PORT for connection', 120
             )  # Write a message, that will be used from getCustom
             logger.info('SPICE didn\'t find has any port: %s', con)
             return False
 
-        self.cache.set(
+        self.cache.put(
             'cached_message',
             'Could not reach server "{}" on port "{}" from broker (prob. causes are name resolution & firewall rules)'.format(
                 con.address, port_to_test
@@ -176,7 +176,7 @@ class BaseSpiceTransport(transports.Transport):
         )
 
         if self.test_connectivity(userservice, con.address, port_to_test) is True:
-            self.cache.set(ip, 'Y', READY_CACHE_TIMEOUT)
+            self.cache.put(ip, 'Y', READY_CACHE_TIMEOUT)
             ready = 'Y'
 
         return ready == 'Y'
@@ -211,9 +211,9 @@ class BaseX2GOTransport(transports.Transport):
         if ready is None:
             # Check again for ready
             if net.test_connectivity(ip, 22):
-                self.cache.set(ip, 'Y', READY_CACHE_TIMEOUT)
+                self.cache.put(ip, 'Y', READY_CACHE_TIMEOUT)
                 return True
-            self.cache.set(ip, 'N', READY_CACHE_TIMEOUT)
+            self.cache.put(ip, 'N', READY_CACHE_TIMEOUT)
         return ready == 'Y'
 
     def get_screen_size(self) -> tuple[int, int]:
@@ -88,8 +88,8 @@ def check_login(  # pylint: disable=too-many-branches, too-many-statements
 
     if auth_result.user is None:
         logger.debug("Invalid user %s (access denied)", username)
-        cache.set(tries_cache_key, tries + 1, GlobalConfig.LOGIN_BLOCK.as_int())
-        cache.set(request.ip, tries_in_this_ip + 1, GlobalConfig.LOGIN_BLOCK.as_int())
+        cache.put(tries_cache_key, tries + 1, GlobalConfig.LOGIN_BLOCK.as_int())
+        cache.put(request.ip, tries_in_this_ip + 1, GlobalConfig.LOGIN_BLOCK.as_int())
         log_login(
             request,
             authenticator,
@@ -67,7 +67,7 @@ class AuthCallbackTest(UDSTestCase):
 
     def test_callback_failed_url(self) -> None:
         config.GlobalConfig.NOTIFY_CALLBACK_URL.set('http://localhost:1234')  # Sample non existent url
-        callbacks.FAILURE_CACHE.set('notify_failure', 3)  # Already failed 3 times
+        callbacks.FAILURE_CACHE.put('notify_failure', 3)  # Already failed 3 times
 
         with mock.patch('uds.core.util.security.secure_requests_session') as session_mock:
             callbacks.weblogin(self.user)
@@ -58,7 +58,7 @@ class TestEnvironment(UDSTransactionTestCase):
         env.storage.put('test', 'test')
         self.assertEqual(env.storage.read('test'), 'test')
 
-        env.cache.set('test', 'test')
+        env.cache.put('test', 'test')
         self.assertEqual(env.cache.get('test'), 'test')
 
         # Recreate environment
@@ -110,7 +110,7 @@ class TestEnvironment(UDSTransactionTestCase):
         env.storage.put('test', 'test')
         self.assertEqual(env.storage.read('test'), 'test')
 
-        env.cache.set('test', 'test')
+        env.cache.put('test', 'test')
         self.assertEqual(env.cache.get('test'), 'test')
 
         # Environment is cleared after exit, ensure it
@@ -57,13 +57,13 @@ class CacheTest(UDSTransactionTestCase):
         self.assertEqual(cache.remove('non-existing-1'), False, 'Removing unexisting key')
 
         # Add new key (non existing) with default duration (60 seconds probable)
-        cache.set(UNICODE_CHARS_2, VALUE_1)
+        cache.put(UNICODE_CHARS_2, VALUE_1)
 
         # checks it
         self.assertEqual(cache.get(UNICODE_CHARS_2), VALUE_1, 'Put a key and recover it')
 
         # Add new "str" key, with 1 second life, wait 2 seconds and recover
-        cache.set(b'key', VALUE_1, 1)
+        cache.put(b'key', VALUE_1, 1)
         time.sleep(1.1)
         self.assertEqual(
             cache.get(b'key'),
@@ -80,17 +80,17 @@ class CacheTest(UDSTransactionTestCase):
         )
 
         # Checks cache clean
-        cache.set('key', VALUE_1)
+        cache.put('key', VALUE_1)
         cache.clear()
         self.assertEqual(cache.get('key'), None, 'Get key from cleaned cache')
 
         # Checks cache purge
-        cache.set('key', 'test')
+        cache.put('key', 'test')
         Cache.purge()
         self.assertEqual(cache.get('key'), None, 'Get key from purged cache')
 
         # Checks cache cleanup (remove expired keys)
-        cache.set('key', 'test', 0)
+        cache.put('key', 'test', 0)
         time.sleep(0.1)
         Cache.purge_outdated()
         cache.refresh('key')