1
0
mirror of https://github.com/ansible/awx.git synced 2024-10-31 23:51:09 +03:00

Mass active flag code removal

This commit is contained in:
Akita Noek 2016-03-10 16:44:55 -05:00
parent ba833d683e
commit 6ea99583da
27 changed files with 272 additions and 417 deletions

View File

@ -26,19 +26,6 @@ class MongoFilterBackend(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
return queryset
class ActiveOnlyBackend(BaseFilterBackend):
'''
Filter to show only objects where is_active/active is True.
'''
def filter_queryset(self, request, queryset, view):
for field in queryset.model._meta.fields:
if field.name == 'is_active':
queryset = queryset.filter(is_active=True)
elif field.name == 'active':
queryset = queryset.filter(active=True)
return queryset
class TypeFilterBackend(BaseFilterBackend):
'''
Filter on type field now returned with all objects.
@ -166,12 +153,12 @@ class FieldLookupBackend(BaseFilterBackend):
for key, values in request.query_params.lists():
if key in self.RESERVED_NAMES:
continue
# HACK: Make job event filtering by host name mostly work even
# when not capturing job event hosts M2M.
if queryset.model._meta.object_name == 'JobEvent' and key.startswith('hosts__name'):
key = key.replace('hosts__name', 'or__host__name')
or_filters.append((False, 'host__name__isnull', True))
or_filters.append((False, 'host__name__isnull', True))
# Custom __int filter suffix (internal use only).
q_int = False

View File

@ -103,11 +103,7 @@ class ModelAccessPermission(permissions.BasePermission):
if not request.user or request.user.is_anonymous():
return False
# Don't allow inactive users (and respond with a 403).
if not request.user.is_active:
raise PermissionDenied('your account is inactive')
# Always allow superusers (as long as they are active).
# Always allow superusers
if getattr(view, 'always_allow_superuser', True) and request.user.is_superuser:
return True
@ -161,8 +157,6 @@ class JobTemplateCallbackPermission(ModelAccessPermission):
raise PermissionDenied()
elif not host_config_key:
raise PermissionDenied()
elif obj and not obj.active:
raise PermissionDenied()
elif obj and obj.host_config_key != host_config_key:
raise PermissionDenied()
else:
@ -182,7 +176,7 @@ class TaskPermission(ModelAccessPermission):
# Verify that the ID present in the auth token is for a valid, active
# unified job.
try:
unified_job = UnifiedJob.objects.get(active=True, status='running',
unified_job = UnifiedJob.objects.get(status='running',
pk=int(request.auth.split('-')[0]))
except (UnifiedJob.DoesNotExist, TypeError):
return False

View File

@ -252,7 +252,6 @@ class BaseSerializer(serializers.ModelSerializer):
# make certain fields read only
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
active = serializers.SerializerMethodField()
def get_type(self, obj):
@ -288,9 +287,9 @@ class BaseSerializer(serializers.ModelSerializer):
def get_related(self, obj):
res = OrderedDict()
if getattr(obj, 'created_by', None) and obj.created_by.is_active:
if getattr(obj, 'created_by', None):
res['created_by'] = reverse('api:user_detail', args=(obj.created_by.pk,))
if getattr(obj, 'modified_by', None) and obj.modified_by.is_active:
if getattr(obj, 'modified_by', None):
res['modified_by'] = reverse('api:user_detail', args=(obj.modified_by.pk,))
return res
@ -315,10 +314,6 @@ class BaseSerializer(serializers.ModelSerializer):
continue
if fkval == obj:
continue
if hasattr(fkval, 'active') and not fkval.active:
continue
if hasattr(fkval, 'is_active') and not fkval.is_active:
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
fval = getattr(fkval, field, None)
@ -334,11 +329,11 @@ class BaseSerializer(serializers.ModelSerializer):
# Can be raised by the reverse accessor for a OneToOneField.
except ObjectDoesNotExist:
pass
if getattr(obj, 'created_by', None) and obj.created_by.is_active:
if getattr(obj, 'created_by', None):
summary_fields['created_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['created_by'][field] = getattr(obj.created_by, field)
if getattr(obj, 'modified_by', None) and obj.modified_by.is_active:
if getattr(obj, 'modified_by', None):
summary_fields['modified_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['modified_by'][field] = getattr(obj.modified_by, field)
@ -378,14 +373,6 @@ class BaseSerializer(serializers.ModelSerializer):
else:
return obj.modified
def get_active(self, obj):
if obj is None:
return False
elif isinstance(obj, User):
return obj.is_active
else:
return obj.active
def build_standard_field(self, field_name, model_field):
# DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits
@ -564,11 +551,11 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
if obj.current_job and obj.current_job.active:
if obj.current_job:
res['current_job'] = obj.current_job.get_absolute_url()
if obj.last_job and obj.last_job.active:
if obj.last_job:
res['last_job'] = obj.last_job.get_absolute_url()
if obj.next_schedule and obj.next_schedule.active:
if obj.next_schedule:
res['next_schedule'] = obj.next_schedule.get_absolute_url()
return res
@ -623,9 +610,9 @@ class UnifiedJobSerializer(BaseSerializer):
def get_related(self, obj):
res = super(UnifiedJobSerializer, self).get_related(obj)
if obj.unified_job_template and obj.unified_job_template.active:
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url()
if obj.schedule and obj.schedule.active:
if obj.schedule:
res['schedule'] = obj.schedule.get_absolute_url()
if isinstance(obj, ProjectUpdate):
res['stdout'] = reverse('api:project_update_stdout', args=(obj.pk,))
@ -874,7 +861,7 @@ class ProjectOptionsSerializer(BaseSerializer):
def get_related(self, obj):
res = super(ProjectOptionsSerializer, self).get_related(obj)
if obj.credential and obj.credential.active:
if obj.credential:
res['credential'] = reverse('api:credential_detail',
args=(obj.credential.pk,))
return res
@ -903,7 +890,7 @@ class ProjectOptionsSerializer(BaseSerializer):
def to_representation(self, obj):
ret = super(ProjectOptionsSerializer, self).to_representation(obj)
if obj is not None and 'credential' in ret and (not obj.credential or not obj.credential.active):
if obj is not None and 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
@ -1039,13 +1026,13 @@ class InventorySerializer(BaseSerializerWithVariables):
access_list = reverse('api:inventory_access_list', args=(obj.pk,)),
#single_fact = reverse('api:inventory_single_fact_view', args=(obj.pk,)),
))
if obj.organization and obj.organization.active:
if obj.organization:
res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,))
return res
def to_representation(self, obj):
ret = super(InventorySerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and (not obj.organization or not obj.organization.active):
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
@ -1100,11 +1087,11 @@ class HostSerializer(BaseSerializerWithVariables):
fact_versions = reverse('api:host_fact_versions_list', args=(obj.pk,)),
#single_fact = reverse('api:host_single_fact_view', args=(obj.pk,)),
))
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.last_job and obj.last_job.active:
if obj.last_job:
res['last_job'] = reverse('api:job_detail', args=(obj.last_job.pk,))
if obj.last_job_host_summary and obj.last_job_host_summary.job.active:
if obj.last_job_host_summary:
res['last_job_host_summary'] = reverse('api:job_host_summary_detail', args=(obj.last_job_host_summary.pk,))
return res
@ -1120,7 +1107,7 @@ class HostSerializer(BaseSerializerWithVariables):
'name': j.job.job_template.name if j.job.job_template is not None else "",
'status': j.job.status,
'finished': j.job.finished,
} for j in obj.job_host_summaries.filter(job__active=True).select_related('job__job_template').order_by('-created')[:5]]})
} for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]]})
return d
def _get_host_port_from_name(self, name):
@ -1169,11 +1156,11 @@ class HostSerializer(BaseSerializerWithVariables):
ret = super(HostSerializer, self).to_representation(obj)
if not obj:
return ret
if 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'last_job' in ret and (not obj.last_job or not obj.last_job.active):
if 'last_job' in ret and not obj.last_job:
ret['last_job'] = None
if 'last_job_host_summary' in ret and (not obj.last_job_host_summary or not obj.last_job_host_summary.job.active):
if 'last_job_host_summary' in ret and not obj.last_job_host_summary:
ret['last_job_host_summary'] = None
return ret
@ -1210,7 +1197,7 @@ class GroupSerializer(BaseSerializerWithVariables):
access_list = reverse('api:group_access_list', args=(obj.pk,)),
#single_fact = reverse('api:group_single_fact_view', args=(obj.pk,)),
))
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.inventory_source:
res['inventory_source'] = reverse('api:inventory_source_detail', args=(obj.inventory_source.pk,))
@ -1223,7 +1210,7 @@ class GroupSerializer(BaseSerializerWithVariables):
def to_representation(self, obj):
ret = super(GroupSerializer, self).to_representation(obj)
if obj is not None and 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if obj is not None and 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
@ -1239,7 +1226,7 @@ class GroupTreeSerializer(GroupSerializer):
def get_children(self, obj):
if obj is None:
return {}
children_qs = obj.children.filter(active=True)
children_qs = obj.children
children_qs = children_qs.select_related('inventory')
children_qs = children_qs.prefetch_related('inventory_source')
return GroupTreeSerializer(children_qs, many=True).data
@ -1304,7 +1291,7 @@ class CustomInventoryScriptSerializer(BaseSerializer):
def get_related(self, obj):
res = super(CustomInventoryScriptSerializer, self).get_related(obj)
if obj.organization and obj.organization.active:
if obj.organization:
res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,))
return res
@ -1317,10 +1304,10 @@ class InventorySourceOptionsSerializer(BaseSerializer):
def get_related(self, obj):
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
if obj.credential and obj.credential.active:
if obj.credential:
res['credential'] = reverse('api:credential_detail',
args=(obj.credential.pk,))
if obj.source_script and obj.source_script.active:
if obj.source_script:
res['source_script'] = reverse('api:inventory_script_detail', args=(obj.source_script.pk,))
return res
@ -1365,7 +1352,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
ret = super(InventorySourceOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'credential' in ret and (not obj.credential or not obj.credential.active):
if 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
@ -1396,9 +1383,9 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
notifiers_success = reverse('api:inventory_source_notifiers_success_list', args=(obj.pk,)),
notifiers_error = reverse('api:inventory_source_notifiers_error_list', args=(obj.pk,)),
))
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.group and obj.group.active:
if obj.group:
res['group'] = reverse('api:group_detail', args=(obj.group.pk,))
# Backwards compatibility.
if obj.current_update:
@ -1413,9 +1400,9 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
ret = super(InventorySourceSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'group' in ret and (not obj.group or not obj.group.active):
if 'group' in ret and not obj.group:
ret['group'] = None
return ret
@ -1473,13 +1460,13 @@ class TeamSerializer(BaseSerializer):
activity_stream = reverse('api:team_activity_stream_list', args=(obj.pk,)),
access_list = reverse('api:team_access_list', args=(obj.pk,)),
))
if obj.organization and obj.organization.active:
if obj.organization:
res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,))
return res
def to_representation(self, obj):
ret = super(TeamSerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and (not obj.organization or not obj.organization.active):
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
@ -1563,9 +1550,9 @@ class CredentialSerializer(BaseSerializer):
def to_representation(self, obj):
ret = super(CredentialSerializer, self).to_representation(obj)
if obj is not None and 'user' in ret and (not obj.user or not obj.user.is_active):
if obj is not None and 'user' in ret and not obj.user:
ret['user'] = None
if obj is not None and 'team' in ret and (not obj.team or not obj.team.active):
if obj is not None and 'team' in ret and not obj.team:
ret['team'] = None
return ret
@ -1604,13 +1591,13 @@ class JobOptionsSerializer(BaseSerializer):
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.project and obj.project.active:
if obj.project:
res['project'] = reverse('api:project_detail', args=(obj.project.pk,))
if obj.credential and obj.credential.active:
if obj.credential:
res['credential'] = reverse('api:credential_detail', args=(obj.credential.pk,))
if obj.cloud_credential and obj.cloud_credential.active:
if obj.cloud_credential:
res['cloud_credential'] = reverse('api:credential_detail',
args=(obj.cloud_credential.pk,))
return res
@ -1619,15 +1606,15 @@ class JobOptionsSerializer(BaseSerializer):
ret = super(JobOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'project' in ret and (not obj.project or not obj.project.active):
if 'project' in ret and not obj.project:
ret['project'] = None
if 'playbook' in ret:
ret['playbook'] = ''
if 'credential' in ret and (not obj.credential or not obj.credential.active):
if 'credential' in ret and not obj.credential:
ret['credential'] = None
if 'cloud_credential' in ret and (not obj.cloud_credential or not obj.cloud_credential.active):
if 'cloud_credential' in ret and not obj.cloud_credential:
ret['cloud_credential'] = None
return ret
@ -1690,7 +1677,7 @@ class JobTemplateSerializer(UnifiedJobTemplateSerializer, JobOptionsSerializer):
else:
d['can_copy'] = False
d['can_edit'] = False
d['recent_jobs'] = [{'id': x.id, 'status': x.status, 'finished': x.finished} for x in obj.jobs.filter(active=True).order_by('-created')[:10]]
d['recent_jobs'] = [{'id': x.id, 'status': x.status, 'finished': x.finished} for x in obj.jobs.order_by('-created')[:10]]
return d
def validate(self, attrs):
@ -1721,7 +1708,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
activity_stream = reverse('api:job_activity_stream_list', args=(obj.pk,)),
notifications = reverse('api:job_notifications_list', args=(obj.pk,)),
))
if obj.job_template and obj.job_template.active:
if obj.job_template:
res['job_template'] = reverse('api:job_template_detail',
args=(obj.job_template.pk,))
if obj.can_start or True:
@ -1766,7 +1753,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
ret = super(JobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'job_template' in ret and (not obj.job_template or not obj.job_template.active):
if 'job_template' in ret and not obj.job_template:
ret['job_template'] = None
if obj.job_template and obj.job_template.survey_enabled:
@ -1830,11 +1817,11 @@ class JobRelaunchSerializer(JobSerializer):
def validate(self, attrs):
obj = self.context.get('obj')
if not obj.credential or obj.credential.active is False:
if not obj.credential:
raise serializers.ValidationError(dict(credential=["Credential not found or deleted."]))
if obj.job_type != PERM_INVENTORY_SCAN and (obj.project is None or not obj.project.active):
if obj.job_type != PERM_INVENTORY_SCAN and obj.project is None:
raise serializers.ValidationError(dict(errors=["Job Template Project is missing or undefined"]))
if obj.inventory is None or not obj.inventory.active:
if obj.inventory is None:
raise serializers.ValidationError(dict(errors=["Job Template Inventory is missing or undefined"]))
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
@ -1874,9 +1861,9 @@ class AdHocCommandSerializer(UnifiedJobSerializer):
def get_related(self, obj):
res = super(AdHocCommandSerializer, self).get_related(obj)
if obj.inventory and obj.inventory.active:
if obj.inventory:
res['inventory'] = reverse('api:inventory_detail', args=(obj.inventory.pk,))
if obj.credential and obj.credential.active:
if obj.credential:
res['credential'] = reverse('api:credential_detail', args=(obj.credential.pk,))
res.update(dict(
events = reverse('api:ad_hoc_command_ad_hoc_command_events_list', args=(obj.pk,)),
@ -1888,9 +1875,9 @@ class AdHocCommandSerializer(UnifiedJobSerializer):
def to_representation(self, obj):
ret = super(AdHocCommandSerializer, self).to_representation(obj)
if 'inventory' in ret and (not obj.inventory or not obj.inventory.active):
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'credential' in ret and (not obj.credential or not obj.credential.active):
if 'credential' in ret and not obj.credential:
ret['credential'] = None
# For the UI, only module_name is returned for name, instead of the
# longer module name + module_args format.
@ -1942,7 +1929,7 @@ class SystemJobSerializer(UnifiedJobSerializer):
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
if obj.system_job_template and obj.system_job_template.active:
if obj.system_job_template:
res['system_job_template'] = reverse('api:system_job_template_detail',
args=(obj.system_job_template.pk,))
if obj.can_cancel or True:
@ -2080,7 +2067,7 @@ class JobLaunchSerializer(BaseSerializer):
}
def get_credential_needed_to_start(self, obj):
return not (obj and obj.credential and obj.credential.active)
return not (obj and obj.credential)
def get_survey_enabled(self, obj):
if obj:
@ -2093,7 +2080,7 @@ class JobLaunchSerializer(BaseSerializer):
data = self.context.get('data')
credential = attrs.get('credential', obj and obj.credential or None)
if not credential or not credential.active:
if not credential:
errors['credential'] = 'Credential not provided'
# fill passwords dict with request data passwords
@ -2124,9 +2111,9 @@ class JobLaunchSerializer(BaseSerializer):
if validation_errors:
errors['variables_needed_to_start'] = validation_errors
if obj.job_type != PERM_INVENTORY_SCAN and (obj.project is None or not obj.project.active):
if obj.job_type != PERM_INVENTORY_SCAN and (obj.project is None):
errors['project'] = 'Job Template Project is missing or undefined'
if obj.inventory is None or not obj.inventory.active:
if obj.inventory is None:
errors['inventory'] = 'Job Template Inventory is missing or undefined'
if errors:
@ -2162,7 +2149,7 @@ class NotifierSerializer(BaseSerializer):
test = reverse('api:notifier_test', args=(obj.pk,)),
notifications = reverse('api:notifier_notification_list', args=(obj.pk,)),
))
if obj.organization and obj.organization.active:
if obj.organization:
res['organization'] = reverse('api:organization_detail', args=(obj.organization.pk,))
return res
@ -2220,7 +2207,7 @@ class ScheduleSerializer(BaseSerializer):
res.update(dict(
unified_jobs = reverse('api:schedule_unified_jobs_list', args=(obj.pk,)),
))
if obj.unified_job_template and obj.unified_job_template.active:
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url()
return res
@ -2447,8 +2434,6 @@ class AuthTokenSerializer(serializers.Serializer):
if username and password:
user = authenticate(username=username, password=password)
if user:
if not user.is_active:
raise serializers.ValidationError('User account is disabled.')
attrs['user'] = user
return attrs
else:

View File

@ -214,7 +214,7 @@ class ApiV1ConfigView(APIView):
user_ldap_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
data['user_ldap_fields'] = user_ldap_fields
if request.user.is_superuser or request.user.admin_of_organizations.filter(active=True).count():
if request.user.is_superuser or request.user.admin_of_organizations.count():
data.update(dict(
project_base_dir = settings.PROJECTS_ROOT,
project_local_paths = Project.get_local_path_choices(),
@ -609,7 +609,7 @@ class OrganizationList(ListCreateAPIView):
# by the license, then we are only willing to create this organization
# if no organizations exist in the system.
if (not feature_enabled('multiple_organizations') and
self.model.objects.filter(active=True).count() > 0):
self.model.objects.count() > 0):
raise LicenseForbids('Your Tower license only permits a single '
'organization to exist.')
@ -804,7 +804,7 @@ class ProjectList(ListCreateAPIView):
def get(self, request, *args, **kwargs):
# Not optimal, but make sure the project status and last_updated fields
# are up to date here...
projects_qs = Project.objects.filter(active=True)
projects_qs = Project.objects
projects_qs = projects_qs.select_related('current_job', 'last_job')
for project in projects_qs:
project._set_status_and_last_job_run()
@ -1421,7 +1421,7 @@ class GroupChildrenList(SubListCreateAttachDetachAPIView):
sub, self.relationship):
raise PermissionDenied()
if sub.parents.filter(active=True).exclude(pk=parent.pk).count() == 0:
if sub.parents.exclude(pk=parent.pk).count() == 0:
sub.delete()
else:
relationship.remove(sub)
@ -1593,9 +1593,9 @@ class InventoryScriptView(RetrieveAPIView):
hostvars = bool(request.query_params.get('hostvars', ''))
show_all = bool(request.query_params.get('all', ''))
if show_all:
hosts_q = dict(active=True)
hosts_q = dict()
else:
hosts_q = dict(active=True, enabled=True)
hosts_q = dict(enabled=True)
if hostname:
host = get_object_or_404(obj.hosts, name=hostname, **hosts_q)
data = host.variables_dict
@ -1613,8 +1613,7 @@ class InventoryScriptView(RetrieveAPIView):
all_group['hosts'] = groupless_hosts
# Build in-memory mapping of groups and their hosts.
group_hosts_kw = dict(group__inventory_id=obj.id, group__active=True,
host__inventory_id=obj.id, host__active=True)
group_hosts_kw = dict(group__inventory_id=obj.id, host__inventory_id=obj.id)
if 'enabled' in hosts_q:
group_hosts_kw['host__enabled'] = hosts_q['enabled']
group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
@ -1627,8 +1626,8 @@ class InventoryScriptView(RetrieveAPIView):
# Build in-memory mapping of groups and their children.
group_parents_qs = Group.parents.through.objects.filter(
from_group__inventory_id=obj.id, from_group__active=True,
to_group__inventory_id=obj.id, to_group__active=True,
from_group__inventory_id=obj.id,
to_group__inventory_id=obj.id,
)
group_parents_qs = group_parents_qs.order_by('from_group__name')
group_parents_qs = group_parents_qs.values_list('from_group_id', 'from_group__name', 'to_group_id')
@ -1638,7 +1637,7 @@ class InventoryScriptView(RetrieveAPIView):
group_children.append(from_group_name)
# Now use in-memory maps to build up group info.
for group in obj.groups.filter(active=True):
for group in obj.groups:
group_info = OrderedDict()
group_info['hosts'] = group_hosts_map.get(group.id, [])
group_info['children'] = group_children_map.get(group.id, [])
@ -1684,9 +1683,9 @@ class InventoryTreeView(RetrieveAPIView):
def retrieve(self, request, *args, **kwargs):
inventory = self.get_object()
group_children_map = inventory.get_group_children_map(active=True)
root_group_pks = inventory.root_groups.filter(active=True).order_by('name').values_list('pk', flat=True)
groups_qs = inventory.groups.filter(active=True)
group_children_map = inventory.get_group_children_map()
root_group_pks = inventory.root_groups.order_by('name').values_list('pk', flat=True)
groups_qs = inventory.groups
groups_qs = groups_qs.select_related('inventory')
groups_qs = groups_qs.prefetch_related('inventory_source')
all_group_data = GroupSerializer(groups_qs, many=True).data
@ -1890,7 +1889,7 @@ class JobTemplateLaunch(RetrieveAPIView, GenericAPIView):
if obj:
for p in obj.passwords_needed_to_start:
data[p] = u''
if obj.credential and obj.credential.active:
if obj.credential:
data.pop('credential', None)
else:
data['credential'] = None
@ -2087,7 +2086,7 @@ class JobTemplateCallback(GenericAPIView):
return set()
# Find the host objects to search for a match.
obj = self.get_object()
qs = obj.inventory.hosts.filter(active=True)
qs = obj.inventory.hosts
# First try for an exact match on the name.
try:
return set([qs.get(name__in=remote_hosts)])
@ -2147,7 +2146,7 @@ class JobTemplateCallback(GenericAPIView):
# match again.
inventory_sources_already_updated = []
if len(matching_hosts) != 1:
inventory_sources = job_template.inventory.inventory_sources.filter(active=True, update_on_launch=True)
inventory_sources = job_template.inventory.inventory_sources.filter( update_on_launch=True)
inventory_update_pks = set()
for inventory_source in inventory_sources:
if inventory_source.needs_update_on_launch:

View File

@ -245,7 +245,7 @@ class UserAccess(BaseAccess):
return False
if self.user.is_superuser:
return True
return Organization.accessible_objects(self.user, ALL_PERMISSIONS).filter(active=True).exists()
return Organization.accessible_objects(self.user, ALL_PERMISSIONS).exists()
def can_change(self, obj, data):
if data is not None and 'is_superuser' in data:
@ -266,7 +266,7 @@ class UserAccess(BaseAccess):
if obj == self.user:
# cannot delete yourself
return False
super_users = User.objects.filter(is_active=True, is_superuser=True)
super_users = User.objects.filter(is_superuser=True)
if obj.is_superuser and super_users.count() == 1:
# cannot delete the last active superuser
return False
@ -525,7 +525,7 @@ class InventoryUpdateAccess(BaseAccess):
model = InventoryUpdate
def get_queryset(self):
qs = InventoryUpdate.objects.filter(active=True).distinct()
qs = InventoryUpdate.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory_source__group',
'inventory_source__inventory')
inventory_sources_qs = self.user.get_queryset(InventorySource)
@ -675,7 +675,7 @@ class ProjectUpdateAccess(BaseAccess):
model = ProjectUpdate
def get_queryset(self):
qs = ProjectUpdate.objects.filter(active=True).distinct()
qs = ProjectUpdate.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'project')
project_ids = set(self.user.get_queryset(Project).values_list('id', flat=True))
return qs.filter(project_id__in=project_ids)
@ -819,7 +819,7 @@ class JobAccess(BaseAccess):
model = Job
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'job_template', 'inventory',
'project', 'credential', 'cloud_credential', 'job_template')
qs = qs.prefetch_related('unified_job_template')
@ -841,12 +841,10 @@ class JobAccess(BaseAccess):
# TODO: I think the below queries can be combined
deploy_permissions_ids = Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=allowed_deploy,
)
check_permissions_ids = Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=allowed_check,
)
@ -945,18 +943,17 @@ class AdHocCommandAccess(BaseAccess):
model = AdHocCommand
def get_queryset(self):
qs = self.model.objects.filter(active=True).distinct()
qs = self.model.objects.distinct()
qs = qs.select_related('created_by', 'modified_by', 'inventory',
'credential')
if self.user.is_superuser:
return qs
credential_ids = set(self.user.get_queryset(Credential).values_list('id', flat=True))
team_ids = set(Team.objects.filter(active=True, users__in=[self.user]).values_list('id', flat=True))
team_ids = set(Team.objects.filter( users__in=[self.user]).values_list('id', flat=True))
permission_ids = set(Permission.objects.filter(
Q(user=self.user) | Q(team__in=team_ids),
active=True,
permission_type__in=PERMISSION_TYPES_ALLOWING_INVENTORY_READ,
run_ad_hoc_commands=True,
).values_list('id', flat=True))
@ -980,7 +977,7 @@ class AdHocCommandAccess(BaseAccess):
# If a credential is provided, the user should have read access to it.
credential_pk = get_pk_from_dict(data, 'credential')
if credential_pk:
credential = get_object_or_400(Credential, pk=credential_pk, active=True)
credential = get_object_or_400(Credential, pk=credential_pk)
if not credential.accessible_by(self.user, {'read':True}):
return False
@ -988,7 +985,7 @@ class AdHocCommandAccess(BaseAccess):
# given inventory.
inventory_pk = get_pk_from_dict(data, 'inventory')
if inventory_pk:
inventory = get_object_or_400(Inventory, pk=inventory_pk, active=True)
inventory = get_object_or_400(Inventory, pk=inventory_pk)
if not inventory.accessible_by(self.user, {'execute': True}):
return False

View File

@ -19,7 +19,7 @@ class Command(BaseCommand):
# Create a default organization as the first superuser found.
try:
superuser = User.objects.filter(is_superuser=True, is_active=True).order_by('pk')[0]
superuser = User.objects.filter(is_superuser=True).order_by('pk')[0]
except IndexError:
superuser = None
with impersonate(superuser):

View File

@ -633,7 +633,7 @@ class Command(NoArgsCommand):
else:
q = dict(name=self.inventory_name)
try:
self.inventory = Inventory.objects.filter(active=True).get(**q)
self.inventory = Inventory.objects.get(**q)
except Inventory.DoesNotExist:
raise CommandError('Inventory with %s = %s cannot be found' % q.items()[0])
except Inventory.MultipleObjectsReturned:
@ -648,8 +648,7 @@ class Command(NoArgsCommand):
if inventory_source_id:
try:
self.inventory_source = InventorySource.objects.get(pk=inventory_source_id,
inventory=self.inventory,
active=True)
inventory=self.inventory)
except InventorySource.DoesNotExist:
raise CommandError('Inventory source with id=%s not found' %
inventory_source_id)
@ -669,7 +668,6 @@ class Command(NoArgsCommand):
source_path=os.path.abspath(self.source),
overwrite=self.overwrite,
overwrite_vars=self.overwrite_vars,
active=True,
)
self.inventory_update = self.inventory_source.create_inventory_update(
job_args=json.dumps(sys.argv),
@ -703,7 +701,7 @@ class Command(NoArgsCommand):
host_qs = self.inventory_source.group.all_hosts
else:
host_qs = self.inventory.hosts.all()
host_qs = host_qs.filter(active=True, instance_id='',
host_qs = host_qs.filter(instance_id='',
variables__contains=self.instance_id_var.split('.')[0])
for host in host_qs:
instance_id = self._get_instance_id(host.variables_dict)
@ -740,7 +738,7 @@ class Command(NoArgsCommand):
hosts_qs = self.inventory_source.group.all_hosts
# FIXME: Also include hosts from inventory_source.managed_hosts?
else:
hosts_qs = self.inventory.hosts.filter(active=True)
hosts_qs = self.inventory.hosts
# Build list of all host pks, remove all that should not be deleted.
del_host_pks = set(hosts_qs.values_list('pk', flat=True))
if self.instance_id_var:
@ -785,7 +783,7 @@ class Command(NoArgsCommand):
groups_qs = self.inventory_source.group.all_children
# FIXME: Also include groups from inventory_source.managed_groups?
else:
groups_qs = self.inventory.groups.filter(active=True)
groups_qs = self.inventory.groups
# Build list of all group pks, remove those that should not be deleted.
del_group_pks = set(groups_qs.values_list('pk', flat=True))
all_group_names = self.all_group.all_groups.keys()
@ -822,10 +820,10 @@ class Command(NoArgsCommand):
if self.inventory_source.group:
db_groups = self.inventory_source.group.all_children
else:
db_groups = self.inventory.groups.filter(active=True)
db_groups = self.inventory.groups
for db_group in db_groups:
# Delete child group relationships not present in imported data.
db_children = db_group.children.filter(active=True)
db_children = db_group.children
db_children_name_pk_map = dict(db_children.values_list('name', 'pk'))
mem_children = self.all_group.all_groups[db_group.name].children
for mem_group in mem_children:
@ -840,7 +838,7 @@ class Command(NoArgsCommand):
db_child.name, db_group.name)
# FIXME: Inventory source group relationships
# Delete group/host relationships not present in imported data.
db_hosts = db_group.hosts.filter(active=True)
db_hosts = db_group.hosts
del_host_pks = set(db_hosts.values_list('pk', flat=True))
mem_hosts = self.all_group.all_groups[db_group.name].hosts
all_mem_host_names = [h.name for h in mem_hosts if not h.instance_id]
@ -861,7 +859,7 @@ class Command(NoArgsCommand):
del_pks = del_host_pks[offset:(offset + self._batch_size)]
for db_host in db_hosts.filter(pk__in=del_pks):
group_host_count += 1
if db_host not in db_group.hosts.filter(active=True):
if db_host not in db_group.hosts:
continue
db_group.hosts.remove(db_host)
self.logger.info('Host "%s" removed from group "%s"',
@ -1037,7 +1035,7 @@ class Command(NoArgsCommand):
all_host_pks = sorted(mem_host_pk_map.keys())
for offset in xrange(0, len(all_host_pks), self._batch_size):
host_pks = all_host_pks[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter(active=True, pk__in=host_pks):
for db_host in self.inventory.hosts.filter( pk__in=host_pks):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_pk_map[db_host.pk]
@ -1049,7 +1047,7 @@ class Command(NoArgsCommand):
all_instance_ids = sorted(mem_host_instance_id_map.keys())
for offset in xrange(0, len(all_instance_ids), self._batch_size):
instance_ids = all_instance_ids[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter(active=True, instance_id__in=instance_ids):
for db_host in self.inventory.hosts.filter( instance_id__in=instance_ids):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_instance_id_map[db_host.instance_id]
@ -1061,7 +1059,7 @@ class Command(NoArgsCommand):
all_host_names = sorted(mem_host_name_map.keys())
for offset in xrange(0, len(all_host_names), self._batch_size):
host_names = all_host_names[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter(active=True, name__in=host_names):
for db_host in self.inventory.hosts.filter( name__in=host_names):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_name_map[db_host.name]

View File

@ -13,9 +13,9 @@ class HostManager(models.Manager):
def active_count(self):
"""Return count of active, unique hosts for licensing."""
try:
return self.filter(active=True, inventory__active=True).order_by('name').distinct('name').count()
return self.order_by('name').distinct('name').count()
except NotImplementedError: # For unit tests only, SQLite doesn't support distinct('name')
return len(set(self.filter(active=True, inventory__active=True).values_list('name', flat=True)))
return len(set(self.values_list('name', flat=True)))
class InstanceManager(models.Manager):
"""A custom manager class for the Instance model.

View File

@ -87,7 +87,7 @@ class AdHocCommand(UnifiedJob):
def clean_inventory(self):
inv = self.inventory
if not inv or not inv.active:
if not inv:
raise ValidationError('Inventory is no longer available.')
return inv
@ -123,7 +123,7 @@ class AdHocCommand(UnifiedJob):
@property
def passwords_needed_to_start(self):
'''Return list of password field names needed to start the job.'''
if self.credential and self.credential.active:
if self.credential:
return self.credential.passwords_needed
else:
return []
@ -164,14 +164,14 @@ class AdHocCommand(UnifiedJob):
def task_impact(self):
# NOTE: We sorta have to assume the host count matches and that forks default to 5
from awx.main.models.inventory import Host
count_hosts = Host.objects.filter(active=True, enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()
count_hosts = Host.objects.filter( enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()
return min(count_hosts, 5 if self.forks == 0 else self.forks) * 10
def generate_dependencies(self, active_tasks):
from awx.main.models import InventoryUpdate
if not self.inventory:
return []
inventory_sources = self.inventory.inventory_sources.filter(active=True, update_on_launch=True)
inventory_sources = self.inventory.inventory_sources.filter( update_on_launch=True)
inventory_sources_found = []
dependencies = []
for obj in active_tasks:

View File

@ -123,15 +123,12 @@ class Inventory(CommonModel, ResourceMixin):
variables_dict = VarsDictProperty('variables')
def get_group_hosts_map(self, active=None):
def get_group_hosts_map(self):
'''
Return dictionary mapping group_id to set of child host_id's.
'''
# FIXME: Cache this mapping?
group_hosts_kw = dict(group__inventory_id=self.pk, host__inventory_id=self.pk)
if active is not None:
group_hosts_kw['group__active'] = active
group_hosts_kw['host__active'] = active
group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw)
group_hosts_qs = group_hosts_qs.values_list('group_id', 'host_id')
group_hosts_map = {}
@ -140,15 +137,12 @@ class Inventory(CommonModel, ResourceMixin):
group_host_ids.add(host_id)
return group_hosts_map
def get_group_parents_map(self, active=None):
def get_group_parents_map(self):
'''
Return dictionary mapping group_id to set of parent group_id's.
'''
# FIXME: Cache this mapping?
group_parents_kw = dict(from_group__inventory_id=self.pk, to_group__inventory_id=self.pk)
if active is not None:
group_parents_kw['from_group__active'] = active
group_parents_kw['to_group__active'] = active
group_parents_qs = Group.parents.through.objects.filter(**group_parents_kw)
group_parents_qs = group_parents_qs.values_list('from_group_id', 'to_group_id')
group_parents_map = {}
@ -157,15 +151,12 @@ class Inventory(CommonModel, ResourceMixin):
group_parents.add(to_group_id)
return group_parents_map
def get_group_children_map(self, active=None):
def get_group_children_map(self):
'''
Return dictionary mapping group_id to set of child group_id's.
'''
# FIXME: Cache this mapping?
group_parents_kw = dict(from_group__inventory_id=self.pk, to_group__inventory_id=self.pk)
if active is not None:
group_parents_kw['from_group__active'] = active
group_parents_kw['to_group__active'] = active
group_parents_qs = Group.parents.through.objects.filter(**group_parents_kw)
group_parents_qs = group_parents_qs.values_list('from_group_id', 'to_group_id')
group_children_map = {}
@ -176,12 +167,12 @@ class Inventory(CommonModel, ResourceMixin):
def update_host_computed_fields(self):
'''
Update computed fields for all active hosts in this inventory.
Update computed fields for all hosts in this inventory.
'''
hosts_to_update = {}
hosts_qs = self.hosts.filter(active=True)
hosts_qs = self.hosts
# Define queryset of all hosts with active failures.
hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__job__active=True, last_job_host_summary__failed=True).values_list('pk', flat=True)
hosts_with_active_failures = hosts_qs.filter(last_job_host_summary__isnull=False, last_job_host_summary__failed=True).values_list('pk', flat=True)
# Find all hosts that need the has_active_failures flag set.
hosts_to_set = hosts_qs.filter(has_active_failures=False, pk__in=hosts_with_active_failures)
for host_pk in hosts_to_set.values_list('pk', flat=True):
@ -193,7 +184,7 @@ class Inventory(CommonModel, ResourceMixin):
host_updates = hosts_to_update.setdefault(host_pk, {})
host_updates['has_active_failures'] = False
# Define queryset of all hosts with cloud inventory sources.
hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__active=True, inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
hosts_with_cloud_inventory = hosts_qs.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True)
# Find all hosts that need the has_inventory_sources flag set.
hosts_to_set = hosts_qs.filter(has_inventory_sources=False, pk__in=hosts_with_cloud_inventory)
for host_pk in hosts_to_set.values_list('pk', flat=True):
@ -218,13 +209,13 @@ class Inventory(CommonModel, ResourceMixin):
'''
Update computed fields for all active groups in this inventory.
'''
group_children_map = self.get_group_children_map(active=True)
group_hosts_map = self.get_group_hosts_map(active=True)
active_host_pks = set(self.hosts.filter(active=True).values_list('pk', flat=True))
failed_host_pks = set(self.hosts.filter(active=True, last_job_host_summary__job__active=True, last_job_host_summary__failed=True).values_list('pk', flat=True))
# active_group_pks = set(self.groups.filter(active=True).values_list('pk', flat=True))
group_children_map = self.get_group_children_map()
group_hosts_map = self.get_group_hosts_map()
active_host_pks = set(self.hosts.values_list('pk', flat=True))
failed_host_pks = set(self.hosts.filter(last_job_host_summary__failed=True).values_list('pk', flat=True))
# active_group_pks = set(self.groups.values_list('pk', flat=True))
failed_group_pks = set() # Update below as we check each group.
groups_with_cloud_pks = set(self.groups.filter(active=True, inventory_sources__active=True, inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
groups_with_cloud_pks = set(self.groups.filter(inventory_sources__source__in=CLOUD_INVENTORY_SOURCES).values_list('pk', flat=True))
groups_to_update = {}
# Build list of group pks to check, starting with the groups at the
@ -296,11 +287,11 @@ class Inventory(CommonModel, ResourceMixin):
self.update_host_computed_fields()
if update_groups:
self.update_group_computed_fields()
active_hosts = self.hosts.filter(active=True)
active_hosts = self.hosts
failed_hosts = active_hosts.filter(has_active_failures=True)
active_groups = self.groups.filter(active=True)
active_groups = self.groups
failed_groups = active_groups.filter(has_active_failures=True)
active_inventory_sources = self.inventory_sources.filter(active=True, source__in=CLOUD_INVENTORY_SOURCES)
active_inventory_sources = self.inventory_sources.filter( source__in=CLOUD_INVENTORY_SOURCES)
failed_inventory_sources = active_inventory_sources.filter(last_job_failed=True)
computed_fields = {
'has_active_failures': bool(failed_hosts.count()),
@ -405,10 +396,8 @@ class Host(CommonModelNameNotUnique, ResourceMixin):
Update model fields that are computed from database relationships.
'''
has_active_failures = bool(self.last_job_host_summary and
self.last_job_host_summary.job.active and
self.last_job_host_summary.failed)
active_inventory_sources = self.inventory_sources.filter(active=True,
source__in=CLOUD_INVENTORY_SOURCES)
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
computed_fields = {
'has_active_failures': has_active_failures,
'has_inventory_sources': bool(active_inventory_sources.count()),
@ -424,7 +413,7 @@ class Host(CommonModelNameNotUnique, ResourceMixin):
# change.
# NOTE: I think this is no longer needed
# if update_groups:
# for group in self.all_groups.filter(active=True):
# for group in self.all_groups:
# group.update_computed_fields()
# if update_inventory:
# self.inventory.update_computed_fields(update_groups=False,
@ -620,14 +609,12 @@ class Group(CommonModelNameNotUnique, ResourceMixin):
'''
Update model fields that are computed from database relationships.
'''
active_hosts = self.all_hosts.filter(active=True)
failed_hosts = active_hosts.filter(last_job_host_summary__job__active=True,
last_job_host_summary__failed=True)
active_groups = self.all_children.filter(active=True)
active_hosts = self.all_hosts
failed_hosts = active_hosts.filter(last_job_host_summary__failed=True)
active_groups = self.all_children
# FIXME: May not be accurate unless we always update groups depth-first.
failed_groups = active_groups.filter(has_active_failures=True)
active_inventory_sources = self.inventory_sources.filter(active=True,
source__in=CLOUD_INVENTORY_SOURCES)
active_inventory_sources = self.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
computed_fields = {
'total_hosts': active_hosts.count(),
'has_active_failures': bool(failed_hosts.count()),
@ -1154,7 +1141,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, ResourceMixin)
def _can_update(self):
if self.source == 'custom':
return bool(self.source_script and self.source_script.active)
return bool(self.source_script)
else:
return bool(self.source in CLOUD_INVENTORY_SOURCES)
@ -1171,7 +1158,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, ResourceMixin)
@property
def needs_update_on_launch(self):
if self.active and self.source and self.update_on_launch:
if self.source and self.update_on_launch:
if not self.last_job_run:
return True
if (self.last_job_run + datetime.timedelta(seconds=self.update_cache_timeout)) <= now():
@ -1180,7 +1167,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, ResourceMixin)
@property
def notifiers(self):
base_notifiers = Notifier.objects.filter(active=True)
base_notifiers = Notifier.objects
error_notifiers = list(base_notifiers.filter(organization_notifiers_for_errors=self.inventory.organization))
success_notifiers = list(base_notifiers.filter(organization_notifiers_for_success=self.inventory.organization))
any_notifiers = list(base_notifiers.filter(organization_notifiers_for_any=self.inventory.organization))
@ -1189,7 +1176,7 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, ResourceMixin)
def clean_source(self):
source = self.source
if source and self.group:
qs = self.group.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES, active=True, group__active=True)
qs = self.group.inventory_sources.filter(source__in=CLOUD_INVENTORY_SOURCES)
existing_sources = qs.exclude(pk=self.pk)
if existing_sources.count():
s = u', '.join([x.group.name for x in existing_sources])
@ -1233,7 +1220,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions):
def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', [])
inventory_source = self.inventory_source
if self.active and inventory_source.inventory and self.name == inventory_source.name:
if inventory_source.inventory and self.name == inventory_source.name:
if inventory_source.group:
self.name = '%s (%s)' % (inventory_source.group.name, inventory_source.inventory.name)
else:
@ -1269,7 +1256,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions):
return False
if (self.source not in ('custom', 'ec2') and
not (self.credential and self.credential.active)):
not (self.credential)):
return False
return True

View File

@ -149,7 +149,7 @@ class JobOptions(BaseModel):
@property
def passwords_needed_to_start(self):
'''Return list of password field names needed to start the job.'''
if self.credential and self.credential.active:
if self.credential:
return self.credential.passwords_needed
else:
return []
@ -357,7 +357,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin):
# Return all notifiers defined on the Job Template, on the Project, and on the Organization for each trigger type
# TODO: Currently there is no org fk on project so this will need to be added once that is
# available after the rbac pr
base_notifiers = Notifier.objects.filter(active=True)
base_notifiers = Notifier.objects
error_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_errors__in=[self, self.project]))
success_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_success__in=[self, self.project]))
any_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_any__in=[self, self.project]))
@ -493,7 +493,7 @@ class Job(UnifiedJob, JobOptions):
from awx.main.models import InventoryUpdate, ProjectUpdate
if self.inventory is None or self.project is None:
return []
inventory_sources = self.inventory.inventory_sources.filter(active=True, update_on_launch=True)
inventory_sources = self.inventory.inventory_sources.filter( update_on_launch=True)
project_found = False
inventory_sources_found = []
dependencies = []
@ -592,7 +592,7 @@ class Job(UnifiedJob, JobOptions):
if not super(Job, self).can_start:
return False
if not (self.credential and self.credential.active):
if not (self.credential):
return False
return True

View File

@ -53,7 +53,7 @@ class ProjectOptions(models.Model):
paths = [x.decode('utf-8') for x in os.listdir(settings.PROJECTS_ROOT)
if (os.path.isdir(os.path.join(settings.PROJECTS_ROOT, x)) and
not x.startswith('.') and not x.startswith('_'))]
qs = Project.objects.filter(active=True)
qs = Project.objects
used_paths = qs.values_list('local_path', flat=True)
return [x for x in paths if x not in used_paths]
else:
@ -336,7 +336,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
@property
def needs_update_on_launch(self):
if self.active and self.scm_type and self.scm_update_on_launch:
if self.scm_type and self.scm_update_on_launch:
if not self.last_job_run:
return True
if (self.last_job_run + datetime.timedelta(seconds=self.scm_update_cache_timeout)) <= now():
@ -345,7 +345,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin):
@property
def notifiers(self):
base_notifiers = Notifier.objects.filter(active=True)
base_notifiers = Notifier.objects
error_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_errors=self))
success_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_success=self))
any_notifiers = list(base_notifiers.filter(unifiedjobtemplate_notifiers_for_any=self))

View File

@ -27,7 +27,7 @@ __all__ = ['Schedule']
class ScheduleFilterMethods(object):
def enabled(self, enabled=True):
return self.filter(enabled=enabled, active=enabled)
return self.filter(enabled=enabled)
def before(self, dt):
return self.filter(next_run__lt=dt)

View File

@ -8,7 +8,7 @@ import threading
import json
# Django
from django.db.models.signals import pre_save, post_save, pre_delete, post_delete, m2m_changed
from django.db.models.signals import post_save, pre_delete, post_delete, m2m_changed
from django.dispatch import receiver
# Django-CRUM
@ -27,9 +27,8 @@ __all__ = []
logger = logging.getLogger('awx.main.signals')
# Update has_active_failures for inventory/groups when a Host/Group is deleted
# or marked inactive, when a Host-Group or Group-Group relationship is updated,
# or when a Job is deleted or marked inactive.
# Update has_active_failures for inventory/groups when a Host/Group is deleted,
# when a Host-Group or Group-Group relationship is updated, or when a Job is deleted
def emit_job_event_detail(sender, **kwargs):
instance = kwargs['instance']
@ -69,7 +68,7 @@ def emit_update_inventory_computed_fields(sender, **kwargs):
else:
sender_name = unicode(sender._meta.verbose_name)
if kwargs['signal'] == post_save:
if sender == Job and instance.active:
if sender == Job:
return
sender_action = 'saved'
elif kwargs['signal'] == post_delete:
@ -92,7 +91,6 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
return
instance = kwargs['instance']
if ('created' in kwargs and kwargs['created']) or \
(hasattr(instance, '_saved_active_state') and instance._saved_active_state != instance.active) or \
kwargs['signal'] == post_delete:
pass
else:
@ -108,13 +106,6 @@ def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id, True)
def store_initial_active_state(sender, **kwargs):
instance = kwargs['instance']
if instance.id is not None:
instance._saved_active_state = sender.objects.get(id=instance.id).active
else:
instance._saved_active_state = True
def rebuild_role_ancestor_list(reverse, model, instance, pk_set, **kwargs):
'When a role parent is added or removed, update our role hierarchy list'
if reverse:
@ -161,20 +152,16 @@ def org_admin_edit_members(instance, action, model, reverse, pk_set, **kwargs):
if action == 'pre_remove':
instance.content_object.admin_role.children.remove(user.admin_role)
pre_save.connect(store_initial_active_state, sender=Host)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
pre_save.connect(store_initial_active_state, sender=Group)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.hosts.through)
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.parents.through)
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Host.inventory_sources.through)
m2m_changed.connect(emit_update_inventory_computed_fields, sender=Group.inventory_sources.through)
pre_save.connect(store_initial_active_state, sender=InventorySource)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
pre_save.connect(store_initial_active_state, sender=Job)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
post_save.connect(emit_job_event_detail, sender=JobEvent)
@ -184,8 +171,8 @@ m2m_changed.connect(org_admin_edit_members, Role.members.through)
post_save.connect(sync_superuser_status_to_rbac, sender=User)
post_save.connect(create_user_role, sender=User)
# Migrate hosts, groups to parent group(s) whenever a group is deleted or
# marked as inactive.
# Migrate hosts, groups to parent group(s) whenever a group is deleted
@receiver(pre_delete, sender=Group)
def save_related_pks_before_group_delete(sender, **kwargs):
@ -208,80 +195,28 @@ def migrate_children_from_deleted_group_to_parent_groups(sender, **kwargs):
with ignore_inventory_group_removal():
with ignore_inventory_computed_fields():
if parents_pks:
for parent_group in Group.objects.filter(pk__in=parents_pks, active=True):
for child_host in Host.objects.filter(pk__in=hosts_pks, active=True):
for parent_group in Group.objects.filter(pk__in=parents_pks):
for child_host in Host.objects.filter(pk__in=hosts_pks):
logger.debug('adding host %s to parent %s after group deletion',
child_host, parent_group)
parent_group.hosts.add(child_host)
for child_group in Group.objects.filter(pk__in=children_pks, active=True):
for child_group in Group.objects.filter(pk__in=children_pks):
logger.debug('adding group %s to parent %s after group deletion',
child_group, parent_group)
parent_group.children.add(child_group)
inventory_pk = getattr(instance, '_saved_inventory_pk', None)
if inventory_pk:
try:
inventory = Inventory.objects.get(pk=inventory_pk, active=True)
inventory = Inventory.objects.get(pk=inventory_pk)
inventory.update_computed_fields()
except Inventory.DoesNotExist:
pass
@receiver(pre_save, sender=Group)
def save_related_pks_before_group_marked_inactive(sender, **kwargs):
if getattr(_inventory_updates, 'is_removing', False):
return
instance = kwargs['instance']
if not instance.pk or instance.active:
return
instance._saved_inventory_pk = instance.inventory.pk
instance._saved_parents_pks = set(instance.parents.values_list('pk', flat=True))
instance._saved_hosts_pks = set(instance.hosts.values_list('pk', flat=True))
instance._saved_children_pks = set(instance.children.values_list('pk', flat=True))
instance._saved_inventory_source_pk = instance.inventory_source.pk
@receiver(post_save, sender=Group)
def migrate_children_from_inactive_group_to_parent_groups(sender, **kwargs):
if getattr(_inventory_updates, 'is_removing', False):
return
instance = kwargs['instance']
if instance.active:
return
parents_pks = getattr(instance, '_saved_parents_pks', [])
hosts_pks = getattr(instance, '_saved_hosts_pks', [])
children_pks = getattr(instance, '_saved_children_pks', [])
with ignore_inventory_group_removal():
with ignore_inventory_computed_fields():
if parents_pks:
for parent_group in Group.objects.filter(pk__in=parents_pks, active=True):
for child_host in Host.objects.filter(pk__in=hosts_pks, active=True):
logger.debug('moving host %s to parent %s after marking group %s inactive',
child_host, parent_group, instance)
parent_group.hosts.add(child_host)
for child_group in Group.objects.filter(pk__in=children_pks, active=True):
logger.debug('moving group %s to parent %s after marking group %s inactive',
child_group, parent_group, instance)
parent_group.children.add(child_group)
parent_group.children.remove(instance)
inventory_source_pk = getattr(instance, '_saved_inventory_source_pk', None)
if inventory_source_pk:
try:
inventory_source = InventorySource.objects.get(pk=inventory_source_pk, active=True)
inventory_source.delete()
except InventorySource.DoesNotExist:
pass
inventory_pk = getattr(instance, '_saved_inventory_pk', None)
if not getattr(_inventory_updates, 'is_updating', False):
if inventory_pk:
try:
inventory = Inventory.objects.get(pk=inventory_pk, active=True)
inventory.update_computed_fields()
except Inventory.DoesNotExist:
pass
# Update host pointers to last_job and last_job_host_summary when a job is
# marked inactive or deleted.
# Update host pointers to last_job and last_job_host_summary when a job is deleted
def _update_host_last_jhs(host):
jhs_qs = JobHostSummary.objects.filter(job__active=True, host__pk=host.pk)
jhs_qs = JobHostSummary.objects.filter(host__pk=host.pk)
try:
jhs = jhs_qs.order_by('-job__pk')[0]
except IndexError:
@ -297,19 +232,10 @@ def _update_host_last_jhs(host):
if update_fields:
host.save(update_fields=update_fields)
@receiver(post_save, sender=Job)
def update_host_last_job_when_job_marked_inactive(sender, **kwargs):
instance = kwargs['instance']
if instance.active:
return
hosts_qs = Host.objects.filter(active=True, last_job__pk=instance.pk)
for host in hosts_qs:
_update_host_last_jhs(host)
@receiver(pre_delete, sender=Job)
def save_host_pks_before_job_delete(sender, **kwargs):
instance = kwargs['instance']
hosts_qs = Host.objects.filter(active=True, last_job__pk=instance.pk)
hosts_qs = Host.objects.filter( last_job__pk=instance.pk)
instance._saved_hosts_pks = set(hosts_qs.values_list('pk', flat=True))
@receiver(post_delete, sender=Job)
@ -388,11 +314,6 @@ def activity_stream_update(sender, instance, **kwargs):
except sender.DoesNotExist:
return
# Handle the AWX mark-inactive for delete event
if hasattr(instance, 'active') and not instance.active:
activity_stream_delete(sender, instance, **kwargs)
return
new = instance
changes = model_instance_diff(old, new, model_serializer_mapping)
if changes is None:

View File

@ -13,7 +13,7 @@ class Migration(DataMigration):
# and orm['appname.ModelName'] for models in other applications.
# Refresh has_active_failures for all hosts.
for host in orm.Host.objects.filter(active=True):
for host in orm.Host.objects:
has_active_failures = bool(host.last_job_host_summary and
host.last_job_host_summary.job.active and
host.last_job_host_summary.failed)
@ -30,9 +30,9 @@ class Migration(DataMigration):
for subgroup in group.children.exclude(pk__in=except_group_pks):
qs = qs | get_all_hosts_for_group(subgroup, except_group_pks)
return qs
for group in orm.Group.objects.filter(active=True):
for group in orm.Group.objects:
all_hosts = get_all_hosts_for_group(group)
failed_hosts = all_hosts.filter(active=True,
failed_hosts = all_hosts.filter(
last_job_host_summary__job__active=True,
last_job_host_summary__failed=True)
hosts_with_active_failures = failed_hosts.count()
@ -49,8 +49,8 @@ class Migration(DataMigration):
# Now update has_active_failures and hosts_with_active_failures for all
# inventories.
for inventory in orm.Inventory.objects.filter(active=True):
failed_hosts = inventory.hosts.filter(active=True, has_active_failures=True)
for inventory in orm.Inventory.objects:
failed_hosts = inventory.hosts.filter( has_active_failures=True)
hosts_with_active_failures = failed_hosts.count()
has_active_failures = bool(hosts_with_active_failures)
changed = False

View File

@ -8,7 +8,7 @@ from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for iu in orm.InventoryUpdate.objects.filter(active=True):
for iu in orm.InventoryUpdate.objects:
if iu.inventory_source is None or iu.inventory_source.group is None or iu.inventory_source.inventory is None:
continue
iu.name = "%s (%s)" % (iu.inventory_source.group.name, iu.inventory_source.inventory.name)

View File

@ -12,7 +12,7 @@ from django.conf import settings
class Migration(DataMigration):
def forwards(self, orm):
for j in orm.UnifiedJob.objects.filter(active=True):
for j in orm.UnifiedJob.objects:
cur = connection.cursor()
stdout_filename = os.path.join(settings.JOBOUTPUT_ROOT, "%d-%s.out" % (j.pk, str(uuid.uuid1())))
fd = open(stdout_filename, 'w')

View File

@ -51,7 +51,7 @@ from awx.main.queue import FifoQueue
from awx.main.conf import tower_settings
from awx.main.task_engine import TaskSerializer, TASK_TIMEOUT_INTERVAL
from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url,
ignore_inventory_computed_fields, emit_websocket_notification,
emit_websocket_notification,
check_proot_installed, build_proot_temp_dir, wrap_args_with_proot)
__all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate',
@ -883,12 +883,12 @@ class RunJob(BaseTask):
'tower_job_id': job.pk,
'tower_job_launch_type': job.launch_type,
}
if job.job_template and job.job_template.active:
if job.job_template:
extra_vars.update({
'tower_job_template_id': job.job_template.pk,
'tower_job_template_name': job.job_template.name,
})
if job.created_by and job.created_by.is_active:
if job.created_by:
extra_vars.update({
'tower_user_id': job.created_by.pk,
'tower_user_name': job.created_by.username,
@ -1381,7 +1381,7 @@ class RunInventoryUpdate(BaseTask):
runpath = tempfile.mkdtemp(prefix='ansible_tower_launch_')
handle, path = tempfile.mkstemp(dir=runpath)
f = os.fdopen(handle, 'w')
if inventory_update.source_script is None or not inventory_update.source_script.active:
if inventory_update.source_script is None:
raise RuntimeError('Inventory Script does not exist')
f.write(inventory_update.source_script.script.encode('utf-8'))
f.close()

View File

@ -229,13 +229,18 @@ class BaseJobTestMixin(BaseTestMixin):
self.team_ops_west.users.add(self.user_iris)
# The south team is no longer active having been folded into the east team
self.team_ops_south = self.org_ops.teams.create(
name='southerners',
created_by=self.user_sue,
active=False,
)
self.team_ops_south.projects.add(self.proj_prod)
self.team_ops_south.users.add(self.user_greg)
# FIXME: This code can be removed (probably)
# - this case has been removed as we've gotten rid of the active flag, keeping
# code around in case this has ramifications on some test failures.. if
# you find this message and all tests are passing, then feel free to remove this
# - anoek 2016-03-10
#self.team_ops_south = self.org_ops.teams.create(
# name='southerners',
# created_by=self.user_sue,
# active=False,
#)
#self.team_ops_south.projects.add(self.proj_prod)
#self.team_ops_south.users.add(self.user_greg)
# The north team is going to be deleted
self.team_ops_north = self.org_ops.teams.create(
@ -337,11 +342,18 @@ class BaseJobTestMixin(BaseTestMixin):
password='Heading270',
created_by = self.user_sue,
)
self.cred_ops_south = self.team_ops_south.credentials.create(
username='south',
password='Heading180',
created_by = self.user_sue,
)
# FIXME: This code can be removed (probably)
# - this case has been removed as we've gotten rid of the active flag, keeping
        # code around in case this has ramifications on some test failures. If
# you find this message and all tests are passing, then feel free to remove this
# - anoek 2016-03-10
#self.cred_ops_south = self.team_ops_south.credentials.create(
# username='south',
# password='Heading180',
# created_by = self.user_sue,
#)
self.cred_ops_north = self.team_ops_north.credentials.create(
username='north',

View File

@ -520,12 +520,12 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
self.assertEqual(inventory_source.inventory_updates.count(), 1)
inventory_update = inventory_source.inventory_updates.all()[0]
self.assertEqual(inventory_update.status, 'successful')
for host in inventory.hosts.filter(active=True):
for host in inventory.hosts:
if host.pk in (except_host_pks or []):
continue
source_pks = host.inventory_sources.values_list('pk', flat=True)
self.assertTrue(inventory_source.pk in source_pks)
for group in inventory.groups.filter(active=True):
for group in inventory.groups:
if group.pk in (except_group_pks or []):
continue
source_pks = group.inventory_sources.values_list('pk', flat=True)
@ -693,7 +693,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
'lbservers', 'others'])
if overwrite:
expected_group_names.remove('lbservers')
group_names = set(new_inv.groups.filter(active=True).values_list('name', flat=True))
group_names = set(new_inv.groups.values_list('name', flat=True))
self.assertEqual(expected_group_names, group_names)
expected_host_names = set(['web1.example.com', 'web2.example.com',
'web3.example.com', 'db1.example.com',
@ -703,13 +703,13 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
'fe80::1610:9fff:fedd:b654', '::1'])
if overwrite:
expected_host_names.remove('lb.example.com')
host_names = set(new_inv.hosts.filter(active=True).values_list('name', flat=True))
host_names = set(new_inv.hosts.values_list('name', flat=True))
self.assertEqual(expected_host_names, host_names)
expected_inv_vars = {'vara': 'A', 'varc': 'C'}
if overwrite_vars:
expected_inv_vars.pop('varc')
self.assertEqual(new_inv.variables_dict, expected_inv_vars)
for host in new_inv.hosts.filter(active=True):
for host in new_inv.hosts:
if host.name == 'web1.example.com':
self.assertEqual(host.variables_dict,
{'ansible_ssh_host': 'w1.example.net'})
@ -721,35 +721,35 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
self.assertEqual(host.variables_dict, {'lbvar': 'ni!'})
else:
self.assertEqual(host.variables_dict, {})
for group in new_inv.groups.filter(active=True):
for group in new_inv.groups:
if group.name == 'servers':
expected_vars = {'varb': 'B', 'vard': 'D'}
if overwrite_vars:
expected_vars.pop('vard')
self.assertEqual(group.variables_dict, expected_vars)
children = set(group.children.filter(active=True).values_list('name', flat=True))
children = set(group.children.values_list('name', flat=True))
expected_children = set(['dbservers', 'webservers', 'lbservers'])
if overwrite:
expected_children.remove('lbservers')
self.assertEqual(children, expected_children)
self.assertEqual(group.hosts.filter(active=True).count(), 0)
self.assertEqual(group.hosts.count(), 0)
elif group.name == 'dbservers':
self.assertEqual(group.variables_dict, {'dbvar': 'ugh'})
self.assertEqual(group.children.filter(active=True).count(), 0)
hosts = set(group.hosts.filter(active=True).values_list('name', flat=True))
self.assertEqual(group.children.count(), 0)
hosts = set(group.hosts.values_list('name', flat=True))
host_names = set(['db1.example.com','db2.example.com'])
self.assertEqual(hosts, host_names)
elif group.name == 'webservers':
self.assertEqual(group.variables_dict, {'webvar': 'blah'})
self.assertEqual(group.children.filter(active=True).count(), 0)
hosts = set(group.hosts.filter(active=True).values_list('name', flat=True))
self.assertEqual(group.children.count(), 0)
hosts = set(group.hosts.values_list('name', flat=True))
host_names = set(['web1.example.com','web2.example.com',
'web3.example.com'])
self.assertEqual(hosts, host_names)
elif group.name == 'lbservers':
self.assertEqual(group.variables_dict, {})
self.assertEqual(group.children.filter(active=True).count(), 0)
hosts = set(group.hosts.filter(active=True).values_list('name', flat=True))
self.assertEqual(group.children.count(), 0)
hosts = set(group.hosts.values_list('name', flat=True))
host_names = set(['lb.example.com'])
self.assertEqual(hosts, host_names)
if overwrite:
@ -799,7 +799,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
# Check hosts in dotcom group.
group = new_inv.groups.get(name='dotcom')
self.assertEqual(group.hosts.count(), 65)
for host in group.hosts.filter(active=True, name__startswith='web'):
        for host in group.hosts.filter(name__startswith='web'):
self.assertEqual(host.variables_dict.get('ansible_ssh_user', ''), 'example')
# Check hosts in dotnet group.
group = new_inv.groups.get(name='dotnet')
@ -807,7 +807,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
# Check hosts in dotorg group.
group = new_inv.groups.get(name='dotorg')
self.assertEqual(group.hosts.count(), 61)
for host in group.hosts.filter(active=True):
for host in group.hosts:
if host.name.startswith('mx.'):
continue
self.assertEqual(host.variables_dict.get('ansible_ssh_user', ''), 'example')
@ -815,7 +815,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
# Check hosts in dotus group.
group = new_inv.groups.get(name='dotus')
self.assertEqual(group.hosts.count(), 10)
for host in group.hosts.filter(active=True):
for host in group.hosts:
if int(host.name[2:4]) % 2 == 0:
self.assertEqual(host.variables_dict.get('even_odd', ''), 'even')
else:
@ -969,7 +969,7 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
else:
return 0
def _check_largeinv_import(self, new_inv, nhosts, nhosts_inactive=0):
def _check_largeinv_import(self, new_inv, nhosts):
self._start_time = time.time()
inv_file = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'largeinv.py')
ngroups = self._get_ngroups_for_nhosts(nhosts)
@ -982,9 +982,8 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
# Check that inventory is populated as expected within a reasonable
# amount of time. Computed fields should also be updated.
new_inv = Inventory.objects.get(pk=new_inv.pk)
self.assertEqual(new_inv.hosts.filter(active=True).count(), nhosts)
self.assertEqual(new_inv.groups.filter(active=True).count(), ngroups)
self.assertEqual(new_inv.hosts.filter(active=False).count(), nhosts_inactive)
self.assertEqual(new_inv.hosts.count(), nhosts)
self.assertEqual(new_inv.groups.count(), ngroups)
self.assertEqual(new_inv.total_hosts, nhosts)
self.assertEqual(new_inv.total_groups, ngroups)
self.assertElapsedLessThan(120)
@ -998,10 +997,10 @@ class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
self.assertEqual(new_inv.groups.count(), 0)
nhosts = 2000
# Test initial import into empty inventory.
self._check_largeinv_import(new_inv, nhosts, 0)
self._check_largeinv_import(new_inv, nhosts)
# Test re-importing and overwriting.
self._check_largeinv_import(new_inv, nhosts, 0)
self._check_largeinv_import(new_inv, nhosts)
# Test re-importing with only half as many hosts.
self._check_largeinv_import(new_inv, nhosts / 2, nhosts / 2)
self._check_largeinv_import(new_inv, nhosts / 2)
# Test re-importing that clears all hosts.
self._check_largeinv_import(new_inv, 0, nhosts)
self._check_largeinv_import(new_inv, 0)

View File

@ -69,7 +69,7 @@ class InventoryTest(BaseTest):
def test_get_inventory_list(self):
url = reverse('api:inventory_list')
qs = Inventory.objects.filter(active=True).distinct()
qs = Inventory.objects.distinct()
# Check list view with invalid authentication.
self.check_invalid_auth(url)
@ -226,6 +226,8 @@ class InventoryTest(BaseTest):
self.inventory_a.groups.create(name='group-a')
self.inventory_b.hosts.create(name='host-b')
self.inventory_b.groups.create(name='group-b')
a_pk = self.inventory_a.pk
b_pk = self.inventory_b.pk
# Check put to detail view with invalid authentication.
self.check_invalid_auth(url_a, methods=('delete',))
@ -248,24 +250,16 @@ class InventoryTest(BaseTest):
self.delete(url_a, expect=204)
self.delete(url_b, expect=403)
# Verify that the inventory is marked inactive, along with all its
# hosts and groups.
self.inventory_a = Inventory.objects.get(pk=self.inventory_a.pk)
self.assertFalse(self.inventory_a.active)
self.assertFalse(self.inventory_a.hosts.filter(active=True).count())
self.assertFalse(self.inventory_a.groups.filter(active=True).count())
# Verify that the inventory was deleted
assert Inventory.objects.filter(pk=a_pk).count() == 0
# a super user can delete inventory records
with self.current_user(self.super_django_user):
self.delete(url_a, expect=404)
self.delete(url_b, expect=204)
# Verify that the inventory is marked inactive, along with all its
# hosts and groups.
self.inventory_b = Inventory.objects.get(pk=self.inventory_b.pk)
self.assertFalse(self.inventory_b.active)
self.assertFalse(self.inventory_b.hosts.filter(active=True).count())
self.assertFalse(self.inventory_b.groups.filter(active=True).count())
# Verify that the inventory was deleted
assert Inventory.objects.filter(pk=b_pk).count() == 0
def test_inventory_access_deleted_permissions(self):
temp_org = self.make_organizations(self.super_django_user, 1)[0]
@ -747,13 +741,11 @@ class InventoryTest(BaseTest):
# removed group should be automatically marked inactive once it no longer has any parents.
removed_group = Group.objects.get(pk=result['id'])
self.assertTrue(removed_group.parents.count())
self.assertTrue(removed_group.active)
for parent in removed_group.parents.all():
parent_children_url = reverse('api:group_children_list', args=(parent.pk,))
data = {'id': removed_group.pk, 'disassociate': 1}
self.post(parent_children_url, data, expect=204, auth=self.get_super_credentials())
removed_group = Group.objects.get(pk=result['id'])
#self.assertFalse(removed_group.active) # FIXME: Disabled for now because automatically deleting group with no parents is also disabled.
# Removing a group from a hierarchy should migrate its children to the
# parent. The group itself will be deleted (marked inactive), and all
@ -766,7 +758,6 @@ class InventoryTest(BaseTest):
with self.current_user(self.super_django_user):
self.post(url, data, expect=204)
gx3 = Group.objects.get(pk=gx3.pk)
#self.assertFalse(gx3.active) # FIXME: Disabled for now....
self.assertFalse(gx3 in gx2.children.all())
#self.assertTrue(gx4 in gx2.children.all())
@ -1265,7 +1256,7 @@ class InventoryUpdatesTest(BaseTransactionTest):
url = reverse('api:inventory_source_hosts_list', args=(inventory_source.pk,))
response = self.get(url, expect=200)
self.assertNotEqual(response['count'], 0)
for host in inventory.hosts.filter(active=True):
for host in inventory.hosts:
source_pks = host.inventory_sources.values_list('pk', flat=True)
self.assertTrue(inventory_source.pk in source_pks)
self.assertTrue(host.has_inventory_sources)
@ -1279,12 +1270,12 @@ class InventoryUpdatesTest(BaseTransactionTest):
url = reverse('api:host_inventory_sources_list', args=(host.pk,))
response = self.get(url, expect=200)
self.assertNotEqual(response['count'], 0)
for group in inventory.groups.filter(active=True):
for group in inventory.groups:
source_pks = group.inventory_sources.values_list('pk', flat=True)
self.assertTrue(inventory_source.pk in source_pks)
self.assertTrue(group.has_inventory_sources)
self.assertTrue(group.children.filter(active=True).exists() or
group.hosts.filter(active=True).exists())
self.assertTrue(group.children.exists() or
group.hosts.exists())
# Make sure EC2 instance ID groups and RDS groups are excluded.
if inventory_source.source == 'ec2' and not instance_id_group_ok:
self.assertFalse(re.match(r'^i-[0-9a-f]{8}$', group.name, re.I),
@ -1302,7 +1293,7 @@ class InventoryUpdatesTest(BaseTransactionTest):
self.assertNotEqual(response['count'], 0)
# Try to set a source on a child group that was imported. Should not
# be allowed.
for group in inventory_source.group.children.filter(active=True):
for group in inventory_source.group.children:
inv_src_2 = group.inventory_source
inv_src_url2 = reverse('api:inventory_source_detail', args=(inv_src_2.pk,))
with self.current_user(self.super_django_user):
@ -1658,7 +1649,7 @@ class InventoryUpdatesTest(BaseTransactionTest):
inventory_source.overwrite = True
inventory_source.save()
self.check_inventory_source(inventory_source, initial=False)
for host in self.inventory.hosts.filter(active=True):
for host in self.inventory.hosts:
self.assertEqual(host.variables_dict['ec2_instance_type'], instance_type)
# Try invalid instance filters that should be ignored:
@ -1792,12 +1783,12 @@ class InventoryUpdatesTest(BaseTransactionTest):
inventory_source.save()
self.check_inventory_source(inventory_source, initial=False)
# Verify that only the desired groups are returned.
child_names = self.group.children.filter(active=True).values_list('name', flat=True)
child_names = self.group.children.values_list('name', flat=True)
self.assertTrue('ec2' in child_names)
self.assertTrue('regions' in child_names)
self.assertTrue(self.group.children.get(name='regions').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='regions').children.count())
self.assertTrue('types' in child_names)
self.assertTrue(self.group.children.get(name='types').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='types').children.count())
self.assertFalse('keys' in child_names)
self.assertFalse('security_groups' in child_names)
self.assertFalse('tags' in child_names)
@ -1814,27 +1805,27 @@ class InventoryUpdatesTest(BaseTransactionTest):
self.check_inventory_source(inventory_source, initial=False, instance_id_group_ok=True)
# Verify that only the desired groups are returned.
# Skip vpcs as selected inventory may or may not have any.
child_names = self.group.children.filter(active=True).values_list('name', flat=True)
child_names = self.group.children.values_list('name', flat=True)
self.assertTrue('ec2' in child_names)
self.assertFalse('tag_none' in child_names)
self.assertTrue('regions' in child_names)
self.assertTrue(self.group.children.get(name='regions').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='regions').children.count())
self.assertTrue('types' in child_names)
self.assertTrue(self.group.children.get(name='types').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='types').children.count())
self.assertTrue('keys' in child_names)
self.assertTrue(self.group.children.get(name='keys').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='keys').children.count())
self.assertTrue('security_groups' in child_names)
self.assertTrue(self.group.children.get(name='security_groups').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='security_groups').children.count())
self.assertTrue('tags' in child_names)
self.assertTrue(self.group.children.get(name='tags').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='tags').children.count())
# Only check for tag_none as a child of tags if there is a tag_none group;
# the test inventory *may* have tags set for all hosts.
if self.inventory.groups.filter(name='tag_none').exists():
self.assertTrue('tag_none' in self.group.children.get(name='tags').children.values_list('name', flat=True))
self.assertTrue('images' in child_names)
self.assertTrue(self.group.children.get(name='images').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='images').children.count())
self.assertTrue('instances' in child_names)
self.assertTrue(self.group.children.get(name='instances').children.filter(active=True).count())
self.assertTrue(self.group.children.get(name='instances').children.count())
# Sync again with overwrite set to False after renaming a group that
# was created by the sync. With overwrite false, the renamed group and
# the original group (created again by the sync) will both exist.
@ -1848,7 +1839,7 @@ class InventoryUpdatesTest(BaseTransactionTest):
inventory_source.overwrite = False
inventory_source.save()
self.check_inventory_source(inventory_source, initial=False, instance_id_group_ok=True)
child_names = self.group.children.filter(active=True).values_list('name', flat=True)
child_names = self.group.children.values_list('name', flat=True)
self.assertTrue(region_group_original_name in self.group.children.get(name='regions').children.values_list('name', flat=True))
self.assertTrue(region_group.name in self.group.children.get(name='regions').children.values_list('name', flat=True))
# Replacement text should not be left in inventory source name.

View File

@ -137,7 +137,7 @@ class JobTemplateLaunchTest(BaseJobTestMixin, django.test.TransactionTestCase):
self.post(self.launch_url, {'credential_id': 0}, expect=400)
self.post(self.launch_url, {'credential': 'one'}, expect=400)
self.post(self.launch_url, {'credential_id': 'one'}, expect=400)
doug_pk = self.cred_doug.pk
cred_doug_pk = self.cred_doug.pk
self.cred_doug.delete()
self.post(self.launch_url, {'credential': cred_doug_pk}, expect=400)
self.post(self.launch_url, {'credential_id': cred_doug_pk}, expect=400)

View File

@ -436,10 +436,8 @@ class OrganizationsTest(BaseTest):
self.delete(urls[0], expect=204, auth=self.get_super_credentials())
# check that when we have deleted an object it comes back 404 via GET
        # and that it has been fully removed from the database
self.get(urls[1], expect=404, auth=self.get_normal_credentials())
org1 = Organization.objects.get(pk=urldata1['id'])
self.assertEquals(org1.active, False)
assert Organization.objects.filter(pk=urldata1['id']).count() == 0
# also check that DELETE on the collection doesn't work
self.delete(self.collection(), expect=405, auth=self.get_super_credentials())

View File

@ -162,7 +162,7 @@ class ProjectsTest(BaseTransactionTest):
set(Project.get_local_path_choices()))
        # returned local paths are only the ones not used by any existing project.
qs = Project.objects.filter(active=True)
qs = Project.objects
used_paths = qs.values_list('local_path', flat=True)
self.assertFalse(set(response['project_local_paths']) & set(used_paths))
for project in self.projects:
@ -402,7 +402,7 @@ class ProjectsTest(BaseTransactionTest):
# =====================================================================
# TEAM PROJECTS
team = Team.objects.filter(active=True, organization__pk=self.organizations[1].pk)[0]
        team = Team.objects.filter(organization__pk=self.organizations[1].pk)[0]
team_projects = reverse('api:team_projects_list', args=(team.pk,))
p1 = self.projects[0]
@ -419,7 +419,7 @@ class ProjectsTest(BaseTransactionTest):
# =====================================================================
# TEAMS USER MEMBERSHIP
team = Team.objects.filter(active=True, organization__pk=self.organizations[1].pk)[0]
        team = Team.objects.filter(organization__pk=self.organizations[1].pk)[0]
team_users = reverse('api:team_users_list', args=(team.pk,))
for x in team.deprecated_users.all():
team.deprecated_users.remove(x)
@ -1262,7 +1262,7 @@ class ProjectUpdatesTest(BaseTransactionTest):
else:
self.check_project_update(project, should_fail=should_still_fail)
# Test that we can delete project updates.
for pu in project.project_updates.filter(active=True):
for pu in project.project_updates:
pu_url = reverse('api:project_update_detail', args=(pu.pk,))
with self.current_user(self.super_django_user):
self.delete(pu_url, expect=204)

View File

@ -146,12 +146,11 @@ class InventoryScriptTest(BaseScriptTest):
def test_list_with_inventory_id_as_argument(self):
inventory = self.inventories[0]
self.assertTrue(inventory.active)
rc, stdout, stderr = self.run_inventory_script(list=True,
inventory=inventory.pk)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = [ x for x in groups.values_list('name', flat=True)]
# it's ok for all to be here because due to an Ansible inventory workaround
@ -167,16 +166,13 @@ class InventoryScriptTest(BaseScriptTest):
self.assertTrue(isinstance(v['children'], (list,tuple)))
self.assertTrue(isinstance(v['hosts'], (list,tuple)))
self.assertTrue(isinstance(v['vars'], (dict)))
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True)
group = inventory.groups.get(name=k)
hosts = group.hosts
hostnames = hosts.values_list('name', flat=True)
self.assertEqual(set(v['hosts']), set(hostnames))
else:
self.assertTrue(v['hosts'] == ['localhost'])
for group in inventory.groups.filter(active=False):
self.assertFalse(group.name in data.keys(),
'deleted group %s should not be in data' % group)
# Command line argument for inventory ID should take precedence over
# environment variable.
inventory_pks = set(map(lambda x: x.pk, self.inventories))
@ -189,12 +185,11 @@ class InventoryScriptTest(BaseScriptTest):
def test_list_with_inventory_id_in_environment(self):
inventory = self.inventories[1]
self.assertTrue(inventory.active)
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(list=True)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = list(groups.values_list('name', flat=True)) + ['all']
self.assertEqual(set(data.keys()), set(groupnames))
# Groups for this inventory should have hosts, variable data, and one
@ -204,14 +199,14 @@ class InventoryScriptTest(BaseScriptTest):
if k == 'all':
self.assertEqual(v.get('vars', {}), inventory.variables_dict)
continue
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True)
group = inventory.groups.get(name=k)
hosts = group.hosts
hostnames = hosts.values_list('name', flat=True)
self.assertEqual(set(v.get('hosts', [])), set(hostnames))
if group.variables:
self.assertEqual(v.get('vars', {}), group.variables_dict)
if k == 'group-3':
children = group.children.filter(active=True)
children = group.children
childnames = children.values_list('name', flat=True)
self.assertEqual(set(v.get('children', [])), set(childnames))
else:
@ -219,13 +214,12 @@ class InventoryScriptTest(BaseScriptTest):
def test_list_with_hostvars_inline(self):
inventory = self.inventories[1]
self.assertTrue(inventory.active)
rc, stdout, stderr = self.run_inventory_script(list=True,
inventory=inventory.pk,
hostvars=True)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = list(groups.values_list('name', flat=True))
groupnames.extend(['all', '_meta'])
self.assertEqual(set(data.keys()), set(groupnames))
@ -239,15 +233,15 @@ class InventoryScriptTest(BaseScriptTest):
continue
if k == '_meta':
continue
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True)
group = inventory.groups.get(name=k)
hosts = group.hosts
hostnames = hosts.values_list('name', flat=True)
all_hostnames.update(hostnames)
self.assertEqual(set(v.get('hosts', [])), set(hostnames))
if group.variables:
self.assertEqual(v.get('vars', {}), group.variables_dict)
if k == 'group-3':
children = group.children.filter(active=True)
children = group.children
childnames = children.values_list('name', flat=True)
self.assertEqual(set(v.get('children', [])), set(childnames))
else:
@ -269,8 +263,7 @@ class InventoryScriptTest(BaseScriptTest):
def test_valid_host(self):
# Host without variable data.
inventory = self.inventories[0]
self.assertTrue(inventory.active)
host = inventory.hosts.filter(active=True)[2]
host = inventory.hosts[2]
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(host=host.name)
self.assertEqual(rc, 0, stderr)
@ -278,8 +271,7 @@ class InventoryScriptTest(BaseScriptTest):
self.assertEqual(data, {})
# Host with variable data.
inventory = self.inventories[1]
self.assertTrue(inventory.active)
host = inventory.hosts.filter(active=True)[4]
host = inventory.hosts[4]
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(host=host.name)
self.assertEqual(rc, 0, stderr)
@ -289,8 +281,7 @@ class InventoryScriptTest(BaseScriptTest):
def test_invalid_host(self):
# Valid host, but not part of the specified inventory.
inventory = self.inventories[0]
self.assertTrue(inventory.active)
host = Host.objects.filter(active=True).exclude(inventory=inventory)[0]
host = Host.objects.exclude(inventory=inventory)[0]
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(host=host.name)
self.assertNotEqual(rc, 0, stderr)
@ -331,7 +322,6 @@ class InventoryScriptTest(BaseScriptTest):
def test_without_list_or_host_argument(self):
inventory = self.inventories[0]
self.assertTrue(inventory.active)
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script()
self.assertNotEqual(rc, 0, stderr)
@ -339,7 +329,6 @@ class InventoryScriptTest(BaseScriptTest):
def test_with_both_list_and_host_arguments(self):
inventory = self.inventories[0]
self.assertTrue(inventory.active)
os.environ['INVENTORY_ID'] = str(inventory.pk)
rc, stdout, stderr = self.run_inventory_script(list=True, host='blah')
self.assertNotEqual(rc, 0, stderr)
@ -347,8 +336,7 @@ class InventoryScriptTest(BaseScriptTest):
def test_with_disabled_hosts(self):
inventory = self.inventories[1]
self.assertTrue(inventory.active)
for host in inventory.hosts.filter(active=True, enabled=True):
for host in inventory.hosts.filter(enabled=True):
host.enabled = False
host.save(update_fields=['enabled'])
os.environ['INVENTORY_ID'] = str(inventory.pk)
@ -356,7 +344,7 @@ class InventoryScriptTest(BaseScriptTest):
rc, stdout, stderr = self.run_inventory_script(list=True)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = list(groups.values_list('name', flat=True)) + ['all']
self.assertEqual(set(data.keys()), set(groupnames))
for k,v in data.items():
@ -364,15 +352,15 @@ class InventoryScriptTest(BaseScriptTest):
if k == 'all':
self.assertEqual(v.get('vars', {}), inventory.variables_dict)
continue
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True, enabled=True)
group = inventory.groups.get(name=k)
hosts = group.hosts.filter(enabled=True)
hostnames = hosts.values_list('name', flat=True)
self.assertEqual(set(v.get('hosts', [])), set(hostnames))
self.assertFalse(hostnames)
if group.variables:
self.assertEqual(v.get('vars', {}), group.variables_dict)
if k == 'group-3':
children = group.children.filter(active=True)
children = group.children
childnames = children.values_list('name', flat=True)
self.assertEqual(set(v.get('children', [])), set(childnames))
else:
@ -381,7 +369,7 @@ class InventoryScriptTest(BaseScriptTest):
rc, stdout, stderr = self.run_inventory_script(list=True, all=True)
self.assertEqual(rc, 0, stderr)
data = json.loads(stdout)
groups = inventory.groups.filter(active=True)
groups = inventory.groups
groupnames = list(groups.values_list('name', flat=True)) + ['all']
self.assertEqual(set(data.keys()), set(groupnames))
for k,v in data.items():
@ -389,15 +377,15 @@ class InventoryScriptTest(BaseScriptTest):
if k == 'all':
self.assertEqual(v.get('vars', {}), inventory.variables_dict)
continue
group = inventory.groups.get(active=True, name=k)
hosts = group.hosts.filter(active=True)
group = inventory.groups.get(name=k)
hosts = group.hosts
hostnames = hosts.values_list('name', flat=True)
self.assertEqual(set(v.get('hosts', [])), set(hostnames))
self.assertTrue(hostnames)
if group.variables:
self.assertEqual(v.get('vars', {}), group.variables_dict)
if k == 'group-3':
children = group.children.filter(active=True)
children = group.children
childnames = children.values_list('name', flat=True)
self.assertEqual(set(v.get('children', [])), set(childnames))
else:

View File

@ -209,7 +209,6 @@ REST_FRAMEWORK = {
'awx.api.permissions.ModelAccessPermission',
),
'DEFAULT_FILTER_BACKENDS': (
'awx.api.filters.ActiveOnlyBackend',
'awx.api.filters.TypeFilterBackend',
'awx.api.filters.FieldLookupBackend',
'rest_framework.filters.SearchFilter',

View File

@ -90,7 +90,7 @@ def update_user_orgs(backend, details, user=None, *args, **kwargs):
org = Organization.objects.get_or_create(name=org_name)[0]
else:
try:
org = Organization.objects.filter(active=True).order_by('pk')[0]
org = Organization.objects.order_by('pk')[0]
except IndexError:
continue
@ -126,7 +126,7 @@ def update_user_teams(backend, details, user=None, *args, **kwargs):
org = Organization.objects.get_or_create(name=team_opts['organization'])[0]
else:
try:
org = Organization.objects.filter(active=True).order_by('pk')[0]
org = Organization.objects.order_by('pk')[0]
except IndexError:
continue