mirror of https://github.com/ansible/awx.git synced 2024-10-27 09:25:10 +03:00

Merge pull request #5047 from ryanpetrello/devel

merge a variety of downstream bug fixes
Ryan Petrello 2019-10-21 12:27:54 -04:00 committed by GitHub
commit e9af6af97c
22 changed files with 180 additions and 48 deletions

View File

@ -189,7 +189,7 @@ requirements_awx: virtualenv_awx
cat requirements/requirements.txt requirements/requirements_git.txt | $(VENV_BASE)/awx/bin/pip install $(PIP_OPTIONS) --no-binary $(SRC_ONLY_PKGS) --ignore-installed -r /dev/stdin ; \
fi
echo "include-system-site-packages = true" >> $(VENV_BASE)/awx/lib/python$(PYTHON_VERSION)/pyvenv.cfg
#$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
$(VENV_BASE)/awx/bin/pip uninstall --yes -r requirements/requirements_tower_uninstall.txt
requirements_awx_dev:
$(VENV_BASE)/awx/bin/pip install -r requirements/requirements_dev.txt

View File

@ -92,7 +92,7 @@ class LoggedLoginView(auth_views.LoginView):
ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
current_user = getattr(request, 'user', None)
if request.user.is_authenticated:
logger.info(smart_text(u"User {} logged in.".format(self.request.user.username)))
logger.info(smart_text(u"User {} logged in from {}".format(self.request.user.username,request.META.get('REMOTE_ADDR', None))))
ret.set_cookie('userLoggedIn', 'true')
current_user = UserSerializer(self.request.user)
current_user = smart_text(JSONRenderer().render(current_user.data))
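
The login audit message now includes the client address taken from REMOTE_ADDR. A minimal standalone sketch of the resulting format, with made-up values:

# Minimal sketch of the new log message; the username and address are hypothetical.
username, remote_addr = 'admin', '192.0.2.10'
print(u"User {} logged in from {}".format(username, remote_addr))
# -> User admin logged in from 192.0.2.10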

View File

@ -4406,6 +4406,8 @@ class NotificationTemplateSerializer(BaseSerializer):
for event in messages:
if not messages[event]:
continue
if not isinstance(messages[event], dict):
continue
body = messages[event].get('body', {})
if body:
try:
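
The added isinstance() guard protects against custom notification payloads where an event maps to something other than a dict. A hypothetical payload (values made up) showing the case the guard now skips:

# 'error' is a bare string here, so calling .get('body') on it would raise
# AttributeError without the new isinstance() check.
messages = {
    'started': None,                                    # skipped by the existing falsy check
    'success': {'body': 'job {{ job.id }} succeeded'},  # processed normally
    'error': 'something went wrong',                    # now skipped instead of crashing
}
for event in messages:
    if not messages[event]:
        continue
    if not isinstance(messages[event], dict):
        continue
    body = messages[event].get('body', {})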

View File

@ -88,8 +88,8 @@ def gather(dest=None, module=None, collection_type='scheduled'):
logger.exception("Invalid License provided, or No License Provided")
return "Error: Invalid License provided, or No License Provided"
if not settings.INSIGHTS_TRACKING_STATE:
logger.error("Automation Analytics not enabled")
if collection_type != 'dry-run' and not settings.INSIGHTS_TRACKING_STATE:
logger.error("Automation Analytics not enabled. Use --dry-run to gather locally without sending.")
return
if module is None:
@ -167,7 +167,7 @@ def ship(path):
files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)}
response = requests.post(url,
files=files,
verify=True,
verify="/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
auth=(rh_user, rh_password),
timeout=(31, 31))
if response.status_code != 202:

View File

@ -351,8 +351,9 @@ register(
'AWX_RESOURCE_PROFILING_ENABLED',
field_class=fields.BooleanField,
default=False,
label=_('Enable resource profiling on all tower jobs'),
help_text=_('If set, resource profiling data will be collected on all jobs.'), # noqa
label=_('Enable detailed resource profiling on all playbook runs'),
help_text=_('If set, detailed resource profiling data will be collected on all jobs. '
'This data can be gathered with `sosreport`.'), # noqa
category=_('Jobs'),
category_slug='jobs',
)
@ -362,7 +363,8 @@ register(
field_class=FloatField,
default='0.25',
label=_('Interval (in seconds) between polls for cpu usage.'),
help_text=_('Interval (in seconds) between polls for cpu usage.'),
help_text=_('Interval (in seconds) between polls for cpu usage. '
'Setting this lower than the default will affect playbook performance.'),
category=_('Jobs'),
category_slug='jobs',
required=False,
@ -373,7 +375,8 @@ register(
field_class=FloatField,
default='0.25',
label=_('Interval (in seconds) between polls for memory usage.'),
help_text=_('Interval (in seconds) between polls for memory usage.'),
help_text=_('Interval (in seconds) between polls for memory usage. '
'Setting this lower than the default will affect playbook performance.'),
category=_('Jobs'),
category_slug='jobs',
required=False,
@ -384,7 +387,8 @@ register(
field_class=FloatField,
default='0.25',
label=_('Interval (in seconds) between polls for PID count.'),
help_text=_('Interval (in seconds) between polls for PID count.'),
help_text=_('Interval (in seconds) between polls for PID count. '
'Setting this lower than the default will affect playbook performance.'),
category=_('Jobs'),
category_slug='jobs',
required=False,
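
For reference, a sketch of how these knobs might appear in a settings file. Only AWX_RESOURCE_PROFILING_ENABLED is shown by name in the hunk above; the three *_POLL_INTERVAL names below are assumptions for illustration, and the values are the documented defaults.

# Assumed setting names; only AWX_RESOURCE_PROFILING_ENABLED appears in this diff.
AWX_RESOURCE_PROFILING_ENABLED = False
AWX_RESOURCE_PROFILING_CPU_POLL_INTERVAL = 0.25     # seconds between CPU usage polls
AWX_RESOURCE_PROFILING_MEMORY_POLL_INTERVAL = 0.25  # seconds between memory usage polls
AWX_RESOURCE_PROFILING_PID_POLL_INTERVAL = 0.25     # seconds between PID count polls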

View File

@ -101,7 +101,7 @@ def aim_backend(**kwargs):
aim_plugin = CredentialPlugin(
'CyberArk AIM Secret Lookup',
'CyberArk AIM Central Credential Provider Lookup',
inputs=aim_inputs,
backend=aim_backend
)

View File

@ -172,6 +172,7 @@ class IsolatedManager(object):
if runner_obj.status == 'failed':
self.instance.result_traceback = runner_obj.stdout.read()
self.instance.save(update_fields=['result_traceback'])
return 'error', runner_obj.rc
return runner_obj.status, runner_obj.rc

View File

@ -11,6 +11,8 @@ class Command(BaseCommand):
help = 'Gather AWX analytics data'
def add_arguments(self, parser):
parser.add_argument('--dry-run', dest='dry-run', action='store_true',
help='Gather analytics without shipping. Works even if analytics are disabled in settings.')
parser.add_argument('--ship', dest='ship', action='store_true',
help='Enable to ship metrics to the Red Hat Cloud')
@ -23,9 +25,14 @@ class Command(BaseCommand):
self.logger.propagate = False
def handle(self, *args, **options):
tgz = gather(collection_type='manual')
self.init_logging()
opt_ship = options.get('ship')
opt_dry_run = options.get('dry-run')
if opt_ship and opt_dry_run:
self.logger.error('Both --ship and --dry-run cannot be processed at the same time.')
return
tgz = gather(collection_type='manual' if not opt_dry_run else 'dry-run')
if tgz:
self.logger.debug(tgz)
if options.get('ship'):
if opt_ship:
ship(tgz)
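
A minimal sketch of driving the updated command, assuming it is registered as gather_analytics and a configured AWX/Django environment; per the check above, --ship and --dry-run are mutually exclusive and passing both only logs an error.

# Programmatic equivalents of `awx-manage gather_analytics ...`; raw option strings
# are passed so the command's own argument parser handles the dashed dest names.
from django.core.management import call_command

call_command('gather_analytics', '--dry-run')  # gather locally, even if analytics are disabled
call_command('gather_analytics', '--ship')     # gather and ship to the Red Hat Cloud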

View File

@ -0,0 +1,31 @@
# Generated by Django 2.2.4 on 2019-10-16 19:51
from django.db import migrations
from awx.main.models import CredentialType
def update_cyberark_aim_name(apps, schema_editor):
CredentialType.setup_tower_managed_defaults()
aim_types = apps.get_model('main', 'CredentialType').objects.filter(
namespace='aim'
).order_by('id')
if aim_types.count() == 2:
original, renamed = aim_types.all()
apps.get_model('main', 'Credential').objects.filter(
credential_type_id=original.id
).update(
credential_type_id=renamed.id
)
original.delete()
class Migration(migrations.Migration):
dependencies = [
('main', '0097_v360_workflowapproval_approved_or_denied_by'),
]
operations = [
migrations.RunPython(update_cyberark_aim_name)
]

View File

@ -629,15 +629,17 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
@property
def task_impact(self):
# NOTE: We sorta have to assume the host count matches and that forks default to 5
from awx.main.models.inventory import Host
if self.launch_type == 'callback':
count_hosts = 2
else:
count_hosts = Host.objects.filter(inventory__jobs__pk=self.pk).count()
if self.job_slice_count > 1:
# Integer division intentional
count_hosts = (count_hosts + self.job_slice_count - self.job_slice_number) // self.job_slice_count
# If for some reason we can't count the hosts then lets assume the impact as forks
if self.inventory is not None:
count_hosts = self.inventory.hosts.count()
if self.job_slice_count > 1:
# Integer division intentional
count_hosts = (count_hosts + self.job_slice_count - self.job_slice_number) // self.job_slice_count
else:
count_hosts = 5 if self.forks == 0 else self.forks
return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1
@property
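
A worked example of the revised task_impact arithmetic, as a standalone sketch with made-up inputs: an inventory of 12 hosts, 3 job slices with this job being slice 2, and forks left at 0 so the default of 5 applies.

# Standalone sketch of the new task_impact math; all inputs are hypothetical.
count_hosts = 12                                    # self.inventory.hosts.count()
job_slice_count, job_slice_number, forks = 3, 2, 0

count_hosts = (count_hosts + job_slice_count - job_slice_number) // job_slice_count  # 13 // 3 == 4
impact = min(count_hosts, 5 if forks == 0 else forks) + 1                            # min(4, 5) + 1 == 5
print(impact)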

View File

@ -1,3 +1,4 @@
import collections
import os
import stat
import time
@ -47,6 +48,27 @@ class PodManager(object):
else:
logger.warn(f"Pod {self.pod_name} did not start. Status is {pod.status.phase}.")
@classmethod
def list_active_jobs(self, instance_group):
task = collections.namedtuple('Task', 'id instance_group')(
id='',
instance_group=instance_group
)
pm = PodManager(task)
try:
for pod in pm.kube_api.list_namespaced_pod(
pm.namespace,
label_selector='ansible-awx={}'.format(settings.INSTALL_UUID)
).to_dict().get('items', []):
job = pod['metadata'].get('labels', {}).get('ansible-awx-job-id')
if job:
try:
yield int(job)
except ValueError:
pass
except Exception:
logger.exception('Failed to list pods for container group {}'.format(instance_group))
def delete(self):
return self.kube_api.delete_namespaced_pod(name=self.pod_name,
namespace=self.namespace,
@ -71,7 +93,7 @@ class PodManager(object):
@property
def pod_name(self):
return f"job-{self.task.id}"
return f"awx-job-{self.task.id}"
@property
def pod_definition(self):
@ -102,6 +124,10 @@ class PodManager(object):
if self.task:
pod_spec['metadata']['name'] = self.pod_name
pod_spec['metadata']['labels'] = {
'ansible-awx': settings.INSTALL_UUID,
'ansible-awx-job-id': str(self.task.id)
}
pod_spec['spec']['containers'][0]['name'] = self.pod_name
return pod_spec
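
These labels are what awx_k8s_reaper's label selector matches when it looks for orphaned pods. An illustration with hypothetical values:

# Hypothetical label set attached to each job pod by pod_definition above.
labels = {
    'ansible-awx': '00000000-0000-0000-0000-000000000000',  # settings.INSTALL_UUID
    'ansible-awx-job-id': '1234',                            # UnifiedJob primary key
}
# An operator could list the same pods by hand with, e.g.:
#   kubectl get pods -n <namespace> -l ansible-awx=<INSTALL_UUID>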

View File

@ -253,6 +253,18 @@ class TaskManager():
task.log_format, task.execution_node, controller_node))
elif rampart_group.is_containerized:
task.instance_group = rampart_group
if not task.supports_isolation():
# project updates and inventory updates don't *actually* run in pods,
# so just pick *any* non-isolated, non-containerized host and use it
for group in InstanceGroup.objects.all():
if group.is_containerized or group.controller_id:
continue
match = group.find_largest_idle_instance()
if match:
task.execution_node = match.hostname
logger.debug('Submitting containerized {} to queue {}.'.format(
task.log_format, task.execution_node))
break
else:
task.instance_group = rampart_group
if instance is not None:

View File

@ -458,6 +458,25 @@ def cluster_node_heartbeat():
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_k8s_reaper():
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
for group in InstanceGroup.objects.filter(credential__isnull=False).iterator():
if group.is_containerized:
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
for job in UnifiedJob.objects.filter(
pk__in=list(PodManager.list_active_jobs(group))
).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
PodManager(job).delete()
except Exception:
logger.exception("Failed to delete orphaned pod {} from {}".format(
job.log_format, group
))
@task(queue=get_local_queuename)
def awx_isolated_heartbeat():
local_hostname = settings.CLUSTER_HOST_ID
@ -1094,6 +1113,13 @@ class BaseTask(object):
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if instance.is_containerized:
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
pm = PodManager(instance)
logger.debug(f"Deleting pod {pm.pod_name}")
pm.delete()
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
@ -1841,13 +1867,6 @@ class RunJob(BaseTask):
if isolated_manager_instance and not job.is_containerized:
isolated_manager_instance.cleanup()
if job.is_containerized:
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
pm = PodManager(job)
logger.debug(f"Deleting pod {pm.pod_name}")
pm.delete()
try:
inventory = job.inventory
except Inventory.DoesNotExist:

View File

@ -479,6 +479,11 @@ CELERYBEAT_SCHEDULE = {
'schedule': timedelta(seconds=20),
'options': {'expires': 20}
},
'k8s_reaper': {
'task': 'awx.main.tasks.awx_k8s_reaper',
'schedule': timedelta(seconds=60),
'options': {'expires': 50,}
},
# 'isolated_heartbeat': set up at the end of production.py and development.py
}
AWX_INCONSISTENT_TASK_INTERVAL = 60 * 3

View File

@ -392,7 +392,8 @@ function last () {
return lastPage();
}
return lastRange();
return lastRange()
.then(() => previousRange());
}
function next () {

View File

@ -213,6 +213,18 @@ function JobRenderService ($q, $compile, $sce, $window) {
const record = this.createRecord(event, lines);
if (lines.length === 1 && lines[0] === '') {
// Some events, mainly runner_on_start events, have an actual line count of 1
// (stdout = '') and a claimed line count of 0 (end_line - start_line = 0).
// Since a zero-length string has an actual line count of 1, they'll still get
// rendered as blank lines unless we intercept them and add some special
// handling to remove them.
//
// Although we're not going to render the blank line, the actual line count of
// the zero-length stdout string, which is 1, has already been recorded at this
// point so we must also go back and set the event's recorded line length to 0
// in order to avoid deleting too many lines when we need to pop or shift a
// page that contains this event off of the view.
this.records[record.uuid].lineCount = 0;
return { html: '', count: 0 };
}
@ -473,7 +485,7 @@ function JobRenderService ($q, $compile, $sce, $window) {
this.shift = lines => {
// We multiply by two here under the assumption that one element and one text node
// is generated for each line of output.
const count = 2 * lines;
const count = (2 * lines) + 1;
const elements = this.el.contents().slice(0, count);
return this.remove(elements);
@ -482,7 +494,7 @@ function JobRenderService ($q, $compile, $sce, $window) {
this.pop = lines => {
// We multiply by two here under the assumption that one element and one text node
// is generated for each line of output.
const count = 2 * lines;
const count = (2 * lines) + 1;
const elements = this.el.contents().slice(-count);
return this.remove(elements);
@ -558,7 +570,7 @@ function JobRenderService ($q, $compile, $sce, $window) {
}
const max = this.state.tail;
const min = max - count;
const min = max - count + 1;
let lines = 0;
@ -589,7 +601,7 @@ function JobRenderService ($q, $compile, $sce, $window) {
}
const min = this.state.head;
const max = min + count;
const max = min + count - 1;
let lines = 0;

View File

@ -1,3 +1,4 @@
/* eslint camelcase: 0 */
import {
OUTPUT_SEARCH_DOCLINK,
OUTPUT_SEARCH_FIELDS,
@ -17,7 +18,7 @@ function toggleSearchKey () {
}
function getCurrentQueryset () {
const { job_event_search } = $state.params; // eslint-disable-line camelcase
const { job_event_search } = $state.params;
return qs.decodeArr(job_event_search);
}
@ -114,12 +115,13 @@ function JobSearchController (_$state_, _qs_, _strings_, { subscribe }) {
vm.key = false;
vm.rejected = false;
vm.disabled = true;
vm.running = false;
vm.isJobActive = false;
vm.tags = getSearchTags(getCurrentQueryset());
unsubscribe = subscribe(({ running }) => {
vm.disabled = running;
vm.running = running;
unsubscribe = subscribe(({ running, event_processing_finished }) => {
const isJobActive = running || !event_processing_finished;
vm.disabled = isJobActive;
vm.isJobActive = isJobActive;
});
};

View File

@ -7,7 +7,7 @@
ng-disabled="vm.disabled"
ng-class="{ 'at-Input--rejected': vm.rejected }"
ng-model="vm.value"
ng-attr-placeholder="{{ vm.running ?
ng-attr-placeholder="{{ vm.isJobActive ?
vm.strings.get('search.PLACEHOLDER_RUNNING') :
vm.strings.get('search.PLACEHOLDER_DEFAULT') }}">
<span class="input-group-btn input-group-append">

View File

@ -50,7 +50,8 @@ function JobStatusService (moment, message) {
inventoryScm: {
id: model.get('source_project_update'),
status: model.get('summary_fields.inventory_source.status')
}
},
event_processing_finished: model.get('event_processing_finished'),
};
this.initHostStatusCounts({ model });
@ -309,6 +310,10 @@ function JobStatusService (moment, message) {
this.state.resultTraceback = traceback;
};
this.setEventProcessingFinished = val => {
this.state.event_processing_finished = val;
};
this.setHostStatusCounts = counts => {
counts = counts || {};
@ -348,6 +353,7 @@ function JobStatusService (moment, message) {
this.setArtifacts(model.get('artifacts'));
this.setExecutionNode(model.get('execution_node'));
this.setResultTraceback(model.get('result_traceback'));
this.setEventProcessingFinished(model.get('event_processing_finished'));
this.initHostStatusCounts({ model });
this.initPlaybookCounts({ model });

View File

@ -372,7 +372,9 @@ table, tbody {
.List-noItems {
margin-top: 52px;
display: inline-block;
display: flex;
align-items: center;
justify-content: center;
width: 100%;
height: 200px;
border-radius: 5px;
@ -381,7 +383,7 @@ table, tbody {
color: @list-no-items-txt;
text-transform: uppercase;
text-align: center;
padding: 80px 10px;
padding: 10px;
}
.modal-body > .List-noItems {

View File

@ -60,27 +60,27 @@ export default [function() {
return;
}
let isCustomized = false;
if (messages.started.message) {
if (messages.started && messages.started.message) {
isCustomized = true;
$scope.started_message = messages.started.message;
}
if (messages.started.body) {
if (messages.started && messages.started.body) {
isCustomized = true;
$scope.started_body = messages.started.body;
}
if (messages.success.message) {
if (messages.success && messages.success.message) {
isCustomized = true;
$scope.success_message = messages.success.message;
}
if (messages.success.body) {
if (messages.success && messages.success.body) {
isCustomized = true;
$scope.success_body = messages.success.body;
}
if (messages.error.message) {
if (messages.error && messages.error.message) {
isCustomized = true;
$scope.error_message = messages.error.message;
}
if (messages.error.body) {
if (messages.error && messages.error.body) {
isCustomized = true;
$scope.error_body = messages.error.body;
}

View File

@ -1 +1 @@
enum34
rsa # stop adding new crypto libs