Mirror of https://github.com/ansible/awx.git (synced 2024-10-31 15:21:13 +03:00)

Merge pull request #3266 from ansible/inventory_plugins

Transition to inventory plugins

Reviewed-by: Alan Rominger <arominge@redhat.com> (https://github.com/AlanCoding)

Commit 0814a9c4a1
@@ -25,7 +25,9 @@ STANDARD_INVENTORY_UPDATE_ENV = {
     # Failure to parse inventory should always be fatal
     'ANSIBLE_INVENTORY_UNPARSED_FAILED': 'True',
     # Always use the --export option for ansible-inventory
-    'ANSIBLE_INVENTORY_EXPORT': 'True'
+    'ANSIBLE_INVENTORY_EXPORT': 'True',
+    # Redirecting output to stderr allows JSON parsing to still work with -vvv
+    'ANSIBLE_VERBOSE_TO_STDERR': 'True'
 }
 CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
 ACTIVE_STATES = CAN_CANCEL
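These settings are plain environment variables consumed by ansible-inventory at run time. A minimal, self-contained sketch of how such an env block takes effect (the subprocess invocation below is illustrative, not AWX's actual runner):

import os
import subprocess

STANDARD_INVENTORY_UPDATE_ENV = {
    'ANSIBLE_INVENTORY_UNPARSED_FAILED': 'True',
    'ANSIBLE_INVENTORY_EXPORT': 'True',
    'ANSIBLE_VERBOSE_TO_STDERR': 'True',
}

env = dict(os.environ)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
# With ANSIBLE_VERBOSE_TO_STDERR set, stdout stays pure JSON even at -vvv
proc = subprocess.run(
    ['ansible-inventory', '-i', 'hosts.yml', '--list'],
    env=env, capture_output=True, text=True
)
data = proc.stdout  # JSON document describing hosts and groups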
@@ -124,7 +124,13 @@ class AnsibleInventoryLoader(object):

     def get_base_args(self):
         # get ansible-inventory absolute path for running in bubblewrap/proot, in Popen
-        bargs= [self.get_path_to_ansible_inventory(), '-i', self.source]
+
+        # NOTE: why do we add "python" to the start of these args?
+        # the script that runs ansible-inventory specifies a python interpreter
+        # that makes no sense in light of the fact that we put all the dependencies
+        # inside of /venv/ansible, so we override the specified interpreter
+        # https://github.com/ansible/ansible/issues/50714
+        bargs = ['python', self.get_path_to_ansible_inventory(), '-i', self.source]
         logger.debug('Using base command: {}'.format(' '.join(bargs)))
         return bargs

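With the interpreter override in place, the spawned command line begins with the venv's python rather than whatever shebang the ansible-inventory script declares, e.g. (paths illustrative):

    python /venv/ansible/bin/ansible-inventory -i /path/to/inventory/source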
@@ -302,11 +308,6 @@ class Command(BaseCommand):
             raise NotImplementedError('Value of enabled {} not understood.'.format(enabled))

     def get_source_absolute_path(self, source):
-        # Sanity check: We sanitize these module names for our API but Ansible proper doesn't follow
-        # good naming conventions
-        source = source.replace('rhv.py', 'ovirt4.py')
-        source = source.replace('satellite6.py', 'foreman.py')
-        source = source.replace('vmware.py', 'vmware_inventory.py')
         if not os.path.exists(source):
             raise IOError('Source does not exist: %s' % source)
         source = os.path.join(os.getcwd(), os.path.dirname(source),
@@ -879,12 +880,24 @@ class Command(BaseCommand):
         self._create_update_group_children()
         self._create_update_group_hosts()

+    def remote_tower_license_compare(self, local_license_type):
+        # this requires https://github.com/ansible/ansible/pull/52747
+        source_vars = self.all_group.variables
+        remote_license_type = source_vars.get('tower_metadata', {}).get('license_type', None)
+        if remote_license_type is None:
+            raise CommandError('Unexpected Error: Tower inventory plugin missing needed metadata!')
+        if local_license_type != remote_license_type:
+            raise CommandError('Tower server licenses must match: source: {} local: {}'.format(
+                remote_license_type, local_license_type
+            ))
+
     def check_license(self):
         license_info = get_licenser().validate()
+        local_license_type = license_info.get('license_type', 'UNLICENSED')
         if license_info.get('license_key', 'UNLICENSED') == 'UNLICENSED':
             logger.error(LICENSE_NON_EXISTANT_MESSAGE)
             raise CommandError('No license found!')
-        elif license_info.get('license_type', 'UNLICENSED') == 'open':
+        elif local_license_type == 'open':
             return
         available_instances = license_info.get('available_instances', 0)
         free_instances = license_info.get('free_instances', 0)
@@ -893,6 +906,13 @@ class Command(BaseCommand):
         if time_remaining <= 0 and not license_info.get('demo', False):
             logger.error(LICENSE_EXPIRED_MESSAGE)
             raise CommandError("License has expired!")
+        # special check for tower-type inventory sources
+        # but only if running the plugin
+        TOWER_SOURCE_FILES = ['tower.yml', 'tower.yaml']
+        if self.inventory_source.source == 'tower' and any(f in self.source for f in TOWER_SOURCE_FILES):
+            # only if this is the 2nd call to license check, we cannot compare before running plugin
+            if hasattr(self, 'all_group'):
+                self.remote_tower_license_compare(local_license_type)
         if free_instances < 0:
             d = {
                 'new_count': new_count,
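The comparison above can only succeed because the tower inventory plugin publishes license details into the imported inventory variables (see the include_metadata flag in the tower injector near the end of this diff). A hypothetical shape of that data, limited to the key the code actually reads:

# Hypothetical example of self.all_group.variables after a tower-plugin import;
# remote_tower_license_compare only reads tower_metadata['license_type'].
all_group_variables = {
    'tower_metadata': {
        'license_type': 'enterprise',
    },
}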
@@ -718,8 +718,8 @@ class CredentialType(CommonModelNameNotUnique):
             os.chmod(path, stat.S_IRUSR)
             return path

-        path = build_extra_vars_file(extra_vars, private_data_dir)
         if extra_vars:
+            path = build_extra_vars_file(extra_vars, private_data_dir)
             args.extend(['-e', '@%s' % path])


@@ -24,27 +24,32 @@ def gce(cred, env, private_data_dir):
         'type': 'service_account',
         'private_key': cred.get_input('ssh_key_data', default=''),
         'client_email': username,
-        'project_id': project
+        'project_id': project,
+        # need token uri for inventory plugins
+        # should this really be hard coded? Good question.
+        'token_uri': 'https://accounts.google.com/o/oauth2/token',
     }

     handle, path = tempfile.mkstemp(dir=private_data_dir)
     f = os.fdopen(handle, 'w')
-    json.dump(json_cred, f)
+    json.dump(json_cred, f, indent=2)
     f.close()
     os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
     env['GCE_CREDENTIALS_FILE_PATH'] = path
     return path


 def azure_rm(cred, env, private_data_dir):
     client = cred.get_input('client', default='')
     tenant = cred.get_input('tenant', default='')

+    env['AZURE_SUBSCRIPTION_ID'] = cred.get_input('subscription', default='')
+
     if len(client) and len(tenant):
         env['AZURE_CLIENT_ID'] = client
         env['AZURE_TENANT'] = tenant
         env['AZURE_SECRET'] = cred.get_input('secret', default='')
-        env['AZURE_SUBSCRIPTION_ID'] = cred.get_input('subscription', default='')
     else:
-        env['AZURE_SUBSCRIPTION_ID'] = cred.get_input('subscription', default='')
         env['AZURE_AD_USER'] = cred.get_input('username', default='')
         env['AZURE_PASSWORD'] = cred.get_input('password', default='')


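For orientation, the gce injector writes a service-account file and points GCE_CREDENTIALS_FILE_PATH at it; the file is JSON mirroring the dict above. Values here are illustrative placeholders:

# Illustrative contents of the file written by gce() above
# (the path comes from tempfile.mkstemp, so it is unpredictable):
json_cred_example = {
    'type': 'service_account',
    'private_key': '-----BEGIN PRIVATE KEY-----\n...',
    'client_email': 'svc@example-project.iam.gserviceaccount.com',
    'project_id': 'example-project',
    'token_uri': 'https://accounts.google.com/o/oauth2/token',
}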
@@ -10,6 +10,12 @@ import re
 import copy
 import os.path
 from urllib.parse import urljoin
+import yaml
+import configparser
+import stat
+import tempfile
+from io import StringIO
+from distutils.version import LooseVersion as Version

 # Django
 from django.conf import settings
@@ -18,6 +24,7 @@ from django.utils.translation import ugettext_lazy as _
 from django.db import transaction
 from django.core.exceptions import ValidationError
 from django.utils.timezone import now
+from django.utils.encoding import iri_to_uri
 from django.db.models import Q

 # REST Framework
@@ -52,7 +59,7 @@ from awx.main.models.notifications import (
     NotificationTemplate,
     JobNotificationMixin,
 )
-from awx.main.utils import _inventory_updates, region_sorting
+from awx.main.utils import _inventory_updates, region_sorting, get_licenser


 __all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate',
@@ -1015,6 +1022,8 @@ class InventorySourceOptions(BaseModel):
     Common fields for InventorySource and InventoryUpdate.
     '''

+    injectors = dict()
+
     SOURCE_CHOICES = [
         ('', _('Manual')),
         ('file', _('File, Directory or Script')),
@@ -1307,28 +1316,36 @@ class InventorySourceOptions(BaseModel):
             )
             return None

+    def get_inventory_plugin_name(self):
+        if self.source in CLOUD_PROVIDERS or self.source == 'custom':
+            # TODO: today, all vendored sources are scripts
+            # in future release inventory plugins will replace these
+            return 'script'
+        # in other cases we do not specify which plugin to use
+        return None
+
     def get_deprecated_credential(self, kind):
         for cred in self.credentials.all():
             if cred.credential_type.kind == kind:
                 return cred
         else:
             return None

     def get_cloud_credential(self):
         """Return the credential which is directly tied to the inventory source type.
         """
         credential = None
         for cred in self.credentials.all():
-            if cred.credential_type.kind != 'vault':
-                credential = cred
+            if self.source in CLOUD_PROVIDERS:
+                if cred.kind == self.source.replace('ec2', 'aws'):
+                    credential = cred
+                    break
+            else:
+                # these need to be returned in the API credential field
+                if cred.credential_type.kind != 'vault':
+                    credential = cred
+                    break
         return credential

     def get_extra_credentials(self):
         """Return all credentials that are not used by the inventory source injector.
         These are all credentials that should run their own inject_credential logic.
         """
         special_cred = None
         if self.source in CLOUD_PROVIDERS:
             # these have special injection logic associated with them
             special_cred = self.get_cloud_credential()
         extra_creds = []
         for cred in self.credentials.all():
             if special_cred is None or cred.pk != special_cred.pk:
                 extra_creds.append(cred)
         return extra_creds

     @property
     def credential(self):
         cred = self.get_cloud_credential()
@@ -1532,8 +1549,16 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, RelatedJobsMixin):
             return bool(self.source_script)
         elif self.source == 'scm':
             return bool(self.source_project)
-        else:
-            return bool(self.source in CLOUD_INVENTORY_SOURCES)
+        elif self.source == 'file':
+            return False
+        elif self.source == 'ec2':
+            # Permit credential-less ec2 updates to allow IAM roles
+            return True
+        elif self.source == 'gce':
+            # These updates will hang if correct credential is not supplied
+            credential = self.get_cloud_credential()
+            return bool(credential and credential.kind == 'gce')
+        return True

     def create_inventory_update(self, **kwargs):
         return self.create_unified_job(**kwargs)
@@ -1717,13 +1742,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin,
     def can_start(self):
         if not super(InventoryUpdate, self).can_start:
             return False
-
-        if (self.source not in ('custom', 'ec2', 'scm') and
-                not (self.get_cloud_credential())):
-            return False
-        elif self.source == 'scm' and not self.inventory_source.source_project:
-            return False
-        elif self.source == 'file':
+        elif not self.inventory_source or not self.inventory_source._can_update():
             return False
         return True

@@ -1801,3 +1820,860 @@ class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin):

     def get_absolute_url(self, request=None):
         return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)
+
+
+# TODO: move to awx/main/models/inventory/injectors.py
+class PluginFileInjector(object):
+    # if plugin_name is not given, no inventory plugin functionality exists
+    plugin_name = None  # Ansible core name used to reference plugin
+    # if initial_version is None, but we have plugin name, injection logic exists,
+    # but it is vaporware, meaning we do not use it for some reason in Ansible core
+    initial_version = None  # at what version do we switch to the plugin
+    ini_env_reference = None  # env var name that points to old ini config file
+    # base injector should be one of None, "managed", or "template"
+    # this dictates which logic to borrow from playbook injectors
+    base_injector = None
+
+    def __init__(self, ansible_version):
+        # This is InventoryOptions instance, could be source or inventory update
+        self.ansible_version = ansible_version
+
+    @property
+    def filename(self):
+        """Inventory filename for using the inventory plugin
+        This is created dynamically, but the auto plugin requires this exact naming
+        """
+        return '{0}.yml'.format(self.plugin_name)
+
+    @property
+    def script_name(self):
+        """Name of the script located in awx/plugins/inventory
+        """
+        return '{0}.py'.format(self.__class__.__name__)
+
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        """Default implementation of inventory plugin file contents.
+        There are some valid cases when all parameters can be obtained from
+        the environment variables, example "plugin: linode" is valid
+        ideally, however, some options should be filled from the inventory source data
+        """
+        if self.plugin_name is None:
+            raise NotImplementedError('At minimum the plugin name is needed for inventory plugin use.')
+        return {'plugin': self.plugin_name}
+
+    def inventory_contents(self, inventory_update, private_data_dir):
+        """Returns a string that is the content for the inventory file for the inventory plugin
+        """
+        return yaml.safe_dump(
+            self.inventory_as_dict(inventory_update, private_data_dir),
+            default_flow_style=False,
+            width=1000
+        )
+
+    def should_use_plugin(self):
+        return bool(
+            self.plugin_name and self.initial_version and
+            Version(self.ansible_version) >= Version(self.initial_version)
+        )
+
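The plugin-vs-script decision above is just a version gate. A minimal standalone sketch of the same logic (the class and its attribute values are hypothetical, chosen only for illustration):

from distutils.version import LooseVersion as Version

class FakeInjector:
    plugin_name = 'gcp_compute'   # hypothetical values
    initial_version = '2.8'

    def __init__(self, ansible_version):
        self.ansible_version = ansible_version

    def should_use_plugin(self):
        return bool(
            self.plugin_name and self.initial_version and
            Version(self.ansible_version) >= Version(self.initial_version)
        )

assert FakeInjector('2.8.1').should_use_plugin() is True
assert FakeInjector('2.7.10').should_use_plugin() is False  # falls back to the script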
+    def build_env(self, inventory_update, env, private_data_dir, private_data_files):
+        if self.should_use_plugin():
+            injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
+        else:
+            injector_env = self.get_script_env(inventory_update, private_data_dir, private_data_files)
+        env.update(injector_env)
+        return env
+
+    def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
+        """By default, we will apply the standard managed_by_tower injectors
+        for the script injection
+        """
+        injected_env = {}
+        credential = inventory_update.get_cloud_credential()
+        # some sources may have no credential, specifically ec2
+        if credential is None:
+            return injected_env
+        if self.base_injector == 'managed':
+            from awx.main.models.credential import injectors as builtin_injectors
+            cred_kind = inventory_update.source.replace('ec2', 'aws')
+            if cred_kind in dir(builtin_injectors):
+                getattr(builtin_injectors, cred_kind)(credential, injected_env, private_data_dir)
+        elif self.base_injector == 'template':
+            injected_env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)  # so injector knows this is inventory
+            safe_env = injected_env.copy()
+            args = []
+            credential.credential_type.inject_credential(
+                credential, injected_env, safe_env, args, private_data_dir
+            )
+            # NOTE: safe_env is handled externally to injector class by build_safe_env static method
+            # that means that managed_by_tower injectors must only inject detectable env keys
+            # enforcement of this is accomplished by tests
+        return injected_env
+
+    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
+        return self._get_shared_env(inventory_update, private_data_dir, private_data_files)
+
+    def get_script_env(self, inventory_update, private_data_dir, private_data_files):
+        injected_env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
+
+        # Put in env var reference to private ini data files, if relevant
+        if self.ini_env_reference:
+            credential = inventory_update.get_cloud_credential()
+            cred_data = private_data_files['credentials']
+            injected_env[self.ini_env_reference] = cred_data[credential]
+
+        return injected_env
+
+    def build_private_data(self, inventory_update, private_data_dir):
+        if self.should_use_plugin():
+            return self.build_plugin_private_data(inventory_update, private_data_dir)
+        else:
+            return self.build_script_private_data(inventory_update, private_data_dir)
+
+    def build_script_private_data(self, inventory_update, private_data_dir):
+        return None
+
+    def build_plugin_private_data(self, inventory_update, private_data_dir):
+        return None
+
+    @staticmethod
+    def dump_cp(cp, credential):
+        """Dump config parser data and return it as a string.
+        Helper method intended for use by build_script_private_data
+        """
+        if cp.sections():
+            f = StringIO()
+            cp.write(f)
+            private_data = private_data = {'credentials': {}}
+            private_data['credentials'][credential] = f.getvalue()
+            return private_data
+        else:
+            return None
+
+
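dump_cp pairs with the ini_env_reference mechanism: build_script_private_data returns {'credentials': {credential: <ini text>}}, the task runner writes that text to a file, and get_script_env points the named env var at the file path. A self-contained sketch of the data flow (the free function and credential string are stand-ins for the staticmethod and the Credential object):

import configparser
from io import StringIO

def dump_cp(cp, credential):
    # Same shape as the staticmethod above: map credential -> rendered ini text
    if cp.sections():
        f = StringIO()
        cp.write(f)
        return {'credentials': {credential: f.getvalue()}}
    return None

cp = configparser.RawConfigParser()
cp.add_section('ec2')
cp.set('ec2', 'regions', 'us-east-1')
private_data = dump_cp(cp, 'my-aws-credential')  # key is a Credential object in AWX
print(private_data['credentials']['my-aws-credential'])
# [ec2]
# regions = us-east-1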
+class azure_rm(PluginFileInjector):
+    plugin_name = 'azure_rm'
+    # FIXME: https://github.com/ansible/ansible/issues/54065 need resolving to enable
+    # initial_version = '2.8'  # Driven by unsafe group names issue, hostvars
+    ini_env_reference = 'AZURE_INI_PATH'
+    base_injector = 'managed'
+
+    def get_plugin_env(self, *args, **kwargs):
+        ret = super(azure_rm, self).get_plugin_env(*args, **kwargs)
+        # We need native jinja2 types so that tags can give JSON null value
+        ret['ANSIBLE_JINJA2_NATIVE'] = str(True)
+        return ret
+
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        ret = super(azure_rm, self).inventory_as_dict(inventory_update, private_data_dir)
+
+        source_vars = inventory_update.source_vars_dict
+
+        group_by_hostvar = {
+            'location': {'prefix': '', 'separator': '', 'key': 'location'},
+            'tag': {'prefix': '', 'separator': '', 'key': 'tags.keys() | list if tags else []'},
+            # Introduced with https://github.com/ansible/ansible/pull/53046
+            'security_group': {'prefix': '', 'separator': '', 'key': 'security_group'},
+            'resource_group': {'prefix': '', 'separator': '', 'key': 'resource_group'},
+            # Note, os_family was not documented correctly in script, but defaulted to grouping by it
+            'os_family': {'prefix': '', 'separator': '', 'key': 'os_disk.operating_system_type'}
+        }
+        # by default group by everything
+        # always respect user setting, if they gave it
+        group_by = [
+            grouping_name for grouping_name in group_by_hostvar
+            if source_vars.get('group_by_{}'.format(grouping_name), True)
+        ]
+        ret['keyed_groups'] = [group_by_hostvar[grouping_name] for grouping_name in group_by]
+        if 'tag' in group_by:
+            # Nasty syntax to reproduce "key_value" group names in addition to "key"
+            ret['keyed_groups'].append({
+                'prefix': '', 'separator': '',
+                'key': r'dict(tags.keys() | map("regex_replace", "^(.*)$", "\1_") | list | zip(tags.values() | list)) if tags else []'
+            })
+
+        # Compatibility content
+        # TODO: add proper support for instance_filters non-specific to compatibility
+        # TODO: add proper support for group_by non-specific to compatibility
+        # Dashes were not configurable in azure_rm.py script, we do not want unicode, so always use this
+        ret['use_contrib_script_compatible_sanitization'] = True
+        # By default the script did not filter hosts
+        ret['default_host_filters'] = []
+        # User-given host filters
+        user_filters = []
+        old_filterables = [
+            ('resource_groups', 'resource_group'),
+            ('tags', 'tags')
+            # locations / location would be an entry
+            # but this would conflict with source_regions
+        ]
+        for key, loc in old_filterables:
+            value = source_vars.get(key, None)
+            if value and isinstance(value, str):
+                user_filters.append('{} not in {}'.format(
+                    loc, value.split(',')
+                ))
+        if user_filters:
+            ret.setdefault('exclude_host_filters', [])
+            ret['exclude_host_filters'].extend(user_filters)
+
+        ret['conditional_groups'] = {'azure': True}
+        ret['hostvar_expressions'] = {
+            'provisioning_state': 'provisioning_state | title',
+            'computer_name': 'name',
+            'type': 'resource_type',
+            'private_ip': 'private_ipv4_addresses[0]',
+            'public_ip': 'public_ipv4_addresses[0]',
+            'tags': 'tags if tags else None'
+        }
+        # Special functionality from script
+        if source_vars.get('use_private_ip', False):
+            ret['hostvar_expressions']['ansible_host'] = 'private_ipv4_addresses[0]'
+        # end compatibility content
+
+        if inventory_update.source_regions and 'all' not in inventory_update.source_regions:
+            # initialize a list for this section in inventory file
+            ret.setdefault('exclude_host_filters', [])
+            # make a python list of the regions we will use
+            python_regions = [x.strip() for x in inventory_update.source_regions.split(',')]
+            # convert that list in memory to python syntax in a string
+            # now put that in jinja2 syntax operating on hostvar key "location"
+            # and put that as an entry in the exclusions list
+            ret['exclude_host_filters'].append("location not in {}".format(repr(python_regions)))
+        return ret
+
+    def build_script_private_data(self, inventory_update, private_data_dir):
+        cp = configparser.RawConfigParser()
+        section = 'azure'
+        cp.add_section(section)
+        cp.set(section, 'include_powerstate', 'yes')
+        cp.set(section, 'group_by_resource_group', 'yes')
+        cp.set(section, 'group_by_location', 'yes')
+        cp.set(section, 'group_by_tag', 'yes')
+
+        if inventory_update.source_regions and 'all' not in inventory_update.source_regions:
+            cp.set(
+                section, 'locations',
+                ','.join([x.strip() for x in inventory_update.source_regions.split(',')])
+            )
+
+        azure_rm_opts = dict(inventory_update.source_vars_dict.items())
+        for k, v in azure_rm_opts.items():
+            cp.set(section, k, str(v))
+        return self.dump_cp(cp, inventory_update.get_cloud_credential())
+
+
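For a sense of the output: inventory_contents renders inventory_as_dict to YAML into a file named azure_rm.yml (the filename property; the auto plugin requires that exact name). An abridged, hypothetical rendering for a source restricted to two regions:

import yaml

# Hypothetical abridged plugin config for an azure_rm source
config = {
    'plugin': 'azure_rm',
    'use_contrib_script_compatible_sanitization': True,
    'default_host_filters': [],
    'conditional_groups': {'azure': True},
    'exclude_host_filters': ["location not in ['eastus', 'westus']"],
}
print(yaml.safe_dump(config, default_flow_style=False, width=1000))
# -> the text that would be written to azure_rm.yml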
+class ec2(PluginFileInjector):
+    plugin_name = 'aws_ec2'
+    # blocked by https://github.com/ansible/ansible/issues/54059
+    # initial_version = '2.8'  # Driven by unsafe group names issue, parent_group templating, hostvars
+    ini_env_reference = 'EC2_INI_PATH'
+    base_injector = 'managed'
+
+    def get_plugin_env(self, *args, **kwargs):
+        ret = super(ec2, self).get_plugin_env(*args, **kwargs)
+        # We need native jinja2 types so that ec2_state_code will give integer
+        ret['ANSIBLE_JINJA2_NATIVE'] = str(True)
+        return ret
+
+    def _compat_compose_vars(self):
+        return {
+            # vars that change
+            'ec2_block_devices': (
+                "dict(block_device_mappings | map(attribute='device_name') | list | zip(block_device_mappings "
+                "| map(attribute='ebs.volume_id') | list))"
+            ),
+            'ec2_dns_name': 'public_dns_name',
+            'ec2_group_name': 'placement.group_name',
+            'ec2_instance_profile': 'iam_instance_profile | default("")',
+            'ec2_ip_address': 'public_ip_address',
+            'ec2_kernel': 'kernel_id | default("")',
+            'ec2_monitored': "monitoring.state in ['enabled', 'pending']",
+            'ec2_monitoring_state': 'monitoring.state',
+            'ec2_placement': 'placement.availability_zone',
+            'ec2_ramdisk': 'ramdisk_id | default("")',
+            'ec2_reason': 'state_transition_reason',
+            'ec2_security_group_ids': "security_groups | map(attribute='group_id') | list | join(',')",
+            'ec2_security_group_names': "security_groups | map(attribute='group_name') | list | join(',')",
+            'ec2_tag_Name': 'tags.Name',
+            'ec2_state': 'state.name',
+            'ec2_state_code': 'state.code',
+            'ec2_state_reason': 'state_reason.message if state_reason is defined else ""',
+            'ec2_sourceDestCheck': 'source_dest_check | default(false) | lower | string',  # snake_case syntax intended
+            'ec2_account_id': 'owner_id',
+            # vars that just need ec2_ prefix
+            'ec2_ami_launch_index': 'ami_launch_index | string',
+            'ec2_architecture': 'architecture',
+            'ec2_client_token': 'client_token',
+            'ec2_ebs_optimized': 'ebs_optimized',
+            'ec2_hypervisor': 'hypervisor',
+            'ec2_image_id': 'image_id',
+            'ec2_instance_type': 'instance_type',
+            'ec2_key_name': 'key_name',
+            'ec2_launch_time': r'launch_time | regex_replace(" ", "T") | regex_replace("(\+)(\d\d):(\d)(\d)$", ".\g<2>\g<3>Z")',
+            'ec2_platform': 'platform | default("")',
+            'ec2_private_dns_name': 'private_dns_name',
+            'ec2_private_ip_address': 'private_ip_address',
+            'ec2_public_dns_name': 'public_dns_name',
+            'ec2_region': 'placement.region',
+            'ec2_root_device_name': 'root_device_name',
+            'ec2_root_device_type': 'root_device_type',
+            # many items need blank defaults because the script tended to keep a common schema
+            'ec2_spot_instance_request_id': 'spot_instance_request_id | default("")',
+            'ec2_subnet_id': 'subnet_id | default("")',
+            'ec2_virtualization_type': 'virtualization_type',
+            'ec2_vpc_id': 'vpc_id | default("")',
+            # same as ec2_ip_address, the script provided this
+            'ansible_host': 'public_ip_address',
+            # new with https://github.com/ansible/ansible/pull/53645
+            'ec2_eventsSet': 'events | default("")',
+            'ec2_persistent': 'persistent | default(false)',
+            'ec2_requester_id': 'requester_id | default("")'
+        }
+
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        ret = super(ec2, self).inventory_as_dict(inventory_update, private_data_dir)
+
+        keyed_groups = []
+        group_by_hostvar = {
+            'ami_id': {'prefix': '', 'separator': '', 'key': 'image_id', 'parent_group': 'images'},
+            # 2 entries for zones for same groups to establish 2 parentage trees
+            'availability_zone': {'prefix': '', 'separator': '', 'key': 'placement.availability_zone', 'parent_group': 'zones'},
+            'aws_account': {'prefix': '', 'separator': '', 'key': 'ec2_account_id', 'parent_group': 'accounts'},  # composed var
+            'instance_id': {'prefix': '', 'separator': '', 'key': 'instance_id', 'parent_group': 'instances'},  # normally turned off
+            'instance_state': {'prefix': 'instance_state', 'key': 'ec2_state', 'parent_group': 'instance_states'},  # composed var
+            # ec2_platform is a composed var, but group names do not match up to hostvar exactly
+            'platform': {'prefix': 'platform', 'key': 'platform | default("undefined")', 'parent_group': 'platforms'},
+            'instance_type': {'prefix': 'type', 'key': 'instance_type', 'parent_group': 'types'},
+            'key_pair': {'prefix': 'key', 'key': 'key_name', 'parent_group': 'keys'},
+            'region': {'prefix': '', 'separator': '', 'key': 'placement.region', 'parent_group': 'regions'},
+            # Security requires some ninja jinja2 syntax, credit to s-hertel
+            'security_group': {'prefix': 'security_group', 'key': 'security_groups | map(attribute="group_name")', 'parent_group': 'security_groups'},
+            # tags cannot be parented in exactly the same way as the script due to
+            # https://github.com/ansible/ansible/pull/53812
+            'tag_keys': [
+                {'prefix': 'tag', 'key': 'tags', 'parent_group': 'tags'},
+                {'prefix': 'tag', 'key': 'tags.keys()', 'parent_group': 'tags'}
+            ],
+            # 'tag_none': None,  # grouping by no tags isn't a different thing with plugin
+            # naming is redundant, like vpc_id_vpc_8c412cea, but intended
+            'vpc_id': {'prefix': 'vpc_id', 'key': 'vpc_id', 'parent_group': 'vpcs'},
+        }
+        # -- same-ish as script here --
+        group_by = [x.strip().lower() for x in inventory_update.group_by.split(',') if x.strip()]
+        for choice in inventory_update.get_ec2_group_by_choices():
+            value = bool((group_by and choice[0] in group_by) or (not group_by and choice[0] != 'instance_id'))
+            # -- end sameness to script --
+            if value:
+                this_keyed_group = group_by_hostvar.get(choice[0], None)
+                # If a keyed group syntax does not exist, there is nothing we can do to get this group
+                if this_keyed_group is not None:
+                    if isinstance(this_keyed_group, list):
+                        keyed_groups.extend(this_keyed_group)
+                    else:
+                        keyed_groups.append(this_keyed_group)
+        # special case, this parentage is only added if both zones and regions are present
+        if not group_by or ('region' in group_by and 'availability_zone' in group_by):
+            keyed_groups.append({'prefix': '', 'separator': '', 'key': 'placement.availability_zone', 'parent_group': '{{ placement.region }}'})

+        source_vars = inventory_update.source_vars_dict
+        # This is a setting from the script, hopefully no one used it
+        # if true, it replaces dashes, but not in region / loc names
+        replace_dash = bool(source_vars.get('replace_dash_in_groups', True))
+        # Compatibility content
+        legacy_regex = {
+            True: r"[^A-Za-z0-9\_]",
+            False: r"[^A-Za-z0-9\_\-]"  # do not replace dash, dash is whitelisted
+        }[replace_dash]
+        list_replacer = 'map("regex_replace", "{rx}", "_") | list'.format(rx=legacy_regex)
+        # this option, a plugin option, will allow dashes, but not unicode
+        # when set to False, unicode will be allowed, but it was not allowed by script
+        # thus, we always have to use this option, and always use our custom regex
+        ret['use_contrib_script_compatible_sanitization'] = True
+        for grouping_data in keyed_groups:
+            if grouping_data['key'] in ('placement.region', 'placement.availability_zone'):
+                # us-east-2 is always us-east-2 according to ec2.py
+                # no sanitization in region-ish groups for the script standards, ever ever
+                continue
+            if grouping_data['key'] == 'tags':
+                # dict jinja2 transformation
+                grouping_data['key'] = 'dict(tags.keys() | {replacer} | zip(tags.values() | {replacer}))'.format(
+                    replacer=list_replacer
+                )
+            elif grouping_data['key'] == 'tags.keys()' or grouping_data['prefix'] == 'security_group':
+                # list jinja2 transformation
+                grouping_data['key'] += ' | {replacer}'.format(replacer=list_replacer)
+            else:
+                # string transformation
+                grouping_data['key'] += ' | regex_replace("{rx}", "_")'.format(rx=legacy_regex)
+        # end compatibility content
+
+        # This was an allowed ec2.ini option, also plugin option, so pass through
+        if source_vars.get('boto_profile', None):
+            ret['boto_profile'] = source_vars['boto_profile']
+
+        elif not replace_dash:
+            # Using the plugin, but still want dashes whitelisted
+            ret['use_contrib_script_compatible_sanitization'] = True
+
+        if keyed_groups:
+            ret['keyed_groups'] = keyed_groups
+
+        # Instance ID not part of compat vars, because of settings.EC2_INSTANCE_ID_VAR
+        compose_dict = {'ec2_id': 'instance_id'}
+        inst_filters = {}
+
+        # Compatibility content
+        compose_dict.update(self._compat_compose_vars())
+        # plugin provides "aws_ec2", but not this which the script gave
+        ret['groups'] = {'ec2': True}
+        # public_ip as hostname is non-default plugin behavior, script behavior
+        ret['hostnames'] = [
+            'network-interface.addresses.association.public-ip',
+            'dns-name',
+            'private-dns-name'
+        ]
+        # The script returned only running state by default, the plugin does not
+        # https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
+        # options: pending | running | shutting-down | terminated | stopping | stopped
+        inst_filters['instance-state-name'] = ['running']
+        # end compatibility content
+
+        if compose_dict:
+            ret['compose'] = compose_dict
+
+        if inventory_update.instance_filters:
+            # logic used to live in ec2.py, now it belongs to us. Yay more code?
+            filter_sets = [f for f in inventory_update.instance_filters.split(',') if f]
+
+            for instance_filter in filter_sets:
+                # AND logic not supported, unclear how to...
+                instance_filter = instance_filter.strip()
+                if not instance_filter or '=' not in instance_filter:
+                    continue
+                filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
+                if not filter_key:
+                    continue
+                inst_filters[filter_key] = filter_value
+
+        if inst_filters:
+            ret['filters'] = inst_filters
+
+        if inventory_update.source_regions and 'all' not in inventory_update.source_regions:
+            ret['regions'] = inventory_update.source_regions.split(',')
+
+        return ret
+
+    def build_script_private_data(self, inventory_update, private_data_dir):
+        cp = configparser.RawConfigParser()
+        # Build custom ec2.ini for ec2 inventory script to use.
+        section = 'ec2'
+        cp.add_section(section)
+        ec2_opts = dict(inventory_update.source_vars_dict.items())
+        regions = inventory_update.source_regions or 'all'
+        regions = ','.join([x.strip() for x in regions.split(',')])
+        regions_blacklist = ','.join(settings.EC2_REGIONS_BLACKLIST)
+        ec2_opts['regions'] = regions
+        ec2_opts.setdefault('regions_exclude', regions_blacklist)
+        ec2_opts.setdefault('destination_variable', 'public_dns_name')
+        ec2_opts.setdefault('vpc_destination_variable', 'ip_address')
+        ec2_opts.setdefault('route53', 'False')
+        ec2_opts.setdefault('all_instances', 'True')
+        ec2_opts.setdefault('all_rds_instances', 'False')
+        ec2_opts.setdefault('include_rds_clusters', 'False')
+        ec2_opts.setdefault('rds', 'False')
+        ec2_opts.setdefault('nested_groups', 'True')
+        ec2_opts.setdefault('elasticache', 'False')
+        ec2_opts.setdefault('stack_filters', 'False')
+        if inventory_update.instance_filters:
+            ec2_opts.setdefault('instance_filters', inventory_update.instance_filters)
+        group_by = [x.strip().lower() for x in inventory_update.group_by.split(',') if x.strip()]
+        for choice in inventory_update.get_ec2_group_by_choices():
+            value = bool((group_by and choice[0] in group_by) or (not group_by and choice[0] != 'instance_id'))
+            ec2_opts.setdefault('group_by_%s' % choice[0], str(value))
+        if 'cache_path' not in ec2_opts:
+            cache_path = tempfile.mkdtemp(prefix='ec2_cache', dir=private_data_dir)
+            ec2_opts['cache_path'] = cache_path
+        ec2_opts.setdefault('cache_max_age', '300')
+        for k, v in ec2_opts.items():
+            cp.set(section, k, str(v))
+        return self.dump_cp(cp, inventory_update.get_cloud_credential())
+
+
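The compatibility regex is easiest to see by example. A small sketch of what the script-compatible sanitization does to group names, in pure Python, mirroring the jinja2 regex_replace the config asks the plugin to perform:

import re

legacy_regex = {
    True: r"[^A-Za-z0-9\_]",     # replace_dash_in_groups=True: dashes become underscores
    False: r"[^A-Za-z0-9\_\-]",  # False: dash is whitelisted, everything else replaced
}

print(re.sub(legacy_regex[True], '_', 'tag_Name_web-server-01'))
# tag_Name_web_server_01
print(re.sub(legacy_regex[False], '_', 'tag_Name_web-server-01'))
# tag_Name_web-server-01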
+class gce(PluginFileInjector):
+    plugin_name = 'gcp_compute'
+    initial_version = '2.8'  # Driven by unsafe group names issue, hostvars
+    base_injector = 'managed'
+
+    def get_script_env(self, inventory_update, private_data_dir, private_data_files):
+        env = super(gce, self).get_script_env(inventory_update, private_data_dir, private_data_files)
+        env['GCE_ZONE'] = inventory_update.source_regions if inventory_update.source_regions != 'all' else ''  # noqa
+
+        # by default, the GCE inventory source caches results on disk for
+        # 5 minutes; disable this behavior
+        cp = configparser.ConfigParser()
+        cp.add_section('cache')
+        cp.set('cache', 'cache_max_age', '0')
+        handle, path = tempfile.mkstemp(dir=private_data_dir)
+        cp.write(os.fdopen(handle, 'w'))
+        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
+        env['GCE_INI_PATH'] = path
+        return env
+
+    def _compat_compose_vars(self):
+        # missing: gce_image, gce_uuid
+        # https://github.com/ansible/ansible/issues/51884
+        return {
+            'gce_description': 'description if description else None',
+            'gce_machine_type': 'machineType',
+            'gce_name': 'name',
+            'gce_network': 'networkInterfaces[0].network.name',
+            'gce_private_ip': 'networkInterfaces[0].networkIP',
+            'gce_public_ip': 'networkInterfaces[0].accessConfigs[0].natIP',
+            'gce_status': 'status',
+            'gce_subnetwork': 'networkInterfaces[0].subnetwork.name',
+            'gce_tags': 'tags.get("items", [])',
+            'gce_zone': 'zone',
+            'gce_metadata': 'metadata.get("items", []) | items2dict(key_name="key", value_name="value")',
+            # We need this as long as hostnames is non-default, otherwise hosts
+            # will not be addressed correctly, was returned in script
+            'ansible_ssh_host': 'networkInterfaces[0].accessConfigs[0].natIP'
+        }
+
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        ret = super(gce, self).inventory_as_dict(inventory_update, private_data_dir)
+        credential = inventory_update.get_cloud_credential()
+
+        # auth related items
+        from awx.main.models.credential.injectors import gce as builtin_injector
+        ret['service_account_file'] = builtin_injector(credential, {}, private_data_dir)
+        ret['projects'] = [credential.get_input('project', default='')]
+        ret['auth_kind'] = "serviceaccount"
+
+        filters = []
+        # TODO: implement gce group_by options
+        # gce never processed the group_by field, if it had, we would selectively
+        # apply those options here, but it did not, so all groups are added here
+        keyed_groups = [
+            # the jinja2 syntax is duplicated with compose
+            # https://github.com/ansible/ansible/issues/51883
+            {'prefix': 'network', 'key': 'gce_subnetwork'},  # composed var
+            {'prefix': '', 'separator': '', 'key': 'gce_private_ip'},  # composed var
+            {'prefix': '', 'separator': '', 'key': 'gce_public_ip'},  # composed var
+            {'prefix': '', 'separator': '', 'key': 'machineType'},
+            {'prefix': '', 'separator': '', 'key': 'zone'},
+            {'prefix': 'tag', 'key': 'gce_tags'},  # composed var
+            {'prefix': 'status', 'key': 'status | lower'}
+        ]
+        # This will be used as the gce instance_id, must be universal, non-compat
+        compose_dict = {'gce_id': 'id'}
+
+        # Compatibility content
+        # TODO: proper group_by and instance_filters support, irrelevant of compat mode
+        # The gce.py script never sanitized any names in any way
+        ret['use_contrib_script_compatible_sanitization'] = True
+        # Add in old hostvars aliases
+        compose_dict.update(self._compat_compose_vars())
+        # Non-default names to match script
+        ret['hostnames'] = ['name', 'public_ip', 'private_ip']
+        # end compatibility content
+
+        if keyed_groups:
+            ret['keyed_groups'] = keyed_groups
+        if filters:
+            ret['filters'] = filters
+        if compose_dict:
+            ret['compose'] = compose_dict
+        if inventory_update.source_regions and 'all' not in inventory_update.source_regions:
+            ret['zones'] = inventory_update.source_regions.split(',')
+        return ret
+
+    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
+        # gce wants everything defined in inventory & cred files
+        # this explicitly turns off injection of environment variables
+        return {}
+
+
+class vmware(PluginFileInjector):
+    # plugin_name = 'vmware_vm_inventory'  # FIXME: implement me
+    ini_env_reference = 'VMWARE_INI_PATH'
+    base_injector = 'managed'
+
+    @property
+    def script_name(self):
+        return 'vmware_inventory.py'  # exception
+
+    def build_script_private_data(self, inventory_update, private_data_dir):
+        cp = configparser.RawConfigParser()
+        credential = inventory_update.get_cloud_credential()
+
+        # Allow custom options to vmware inventory script.
+        section = 'vmware'
+        cp.add_section(section)
+        cp.set('vmware', 'cache_max_age', '0')
+        cp.set('vmware', 'validate_certs', str(settings.VMWARE_VALIDATE_CERTS))
+        cp.set('vmware', 'username', credential.get_input('username', default=''))
+        cp.set('vmware', 'password', credential.get_input('password', default=''))
+        cp.set('vmware', 'server', credential.get_input('host', default=''))
+
+        vmware_opts = dict(inventory_update.source_vars_dict.items())
+        if inventory_update.instance_filters:
+            vmware_opts.setdefault('host_filters', inventory_update.instance_filters)
+        if inventory_update.group_by:
+            vmware_opts.setdefault('groupby_patterns', inventory_update.group_by)
+
+        for k, v in vmware_opts.items():
+            cp.set(section, k, str(v))
+
+        return self.dump_cp(cp, credential)
+
+
+class openstack(PluginFileInjector):
+    ini_env_reference = 'OS_CLIENT_CONFIG_FILE'
+    plugin_name = 'openstack'
+    # minimum version of 2.7.8 may be theoretically possible
+    initial_version = '2.8'  # Driven by consistency with other sources
+
+    @property
+    def script_name(self):
+        return 'openstack_inventory.py'  # exception
+
+    def _get_clouds_dict(self, inventory_update, credential, private_data_dir, mk_cache=True):
+        openstack_auth = dict(auth_url=credential.get_input('host', default=''),
+                              username=credential.get_input('username', default=''),
+                              password=credential.get_input('password', default=''),
+                              project_name=credential.get_input('project', default=''))
+        if credential.has_input('domain'):
+            openstack_auth['domain_name'] = credential.get_input('domain', default='')
+
+        private_state = inventory_update.source_vars_dict.get('private', True)
+        verify_state = credential.get_input('verify_ssl', default=True)
+        openstack_data = {
+            'clouds': {
+                'devstack': {
+                    'private': private_state,
+                    'verify': verify_state,
+                    'auth': openstack_auth,
+                },
+            },
+        }
+        if mk_cache:
+            # Retrieve cache path from inventory update vars if available,
+            # otherwise create a temporary cache path only for this update.
+            cache = inventory_update.source_vars_dict.get('cache', {})
+            if not isinstance(cache, dict):
+                cache = {}
+            if not cache.get('path', ''):
+                cache_path = tempfile.mkdtemp(prefix='openstack_cache', dir=private_data_dir)
+                cache['path'] = cache_path
+            openstack_data['cache'] = cache
+        ansible_variables = {
+            'use_hostnames': True,
+            'expand_hostvars': False,
+            'fail_on_errors': True,
+        }
+        provided_count = 0
+        for var_name in ansible_variables:
+            if var_name in inventory_update.source_vars_dict:
+                ansible_variables[var_name] = inventory_update.source_vars_dict[var_name]
+                provided_count += 1
+        if provided_count:
+            # Must we provide all 3 because the user provides any 1 of these??
+            # this probably results in some incorrect mangling of the defaults
+            openstack_data['ansible'] = ansible_variables
+        return openstack_data
+
+    def build_script_private_data(self, inventory_update, private_data_dir):
+        credential = inventory_update.get_cloud_credential()
+        private_data = {'credentials': {}}
+
+        openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir)
+        private_data['credentials'][credential] = yaml.safe_dump(
+            openstack_data, default_flow_style=False, allow_unicode=True
+        )
+        return private_data
+
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        credential = inventory_update.get_cloud_credential()
+
+        openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir, mk_cache=False)
+        handle, path = tempfile.mkstemp(dir=private_data_dir)
+        f = os.fdopen(handle, 'w')
+        yaml.dump(openstack_data, f, default_flow_style=False)
+        f.close()
+        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
+
+        def use_host_name_for_name(a_bool_maybe):
+            if not isinstance(a_bool_maybe, bool):
+                # Could be specified by user via "host" or "uuid"
+                return a_bool_maybe
+            elif a_bool_maybe:
+                return 'name'  # plugin default
+            else:
+                return 'uuid'
+
+        ret = dict(
+            plugin=self.plugin_name,
+            fail_on_errors=True,
+            expand_hostvars=True,
+            inventory_hostname=use_host_name_for_name(False),
+            clouds_yaml_path=[path]  # why a list? it just is
+        )
+        # Note: mucking with defaults will break import integrity
+        # For the plugin, we need to use the same defaults as the old script
+        # or else imports will conflict. To find script defaults you have
+        # to read source code of the script.
+        #
+        # Script Defaults           Plugin Defaults
+        # 'use_hostnames': False,   'name' (True)
+        # 'expand_hostvars': True,  'no' (False)
+        # 'fail_on_errors': True,   'no' (False)
+        #
+        # These are, yet again, different from ansible_variables in script logic
+        # but those are applied inconsistently
+        source_vars = inventory_update.source_vars_dict
+        for var_name in ['expand_hostvars', 'fail_on_errors']:
+            if var_name in source_vars:
+                ret[var_name] = source_vars[var_name]
+        if 'use_hostnames' in source_vars:
+            ret['inventory_hostname'] = use_host_name_for_name(source_vars['use_hostnames'])
+        return ret
+
+
+class rhv(PluginFileInjector):
+    """ovirt uses the custom credential templating, and that is all
+    """
+    # plugin_name = 'FIXME'  # contribute inventory plugin to Ansible
+    base_injector = 'template'
+
+    @property
+    def script_name(self):
+        return 'ovirt4.py'  # exception
+
+
+class satellite6(PluginFileInjector):
+    plugin_name = 'foreman'
+    ini_env_reference = 'FOREMAN_INI_PATH'
+    # initial_version = '2.8'  # FIXME: turn on after plugin is validated
+    # No base injector, because this does not work in playbooks. Bug??
+
+    @property
+    def script_name(self):
+        return 'foreman.py'  # exception
+
+    def build_script_private_data(self, inventory_update, private_data_dir):
+        cp = configparser.RawConfigParser()
+        credential = inventory_update.get_cloud_credential()
+
+        section = 'foreman'
+        cp.add_section(section)
+
+        group_patterns = '[]'
+        group_prefix = 'foreman_'
+        want_hostcollections = 'False'
+        foreman_opts = dict(inventory_update.source_vars_dict.items())
+        foreman_opts.setdefault('ssl_verify', 'False')
+        for k, v in foreman_opts.items():
+            if k == 'satellite6_group_patterns' and isinstance(v, str):
+                group_patterns = v
+            elif k == 'satellite6_group_prefix' and isinstance(v, str):
+                group_prefix = v
+            elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
+                want_hostcollections = v
+            else:
+                cp.set(section, k, str(v))
+
+        if credential:
+            cp.set(section, 'url', credential.get_input('host', default=''))
+            cp.set(section, 'user', credential.get_input('username', default=''))
+            cp.set(section, 'password', credential.get_input('password', default=''))
+
+        section = 'ansible'
+        cp.add_section(section)
+        cp.set(section, 'group_patterns', group_patterns)
+        cp.set(section, 'want_facts', 'True')
+        cp.set(section, 'want_hostcollections', str(want_hostcollections))
+        cp.set(section, 'group_prefix', group_prefix)
+
+        section = 'cache'
+        cp.add_section(section)
+        cp.set(section, 'path', '/tmp')
+        cp.set(section, 'max_age', '0')
+
+        return self.dump_cp(cp, credential)
+
+    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
+        # this assumes that this is merged
+        # https://github.com/ansible/ansible/pull/52693
+        credential = inventory_update.get_cloud_credential()
+        ret = {}
+        if credential:
+            ret['FOREMAN_SERVER'] = credential.get_input('host', default='')
+            ret['FOREMAN_USER'] = credential.get_input('username', default='')
+            ret['FOREMAN_PASSWORD'] = credential.get_input('password', default='')
+        return ret
+
+
+class cloudforms(PluginFileInjector):
+    # plugin_name = 'FIXME'  # contribute inventory plugin to Ansible
+    ini_env_reference = 'CLOUDFORMS_INI_PATH'
+    # Also no base_injector because this does not work in playbooks
+
+    def build_script_private_data(self, inventory_update, private_data_dir):
+        cp = configparser.RawConfigParser()
+        credential = inventory_update.get_cloud_credential()
+
+        section = 'cloudforms'
+        cp.add_section(section)
+
+        if credential:
+            cp.set(section, 'url', credential.get_input('host', default=''))
+            cp.set(section, 'username', credential.get_input('username', default=''))
+            cp.set(section, 'password', credential.get_input('password', default=''))
+            cp.set(section, 'ssl_verify', "false")
+
+        cloudforms_opts = dict(inventory_update.source_vars_dict.items())
+        for opt in ['version', 'purge_actions', 'clean_group_keys', 'nest_tags', 'suffix', 'prefer_ipv4']:
+            if opt in cloudforms_opts:
+                cp.set(section, opt, str(cloudforms_opts[opt]))
+
+        section = 'cache'
+        cp.add_section(section)
+        cp.set(section, 'max_age', "0")
+        cache_path = tempfile.mkdtemp(
+            prefix='cloudforms_cache',
+            dir=private_data_dir
+        )
+        cp.set(section, 'path', cache_path)
+
+        return self.dump_cp(cp, credential)
+
+
+class tower(PluginFileInjector):
+    plugin_name = 'tower'
+    base_injector = 'template'
+    initial_version = '2.8'  # Driven by "include_metadata" hostvars
+
+    def get_script_env(self, inventory_update, private_data_dir, private_data_files):
+        env = super(tower, self).get_script_env(inventory_update, private_data_dir, private_data_files)
+        env['TOWER_INVENTORY'] = inventory_update.instance_filters
+        env['TOWER_LICENSE_TYPE'] = get_licenser().validate().get('license_type', 'unlicensed')
+        return env
+
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        # Credentials injected as env vars, same as script
+        try:
+            # plugin can take an actual int type
+            identifier = int(inventory_update.instance_filters)
+        except ValueError:
+            # inventory_id could be a named URL
+            identifier = iri_to_uri(inventory_update.instance_filters)
+        return {
+            'plugin': self.plugin_name,
+            'inventory_id': identifier,
+            'include_metadata': True  # used for license check
+        }
+
+
+for cls in PluginFileInjector.__subclasses__():
+    InventorySourceOptions.injectors[cls.__name__] = cls
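The registration loop keys the registry by class name, which deliberately matches InventorySource.source values ('ec2', 'gce', 'tower', ...). A sketch of how a caller might consult it (the helper function is hypothetical; AWX's real call sites live in awx/main/tasks.py and differ in detail):

# Hypothetical consumer of the registry populated above
def get_injector_for(source, ansible_version):
    cls = InventorySourceOptions.injectors.get(source)  # e.g. 'ec2' -> the ec2 class
    if cls is None:
        return None  # source has no injector (e.g. 'file')
    return cls(ansible_version)

# injector = get_injector_for('gce', '2.8.0')
# if injector and injector.should_use_plugin():
#     content = injector.inventory_contents(inventory_update, private_data_dir)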
@@ -3,7 +3,6 @@

 # Python
 from collections import OrderedDict, namedtuple
-import configparser
 import errno
 import fnmatch
 import functools
@@ -24,7 +23,6 @@ try:
     import psutil
 except Exception:
     psutil = None
-from io import StringIO
 import urllib.parse as urlparse

 # Django
@@ -52,7 +50,7 @@ from awx.main.access import access_registry
 from awx.main.models import (
     Schedule, TowerScheduleState, Instance, InstanceGroup,
     UnifiedJob, Notification,
-    Inventory, SmartInventoryMembership,
+    Inventory, InventorySource, SmartInventoryMembership,
     Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob,
     JobEvent, ProjectUpdateEvent, InventoryUpdateEvent, AdHocCommandEvent, SystemJobEvent,
     build_safe_env
@@ -67,6 +65,7 @@ from awx.main.utils import (get_ssh_version, update_scm_url,
     get_licenser,
     ignore_inventory_computed_fields,
     ignore_inventory_group_removal, extract_ansible_vars, schedule_task_manager)
+from awx.main.utils.common import _get_ansible_version
 from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
 from awx.main.utils.reload import stop_local_services
 from awx.main.utils.pglock import advisory_lock
@@ -713,12 +712,25 @@ class BaseTask(object):
                 logger.error('Failed to update %s after %d retries.',
                              self.model._meta.object_name, _attempt)

+    def get_ansible_version(self, instance):
+        if not hasattr(self, '_ansible_version'):
+            self._ansible_version = _get_ansible_version(
+                ansible_path=self.get_path_to_ansible(instance, executable='ansible'))
+        return self._ansible_version
+
     def get_path_to(self, *args):
         '''
         Return absolute path relative to this file.
         '''
         return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))

+    def get_path_to_ansible(self, instance, executable='ansible-playbook', **kwargs):
+        venv_path = getattr(instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH)
+        venv_exe = os.path.join(venv_path, 'bin', executable)
+        if os.path.exists(venv_exe):
+            return venv_exe
+        return shutil.which(executable)
+
     def build_private_data(self, instance, private_data_dir):
         '''
         Return SSH private key data (only if stored in DB as ssh_key_data).
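A quick illustration of the resolution order get_path_to_ansible implements (paths hypothetical):

# instance.ansible_virtualenv_path = '/venv/ansible'
# get_path_to_ansible(instance, executable='ansible')
#   -> '/venv/ansible/bin/ansible'   if that file exists (custom venv wins)
#   -> shutil.which('ansible')       otherwise (system PATH fallback)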
@ -1925,191 +1937,9 @@ class RunInventoryUpdate(BaseTask):
|
||||
|
||||
If no private data is needed, return None.
|
||||
"""
|
||||
private_data = {'credentials': {}}
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
|
||||
if inventory_update.source == 'openstack':
|
||||
openstack_auth = dict(auth_url=credential.get_input('host', default=''),
|
||||
username=credential.get_input('username', default=''),
|
||||
password=credential.get_input('password', default=''),
|
||||
project_name=credential.get_input('project', default=''))
|
||||
if credential.has_input('domain'):
|
||||
openstack_auth['domain_name'] = credential.get_input('domain', default='')
|
||||
|
||||
private_state = inventory_update.source_vars_dict.get('private', True)
|
||||
verify_state = credential.get_input('verify_ssl', default=True)
|
||||
# Retrieve cache path from inventory update vars if available,
|
||||
# otherwise create a temporary cache path only for this update.
|
||||
cache = inventory_update.source_vars_dict.get('cache', {})
|
||||
if not isinstance(cache, dict):
|
||||
cache = {}
|
||||
if not cache.get('path', ''):
|
||||
cache_path = tempfile.mkdtemp(prefix='openstack_cache', dir=private_data_dir)
|
||||
cache['path'] = cache_path
|
||||
openstack_data = {
|
||||
'clouds': {
|
||||
'devstack': {
|
||||
'private': private_state,
|
||||
'verify': verify_state,
|
||||
'auth': openstack_auth,
|
||||
},
|
||||
},
|
||||
'cache': cache,
|
||||
}
|
||||
ansible_variables = {
|
||||
'use_hostnames': True,
|
||||
'expand_hostvars': False,
|
||||
'fail_on_errors': True,
|
||||
}
|
||||
provided_count = 0
|
||||
for var_name in ansible_variables:
|
||||
if var_name in inventory_update.source_vars_dict:
|
||||
ansible_variables[var_name] = inventory_update.source_vars_dict[var_name]
|
||||
provided_count += 1
|
||||
if provided_count:
|
||||
openstack_data['ansible'] = ansible_variables
|
||||
private_data['credentials'][credential] = yaml.safe_dump(
|
||||
openstack_data, default_flow_style=False, allow_unicode=True
|
||||
)
|
||||
return private_data
|
||||
|
||||
        cp = configparser.RawConfigParser()
        # Build custom ec2.ini for ec2 inventory script to use.
        if inventory_update.source == 'ec2':
            section = 'ec2'
            cp.add_section(section)
            ec2_opts = dict(inventory_update.source_vars_dict.items())
            regions = inventory_update.source_regions or 'all'
            regions = ','.join([x.strip() for x in regions.split(',')])
            regions_blacklist = ','.join(settings.EC2_REGIONS_BLACKLIST)
            ec2_opts['regions'] = regions
            ec2_opts.setdefault('regions_exclude', regions_blacklist)
            ec2_opts.setdefault('destination_variable', 'public_dns_name')
            ec2_opts.setdefault('vpc_destination_variable', 'ip_address')
            ec2_opts.setdefault('route53', 'False')
            ec2_opts.setdefault('all_instances', 'True')
            ec2_opts.setdefault('all_rds_instances', 'False')
            ec2_opts.setdefault('include_rds_clusters', 'False')
            ec2_opts.setdefault('rds', 'False')
            ec2_opts.setdefault('nested_groups', 'True')
            ec2_opts.setdefault('elasticache', 'False')
            ec2_opts.setdefault('stack_filters', 'False')
            if inventory_update.instance_filters:
                ec2_opts.setdefault('instance_filters', inventory_update.instance_filters)
            group_by = [x.strip().lower() for x in inventory_update.group_by.split(',') if x.strip()]
            for choice in inventory_update.get_ec2_group_by_choices():
                value = bool((group_by and choice[0] in group_by) or (not group_by and choice[0] != 'instance_id'))
                ec2_opts.setdefault('group_by_%s' % choice[0], str(value))
            if 'cache_path' not in ec2_opts:
                cache_path = tempfile.mkdtemp(prefix='ec2_cache', dir=private_data_dir)
                ec2_opts['cache_path'] = cache_path
            ec2_opts.setdefault('cache_max_age', '300')
            for k, v in ec2_opts.items():
                cp.set(section, k, str(v))
        # Allow custom options to vmware inventory script.
        elif inventory_update.source == 'vmware':

            section = 'vmware'
            cp.add_section(section)
            cp.set('vmware', 'cache_max_age', '0')
            cp.set('vmware', 'validate_certs', str(settings.VMWARE_VALIDATE_CERTS))
            cp.set('vmware', 'username', credential.get_input('username', default=''))
            cp.set('vmware', 'password', credential.get_input('password', default=''))
            cp.set('vmware', 'server', credential.get_input('host', default=''))

            vmware_opts = dict(inventory_update.source_vars_dict.items())
            if inventory_update.instance_filters:
                vmware_opts.setdefault('host_filters', inventory_update.instance_filters)
            if inventory_update.group_by:
                vmware_opts.setdefault('groupby_patterns', inventory_update.group_by)

            for k, v in vmware_opts.items():
                cp.set(section, k, str(v))

        elif inventory_update.source == 'satellite6':
            section = 'foreman'
            cp.add_section(section)

            group_patterns = '[]'
            group_prefix = 'foreman_'
            want_hostcollections = 'False'
            foreman_opts = dict(inventory_update.source_vars_dict.items())
            foreman_opts.setdefault('ssl_verify', 'False')
            for k, v in foreman_opts.items():
                if k == 'satellite6_group_patterns' and isinstance(v, str):
                    group_patterns = v
                elif k == 'satellite6_group_prefix' and isinstance(v, str):
                    group_prefix = v
                elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
                    want_hostcollections = v
                else:
                    cp.set(section, k, str(v))

            if credential:
                cp.set(section, 'url', credential.get_input('host', default=''))
                cp.set(section, 'user', credential.get_input('username', default=''))
                cp.set(section, 'password', credential.get_input('password', default=''))

            section = 'ansible'
            cp.add_section(section)
            cp.set(section, 'group_patterns', group_patterns)
            cp.set(section, 'want_facts', 'True')
            cp.set(section, 'want_hostcollections', str(want_hostcollections))
            cp.set(section, 'group_prefix', group_prefix)

            section = 'cache'
            cp.add_section(section)
            cp.set(section, 'path', '/tmp')
            cp.set(section, 'max_age', '0')

        elif inventory_update.source == 'cloudforms':
            section = 'cloudforms'
            cp.add_section(section)

            if credential:
                cp.set(section, 'url', credential.get_input('host', default=''))
                cp.set(section, 'username', credential.get_input('username', default=''))
                cp.set(section, 'password', credential.get_input('password', default=''))
                cp.set(section, 'ssl_verify', "false")

            cloudforms_opts = dict(inventory_update.source_vars_dict.items())
            for opt in ['version', 'purge_actions', 'clean_group_keys', 'nest_tags', 'suffix', 'prefer_ipv4']:
                if opt in cloudforms_opts:
                    cp.set(section, opt, str(cloudforms_opts[opt]))

            section = 'cache'
            cp.add_section(section)
            cp.set(section, 'max_age', "0")
            cache_path = tempfile.mkdtemp(
                prefix='cloudforms_cache',
                dir=private_data_dir
            )
            cp.set(section, 'path', cache_path)

        elif inventory_update.source == 'azure_rm':
            section = 'azure'
            cp.add_section(section)
            cp.set(section, 'include_powerstate', 'yes')
            cp.set(section, 'group_by_resource_group', 'yes')
            cp.set(section, 'group_by_location', 'yes')
            cp.set(section, 'group_by_tag', 'yes')

            if inventory_update.source_regions and 'all' not in inventory_update.source_regions:
                cp.set(
                    section, 'locations',
                    ','.join([x.strip() for x in inventory_update.source_regions.split(',')])
                )

            azure_rm_opts = dict(inventory_update.source_vars_dict.items())
            for k, v in azure_rm_opts.items():
                cp.set(section, k, str(v))

        # Return INI content.
        if cp.sections():
            f = StringIO()
            cp.write(f)
            private_data['credentials'][credential] = f.getvalue()
        return private_data
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[inventory_update.source](self.get_ansible_version(inventory_update))
            return injector.build_private_data(inventory_update, private_data_dir)

    def build_passwords(self, inventory_update, runtime_passwords):
        """Build a dictionary of authentication/credential information for
@ -2134,9 +1964,13 @@ class RunInventoryUpdate(BaseTask):
    def build_env(self, inventory_update, private_data_dir, isolated, private_data_files=None):
        """Build environment dictionary for inventory import.

        This is the mechanism by which any data that needs to be passed
        This used to be the mechanism by which any data that needs to be passed
        to the inventory update script is set up. In particular, this is how
        inventory update is aware of its proper credentials.

        Most environment injection is now accomplished by the credential
        injectors. The primary purpose this still serves is to
        point to the inventory update INI or config file.
        """
        env = super(RunInventoryUpdate, self).build_env(inventory_update,
                                                        private_data_dir,
@ -2149,52 +1983,25 @@ class RunInventoryUpdate(BaseTask):
        env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
        env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
        env.update(STANDARD_INVENTORY_UPDATE_ENV)
        plugin_name = inventory_update.get_inventory_plugin_name()
        if plugin_name is not None:
            env['ANSIBLE_INVENTORY_ENABLED'] = plugin_name

        # Set environment variables specific to each source.
        #
        # These are set here and then read in by the various Ansible inventory
        # modules, which will actually do the inventory sync.
        #
        # The inventory modules are vendored in AWX in the
        # `awx/plugins/inventory` directory; those files should be kept in
        # sync with those in Ansible core at all times.
        injector = None
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[inventory_update.source](self.get_ansible_version(inventory_update))

        ini_mapping = {
            'ec2': 'EC2_INI_PATH',
            'vmware': 'VMWARE_INI_PATH',
            'azure_rm': 'AZURE_INI_PATH',
            'openstack': 'OS_CLIENT_CONFIG_FILE',
            'satellite6': 'FOREMAN_INI_PATH',
            'cloudforms': 'CLOUDFORMS_INI_PATH'
        }
        if inventory_update.source in ini_mapping:
            cred_data = private_data_files.get('credentials', {})
            env[ini_mapping[inventory_update.source]] = cred_data.get(
                inventory_update.get_cloud_credential(), ''
            )
        if injector is not None:
            env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
            # All CLOUD_PROVIDERS sources implement as either script or auto plugin
            if injector.should_use_plugin():
                env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
            else:
                env['ANSIBLE_INVENTORY_ENABLED'] = 'script'

        if inventory_update.source == 'gce':
            env['GCE_ZONE'] = inventory_update.source_regions if inventory_update.source_regions != 'all' else ''  # noqa
            # TODO: option for Automatic transformation of group names, ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS

            # by default, the GCE inventory source caches results on disk for
            # 5 minutes; disable this behavior
            cp = configparser.ConfigParser()
            cp.add_section('cache')
            cp.set('cache', 'cache_max_age', '0')
            handle, path = tempfile.mkstemp(dir=private_data_dir)
            cp.write(os.fdopen(handle, 'w'))
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
            env['GCE_INI_PATH'] = path
        elif inventory_update.source in ['scm', 'custom']:
        if inventory_update.source in ['scm', 'custom']:
            for env_k in inventory_update.source_vars_dict:
                if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLACKLIST:
                    env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
        elif inventory_update.source == 'tower':
            env['TOWER_INVENTORY'] = inventory_update.instance_filters
            env['TOWER_LICENSE_TYPE'] = get_licenser().validate()['license_type']
        elif inventory_update.source == 'file':
            raise NotImplementedError('Cannot update file sources through the task system.')
        return env
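The "script or auto plugin" comment above is the heart of this transition: each source's injector picks the inventory plugin when the Ansible version is new enough, and falls back to the vendored script otherwise. A self-contained sketch of that gate (the class and version values are illustrative stand-ins, not AWX's exact injector code):

from distutils.version import LooseVersion

class ExampleInjector:
    # assumed cutover version for this hypothetical source
    initial_version = '2.8'

    def __init__(self, ansible_version):
        self.ansible_version = ansible_version

    def should_use_plugin(self):
        # use the plugin on new-enough Ansible, the vendored script otherwise
        return bool(self.initial_version and
                    LooseVersion(self.ansible_version) >= LooseVersion(self.initial_version))

env = {}
injector = ExampleInjector('2.8.1')
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto' if injector.should_use_plugin() else 'script'
assert env['ANSIBLE_INVENTORY_ENABLED'] == 'auto'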
@ -2259,40 +2066,78 @@ class RunInventoryUpdate(BaseTask):
                     getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper()),])
        # Add arguments for the source inventory script
        args.append('--source')
        if src in CLOUD_PROVIDERS:
            # Get the path to the inventory plugin, and append it to our
            # arguments.
            plugin_path = self.get_path_to('..', 'plugins', 'inventory',
                                           '%s.py' % src)
            args.append(plugin_path)
        elif src == 'scm':
            args.append(inventory_update.get_actual_source_path())
        elif src == 'custom':
            handle, path = tempfile.mkstemp(dir=private_data_dir)
            f = os.fdopen(handle, 'w')
            if inventory_update.source_script is None:
                raise RuntimeError('Inventory Script does not exist')
            f.write(inventory_update.source_script.script)
            f.close()
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
            args.append(path)
        args.append(self.psuedo_build_inventory(inventory_update, private_data_dir))
        if src == 'custom':
            args.append("--custom")
        args.append('-v%d' % inventory_update.verbosity)
        if settings.DEBUG:
            args.append('--traceback')
        return args

    def build_inventory(self, inventory_update, private_data_dir):
        return None  # what runner expects in order to not deal with inventory

    def psuedo_build_inventory(self, inventory_update, private_data_dir):
        """Inventory imports are run through a management command.
        We pass the inventory in args to that command, so runner does not
        consider this to be "Ansible" inventory even though it is.
        Eventually, we would like to cut out the management command
        and thus use this as the real inventory.
        """
        src = inventory_update.source

        injector = None
        if inventory_update.source in InventorySource.injectors:
            injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update))

        if injector is not None:
            if injector.should_use_plugin():
                content = injector.inventory_contents(inventory_update, private_data_dir)
                # must be a statically named file
                inventory_path = os.path.join(private_data_dir, injector.filename)
                with open(inventory_path, 'w') as f:
                    f.write(content)
                os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
            else:
                # Use the vendored script path
                inventory_path = self.get_path_to('..', 'plugins', 'inventory', injector.script_name)
        elif src == 'scm':
            inventory_path = inventory_update.get_actual_source_path()
        elif src == 'custom':
            handle, inventory_path = tempfile.mkstemp(dir=private_data_dir)
            f = os.fdopen(handle, 'w')
            if inventory_update.source_script is None:
                raise RuntimeError('Inventory Script does not exist')
            f.write(inventory_update.source_script.script)
            f.close()
            os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        return inventory_path

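The "statically named file" comment above is load-bearing: Ansible's auto plugin only considers a YAML file whose name the target plugin recognizes, then dispatches on its plugin: key. A rough sketch of that write step (the file name and contents below follow the aws_ec2 conventions and are trimmed for illustration):

import os
import stat
import tempfile

private_data_dir = tempfile.mkdtemp(prefix='awx_example_')
# aws_ec2.yml is a name the aws_ec2 plugin accepts; the contents are a minimal stand-in
inventory_path = os.path.join(private_data_dir, 'aws_ec2.yml')
with open(inventory_path, 'w') as f:
    f.write('plugin: aws_ec2\nregions:\n- us-east-2\n')
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
print(inventory_path)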
    def build_cwd(self, inventory_update, private_data_dir):
        if inventory_update.source == 'scm' and inventory_update.source_project_update:
        '''
        There are two cases where the inventory "source" is in a different
        location from the private data:
        - deprecated vendored inventory scripts in awx/plugins/inventory
        - SCM, where the source needs to live in the project folder
        In these cases, the inventory does not exist in the standard tempdir.
        '''
        src = inventory_update.source
        if src == 'scm' and inventory_update.source_project_update:
            return inventory_update.source_project_update.get_project_path(check_if_exists=False)
        return self.get_path_to('..', 'plugins', 'inventory')
        if src in CLOUD_PROVIDERS:
            injector = None
            if src in InventorySource.injectors:
                injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update))
            if (not injector) or (not injector.should_use_plugin()):
                return self.get_path_to('..', 'plugins', 'inventory')
        return private_data_dir

    def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
        return None

    def build_credentials_list(self, inventory_update):
        # TODO: allow multiple custom creds for inv updates
        return [inventory_update.get_cloud_credential()]
        # All credentials not used by inventory source injector
        return inventory_update.get_extra_credentials()

    def get_idle_timeout(self):
        return getattr(settings, 'INVENTORY_UPDATE_IDLE_TIMEOUT', None)
|
8 awx/main/tests/data/inventory/plugins/azure_rm/env.json Normal file
@ -0,0 +1,8 @@
{
    "AZURE_SUBSCRIPTION_ID": "fooo",
    "AZURE_CLIENT_ID": "fooo",
    "AZURE_TENANT": "fooo",
    "AZURE_SECRET": "fooo",
    "AZURE_CLOUD_ENVIRONMENT": "fooo",
    "ANSIBLE_JINJA2_NATIVE": "True"
}
|
@ -0,0 +1,35 @@
conditional_groups:
  azure: true
default_host_filters: []
exclude_host_filters:
- resource_group not in ['foo_resources', 'bar_resources']
- location not in ['southcentralus', 'westus']
hostvar_expressions:
  ansible_host: private_ipv4_addresses[0]
  computer_name: name
  private_ip: private_ipv4_addresses[0]
  provisioning_state: provisioning_state | title
  public_ip: public_ipv4_addresses[0]
  tags: tags if tags else None
  type: resource_type
keyed_groups:
- key: location
  prefix: ''
  separator: ''
- key: tags.keys() | list if tags else []
  prefix: ''
  separator: ''
- key: security_group
  prefix: ''
  separator: ''
- key: resource_group
  prefix: ''
  separator: ''
- key: os_disk.operating_system_type
  prefix: ''
  separator: ''
- key: dict(tags.keys() | map("regex_replace", "^(.*)$", "\1_") | list | zip(tags.values() | list)) if tags else []
  prefix: ''
  separator: ''
plugin: azure_rm
use_contrib_script_compatible_sanitization: true
|
6 awx/main/tests/data/inventory/plugins/ec2/env.json Normal file
@ -0,0 +1,6 @@
{
    "AWS_ACCESS_KEY_ID": "fooo",
    "AWS_SECRET_ACCESS_KEY": "fooo",
    "AWS_SECURITY_TOKEN": "fooo",
    "ANSIBLE_JINJA2_NATIVE": "True"
}
|
82 awx/main/tests/data/inventory/plugins/ec2/files/aws_ec2.yml Normal file
@ -0,0 +1,82 @@
boto_profile: /tmp/my_boto_stuff
compose:
  ansible_host: public_ip_address
  ec2_account_id: owner_id
  ec2_ami_launch_index: ami_launch_index | string
  ec2_architecture: architecture
  ec2_block_devices: dict(block_device_mappings | map(attribute='device_name') | list | zip(block_device_mappings | map(attribute='ebs.volume_id') | list))
  ec2_client_token: client_token
  ec2_dns_name: public_dns_name
  ec2_ebs_optimized: ebs_optimized
  ec2_eventsSet: events | default("")
  ec2_group_name: placement.group_name
  ec2_hypervisor: hypervisor
  ec2_id: instance_id
  ec2_image_id: image_id
  ec2_instance_profile: iam_instance_profile | default("")
  ec2_instance_type: instance_type
  ec2_ip_address: public_ip_address
  ec2_kernel: kernel_id | default("")
  ec2_key_name: key_name
  ec2_launch_time: launch_time | regex_replace(" ", "T") | regex_replace("(\+)(\d\d):(\d)(\d)$", ".\g<2>\g<3>Z")
  ec2_monitored: monitoring.state in ['enabled', 'pending']
  ec2_monitoring_state: monitoring.state
  ec2_persistent: persistent | default(false)
  ec2_placement: placement.availability_zone
  ec2_platform: platform | default("")
  ec2_private_dns_name: private_dns_name
  ec2_private_ip_address: private_ip_address
  ec2_public_dns_name: public_dns_name
  ec2_ramdisk: ramdisk_id | default("")
  ec2_reason: state_transition_reason
  ec2_region: placement.region
  ec2_requester_id: requester_id | default("")
  ec2_root_device_name: root_device_name
  ec2_root_device_type: root_device_type
  ec2_security_group_ids: security_groups | map(attribute='group_id') | list | join(',')
  ec2_security_group_names: security_groups | map(attribute='group_name') | list | join(',')
  ec2_sourceDestCheck: source_dest_check | default(false) | lower | string
  ec2_spot_instance_request_id: spot_instance_request_id | default("")
  ec2_state: state.name
  ec2_state_code: state.code
  ec2_state_reason: state_reason.message if state_reason is defined else ""
  ec2_subnet_id: subnet_id | default("")
  ec2_tag_Name: tags.Name
  ec2_virtualization_type: virtualization_type
  ec2_vpc_id: vpc_id | default("")
filters:
  instance-state-name:
  - running
groups:
  ec2: true
hostnames:
- network-interface.addresses.association.public-ip
- dns-name
- private-dns-name
keyed_groups:
- key: placement.availability_zone
  parent_group: zones
  prefix: ''
  separator: ''
- key: instance_type | regex_replace("[^A-Za-z0-9\_]", "_")
  parent_group: types
  prefix: type
- key: placement.region
  parent_group: regions
  prefix: ''
  separator: ''
- key: dict(tags.keys() | map("regex_replace", "[^A-Za-z0-9\_]", "_") | list | zip(tags.values() | map("regex_replace", "[^A-Za-z0-9\_]", "_") | list))
  parent_group: tags
  prefix: tag
- key: tags.keys() | map("regex_replace", "[^A-Za-z0-9\_]", "_") | list
  parent_group: tags
  prefix: tag
- key: placement.availability_zone
  parent_group: '{{ placement.region }}'
  prefix: ''
  separator: ''
plugin: aws_ec2
regions:
- us-east-2
- ap-south-1
use_contrib_script_compatible_sanitization: true
|
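A quick way to sanity-check a reference file like aws_ec2.yml above is to parse it and inspect the keys the plugin dispatch relies on; a small sketch (assumes the file above has been saved locally under the same name):

import yaml

with open('aws_ec2.yml') as f:
    config = yaml.safe_load(f)

# 'plugin' is what Ansible's auto plugin dispatches on
assert config['plugin'] == 'aws_ec2'
# the compose entries emulate hostvars the old ec2.py script provided
assert 'ec2_ip_address' in config['compose']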
@ -0,0 +1,7 @@
{
    "type": "service_account",
    "private_key": "{{private_key}}",
    "client_email": "fooo",
    "project_id": "fooo",
    "token_uri": "https://accounts.google.com/o/oauth2/token"
}
|
@ -0,0 +1,46 @@
auth_kind: serviceaccount
compose:
  ansible_ssh_host: networkInterfaces[0].accessConfigs[0].natIP
  gce_description: description if description else None
  gce_id: id
  gce_machine_type: machineType
  gce_metadata: metadata.get("items", []) | items2dict(key_name="key", value_name="value")
  gce_name: name
  gce_network: networkInterfaces[0].network.name
  gce_private_ip: networkInterfaces[0].networkIP
  gce_public_ip: networkInterfaces[0].accessConfigs[0].natIP
  gce_status: status
  gce_subnetwork: networkInterfaces[0].subnetwork.name
  gce_tags: tags.get("items", [])
  gce_zone: zone
hostnames:
- name
- public_ip
- private_ip
keyed_groups:
- key: gce_subnetwork
  prefix: network
- key: gce_private_ip
  prefix: ''
  separator: ''
- key: gce_public_ip
  prefix: ''
  separator: ''
- key: machineType
  prefix: ''
  separator: ''
- key: zone
  prefix: ''
  separator: ''
- key: gce_tags
  prefix: tag
- key: status | lower
  prefix: status
plugin: gcp_compute
projects:
- fooo
service_account_file: {{ file_reference }}
use_contrib_script_compatible_sanitization: true
zones:
- us-east4-a
- us-west1-b
|
@ -0,0 +1,14 @@
ansible:
  expand_hostvars: true
  fail_on_errors: true
  use_hostnames: false
clouds:
  devstack:
    auth:
      auth_url: https://foo.invalid
      domain_name: fooo
      password: fooo
      project_name: fooo
      username: fooo
    private: false
    verify: false
|
@ -0,0 +1,6 @@
clouds_yaml_path:
- {{ file_reference }}
expand_hostvars: true
fail_on_errors: true
inventory_hostname: uuid
plugin: openstack
|
@ -0,0 +1,5 @@
{
    "FOREMAN_SERVER": "https://foo.invalid",
    "FOREMAN_USER": "fooo",
    "FOREMAN_PASSWORD": "fooo"
}
|
@ -0,0 +1 @@
plugin: foreman
|
6 awx/main/tests/data/inventory/plugins/tower/env.json Normal file
@ -0,0 +1,6 @@
{
    "TOWER_HOST": "https://foo.invalid",
    "TOWER_USERNAME": "fooo",
    "TOWER_PASSWORD": "fooo",
    "TOWER_VERIFY_SSL": "False"
}
|
@ -0,0 +1,3 @@
include_metadata: true
inventory_id: 42
plugin: tower
|
8 awx/main/tests/data/inventory/scripts/azure_rm/env.json Normal file
@ -0,0 +1,8 @@
{
    "AZURE_SUBSCRIPTION_ID": "fooo",
    "AZURE_CLIENT_ID": "fooo",
    "AZURE_TENANT": "fooo",
    "AZURE_SECRET": "fooo",
    "AZURE_CLOUD_ENVIRONMENT": "fooo",
    "AZURE_INI_PATH": "{{ file_reference }}"
}
|
@ -0,0 +1,10 @@
[azure]
include_powerstate = yes
group_by_resource_group = yes
group_by_location = yes
group_by_tag = yes
locations = southcentralus,westus
base_source_var = value_of_var
use_private_ip = True
resource_groups = foo_resources,bar_resources

|
@ -0,0 +1,3 @@
{
    "CLOUDFORMS_INI_PATH": "{{ file_reference }}"
}
|
@ -0,0 +1,16 @@
[cloudforms]
url = https://foo.invalid
username = fooo
password = fooo
ssl_verify = false
version = 2.4
purge_actions = maybe
clean_group_keys = this_key
nest_tags = yes
suffix = .ppt
prefer_ipv4 = yes

[cache]
max_age = 0
path = {{ cache_dir }}

|
@ -0,0 +1 @@
<directory>
|
6 awx/main/tests/data/inventory/scripts/ec2/env.json Normal file
@ -0,0 +1,6 @@
{
    "AWS_ACCESS_KEY_ID": "fooo",
    "AWS_SECRET_ACCESS_KEY": "fooo",
    "AWS_SECURITY_TOKEN": "fooo",
    "EC2_INI_PATH": "{{ file_reference }}"
}
|
32 awx/main/tests/data/inventory/scripts/ec2/files/EC2_INI_PATH Normal file
@ -0,0 +1,32 @@
[ec2]
base_source_var = value_of_var
boto_profile = /tmp/my_boto_stuff
regions = us-east-2,ap-south-1
regions_exclude = us-gov-west-1,cn-north-1
destination_variable = public_dns_name
vpc_destination_variable = ip_address
route53 = False
all_instances = True
all_rds_instances = False
include_rds_clusters = False
rds = False
nested_groups = True
elasticache = False
stack_filters = False
instance_filters = foobaa
group_by_ami_id = False
group_by_availability_zone = True
group_by_aws_account = False
group_by_instance_id = False
group_by_instance_state = False
group_by_platform = False
group_by_instance_type = True
group_by_key_pair = False
group_by_region = True
group_by_security_group = False
group_by_tag_keys = True
group_by_tag_none = False
group_by_vpc_id = False
cache_path = {{ cache_dir }}
cache_max_age = 300

|
@ -0,0 +1 @@
<directory>
|
7 awx/main/tests/data/inventory/scripts/gce/env.json Normal file
@ -0,0 +1,7 @@
{
    "GCE_EMAIL": "fooo",
    "GCE_PROJECT": "fooo",
    "GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
    "GCE_ZONE": "us-east4-a,us-west1-b",
    "GCE_INI_PATH": "{{ file_reference }}"
}
|
@ -0,0 +1,7 @@
{
    "type": "service_account",
    "private_key": "{{private_key}}",
    "client_email": "fooo",
    "project_id": "fooo",
    "token_uri": "https://accounts.google.com/o/oauth2/token"
}
|
@ -0,0 +1,3 @@
[cache]
cache_max_age = 0

|
3 awx/main/tests/data/inventory/scripts/openstack/env.json Normal file
@ -0,0 +1,3 @@
{
    "OS_CLIENT_CONFIG_FILE": "{{ file_reference }}"
}
|
@ -0,0 +1,16 @@
ansible:
  expand_hostvars: true
  fail_on_errors: true
  use_hostnames: false
cache:
  path: {{ cache_dir }}
clouds:
  devstack:
    auth:
      auth_url: https://foo.invalid
      domain_name: fooo
      password: fooo
      project_name: fooo
      username: fooo
    private: false
    verify: false
|
@ -0,0 +1 @@
<directory>
|
6 awx/main/tests/data/inventory/scripts/rhv/env.json Normal file
@ -0,0 +1,6 @@
{
    "OVIRT_INI_PATH": "{{ file_reference }}",
    "OVIRT_URL": "https://foo.invalid",
    "OVIRT_USERNAME": "fooo",
    "OVIRT_PASSWORD": "fooo"
}
|
@ -0,0 +1,5 @@
[ovirt]
ovirt_url=https://foo.invalid
ovirt_username=fooo
ovirt_password=fooo
ovirt_ca_file=fooo
|
@ -0,0 +1,3 @@
{
    "FOREMAN_INI_PATH": "{{ file_reference }}"
}
|
@ -0,0 +1,17 @@
[foreman]
base_source_var = value_of_var
ssl_verify = False
url = https://foo.invalid
user = fooo
password = fooo

[ansible]
group_patterns = foo_group_patterns
want_facts = True
want_hostcollections = True
group_prefix = foo_group_prefix

[cache]
path = /tmp
max_age = 0

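As a quick check of the INI shape above, the foreman script-era settings parse cleanly with configparser; the values below are the fixture's placeholders:

import configparser

cp = configparser.RawConfigParser()
cp.read_string('''\
[foreman]
ssl_verify = False
url = https://foo.invalid

[ansible]
group_patterns = foo_group_patterns
want_facts = True

[cache]
path = /tmp
max_age = 0
''')
assert cp.get('ansible', 'group_patterns') == 'foo_group_patterns'
assert cp.getint('cache', 'max_age') == 0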
|
8 awx/main/tests/data/inventory/scripts/tower/env.json Normal file
@ -0,0 +1,8 @@
{
    "TOWER_HOST": "https://foo.invalid",
    "TOWER_USERNAME": "fooo",
    "TOWER_PASSWORD": "fooo",
    "TOWER_VERIFY_SSL": "False",
    "TOWER_INVENTORY": "42",
    "TOWER_LICENSE_TYPE": "open"
}
|
7 awx/main/tests/data/inventory/scripts/vmware/env.json Normal file
@ -0,0 +1,7 @@
{
    "VMWARE_USER": "fooo",
    "VMWARE_PASSWORD": "fooo",
    "VMWARE_HOST": "https://foo.invalid",
    "VMWARE_VALIDATE_CERTS": "False",
    "VMWARE_INI_PATH": "{{ file_reference }}"
}
|
@ -0,0 +1,10 @@
[vmware]
cache_max_age = 0
validate_certs = False
username = fooo
password = fooo
server = https://foo.invalid
base_source_var = value_of_var
host_filters = foobaa
groupby_patterns = fouo

|
@ -11,7 +11,8 @@ from django.core.management.base import CommandError

# AWX
from awx.main.management.commands import inventory_import
from awx.main.models import Inventory, Host, Group
from awx.main.models import Inventory, Host, Group, InventorySource
from awx.main.utils.mem_inventory import MemGroup


TEST_INVENTORY_CONTENT = {
@ -306,3 +307,21 @@ class TestEnabledVar:

    def test_enabled_var_is_enabled_value(self, cmd):
        assert cmd._get_enabled({'foo': {'bar': 'barfoo'}}) is True


def test_tower_version_compare():
    cmd = inventory_import.Command()
    cmd.inventory_source = InventorySource(source='tower')
    cmd.all_group = MemGroup('all')
    # mimic example from https://github.com/ansible/ansible/pull/52747
    # until that is merged, this is the best testing we can do
    cmd.all_group.variables = {
        'tower_metadata': {
            "ansible_version": "2.7.5",
            "license_type": "open",
            "version": "2.0.1-1068-g09684e2c41"
        }
    }
    with pytest.raises(CommandError):
        cmd.remote_tower_license_compare('very_supported')
    cmd.remote_tower_license_compare('open')

|
@ -464,8 +464,9 @@ def group(inventory):

@pytest.fixture
def inventory_source(inventory):
    # by making it ec2, the credential is not required
    return InventorySource.objects.create(name='single-inv-src',
                                          inventory=inventory, source='gce')
                                          inventory=inventory, source='ec2')


@pytest.fixture
|
@ -2,6 +2,7 @@

import pytest
from unittest import mock
import json

from django.core.exceptions import ValidationError

@ -11,8 +12,12 @@ from awx.main.models import (
    Inventory,
    InventorySource,
    InventoryUpdate,
    CredentialType,
    Credential,
    Job
)
from awx.main.constants import CLOUD_PROVIDERS
from awx.main.models.inventory import PluginFileInjector
from awx.main.utils.filters import SmartFilter


@ -206,6 +211,108 @@ class TestSCMClean:
        inv_src2.clean_update_on_project_update()


@pytest.mark.django_db
class TestInventorySourceInjectors:
    def test_should_use_plugin(self):
        class foo(PluginFileInjector):
            plugin_name = 'foo_compute'
            initial_version = '2.7.8'
        assert not foo('2.7.7').should_use_plugin()
        assert foo('2.8').should_use_plugin()

    def test_extra_credentials(self, project, credential):
        inventory_source = InventorySource.objects.create(
            name='foo', source='custom', source_project=project
        )
        inventory_source.credentials.add(credential)
        assert inventory_source.get_cloud_credential() == credential  # for serializer
        assert inventory_source.get_extra_credentials() == [credential]

        inventory_source.source = 'ec2'
        assert inventory_source.get_cloud_credential() == credential
        assert inventory_source.get_extra_credentials() == []

    def test_all_cloud_sources_covered(self):
        """Code in several places relies on the fact that the older
        CLOUD_PROVIDERS constant contains the same names as what are
        defined within the injectors
        """
        assert set(CLOUD_PROVIDERS) == set(InventorySource.injectors.keys())

    @pytest.mark.parametrize('source,filename', [
        ('ec2', 'aws_ec2.yml'),
        ('openstack', 'openstack.yml'),
        ('gce', 'gcp_compute.yml')
    ])
    def test_plugin_filenames(self, source, filename):
        """It is important that the filenames for inventory plugin files
        are named correctly, because Ansible will reject files that do
        not have these exact names
        """
        injector = InventorySource.injectors[source]('2.7.7')
        assert injector.filename == filename

    @pytest.mark.parametrize('source,script_name', [
        ('ec2', 'ec2.py'),
        ('rhv', 'ovirt4.py'),
        ('satellite6', 'foreman.py'),
        ('openstack', 'openstack_inventory.py')
    ], ids=['ec2', 'rhv', 'satellite6', 'openstack'])
    def test_script_filenames(self, source, script_name):
        """Ansible has several exceptions in naming of scripts
        """
        injector = InventorySource.injectors[source]('2.7.7')
        assert injector.script_name == script_name

    def test_group_by_azure(self):
        injector = InventorySource.injectors['azure_rm']('2.9')
        inv_src = InventorySource(
            name='azure source', source='azure_rm',
            source_vars={'group_by_os_family': True}
        )
        group_by_on = injector.inventory_as_dict(inv_src, '/tmp/foo')
        # suspicious, yes, that is just what the script did
        expected_groups = 6
        assert len(group_by_on['keyed_groups']) == expected_groups
        inv_src.source_vars = json.dumps({'group_by_os_family': False})
        group_by_off = injector.inventory_as_dict(inv_src, '/tmp/foo')
        # much better, everyone should turn off the flag and live in the future
        assert len(group_by_off['keyed_groups']) == expected_groups - 1

    def test_tower_plugin_named_url(self):
        injector = InventorySource.injectors['tower']('2.9')
        inv_src = InventorySource(
            name='my tower source', source='tower',
            # named URL pattern "inventory++organization"
            instance_filters='Designer hair 읰++Cosmetic_products䵆'
        )
        result = injector.inventory_as_dict(inv_src, '/tmp/foo')
        assert result['inventory_id'] == 'Designer%20hair%20%EC%9D%B0++Cosmetic_products%E4%B5%86'
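The expected string in the assertion above is consistent with percent-encoding each part of the named URL while leaving the ++ separator intact; one way to reproduce it (an illustration, not necessarily the injector's exact implementation):

from urllib.parse import quote

instance_filters = 'Designer hair 읰++Cosmetic_products䵆'
named_url = quote(instance_filters, safe='+')
assert named_url == 'Designer%20hair%20%EC%9D%B0++Cosmetic_products%E4%B5%86'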


@pytest.mark.django_db
def test_custom_source_custom_credential(organization):
    credential_type = CredentialType.objects.create(
        kind='cloud',
        name='MyCloud',
        inputs = {
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True
            }]
        }
    )
    credential = Credential.objects.create(
        name='my cred', credential_type=credential_type, organization=organization,
        inputs={'api_token': 'secret'}
    )
    inv_source = InventorySource.objects.create(source='scm')
    inv_source.credentials.add(credential)
    assert inv_source.get_cloud_credential() == credential


@pytest.fixture
def setup_ec2_gce(organization):
    ec2_inv = Inventory.objects.create(name='test_ec2', organization=organization)
|
313 awx/main/tests/functional/test_inventory_source_injectors.py Normal file
@ -0,0 +1,313 @@
import pytest
from unittest import mock
import os
import json
import re
from collections import namedtuple

from awx.main.tasks import RunInventoryUpdate
from awx.main.models import InventorySource, Credential, CredentialType, UnifiedJob
from awx.main.constants import CLOUD_PROVIDERS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.tests import data

from django.conf import settings

DATA = os.path.join(os.path.dirname(data.__file__), 'inventory')

TEST_SOURCE_FIELDS = {
    'vmware': {
        'instance_filters': 'foobaa',
        'group_by': 'fouo'
    },
    'ec2': {
        'instance_filters': 'foobaa',
        # group_by selected to capture some non-trivial cross-interactions
        'group_by': 'availability_zone,instance_type,tag_keys,region',
        'source_regions': 'us-east-2,ap-south-1'
    },
    'gce': {
        'source_regions': 'us-east4-a,us-west1-b'  # surfaced as env var
    },
    'azure_rm': {
        'source_regions': 'southcentralus,westus'
    },
    'tower': {
        'instance_filters': '42'
    }
}

INI_TEST_VARS = {
    'ec2': {
        'boto_profile': '/tmp/my_boto_stuff'
    },
    'gce': {},
    'openstack': {
        'private': False,
        'use_hostnames': False,
        'expand_hostvars': True,
        'fail_on_errors': True
    },
    'rhv': {},  # there are none
    'tower': {},  # there are none
    'vmware': {
        # setting VMWARE_VALIDATE_CERTS is duplicated with env var
    },
    'azure_rm': {
        'use_private_ip': True,
        'resource_groups': 'foo_resources,bar_resources'
    },
    'satellite6': {
        'satellite6_group_patterns': 'foo_group_patterns',
        'satellite6_group_prefix': 'foo_group_prefix',
        'satellite6_want_hostcollections': True
    },
    'cloudforms': {
        'version': '2.4',
        'purge_actions': 'maybe',
        'clean_group_keys': 'this_key',
        'nest_tags': 'yes',
        'suffix': '.ppt',
        'prefer_ipv4': 'yes'
    }
}


def generate_fake_var(element):
    """Given a credential type field element, makes up something acceptable.
    """
    if element['type'] == 'string':
        if element.get('format', None) == 'ssh_private_key':
            # this example came from the internet
            return '\n'.join([
                '-----BEGIN ENCRYPTED PRIVATE KEY-----'
                'MIIBpjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI5yNCu9T5SnsCAggA'
                'MBQGCCqGSIb3DQMHBAhJISTgOAxtYwSCAWDXK/a1lxHIbRZHud1tfRMR4ROqkmr4'
                'kVGAnfqTyGptZUt3ZtBgrYlFAaZ1z0wxnhmhn3KIbqebI4w0cIL/3tmQ6eBD1Ad1'
                'nSEjUxZCuzTkimXQ88wZLzIS9KHc8GhINiUu5rKWbyvWA13Ykc0w65Ot5MSw3cQc'
                'w1LEDJjTculyDcRQgiRfKH5376qTzukileeTrNebNq+wbhY1kEPAHojercB7d10E'
                '+QcbjJX1Tb1Zangom1qH9t/pepmV0Hn4EMzDs6DS2SWTffTddTY4dQzvksmLkP+J'
                'i8hkFIZwUkWpT9/k7MeklgtTiy0lR/Jj9CxAIQVxP8alLWbIqwCNRApleSmqtitt'
                'Z+NdsuNeTm3iUaPGYSw237tjLyVE6pr0EJqLv7VUClvJvBnH2qhQEtWYB9gvE1dS'
                'BioGu40pXVfjiLqhEKVVVEoHpI32oMkojhCGJs8Oow4bAxkzQFCtuWB1'
                '-----END ENCRYPTED PRIVATE KEY-----'
            ])
        if element['id'] == 'host':
            return 'https://foo.invalid'
        return 'fooo'
    elif element['type'] == 'boolean':
        return False
    raise Exception('No generator written for {} type'.format(element.get('type', 'unknown')))
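A few concrete calls make the fallbacks above easier to follow; these mirror the placeholder values that show up throughout the env.json fixtures:

assert generate_fake_var({'type': 'string', 'id': 'host'}) == 'https://foo.invalid'
assert generate_fake_var({'type': 'string', 'id': 'username'}) == 'fooo'
assert generate_fake_var({'type': 'boolean', 'id': 'verify_ssl'}) is False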


def credential_kind(source):
    """Given the inventory source kind, return the expected credential kind.
    """
    return source.replace('ec2', 'aws')


@pytest.fixture
def fake_credential_factory(source):
    ct = CredentialType.defaults[credential_kind(source)]()
    ct.save()

    inputs = {}
    var_specs = {}  # pivoted version of inputs
    for element in ct.inputs.get('fields'):
        var_specs[element['id']] = element
    for var in var_specs.keys():
        inputs[var] = generate_fake_var(var_specs[var])

    return Credential.objects.create(
        credential_type=ct,
        inputs=inputs
    )


def read_content(private_data_dir, raw_env, inventory_update):
    """Read the environmental data laid down by the task system,
    template out private and secret data so they will be readable and predictable,
    and return a dictionary `content` of file contents, keyed off the environment
    variable that references each file.
    """
    # Filter out environment variables which come from runtime environment
    env = {}
    exclude_keys = set(('PATH', 'INVENTORY_SOURCE_ID', 'INVENTORY_UPDATE_ID'))
    for key in dir(settings):
        if key.startswith('ANSIBLE_'):
            exclude_keys.add(key)
    for k, v in raw_env.items():
        if k in STANDARD_INVENTORY_UPDATE_ENV or k in exclude_keys:
            continue
        if k not in os.environ or v != os.environ[k]:
            env[k] = v
    inverse_env = {}
    for key, value in env.items():
        inverse_env[value] = key

    cache_file_regex = re.compile(r'/tmp/awx_{0}_[a-zA-Z0-9_]+/{1}_cache[a-zA-Z0-9_]+'.format(
        inventory_update.id, inventory_update.source)
    )
    private_key_regex = re.compile(r'-----BEGIN ENCRYPTED PRIVATE KEY-----.*-----END ENCRYPTED PRIVATE KEY-----')

    dir_contents = {}
    references = {}
    for filename in os.listdir(private_data_dir):
        if filename in ('args', 'project'):
            continue  # Ansible runner
        abs_file_path = os.path.join(private_data_dir, filename)
        if abs_file_path in inverse_env:
            env_key = inverse_env[abs_file_path]
            references[abs_file_path] = env_key
            env[env_key] = '{{ file_reference }}'
        try:
            with open(abs_file_path, 'r') as f:
                dir_contents[abs_file_path] = f.read()
            # Declare a reference to inventory plugin file if it exists
            if abs_file_path.endswith('.yml') and 'plugin: ' in dir_contents[abs_file_path]:
                references[abs_file_path] = filename  # plugin filenames are universal
        except IsADirectoryError:
            dir_contents[abs_file_path] = '<directory>'

    # Declare cross-file references, also use special keywords if it is the cache
    cache_referenced = False
    cache_present = False
    for abs_file_path, file_content in dir_contents.copy().items():
        if cache_file_regex.match(file_content):
            cache_referenced = True
        for target_path in dir_contents.keys():
            if target_path in file_content:
                if target_path in references:
                    raise AssertionError(
                        'File {} is referenced by env var or other file as well as file {}:\n{}\n{}'.format(
                            target_path, abs_file_path, json.dumps(env, indent=4), json.dumps(dir_contents, indent=4)))
                else:
                    if cache_file_regex.match(target_path):
                        cache_present = True
                        if os.path.isdir(target_path):
                            keyword = 'cache_dir'
                        else:
                            keyword = 'cache_file'
                        references[target_path] = keyword
                        new_file_content = cache_file_regex.sub('{{ ' + keyword + ' }}', file_content)
                    else:
                        references[target_path] = 'file_reference'
                        new_file_content = file_content.replace(target_path, '{{ file_reference }}')
                    dir_contents[abs_file_path] = new_file_content
    if cache_referenced and not cache_present:
        raise AssertionError(
            'A cache file was referenced but never created, files:\n{}'.format(
                json.dumps(dir_contents, indent=4)))

    content = {}
    for abs_file_path, file_content in dir_contents.items():
        if abs_file_path not in references:
            raise AssertionError(
                "File {} is not referenced. References and files:\n{}\n{}".format(
                    abs_file_path, json.dumps(references, indent=4), json.dumps(dir_contents, indent=4)))
        reference_key = references[abs_file_path]
        file_content = private_key_regex.sub('{{private_key}}', file_content)
        content[reference_key] = file_content

    return (env, content)
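Roughly, for an ec2 script run, the two return values take this shape (an illustrative sketch, not captured output):

env = {
    'AWS_ACCESS_KEY_ID': 'fooo',
    'AWS_SECRET_ACCESS_KEY': 'fooo',
    # file-valued vars are templated so reference data stays stable across runs
    'EC2_INI_PATH': '{{ file_reference }}',
}
content = {
    # keyed by the env var (or plugin filename) that referenced the file
    'EC2_INI_PATH': '[ec2]\nregions = us-east-2,ap-south-1\n',
}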


def create_reference_data(source_dir, env, content):
    if not os.path.exists(source_dir):
        os.mkdir(source_dir)
    if content:
        files_dir = os.path.join(source_dir, 'files')
        if not os.path.exists(files_dir):
            os.mkdir(files_dir)
        for env_name, content in content.items():
            with open(os.path.join(files_dir, env_name), 'w') as f:
                f.write(content)
    if env:
        with open(os.path.join(source_dir, 'env.json'), 'w') as f:
            f.write(json.dumps(env, indent=4))


@pytest.mark.django_db
@pytest.mark.parametrize('this_kind', CLOUD_PROVIDERS)
@pytest.mark.parametrize('script_or_plugin', ['scripts', 'plugins'])
def test_inventory_update_injected_content(this_kind, script_or_plugin, inventory):
    src_vars = dict(base_source_var='value_of_var')
    if this_kind in INI_TEST_VARS:
        src_vars.update(INI_TEST_VARS[this_kind])
    extra_kwargs = {}
    if this_kind in TEST_SOURCE_FIELDS:
        extra_kwargs.update(TEST_SOURCE_FIELDS[this_kind])
    inventory_source = InventorySource.objects.create(
        inventory=inventory,
        source=this_kind,
        source_vars=src_vars,
        **extra_kwargs
    )
    inventory_source.credentials.add(fake_credential_factory(this_kind))
    inventory_update = inventory_source.create_unified_job()
    task = RunInventoryUpdate()

    use_plugin = bool(script_or_plugin == 'plugins')
    if use_plugin and InventorySource.injectors[this_kind].plugin_name is None:
        pytest.skip('Use of inventory plugin is not enabled for this source')

    def substitute_run(envvars=None, **_kw):
        """This method replaces run_pexpect.
        Instead of running, it reads the private data directory contents
        and makes assertions that the contents are correct.
        If MAKE_INVENTORY_REFERENCE_FILES is set, it produces reference files instead.
        """
        private_data_dir = envvars.pop('AWX_PRIVATE_DATA_DIR')
        assert envvars.pop('ANSIBLE_INVENTORY_ENABLED') == ('auto' if use_plugin else 'script')
        set_files = bool(os.getenv("MAKE_INVENTORY_REFERENCE_FILES", 'false').lower()[0] not in ['f', '0'])
        env, content = read_content(private_data_dir, envvars, inventory_update)
        base_dir = os.path.join(DATA, script_or_plugin)
        if not os.path.exists(base_dir):
            os.mkdir(base_dir)
        source_dir = os.path.join(base_dir, this_kind)  # this_kind is a global
        if set_files:
            create_reference_data(source_dir, env, content)
            pytest.skip('You set MAKE_INVENTORY_REFERENCE_FILES, so this created files, unset to run actual test.')
        else:
            if not os.path.exists(source_dir):
                raise FileNotFoundError(
                    'Maybe you never made reference files? '
                    'MAKE_INVENTORY_REFERENCE_FILES=true py.test ...\noriginal: {}')
            files_dir = os.path.join(source_dir, 'files')
            try:
                expected_file_list = os.listdir(files_dir)
            except FileNotFoundError:
                expected_file_list = []
            assert set(expected_file_list) == set(content.keys()), (
                'Inventory update runtime environment does not have expected files'
            )
            for f_name in expected_file_list:
                with open(os.path.join(files_dir, f_name), 'r') as f:
                    ref_content = f.read()
                    assert ref_content == content[f_name]
            try:
                with open(os.path.join(source_dir, 'env.json'), 'r') as f:
                    ref_env_text = f.read()
                    ref_env = json.loads(ref_env_text)
            except FileNotFoundError:
                ref_env = {}
            assert ref_env == env
        Res = namedtuple('Result', ['status', 'rc'])
        return Res('successful', 0)

    mock_licenser = mock.Mock(return_value=mock.Mock(
        validate=mock.Mock(return_value={'license_type': 'open'})
    ))

    # Mock this so that it will not send events to the callback receiver
    # because doing so in pytest land creates large explosions
    with mock.patch('awx.main.queue.CallbackQueueDispatcher.dispatch', lambda self, obj: None):
        # Force the update to use the script or plugin injector, per parametrization
        with mock.patch('awx.main.models.inventory.PluginFileInjector.should_use_plugin', return_value=use_plugin):
            # Also do not send websocket status updates
            with mock.patch.object(UnifiedJob, 'websocket_emit_status', mock.Mock()):
                # The point of this test is that we replace run with assertions
                with mock.patch('awx.main.tasks.ansible_runner.interface.run', substitute_run):
                    # mocking the licenser is necessary for the tower source
                    with mock.patch('awx.main.models.inventory.get_licenser', mock_licenser):
                        # this sets up everything for a run and then yields control over to substitute_run
                        task.run(inventory_update.pk)
|
@ -32,6 +32,7 @@ from awx.main.models import (
    CustomInventoryScript,
    build_safe_env
)
from awx.main.models.credential import ManagedCredentialType

from awx.main import tasks
from awx.main.utils import encrypt_field, encrypt_value
@ -163,11 +164,12 @@ def test_openstack_client_config_generation(mocker, source, expected, private_data_dir):
    inputs['verify_ssl'] = source
    credential = Credential(pk=1, credential_type=credential_type, inputs=inputs)

    cred_method = mocker.Mock(return_value=credential)
    inventory_update = mocker.Mock(**{
        'source': 'openstack',
        'source_vars_dict': {},
        'get_cloud_credential': cred_method
        'get_cloud_credential': mocker.Mock(return_value=credential),
        'get_extra_credentials': lambda x: [],
        'ansible_virtualenv_path': '/venv/foo'
    })
    cloud_config = update.build_private_data(inventory_update, private_data_dir)
    cloud_credential = yaml.load(
@ -204,11 +206,12 @@ def test_openstack_client_config_generation_with_private_source_vars(mocker, source):
    }
    credential = Credential(pk=1, credential_type=credential_type, inputs=inputs)

    cred_method = mocker.Mock(return_value=credential)
    inventory_update = mocker.Mock(**{
        'source': 'openstack',
        'source_vars_dict': {'private': source},
        'get_cloud_credential': cred_method
        'get_cloud_credential': mocker.Mock(return_value=credential),
        'get_extra_credentials': lambda x: [],
        'ansible_virtualenv_path': '/venv/foo'
    })
    cloud_config = update.build_private_data(inventory_update, private_data_dir)
    cloud_credential = yaml.load(
@ -1759,6 +1762,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        task = tasks.RunInventoryUpdate()
        inventory_update.source = 'ec2'
        inventory_update.get_cloud_credential = mocker.Mock(return_value=None)
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])

        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
@ -1781,7 +1785,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        if with_credential:
            azure_rm = CredentialType.defaults['azure_rm']()

            def get_cred():
            def get_creds():
                cred = Credential(
                    pk=1,
                    credential_type=azure_rm,
@ -1792,10 +1796,11 @@ class TestInventoryUpdateCredentials(TestJobExecution):
                        'subscription': 'some-subscription',
                    }
                )
                return cred
            inventory_update.get_cloud_credential = get_cred
                return [cred]
            inventory_update.get_extra_credentials = get_creds
        else:
            inventory_update.get_cloud_credential = mocker.Mock(return_value=None)
            inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
        inventory_update.get_cloud_credential = mocker.Mock(return_value=None)

        env = task.build_env(inventory_update, private_data_dir, False)
        args = task.build_args(inventory_update, private_data_dir, {})
@ -1818,7 +1823,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        assert env['AZURE_TENANT'] == 'some-tenant'
        assert env['AZURE_SUBSCRIPTION_ID'] == 'some-subscription'

    def test_ec2_source(self, private_data_dir, inventory_update):
    def test_ec2_source(self, private_data_dir, inventory_update, mocker):
        task = tasks.RunInventoryUpdate()
        aws = CredentialType.defaults['aws']()
        inventory_update.source = 'ec2'
@ -1832,17 +1837,14 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            cred.inputs['password'] = encrypt_field(cred, 'password')
            return cred
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])

        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)

        safe_env = {}
        credentials = task.build_credentials_list(inventory_update)
        for credential in credentials:
            if credential:
                credential.credential_type.inject_credential(
                    credential, env, safe_env, [], private_data_dir
                )
        injector = InventorySource.injectors['ec2']('2.7')
        env = injector.get_script_env(inventory_update, private_data_dir, private_data_files)
        safe_env = build_safe_env(env)

        assert env['AWS_ACCESS_KEY_ID'] == 'bob'
        assert env['AWS_SECRET_ACCESS_KEY'] == 'secret'
@ -1854,7 +1856,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):

        assert safe_env['AWS_SECRET_ACCESS_KEY'] == tasks.HIDDEN_PASSWORD

    def test_vmware_source(self, inventory_update, private_data_dir):
    def test_vmware_source(self, inventory_update, private_data_dir, mocker):
        task = tasks.RunInventoryUpdate()
        vmware = CredentialType.defaults['vmware']()
        inventory_update.source = 'vmware'
@ -1868,6 +1870,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            cred.inputs['password'] = encrypt_field(cred, 'password')
            return cred
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])

        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
@ -1886,7 +1889,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        assert config.get('vmware', 'password') == 'secret'
        assert config.get('vmware', 'server') == 'https://example.org'

    def test_azure_rm_source_with_tenant(self, private_data_dir, inventory_update):
    def test_azure_rm_source_with_tenant(self, private_data_dir, inventory_update, mocker):
        task = tasks.RunInventoryUpdate()
        azure_rm = CredentialType.defaults['azure_rm']()
        inventory_update.source = 'azure_rm'
@ -1906,6 +1909,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            )
            return cred
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
        inventory_update.source_vars = {
            'include_powerstate': 'yes',
            'group_by_resource_group': 'no'
@ -1914,13 +1918,10 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)

        safe_env = {}
        credentials = task.build_credentials_list(inventory_update)
        for credential in credentials:
            if credential:
                credential.credential_type.inject_credential(
                    credential, env, safe_env, [], private_data_dir
                )

        injector = InventorySource.injectors['azure_rm']('2.7')
        env = injector.get_script_env(inventory_update, private_data_dir, private_data_files)
        safe_env = build_safe_env(env)

        assert env['AZURE_CLIENT_ID'] == 'some-client'
        assert env['AZURE_SECRET'] == 'some-secret'
@ -1939,7 +1940,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):

        assert safe_env['AZURE_SECRET'] == tasks.HIDDEN_PASSWORD

    def test_azure_rm_source_with_password(self, private_data_dir, inventory_update):
    def test_azure_rm_source_with_password(self, private_data_dir, inventory_update, mocker):
        task = tasks.RunInventoryUpdate()
        azure_rm = CredentialType.defaults['azure_rm']()
        inventory_update.source = 'azure_rm'
@ -1958,6 +1959,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            )
            return cred
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
        inventory_update.source_vars = {
            'include_powerstate': 'yes',
            'group_by_resource_group': 'no',
@ -1967,13 +1969,10 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)

        safe_env = {}
        credentials = task.build_credentials_list(inventory_update)
        for credential in credentials:
            if credential:
                credential.credential_type.inject_credential(
                    credential, env, safe_env, [], private_data_dir
                )

        injector = InventorySource.injectors['azure_rm']('2.7')
        env = injector.get_script_env(inventory_update, private_data_dir, private_data_files)
        safe_env = build_safe_env(env)

        assert env['AZURE_SUBSCRIPTION_ID'] == 'some-subscription'
        assert env['AZURE_AD_USER'] == 'bob'
@ -1990,7 +1989,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
        assert 'locations' not in config.items('azure')
        assert safe_env['AZURE_PASSWORD'] == tasks.HIDDEN_PASSWORD

    def test_gce_source(self, inventory_update, private_data_dir):
    def test_gce_source(self, inventory_update, private_data_dir, mocker):
        task = tasks.RunInventoryUpdate()
        gce = CredentialType.defaults['gce']()
        inventory_update.source = 'gce'
@ -2011,6 +2010,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            )
            return cred
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])

        def run(expected_gce_zone):
            private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
@ -2035,13 +2035,14 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            assert 'cache' in config.sections()
            assert config.getint('cache', 'cache_max_age') == 0

        # Change the initial version of the inventory plugin to force use of script
        with mock.patch('awx.main.models.inventory.gce.initial_version', None):
            run('')

        run('')
        inventory_update.source_regions = 'us-east-4'
        run('us-east-4')

        inventory_update.source_regions = 'us-east-4'
        run('us-east-4')

    def test_openstack_source(self, inventory_update, private_data_dir):
    def test_openstack_source(self, inventory_update, private_data_dir, mocker):
        task = tasks.RunInventoryUpdate()
        openstack = CredentialType.defaults['openstack']()
        inventory_update.source = 'openstack'
@ -2063,6 +2064,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            )
            return cred
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])

        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
@ -2079,7 +2081,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            ''
        ]) in shade_config

    def test_satellite6_source(self, inventory_update, private_data_dir):
    def test_satellite6_source(self, inventory_update, private_data_dir, mocker):
        task = tasks.RunInventoryUpdate()
        satellite6 = CredentialType.defaults['satellite6']()
        inventory_update.source = 'satellite6'
@ -2099,6 +2101,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
            )
            return cred
        inventory_update.get_cloud_credential = get_cred
        inventory_update.get_extra_credentials = mocker.Mock(return_value=[])

        inventory_update.source_vars = '{"satellite6_group_patterns": "[a,b,c]", "satellite6_group_prefix": "hey_", "satellite6_want_hostcollections": True}'
|
||||
|
||||
@ -2114,7 +2117,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
assert config.get('ansible', 'group_prefix') == 'hey_'
|
||||
assert config.get('ansible', 'want_hostcollections') == 'True'
|
||||
|
||||
def test_cloudforms_source(self, inventory_update, private_data_dir):
|
||||
def test_cloudforms_source(self, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
cloudforms = CredentialType.defaults['cloudforms']()
|
||||
inventory_update.source = 'cloudforms'
|
||||
@ -2134,6 +2137,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
)
|
||||
return cred
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
inventory_update.source_vars = '{"prefer_ipv4": True}'
|
||||
|
||||
@ -2153,7 +2157,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
assert os.path.isdir(cache_path)
|
||||
|
||||
@pytest.mark.parametrize('verify', [True, False])
|
||||
def test_tower_source(self, verify, inventory_update, private_data_dir):
|
||||
def test_tower_source(self, verify, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
tower = CredentialType.defaults['tower']()
|
||||
inventory_update.source = 'tower'
|
||||
@ -2170,16 +2174,13 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
cred.inputs['password'] = encrypt_field(cred, 'password')
|
||||
return cred
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
env = task.build_env(inventory_update, private_data_dir, False)
|
||||
|
||||
safe_env = {}
|
||||
credentials = task.build_credentials_list(inventory_update)
|
||||
for credential in credentials:
|
||||
if credential:
|
||||
credential.credential_type.inject_credential(
|
||||
credential, env, safe_env, [], private_data_dir
|
||||
)
|
||||
injector = InventorySource.injectors['tower']('2.7')
|
||||
env = injector.get_script_env(inventory_update, private_data_dir, {})
|
||||
safe_env = build_safe_env(env)
|
||||
|
||||
assert env['TOWER_HOST'] == 'https://tower.example.org'
|
||||
assert env['TOWER_USERNAME'] == 'bob'
|
||||
@ -2191,7 +2192,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
assert env['TOWER_VERIFY_SSL'] == 'False'
|
||||
assert safe_env['TOWER_PASSWORD'] == tasks.HIDDEN_PASSWORD
|
||||
|
||||
def test_tower_source_ssl_verify_empty(self, inventory_update, private_data_dir):
|
||||
def test_tower_source_ssl_verify_empty(self, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
tower = CredentialType.defaults['tower']()
|
||||
inventory_update.source = 'tower'
|
||||
@ -2207,6 +2208,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
cred.inputs['password'] = encrypt_field(cred, 'password')
|
||||
return cred
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
env = task.build_env(inventory_update, private_data_dir, False)
|
||||
safe_env = {}
|
||||
@ -2219,7 +2221,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
|
||||
assert env['TOWER_VERIFY_SSL'] == 'False'
|
||||
|
||||
def test_awx_task_env(self, inventory_update, private_data_dir, settings):
|
||||
def test_awx_task_env(self, inventory_update, private_data_dir, settings, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
gce = CredentialType.defaults['gce']()
|
||||
inventory_update.source = 'gce'
|
||||
@ -2235,6 +2237,7 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
)
|
||||
return cred
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
settings.AWX_TASK_ENV = {'FOO': 'BAR'}
|
||||
|
||||
env = task.build_env(inventory_update, private_data_dir, False)
|
||||
@ -2299,3 +2302,27 @@ def test_aquire_lock_acquisition_fail_logged(fcntl_flock, logging_getLogger, os_
|
||||
ProjectUpdate.acquire_lock(instance)
|
||||
os_close.assert_called_with(3)
|
||||
assert logger.err.called_with("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(3, 'this_file_does_not_exist', 'dummy message'))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('injector_cls', [
|
||||
cls for cls in ManagedCredentialType.registry.values() if cls.injectors
|
||||
])
|
||||
def test_managed_injector_redaction(injector_cls):
|
||||
"""See awx.main.models.inventory.PluginFileInjector._get_shared_env
|
||||
The ordering within awx.main.tasks.BaseTask and contract with build_env
|
||||
requires that all managed_by_tower injectors are safely redacted by the
|
||||
static method build_safe_env without having to employ the safe namespace
|
||||
as in inject_credential
|
||||
|
||||
This test enforces that condition uniformly to prevent password leakages
|
||||
"""
|
||||
secrets = set()
|
||||
for element in injector_cls.inputs.get('fields', []):
|
||||
if element.get('secret', False):
|
||||
secrets.add(element['id'])
|
||||
env = {}
|
||||
for env_name, template in injector_cls.injectors.get('env', {}).items():
|
||||
for secret_field_name in secrets:
|
||||
if secret_field_name in template:
|
||||
env[env_name] = 'very_secret_value'
|
||||
assert 'very_secret_value' not in str(build_safe_env(env))
|
||||
|
@ -153,13 +153,13 @@ def memoize_delete(function_name):
return cache.delete(function_name)


@memoize()
def get_ansible_version():
def _get_ansible_version(ansible_path):
'''
Return Ansible version installed.
Ansible path needs to be provided to account for custom virtual environments
'''
try:
proc = subprocess.Popen(['ansible', '--version'],
proc = subprocess.Popen([ansible_path, '--version'],
stdout=subprocess.PIPE)
result = smart_str(proc.communicate()[0])
return result.split('\n')[0].replace('ansible', '').strip()
@ -167,6 +167,11 @@ def get_ansible_version():
return 'unknown'


@memoize()
def get_ansible_version():
return _get_ansible_version('ansible')


@memoize()
def get_ssh_version():
'''

@ -57,15 +57,13 @@ import os
import sys
import time
from distutils.version import StrictVersion
from io import StringIO

try:
import json
except:
import simplejson as json
import json

import os_client_config
import shade
import shade.inventory
import openstack as sdk
from openstack.cloud import inventory as sdk_inventory
from openstack.config import loader as cloud_config

CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']

@ -149,7 +147,7 @@ def get_host_groups_from_cloud(inventory):
if hasattr(inventory, 'extra_config'):
use_hostnames = inventory.extra_config['use_hostnames']
list_args['expand'] = inventory.extra_config['expand_hostvars']
if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
if StrictVersion(sdk.version.__version__) >= StrictVersion("0.13.0"):
list_args['fail_on_cloud_config'] = \
inventory.extra_config['fail_on_errors']
else:
@ -192,8 +190,13 @@ def is_cache_stale(cache_file, cache_expiration_time, refresh=False):


def get_cache_settings(cloud=None):
config = os_client_config.config.OpenStackConfig(
config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
if cloud:
config = cloud_config.OpenStackConfig(
config_files=config_files).get_one(cloud=cloud)
else:
config = cloud_config.OpenStackConfig(
config_files=config_files).get_all()[0]
# For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path()
@ -231,15 +234,17 @@ def parse_args():
def main():
args = parse_args()
try:
config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
shade.simple_logging(debug=args.debug)
# openstacksdk library may write to stdout, so redirect this
sys.stdout = StringIO()
config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
sdk.enable_logging(debug=args.debug)
inventory_args = dict(
refresh=args.refresh,
config_files=config_files,
private=args.private,
cloud=args.cloud,
)
if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(
config_key='ansible',
config_defaults={
@ -249,14 +254,15 @@ def main():
}
))

inventory = shade.inventory.OpenStackInventory(**inventory_args)
inventory = sdk_inventory.OpenStackInventory(**inventory_args)

sys.stdout = sys.__stdout__
if args.list:
output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)
except shade.OpenStackCloudException as e:
except sdk.exceptions.OpenStackCloudException as e:
sys.stderr.write('%s\n' % e.message)
sys.exit(1)
sys.exit(0)

docs/inventory_plugins.md (new file, 153 lines)
@ -0,0 +1,153 @@
# Transition to Ansible Inventory Plugins

Inventory updates change from using scripts, which are vendored as executable
Python files, to using dynamically generated YAML files that conform to the
specification of the `auto` inventory plugin and are then parsed by their
respective inventory plugin.
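
As a rough illustration, a generated source file might look like the sketch
below; the plugin name and options are placeholders rather than actual AWX
output, and PyYAML is assumed to be available.

```python
# A sketch of the kind of file AWX generates; the `plugin:` key is what the
# `auto` plugin dispatches on. The values are illustrative only.
import yaml

generated = {
    'plugin': 'openstack',      # tells the `auto` plugin which plugin to load
    'expand_hostvars': True,
    'fail_on_errors': True,
}
with open('openstack.yml', 'w') as f:
    yaml.safe_dump(generated, f, default_flow_style=False)
```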

The major organizational change is that the inventory plugins are
part of the Ansible core distribution, whereas the same logic used to
be a part of the AWX source.

## Prior Background for Transition

AWX used to maintain logic that parsed `.ini` inventory file contents,
in addition to interpreting the JSON output of scripts, re-calling with
the `--host` option in the case the `_meta.hostvars` key was not provided.

### Switch to Ansible Inventory

The CLI entry point `ansible-inventory` was introduced in Ansible 2.4.
In Tower 3.2, inventory imports began running this command
as an intermediary between the inventory source and
the import logic that saves content to the database. Using `ansible-inventory`
eliminates the need to maintain source-specific parsing logic,
relying on Ansible's code instead. It also allows us to
count on a consistent data structure output by `ansible-inventory`.
There are many valid structures that a script can provide, but the output
from `ansible-inventory` will always be the same,
so the AWX logic that parses the content is simplified.
This is why even scripts must be run through the `ansible-inventory` CLI.
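
A minimal sketch of that intermediary step (simplified relative to the real
`AnsibleInventoryLoader`, which also handles interpreter and path discovery):

```python
# Run ansible-inventory against a source and parse its JSON output.
import json
import subprocess

def load_inventory(source_path):
    # --list emits the complete structure (including _meta.hostvars) as JSON;
    # --export keeps variables attached to the group that defined them
    proc = subprocess.run(
        ['ansible-inventory', '-i', source_path, '--list', '--export'],
        stdout=subprocess.PIPE, check=True)
    return json.loads(proc.stdout)
```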

Along with this switchover, a backported version of
`ansible-inventory` was provided that supported Ansible versions 2.2 and 2.3.

### Removal of Backport

In AWX 3.0.0 (and Tower 3.5), the backport of `ansible-inventory`
was removed, and support for using custom virtual environments was added.
This set the minimum version of Ansible necessary to run _any_
inventory update to 2.4.

## Inventory Plugin Versioning

Beginning in Ansible 2.5, inventory sources in Ansible started migrating
away from "contrib" scripts (meaning they lived in the contrib folder)
to the inventory plugin model.

In AWX 4.0.0 (and Tower 3.5) inventory source types start to switch over
to plugins, provided that sufficient compatibility is in place for
the version of Ansible present in the local virtualenv.

To see at what version the plugin transition happens, see
`awx/main/models/inventory.py` and look for the source name as a
subclass of `PluginFileInjector`; there should be an `initial_version`,
which is the first version that testing deemed to have sufficient parity
in the content its inventory plugin returns. For example, `openstack` will
begin using the inventory plugin in Ansible version 2.8.
If you run an openstack inventory update with Ansible
2.7.x or lower, it will use the script.
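
A sketch of that version gate (names follow the description above, not the
exact AWX source):

```python
from distutils.version import LooseVersion

class openstack:  # hypothetical stand-in for the PluginFileInjector subclass
    initial_version = '2.8'  # first Ansible version with sufficient parity

def should_use_plugin(injector, ansible_version):
    # Scripts remain in use until the venv's Ansible reaches initial_version
    if injector.initial_version is None:
        return False
    return LooseVersion(ansible_version) >= LooseVersion(injector.initial_version)

assert should_use_plugin(openstack, '2.8.1')
assert not should_use_plugin(openstack, '2.7.5')
```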

### Sunsetting the scripts

Eventually, it is intended that all source types will have moved to
plugins. For any given source, once the `initial_version` for plugin use
is higher than the lowest supported Ansible version, the script can be
removed, along with the logic for script credential injection.

For example, after AWX no longer supports Ansible 2.7, the script
`awx/plugins/openstack_inventory.py` will be removed.

## Changes to Expect in Imports

An effort was made to keep imports working in the exact same way after
the switchover. However, the inventory plugins are a fundamental rewrite,
and many elements of default behavior have changed, including many that
are backward incompatible. Because of this, what you
get via an inventory import will be a superset of what you get from the script
but will not match the default behavior you would get from the inventory
plugin on the CLI.

Because inventory plugins add additional variables, if you downgrade Ansible, you should
turn on `overwrite` and `overwrite_vars` to get rid of stale
variables (and potentially groups) no longer returned by the import.
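
For instance, both flags can be toggled on an inventory source through the
AWX API; the field names below follow the existing inventory source options,
but the host and ID are made up for illustration.

```python
# Enable overwrite behavior on a hypothetical inventory source (ID 42).
import requests

requests.patch(
    'https://awx.example.org/api/v2/inventory_sources/42/',
    json={'overwrite': True, 'overwrite_vars': True},
    auth=('admin', 'password'),
)
```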

### Changes for Compatibility

Programmatically generated examples of the inventory file syntax used in
updates (with dummy data) can be found in `awx/main/tests/data/inventory/scripts`;
these demonstrate the inventory file syntax used to restore the old behavior
of the inventory scripts.

#### hostvar keys and values

More hostvars will appear if the inventory plugins are used.
To maintain backward compatibility,
the old names are added back where they have the same meaning as a
variable returned by the plugin. New names are not removed.
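
A hypothetical illustration (not the AWX source) of that layering: the new
names stay, and old names are added wherever the meaning is identical. The
name pairs below are assumed for the example.

```python
# Map old script-era hostvar names to the new plugin-era names.
OLD_NAME_MAP = {'gce_status': 'status', 'gce_zone': 'zone'}

def add_compat_hostvars(hostvars):
    for old_name, new_name in OLD_NAME_MAP.items():
        if new_name in hostvars and old_name not in hostvars:
            hostvars[old_name] = hostvars[new_name]
    return hostvars

print(add_compat_hostvars({'status': 'RUNNING', 'zone': 'us-east1-b'}))
```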

A small number of hostvars will be lost because of general deprecation needs.

#### Host names

In many cases, the host names will change. In all cases, accurate host
tracking will still be maintained via the host `instance_id`.
(after: https://github.com/ansible/awx/pull/3362)

## How do I write my own Inventory File?

If you do not want any of this compatibility-related functionality, then
you can add an SCM inventory source that points to your own file.
You can also apply a credential of a `managed_by_tower` type to that inventory
source that matches the credential you are using, as long as that is
not `gce` or `openstack`.

All other sources provide _secrets_ via environment variables, so this
can be re-used without any problems for SCM-based inventory, and your
inventory file can be used securely to specify non-sensitive configuration
details, such as the `keyed_groups` to provide or the hostvars to construct.
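
For instance, a user-authored file for the `aws_ec2` plugin might look like
the sketch below (the `keyed_groups` and `compose` options are standard
Ansible plugin features; the specific values are illustrative).

```python
# Write an example aws_ec2 plugin inventory file for an SCM-based source.
EXAMPLE = '''\
plugin: aws_ec2
keyed_groups:
  - prefix: tag
    key: tags
compose:
  ansible_host: public_ip_address
'''

# The aws_ec2 plugin only accepts files whose names end in aws_ec2.yml/.yaml
with open('my_inventory.aws_ec2.yml', 'w') as f:
    f.write(EXAMPLE)
```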

## Notes on Technical Implementation of Injectors

For an inventory source whose `source` field is one of the built-in
sources, a credential of the corresponding
credential type is required in most cases (the exception being ec2 IAM roles).
This privileged credential is obtained by the method `get_cloud_credential`.

The `inputs` for this credential constitute one source of data for running
inventory updates. The following fields from the
`InventoryUpdate` model are also data sources:

- `source_vars`
- `source_regions`
- `instance_filters`
- `group_by`

The way these data are applied to the environment (including files and
environment vars) is highly dependent on the specific source.

With plugins, the inventory file may reference files that contain secrets
from the credential. With scripts, typically an environment variable
will reference a filename that contains a ConfigParser-format file with
parameters for the update, possibly including fields from the credential.

Caution: Please do not put secrets from the credential into the
inventory file for the plugin. Right now there appears to be no need to do
this, and by using environment variables to specify secrets, this keeps
open the possibility of showing the inventory file contents to the user
as a later enhancement.

Logic for setting up inventory updates, for both plugins and scripts, lives
in the inventory injector class specific to the source type.

Any credentials which are not source-specific will use the generic
injection logic which is also used in playbook runs.
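
A rough sketch of that arrangement (assumed names for illustration, not the
exact AWX classes):

```python
# One injector class per source type owns both plugin-era and script-era setup.
class ExampleInjector:
    initial_version = '2.8'  # first Ansible version to use the plugin

    def inventory_as_dict(self, inventory_update):
        # plugin path: contents of the `auto`-plugin YAML file (no secrets)
        return {'plugin': 'example', 'keyed_groups': []}

    def get_script_env(self, inventory_update, private_data_dir):
        # script path: point an env var at a ConfigParser-style file
        return {'EXAMPLE_INI_PATH': private_data_dir + '/example.ini'}
```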

docs/licenses/cachetools.txt (new file, 20 lines)
@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014-2019 Thomas Kemmer

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
docs/licenses/futures.txt (new file, 48 lines)
@ -0,0 +1,48 @@
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------

1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.

2. Subject to the terms and conditions of this License Agreement, PSF
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python
alone or in any derivative version, provided, however, that PSF's
License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights
Reserved" are retained in Python alone or in any derivative version
prepared by Licensee.

3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.

4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.

8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
@ -1,4 +1,3 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@ -173,3 +172,30 @@
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
docs/licenses/rsa.txt (new file, 13 lines)
@ -0,0 +1,13 @@
Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -1,175 +0,0 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
@ -30,6 +30,8 @@ azure-graphrbac==0.40.0
# AWS
boto==2.47.0 # last which does not break ec2 scripts
boto3==1.6.2
google-auth==1.6.2 # needed for gce inventory imports
jinja2==2.10 # required for native jinja2 types for inventory compat mode
# netconf for network modules
ncclient==0.6.3
# netaddr filter
@ -49,4 +51,4 @@ pywinrm[kerberos]==0.3.0
requests
requests-credssp==0.1.0 # For windows authentication awx/issues/1144
# OpenStack
shade==1.27.0
openstacksdk==0.23.0

@ -38,6 +38,7 @@ bcrypt==3.1.4 # via paramiko
boto3==1.6.2
boto==2.47.0
botocore==1.9.3 # via boto3, s3transfer
cachetools==3.0.0 # via google-auth
certifi==2018.1.18 # via msrest, requests
cffi==1.11.5 # via bcrypt, cryptography, pynacl
chardet==3.0.4 # via requests
@ -50,16 +51,19 @@ docutils==0.14 # via botocore
dogpile.cache==0.6.5 # via openstacksdk
entrypoints==0.2.3 # via keyring
enum34==1.1.6; python_version < '3' # via cryptography, knack, msrest, ovirt-engine-sdk-python
futures==3.2.0; python_version < '3' # via openstacksdk, s3transfer
google-auth==1.6.2
humanfriendly==4.8 # via azure-cli-core
idna==2.6 # via cryptography, requests
ipaddress==1.0.19 # via cryptography, openstacksdk
iso8601==0.1.12 # via keystoneauth1, openstacksdk
isodate==0.6.0 # via msrest
jinja2==2.10
jmespath==0.9.3 # via azure-cli-core, boto3, botocore, knack, openstacksdk
jsonpatch==1.21 # via openstacksdk
jsonpointer==2.0 # via jsonpatch
keyring==15.1.0 # via msrestazure
keystoneauth1==3.4.0 # via openstacksdk, os-client-config
keystoneauth1==3.11.2 # via openstacksdk, os-client-config
knack==0.3.3 # via azure-cli-core
lxml==4.1.1 # via ncclient, pyvmomi
monotonic==1.4 # via humanfriendly
@ -71,16 +75,16 @@ netaddr==0.7.19
netifaces==0.10.6 # via openstacksdk
ntlm-auth==1.0.6 # via requests-credssp, requests-ntlm
oauthlib==2.0.6 # via requests-oauthlib
openstacksdk==0.12.0 # via shade
os-client-config==1.29.0 # via shade
openstacksdk==0.23.0
os-service-types==1.2.0 # via openstacksdk
ovirt-engine-sdk-python==4.2.4
packaging==17.1
paramiko==2.4.0 # via azure-cli-core, ncclient
pbr==3.1.1 # via keystoneauth1, openstacksdk, os-service-types, shade, stevedore
pbr==3.1.1 # via keystoneauth1, openstacksdk, os-service-types, stevedore
pexpect==4.6.0
psutil==5.4.3
ptyprocess==0.5.2 # via pexpect
pyasn1-modules==0.2.3 # via google-auth
pyasn1==0.4.2 # via paramiko
pycparser==2.18 # via cffi
pycurl==7.43.0.1 # via ovirt-engine-sdk-python
@ -100,11 +104,12 @@ requests-ntlm==1.1.0 # via pywinrm
requests-oauthlib==0.8.0 # via msrest
requests==2.20.0
requestsexceptions==1.4.0 # via openstacksdk, os-client-config
rsa==4.0 # via google-auth
s3transfer==0.1.13 # via boto3
secretstorage==2.3.1 # via keyring
selectors2==2.0.1 # via ncclient
shade==1.27.0
six==1.11.0 # via azure-cli-core, bcrypt, cryptography, isodate, keystoneauth1, knack, munch, ncclient, ntlm-auth, openstacksdk, ovirt-engine-sdk-python, packaging, pynacl, pyopenssl, python-dateutil, pyvmomi, pywinrm, stevedore
six==1.11.0 # via azure-cli-core, bcrypt, cryptography, google-auth, isodate, keystoneauth1, knack, munch, ncclient, ntlm-auth, openstacksdk, ovirt-engine-sdk-python, packaging, pynacl, pyopenssl, python-dateutil, pyvmomi, pywinrm, stevedore
stevedore==1.28.0 # via keystoneauth1
tabulate==0.7.7 # via azure-cli-core, knack
urllib3==1.24 # via requests