Merge pull request #6911 from AlanCoding/rm_all_scripts

Remove vendored inventory scripts

Reviewed-by: https://github.com/apps/softwarefactory-project-zuul
Commit: 1321d298ee
@@ -10,8 +10,7 @@ __all__ = [
    'ANSI_SGR_PATTERN', 'CAN_CANCEL', 'ACTIVE_STATES', 'STANDARD_INVENTORY_UPDATE_ENV'
]


-CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'cloudforms', 'tower')
+CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower')
SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + ('custom', 'scm',)
PRIVILEGE_ESCALATION_METHODS = [
    ('sudo', _('Sudo')), ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')),
awx/main/migrations/0117_v400_remove_cloudforms_inventory.py (new file, 29 lines)
@@ -0,0 +1,29 @@
+# Generated by Django 2.2.11 on 2020-05-01 13:25
+
+from django.db import migrations, models
+from awx.main.migrations._inventory_source import create_scm_script_substitute
+
+
+def convert_cloudforms_to_scm(apps, schema_editor):
+    create_scm_script_substitute(apps, 'cloudforms')
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('main', '0116_v400_remove_hipchat_notifications'),
+    ]
+
+    operations = [
+        migrations.RunPython(convert_cloudforms_to_scm),
+        migrations.AlterField(
+            model_name='inventorysource',
+            name='source',
+            field=models.CharField(choices=[('file', 'File, Directory or Script'), ('scm', 'Sourced from a Project'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('openstack', 'OpenStack'), ('rhv', 'Red Hat Virtualization'), ('tower', 'Ansible Tower'), ('custom', 'Custom Script')], default=None, max_length=32),
+        ),
+        migrations.AlterField(
+            model_name='inventoryupdate',
+            name='source',
+            field=models.CharField(choices=[('file', 'File, Directory or Script'), ('scm', 'Sourced from a Project'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('openstack', 'OpenStack'), ('rhv', 'Red Hat Virtualization'), ('tower', 'Ansible Tower'), ('custom', 'Custom Script')], default=None, max_length=32),
+        ),
+    ]
@@ -1,6 +1,9 @@
import logging

+from uuid import uuid4
+
from django.utils.encoding import smart_text
+from django.utils.timezone import now

from awx.main.utils.common import parse_yaml_or_json

@@ -87,3 +90,44 @@ def back_out_new_instance_id(apps, source, new_id):
        modified_ct, source
    ))
+
+
+def create_scm_script_substitute(apps, source):
+    """Only applies for cloudforms in practice, but written generally.
+    Given a source type, this will replace all inventory sources of that type
+    with SCM inventory sources that source the script from Ansible core
+    """
+    # the revision in the Ansible 2.9 stable branch this project will start out as
+    # it can still be updated manually later (but staying within 2.9 branch), if desired
+    ansible_rev = '6f83b9aff42331e15c55a171de0a8b001208c18c'
+    InventorySource = apps.get_model('main', 'InventorySource')
+    ContentType = apps.get_model('contenttypes', 'ContentType')
+    Project = apps.get_model('main', 'Project')
+    if not InventorySource.objects.filter(source=source).exists():
+        logger.debug('No sources of type {} to migrate'.format(source))
+        return
+    proj_name = 'Replacement project for {} type sources - {}'.format(source, uuid4())
+    right_now = now()
+    project = Project.objects.create(
+        name=proj_name,
+        created=right_now,
+        modified=right_now,
+        description='Created by migration',
+        polymorphic_ctype=ContentType.objects.get(model='project'),
+        # project-specific fields
+        scm_type='git',
+        scm_url='https://github.com/ansible/ansible.git',
+        scm_branch='stable-2.9',
+        scm_revision=ansible_rev
+    )
+    ct = 0
+    for inv_src in InventorySource.objects.filter(source=source).iterator():
+        inv_src.source = 'scm'
+        inv_src.source_project = project
+        inv_src.source_path = 'contrib/inventory/{}.py'.format(source)
+        inv_src.scm_last_revision = ansible_rev
+        inv_src.save(update_fields=['source', 'source_project', 'source_path', 'scm_last_revision'])
+        logger.debug('Changed inventory source {} to scm type'.format(inv_src.pk))
+        ct += 1
+    if ct:
+        logger.info('Changed total of {} inventory sources from {} type to scm'.format(ct, source))
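For readers skimming the migration, here is a rough sketch of what create_scm_script_substitute leaves behind once it has run, written in the spirit of the functional test added later in this pull request. It assumes a deployment that had at least one 'cloudforms' source and is meant to be pasted into an AWX Django shell; the loop itself is illustrative, not part of the change.

# Illustrative only: inspect the converted sources from `awx-manage shell_plus`
# (or any Django shell inside an AWX environment).
from awx.main.models import InventorySource

for src in InventorySource.objects.filter(source_path='contrib/inventory/cloudforms.py'):
    # The migration flips the source type to 'scm' and pins it to the replacement
    # project, which tracks the stable-2.9 branch of ansible/ansible.
    assert src.source == 'scm'
    print(src.pk, src.source_project.name, src.source_project.scm_branch)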
@@ -15,6 +15,7 @@ from crum import get_current_user

# AWX
from awx.main.utils import encrypt_field, parse_yaml_or_json
+from awx.main.constants import CLOUD_PROVIDERS

__all__ = ['prevent_search', 'VarsDictProperty', 'BaseModel', 'CreatedModifiedModel',
           'PasswordFieldsModel', 'PrimordialModel', 'CommonModel',

@@ -50,7 +51,7 @@ PROJECT_UPDATE_JOB_TYPE_CHOICES = [
    (PERM_INVENTORY_CHECK, _('Check')),
]

-CLOUD_INVENTORY_SOURCES = ['ec2', 'vmware', 'gce', 'azure_rm', 'openstack', 'rhv', 'custom', 'satellite6', 'cloudforms', 'scm', 'tower',]
+CLOUD_INVENTORY_SOURCES = list(CLOUD_PROVIDERS) + ['scm', 'custom']

VERBOSITY_CHOICES = [
    (0, '0 (Normal)'),
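For reference, combining the new CLOUD_PROVIDERS tuple from the constants hunk above with the new derived list works out as follows. This is an illustrative expansion, not code from the diff; the point is simply that 'cloudforms' is gone from both constants without the two lists drifting apart again.

# Equivalent expansion of the two new definitions shown above
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower')
CLOUD_INVENTORY_SOURCES = list(CLOUD_PROVIDERS) + ['scm', 'custom']
assert CLOUD_INVENTORY_SOURCES == [
    'azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'tower', 'scm', 'custom'
]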
@@ -11,10 +11,6 @@ import copy
import os.path
from urllib.parse import urljoin
import yaml
-import configparser
-import tempfile
-from io import StringIO
-from distutils.version import LooseVersion as Version

# Django
from django.conf import settings

@@ -60,7 +56,7 @@ from awx.main.models.notifications import (
    JobNotificationMixin,
)
from awx.main.models.credential.injectors import _openstack_data
-from awx.main.utils import _inventory_updates, region_sorting, get_licenser
+from awx.main.utils import _inventory_updates, region_sorting
from awx.main.utils.safe_yaml import sanitize_jinja
@@ -829,7 +825,6 @@ class InventorySourceOptions(BaseModel):
        ('azure_rm', _('Microsoft Azure Resource Manager')),
        ('vmware', _('VMware vCenter')),
        ('satellite6', _('Red Hat Satellite 6')),
-        ('cloudforms', _('Red Hat CloudForms')),
        ('openstack', _('OpenStack')),
        ('rhv', _('Red Hat Virtualization')),
        ('tower', _('Ansible Tower')),

@@ -1069,11 +1064,6 @@ class InventorySourceOptions(BaseModel):
        """Red Hat Satellite 6 region choices (not implemented)"""
        return [('all', 'All')]

-    @classmethod
-    def get_cloudforms_region_choices(self):
-        """Red Hat CloudForms region choices (not implemented)"""
-        return [('all', 'All')]
-
    @classmethod
    def get_rhv_region_choices(self):
        """No region supprt"""
@@ -1602,19 +1592,12 @@ class CustomInventoryScript(CommonModelNameNotUnique, ResourceMixin):
        return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)


# TODO: move to awx/main/models/inventory/injectors.py
class PluginFileInjector(object):
-    # if plugin_name is not given, no inventory plugin functionality exists
    plugin_name = None  # Ansible core name used to reference plugin
-    # if initial_version is None, but we have plugin name, injection logic exists,
-    # but it is vaporware, meaning we do not use it for some reason in Ansible core
-    initial_version = None  # at what version do we switch to the plugin
-    ini_env_reference = None  # env var name that points to old ini config file
    # base injector should be one of None, "managed", or "template"
    # this dictates which logic to borrow from playbook injectors
    base_injector = None
-    # every source should have collection, but these are set here
-    # so that a source without a collection will have null values
+    # every source should have collection, these are for the collection name
    namespace = None
    collection = None
    collection_migration = '2.9'  # Starting with this version, we use collections

@@ -1630,12 +1613,6 @@ class PluginFileInjector(object):
        """
        return '{0}.yml'.format(self.plugin_name)

-    @property
-    def script_name(self):
-        """Name of the script located in awx/plugins/inventory
-        """
-        return '{0}.py'.format(self.__class__.__name__)
-
    def inventory_as_dict(self, inventory_update, private_data_dir):
        """Default implementation of inventory plugin file contents.
        There are some valid cases when all parameters can be obtained from

@@ -1644,10 +1621,7 @@ class PluginFileInjector(object):
        """
        if self.plugin_name is None:
            raise NotImplementedError('At minimum the plugin name is needed for inventory plugin use.')
-        if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
-            proper_name = f'{self.namespace}.{self.collection}.{self.plugin_name}'
-        else:
-            proper_name = self.plugin_name
+        proper_name = f'{self.namespace}.{self.collection}.{self.plugin_name}'
        return {'plugin': proper_name}

    def inventory_contents(self, inventory_update, private_data_dir):
@@ -1659,17 +1633,8 @@ class PluginFileInjector(object):
            width=1000
        )

-    def should_use_plugin(self):
-        return bool(
-            self.plugin_name and self.initial_version and
-            Version(self.ansible_version) >= Version(self.initial_version)
-        )
-
    def build_env(self, inventory_update, env, private_data_dir, private_data_files):
-        if self.should_use_plugin():
-            injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
-        else:
-            injector_env = self.get_script_env(inventory_update, private_data_dir, private_data_files)
+        injector_env = self.get_plugin_env(inventory_update, private_data_dir, private_data_files)
        env.update(injector_env)
        # Preserves current behavior for Ansible change in default planned for 2.10
        env['ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS'] = 'never'

@@ -1677,7 +1642,6 @@

    def _get_shared_env(self, inventory_update, private_data_dir, private_data_files):
        """By default, we will apply the standard managed_by_tower injectors
-        for the script injection
        """
        injected_env = {}
        credential = inventory_update.get_cloud_credential()
@@ -1704,52 +1668,18 @@ class PluginFileInjector(object):

    def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
        env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
-        if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
-            env['ANSIBLE_COLLECTIONS_PATHS'] = settings.AWX_ANSIBLE_COLLECTIONS_PATHS
+        env['ANSIBLE_COLLECTIONS_PATHS'] = settings.AWX_ANSIBLE_COLLECTIONS_PATHS
        return env

-    def get_script_env(self, inventory_update, private_data_dir, private_data_files):
-        injected_env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
-
-        # Put in env var reference to private ini data files, if relevant
-        if self.ini_env_reference:
-            credential = inventory_update.get_cloud_credential()
-            cred_data = private_data_files['credentials']
-            injected_env[self.ini_env_reference] = cred_data[credential]
-
-        return injected_env
-
    def build_private_data(self, inventory_update, private_data_dir):
-        if self.should_use_plugin():
-            return self.build_plugin_private_data(inventory_update, private_data_dir)
-        else:
-            return self.build_script_private_data(inventory_update, private_data_dir)
-
-    def build_script_private_data(self, inventory_update, private_data_dir):
-        return None
+        return self.build_plugin_private_data(inventory_update, private_data_dir)

    def build_plugin_private_data(self, inventory_update, private_data_dir):
        return None

-    @staticmethod
-    def dump_cp(cp, credential):
-        """Dump config parser data and return it as a string.
-        Helper method intended for use by build_script_private_data
-        """
-        if cp.sections():
-            f = StringIO()
-            cp.write(f)
-            private_data = {'credentials': {}}
-            private_data['credentials'][credential] = f.getvalue()
-            return private_data
-        else:
-            return None
-

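With the script branch gone, every injector follows one path: inventory_as_dict() builds the fully qualified namespace.collection.plugin_name reference, inventory_contents() dumps it to YAML, and get_plugin_env() always exposes the collections path. A minimal standalone sketch of what that yields for the azure_rm injector defined just below; the three attribute values are copied from that class, everything else here is illustrative (real updates add many more keys from source_vars):

import yaml

# Values taken from the azure_rm injector attributes below; the variables and
# this snippet as a whole are a stand-in, not the actual injector code path.
namespace, collection, plugin_name = 'azure', 'azcollection', 'azure_rm'

# Mirrors inventory_as_dict() / inventory_contents(): the generated file is
# always a plugin config now, never a vendored-script invocation.
inventory_config = {'plugin': f'{namespace}.{collection}.{plugin_name}'}
print(yaml.safe_dump(inventory_config, default_flow_style=False, width=1000))
# -> plugin: azure.azcollection.azure_rm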
class azure_rm(PluginFileInjector):
|
||||
plugin_name = 'azure_rm'
|
||||
initial_version = '2.8' # Driven by unsafe group names issue, hostvars, host names
|
||||
ini_env_reference = 'AZURE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'azure'
|
||||
collection = 'azcollection'
|
||||
@ -1860,32 +1790,9 @@ class azure_rm(PluginFileInjector):
|
||||
ret['exclude_host_filters'].append("location not in {}".format(repr(python_regions)))
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
section = 'azure'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'include_powerstate', 'yes')
|
||||
cp.set(section, 'group_by_resource_group', 'yes')
|
||||
cp.set(section, 'group_by_location', 'yes')
|
||||
cp.set(section, 'group_by_tag', 'yes')
|
||||
|
||||
if inventory_update.source_regions and 'all' not in inventory_update.source_regions:
|
||||
cp.set(
|
||||
section, 'locations',
|
||||
','.join([x.strip() for x in inventory_update.source_regions.split(',')])
|
||||
)
|
||||
|
||||
azure_rm_opts = dict(inventory_update.source_vars_dict.items())
|
||||
for k, v in azure_rm_opts.items():
|
||||
cp.set(section, k, str(v))
|
||||
return self.dump_cp(cp, inventory_update.get_cloud_credential())
|
||||
|
||||
|
||||
class ec2(PluginFileInjector):
|
||||
plugin_name = 'aws_ec2'
|
||||
# blocked by https://github.com/ansible/ansible/issues/54059
|
||||
initial_version = '2.9' # Driven by unsafe group names issue, parent_group templating, hostvars
|
||||
ini_env_reference = 'EC2_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'amazon'
|
||||
collection = 'aws'
|
||||
@ -2108,46 +2015,9 @@ class ec2(PluginFileInjector):
|
||||
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
# Build custom ec2.ini for ec2 inventory script to use.
|
||||
section = 'ec2'
|
||||
cp.add_section(section)
|
||||
ec2_opts = dict(inventory_update.source_vars_dict.items())
|
||||
regions = inventory_update.source_regions or 'all'
|
||||
regions = ','.join([x.strip() for x in regions.split(',')])
|
||||
regions_blacklist = ','.join(settings.EC2_REGIONS_BLACKLIST)
|
||||
ec2_opts['regions'] = regions
|
||||
ec2_opts.setdefault('regions_exclude', regions_blacklist)
|
||||
ec2_opts.setdefault('destination_variable', 'public_dns_name')
|
||||
ec2_opts.setdefault('vpc_destination_variable', 'ip_address')
|
||||
ec2_opts.setdefault('route53', 'False')
|
||||
ec2_opts.setdefault('all_instances', 'True')
|
||||
ec2_opts.setdefault('all_rds_instances', 'False')
|
||||
ec2_opts.setdefault('include_rds_clusters', 'False')
|
||||
ec2_opts.setdefault('rds', 'False')
|
||||
ec2_opts.setdefault('nested_groups', 'True')
|
||||
ec2_opts.setdefault('elasticache', 'False')
|
||||
ec2_opts.setdefault('stack_filters', 'False')
|
||||
if inventory_update.instance_filters:
|
||||
ec2_opts.setdefault('instance_filters', inventory_update.instance_filters)
|
||||
group_by = [x.strip().lower() for x in inventory_update.group_by.split(',') if x.strip()]
|
||||
for choice in inventory_update.get_ec2_group_by_choices():
|
||||
value = bool((group_by and choice[0] in group_by) or (not group_by and choice[0] != 'instance_id'))
|
||||
ec2_opts.setdefault('group_by_%s' % choice[0], str(value))
|
||||
if 'cache_path' not in ec2_opts:
|
||||
cache_path = tempfile.mkdtemp(prefix='ec2_cache', dir=private_data_dir)
|
||||
ec2_opts['cache_path'] = cache_path
|
||||
ec2_opts.setdefault('cache_max_age', '300')
|
||||
for k, v in ec2_opts.items():
|
||||
cp.set(section, k, str(v))
|
||||
return self.dump_cp(cp, inventory_update.get_cloud_credential())
|
||||
|
||||
|
||||
class gce(PluginFileInjector):
|
||||
plugin_name = 'gcp_compute'
|
||||
initial_version = '2.8' # Driven by unsafe group names issue, hostvars
|
||||
ini_env_reference = 'GCE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'google'
|
||||
collection = 'cloud'
|
||||
@ -2158,17 +2028,6 @@ class gce(PluginFileInjector):
|
||||
ret['ANSIBLE_JINJA2_NATIVE'] = str(True)
|
||||
return ret
|
||||
|
||||
def get_script_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(gce, self).get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
cred = inventory_update.get_cloud_credential()
|
||||
# these environment keys are unique to the script operation, and are not
|
||||
# concepts in the modern inventory plugin or gce Ansible module
|
||||
# email and project are redundant with the creds file
|
||||
env['GCE_EMAIL'] = cred.get_input('username', default='')
|
||||
env['GCE_PROJECT'] = cred.get_input('project', default='')
|
||||
env['GCE_ZONE'] = inventory_update.source_regions if inventory_update.source_regions != 'all' else '' # noqa
|
||||
return env
|
||||
|
||||
def _compat_compose_vars(self):
|
||||
# missing: gce_image, gce_uuid
|
||||
# https://github.com/ansible/ansible/issues/51884
|
||||
@ -2241,28 +2100,13 @@ class gce(PluginFileInjector):
|
||||
ret['zones'] = inventory_update.source_regions.split(',')
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
# by default, the GCE inventory source caches results on disk for
|
||||
# 5 minutes; disable this behavior
|
||||
cp.add_section('cache')
|
||||
cp.set('cache', 'cache_max_age', '0')
|
||||
return self.dump_cp(cp, inventory_update.get_cloud_credential())
|
||||
|
||||
|
||||
class vmware(PluginFileInjector):
|
||||
plugin_name = 'vmware_vm_inventory'
|
||||
initial_version = '2.9'
|
||||
ini_env_reference = 'VMWARE_INI_PATH'
|
||||
base_injector = 'managed'
|
||||
namespace = 'community'
|
||||
collection = 'vmware'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'vmware_inventory.py' # exception
|
||||
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(vmware, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['strict'] = False
|
||||
@ -2363,57 +2207,16 @@ class vmware(PluginFileInjector):
|
||||
|
||||
return ret
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
|
||||
# Allow custom options to vmware inventory script.
|
||||
section = 'vmware'
|
||||
cp.add_section(section)
|
||||
cp.set('vmware', 'cache_max_age', '0')
|
||||
cp.set('vmware', 'validate_certs', str(settings.VMWARE_VALIDATE_CERTS))
|
||||
cp.set('vmware', 'username', credential.get_input('username', default=''))
|
||||
cp.set('vmware', 'password', credential.get_input('password', default=''))
|
||||
cp.set('vmware', 'server', credential.get_input('host', default=''))
|
||||
|
||||
vmware_opts = dict(inventory_update.source_vars_dict.items())
|
||||
if inventory_update.instance_filters:
|
||||
vmware_opts.setdefault('host_filters', inventory_update.instance_filters)
|
||||
if inventory_update.group_by:
|
||||
vmware_opts.setdefault('groupby_patterns', inventory_update.group_by)
|
||||
|
||||
for k, v in vmware_opts.items():
|
||||
cp.set(section, k, str(v))
|
||||
|
||||
return self.dump_cp(cp, credential)
|
||||
|
||||
|
||||
class openstack(PluginFileInjector):
|
||||
ini_env_reference = 'OS_CLIENT_CONFIG_FILE'
|
||||
plugin_name = 'openstack'
|
||||
# minimum version of 2.7.8 may be theoretically possible
|
||||
initial_version = '2.8' # Driven by consistency with other sources
|
||||
namespace = 'openstack'
|
||||
collection = 'cloud'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'openstack_inventory.py' # exception
|
||||
|
||||
def _get_clouds_dict(self, inventory_update, cred, private_data_dir, mk_cache=True):
|
||||
def _get_clouds_dict(self, inventory_update, cred, private_data_dir):
|
||||
openstack_data = _openstack_data(cred)
|
||||
|
||||
openstack_data['clouds']['devstack']['private'] = inventory_update.source_vars_dict.get('private', True)
|
||||
if mk_cache:
|
||||
# Retrieve cache path from inventory update vars if available,
|
||||
# otherwise create a temporary cache path only for this update.
|
||||
cache = inventory_update.source_vars_dict.get('cache', {})
|
||||
if not isinstance(cache, dict):
|
||||
cache = {}
|
||||
if not cache.get('path', ''):
|
||||
cache_path = tempfile.mkdtemp(prefix='openstack_cache', dir=private_data_dir)
|
||||
cache['path'] = cache_path
|
||||
openstack_data['cache'] = cache
|
||||
ansible_variables = {
|
||||
'use_hostnames': True,
|
||||
'expand_hostvars': False,
|
||||
@ -2430,27 +2233,16 @@ class openstack(PluginFileInjector):
|
||||
openstack_data['ansible'] = ansible_variables
|
||||
return openstack_data
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir, mk_cache=True):
|
||||
def build_plugin_private_data(self, inventory_update, private_data_dir):
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
private_data = {'credentials': {}}
|
||||
|
||||
openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir, mk_cache=mk_cache)
|
||||
openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir)
|
||||
private_data['credentials'][credential] = yaml.safe_dump(
|
||||
openstack_data, default_flow_style=False, allow_unicode=True
|
||||
)
|
||||
return private_data
|
||||
|
||||
def build_plugin_private_data(self, inventory_update, private_data_dir):
|
||||
# Credentials can be passed in the same way as the script did
|
||||
# but do not create the tmp cache file
|
||||
return self.build_script_private_data(inventory_update, private_data_dir, mk_cache=False)
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
script_env = self.get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
env.update(script_env)
|
||||
return env
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
def use_host_name_for_name(a_bool_maybe):
|
||||
if not isinstance(a_bool_maybe, bool):
|
||||
@ -2485,6 +2277,13 @@ class openstack(PluginFileInjector):
|
||||
ret['inventory_hostname'] = use_host_name_for_name(source_vars['use_hostnames'])
|
||||
return ret
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
cred_data = private_data_files['credentials']
|
||||
env['OS_CLIENT_CONFIG_FILE'] = cred_data[credential]
|
||||
return env
|
||||
|
||||
|
||||
class rhv(PluginFileInjector):
|
||||
"""ovirt uses the custom credential templating, and that is all
|
||||
@ -2495,10 +2294,6 @@ class rhv(PluginFileInjector):
|
||||
namespace = 'ovirt'
|
||||
collection = 'ovirt'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'ovirt4.py' # exception
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(rhv, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['ovirt_insecure'] = False # Default changed from script
|
||||
@ -2521,68 +2316,9 @@ class rhv(PluginFileInjector):
|
||||
|
||||
class satellite6(PluginFileInjector):
|
||||
plugin_name = 'foreman'
|
||||
ini_env_reference = 'FOREMAN_INI_PATH'
|
||||
initial_version = '2.9'
|
||||
# No base injector, because this does not work in playbooks. Bug??
|
||||
namespace = 'theforeman'
|
||||
collection = 'foreman'
|
||||
|
||||
@property
|
||||
def script_name(self):
|
||||
return 'foreman.py' # exception
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
|
||||
section = 'foreman'
|
||||
cp.add_section(section)
|
||||
|
||||
group_patterns = '[]'
|
||||
group_prefix = 'foreman_'
|
||||
want_hostcollections = 'False'
|
||||
want_ansible_ssh_host = 'False'
|
||||
rich_params = 'False'
|
||||
want_facts = 'True'
|
||||
foreman_opts = dict(inventory_update.source_vars_dict.items())
|
||||
foreman_opts.setdefault('ssl_verify', 'False')
|
||||
for k, v in foreman_opts.items():
|
||||
if k == 'satellite6_group_patterns' and isinstance(v, str):
|
||||
group_patterns = v
|
||||
elif k == 'satellite6_group_prefix' and isinstance(v, str):
|
||||
group_prefix = v
|
||||
elif k == 'satellite6_want_hostcollections' and isinstance(v, bool):
|
||||
want_hostcollections = v
|
||||
elif k == 'satellite6_want_ansible_ssh_host' and isinstance(v, bool):
|
||||
want_ansible_ssh_host = v
|
||||
elif k == 'satellite6_rich_params' and isinstance(v, bool):
|
||||
rich_params = v
|
||||
elif k == 'satellite6_want_facts' and isinstance(v, bool):
|
||||
want_facts = v
|
||||
else:
|
||||
cp.set(section, k, str(v))
|
||||
|
||||
if credential:
|
||||
cp.set(section, 'url', credential.get_input('host', default=''))
|
||||
cp.set(section, 'user', credential.get_input('username', default=''))
|
||||
cp.set(section, 'password', credential.get_input('password', default=''))
|
||||
|
||||
section = 'ansible'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'group_patterns', group_patterns)
|
||||
cp.set(section, 'want_facts', str(want_facts))
|
||||
cp.set(section, 'want_hostcollections', str(want_hostcollections))
|
||||
cp.set(section, 'group_prefix', group_prefix)
|
||||
cp.set(section, 'want_ansible_ssh_host', str(want_ansible_ssh_host))
|
||||
cp.set(section, 'rich_params', str(rich_params))
|
||||
|
||||
section = 'cache'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'path', '/tmp')
|
||||
cp.set(section, 'max_age', '0')
|
||||
|
||||
return self.dump_cp(cp, credential)
|
||||
|
||||
def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
# this assumes that this is merged
|
||||
# https://github.com/ansible/ansible/pull/52693
|
||||
@ -2703,56 +2439,12 @@ class satellite6(PluginFileInjector):
|
||||
return ret
|
||||
|
||||
|
||||
class cloudforms(PluginFileInjector):
|
||||
# plugin_name = 'FIXME' # contribute inventory plugin to Ansible
|
||||
ini_env_reference = 'CLOUDFORMS_INI_PATH'
|
||||
# Also no base_injector because this does not work in playbooks
|
||||
# namespace = '' # does not have a collection
|
||||
# collection = ''
|
||||
|
||||
def build_script_private_data(self, inventory_update, private_data_dir):
|
||||
cp = configparser.RawConfigParser()
|
||||
credential = inventory_update.get_cloud_credential()
|
||||
|
||||
section = 'cloudforms'
|
||||
cp.add_section(section)
|
||||
|
||||
if credential:
|
||||
cp.set(section, 'url', credential.get_input('host', default=''))
|
||||
cp.set(section, 'username', credential.get_input('username', default=''))
|
||||
cp.set(section, 'password', credential.get_input('password', default=''))
|
||||
cp.set(section, 'ssl_verify', "false")
|
||||
|
||||
cloudforms_opts = dict(inventory_update.source_vars_dict.items())
|
||||
for opt in ['version', 'purge_actions', 'clean_group_keys', 'nest_tags', 'suffix', 'prefer_ipv4']:
|
||||
if opt in cloudforms_opts:
|
||||
cp.set(section, opt, str(cloudforms_opts[opt]))
|
||||
|
||||
section = 'cache'
|
||||
cp.add_section(section)
|
||||
cp.set(section, 'max_age', "0")
|
||||
cache_path = tempfile.mkdtemp(
|
||||
prefix='cloudforms_cache',
|
||||
dir=private_data_dir
|
||||
)
|
||||
cp.set(section, 'path', cache_path)
|
||||
|
||||
return self.dump_cp(cp, credential)
|
||||
|
||||
|
||||
class tower(PluginFileInjector):
|
||||
plugin_name = 'tower'
|
||||
base_injector = 'template'
|
||||
initial_version = '2.8' # Driven by "include_metadata" hostvars
|
||||
namespace = 'awx'
|
||||
collection = 'awx'
|
||||
|
||||
def get_script_env(self, inventory_update, private_data_dir, private_data_files):
|
||||
env = super(tower, self).get_script_env(inventory_update, private_data_dir, private_data_files)
|
||||
env['TOWER_INVENTORY'] = inventory_update.instance_filters
|
||||
env['TOWER_LICENSE_TYPE'] = get_licenser().validate().get('license_type', 'unlicensed')
|
||||
return env
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
ret = super(tower, self).inventory_as_dict(inventory_update, private_data_dir)
|
||||
# Credentials injected as env vars, same as script
|
||||
|
@@ -50,7 +50,7 @@ import ansible_runner

# AWX
from awx import __version__ as awx_application_version
-from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
+from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, GALAXY_SERVER_FIELDS
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (

@@ -2167,7 +2167,10 @@ class RunProjectUpdate(BaseTask):
        scm_branch = project_update.scm_branch
        branch_override = bool(scm_branch and project_update.scm_branch != project_update.project.scm_branch)
        if project_update.job_type == 'run' and (not branch_override):
-            scm_branch = project_update.project.scm_revision
+            if project_update.project.scm_revision:
+                scm_branch = project_update.project.scm_revision
+            elif not scm_branch:
+                raise RuntimeError('Could not determine a revision to run from project.')
        elif not scm_branch:
            scm_branch = {'hg': 'tip'}.get(project_update.scm_type, 'HEAD')
        extra_vars.update({

@@ -2278,7 +2281,11 @@
    def acquire_lock(self, instance, blocking=True):
        lock_path = instance.get_lock_file()
        if lock_path is None:
-            raise RuntimeError(u'Invalid lock file path')
+            # If from migration or someone blanked local_path for any other reason, recoverable by save
+            instance.save()
+            lock_path = instance.get_lock_file()
+            if lock_path is None:
+                raise RuntimeError(u'Invalid lock file path')

        try:
            self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
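Restated outside the task plumbing, the revision-resolution rule introduced in the RunProjectUpdate hunk above is roughly the following. The function name and signature are mine for illustration; the raise message and the 'tip'/'HEAD' defaults come straight from the diff.

def resolve_update_revision(job_type, branch_override, requested_branch, project_revision, scm_type):
    """Sketch of the branch/revision selection added to RunProjectUpdate above."""
    if job_type == 'run' and not branch_override:
        # A plain "run" update must reuse the revision the last sync recorded.
        if project_revision:
            return project_revision
        if not requested_branch:
            raise RuntimeError('Could not determine a revision to run from project.')
        return requested_branch
    if not requested_branch:
        # Fall back to the SCM's default branch name.
        return {'hg': 'tip'}.get(scm_type, 'HEAD')
    return requested_branch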
@@ -2460,11 +2467,8 @@ class RunInventoryUpdate(BaseTask):

        if injector is not None:
            env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
-            # All CLOUD_PROVIDERS sources implement as either script or auto plugin
-            if injector.should_use_plugin():
-                env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
-            else:
-                env['ANSIBLE_INVENTORY_ENABLED'] = 'script'
+            # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
+            env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'

        if inventory_update.source in ['scm', 'custom']:
            for env_k in inventory_update.source_vars_dict:

@@ -2580,16 +2584,12 @@ class RunInventoryUpdate(BaseTask):
            injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update))

        if injector is not None:
-            if injector.should_use_plugin():
-                content = injector.inventory_contents(inventory_update, private_data_dir)
-                # must be a statically named file
-                inventory_path = os.path.join(private_data_dir, injector.filename)
-                with open(inventory_path, 'w') as f:
-                    f.write(content)
-                os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
-            else:
-                # Use the vendored script path
-                inventory_path = self.get_path_to('..', 'plugins', 'inventory', injector.script_name)
+            content = injector.inventory_contents(inventory_update, private_data_dir)
+            # must be a statically named file
+            inventory_path = os.path.join(private_data_dir, injector.filename)
+            with open(inventory_path, 'w') as f:
+                f.write(content)
+            os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        elif src == 'scm':
            inventory_path = os.path.join(private_data_dir, 'project', inventory_update.source_path)
        elif src == 'custom':

@@ -2613,12 +2613,6 @@ class RunInventoryUpdate(BaseTask):
        src = inventory_update.source
        if src == 'scm' and inventory_update.source_project_update:
            return os.path.join(private_data_dir, 'project')
-        if src in CLOUD_PROVIDERS:
-            injector = None
-            if src in InventorySource.injectors:
-                injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update))
-            if (not injector) or (not injector.should_use_plugin()):
-                return self.get_path_to('..', 'plugins', 'inventory')
        return private_data_dir

    def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
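The net effect of the three RunInventoryUpdate hunks above is a single code path: every cloud source writes a statically named plugin config into the private data directory and lets Ansible's 'auto' inventory plugin pick it up. A condensed sketch of that flow under assumed values; in the real task the filename and content come from the injector and the directory from the job, so everything concrete below is an example:

import os
import stat
import tempfile

# Assumed example values; the real task derives these from the injector and job.
private_data_dir = tempfile.mkdtemp(prefix='awx_example_')
filename = 'aws_ec2.yml'                     # injector.filename -> '<plugin_name>.yml'
content = 'plugin: amazon.aws.aws_ec2\n'     # injector.inventory_contents(...)

env = {'ANSIBLE_INVENTORY_ENABLED': 'auto'}  # the 'script' value is no longer set anywhere

# must be a statically named file, readable only by the owner
inventory_path = os.path.join(private_data_dir, filename)
with open(inventory_path, 'w') as f:
    f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)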
@ -1,9 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"AZURE_CLIENT_ID": "fooo",
|
||||
"AZURE_CLOUD_ENVIRONMENT": "fooo",
|
||||
"AZURE_INI_PATH": "{{ file_reference }}",
|
||||
"AZURE_SECRET": "fooo",
|
||||
"AZURE_SUBSCRIPTION_ID": "fooo",
|
||||
"AZURE_TENANT": "fooo"
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
[azure]
|
||||
include_powerstate = yes
|
||||
group_by_resource_group = yes
|
||||
group_by_location = yes
|
||||
group_by_tag = yes
|
||||
locations = southcentralus,westus
|
||||
base_source_var = value_of_var
|
||||
use_private_ip = True
|
||||
resource_groups = foo_resources,bar_resources
|
||||
tags = Creator:jmarshall, peanutbutter:jelly
|
||||
|
@ -1,4 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"CLOUDFORMS_INI_PATH": "{{ file_reference }}"
|
||||
}
|
@ -1 +0,0 @@
|
||||
<directory>
|
@ -1,16 +0,0 @@
|
||||
[cloudforms]
|
||||
url = https://foo.invalid
|
||||
username = fooo
|
||||
password = fooo
|
||||
ssl_verify = false
|
||||
version = 2.4
|
||||
purge_actions = maybe
|
||||
clean_group_keys = this_key
|
||||
nest_tags = yes
|
||||
suffix = .ppt
|
||||
prefer_ipv4 = yes
|
||||
|
||||
[cache]
|
||||
max_age = 0
|
||||
path = {{ cache_dir }}
|
||||
|
@ -1,7 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"AWS_ACCESS_KEY_ID": "fooo",
|
||||
"AWS_SECRET_ACCESS_KEY": "fooo",
|
||||
"AWS_SECURITY_TOKEN": "fooo",
|
||||
"EC2_INI_PATH": "{{ file_reference }}"
|
||||
}
|
@ -1 +0,0 @@
|
||||
<directory>
|
@ -1,34 +0,0 @@
|
||||
[ec2]
|
||||
base_source_var = value_of_var
|
||||
boto_profile = /tmp/my_boto_stuff
|
||||
iam_role_arn = arn:aws:iam::123456789012:role/test-role
|
||||
hostname_variable = public_dns_name
|
||||
destination_variable = public_dns_name
|
||||
regions = us-east-2,ap-south-1
|
||||
regions_exclude = us-gov-west-1,cn-north-1
|
||||
vpc_destination_variable = ip_address
|
||||
route53 = False
|
||||
all_instances = True
|
||||
all_rds_instances = False
|
||||
include_rds_clusters = False
|
||||
rds = False
|
||||
nested_groups = True
|
||||
elasticache = False
|
||||
stack_filters = False
|
||||
instance_filters = foobaa
|
||||
group_by_ami_id = False
|
||||
group_by_availability_zone = True
|
||||
group_by_aws_account = False
|
||||
group_by_instance_id = False
|
||||
group_by_instance_state = False
|
||||
group_by_platform = False
|
||||
group_by_instance_type = True
|
||||
group_by_key_pair = False
|
||||
group_by_region = True
|
||||
group_by_security_group = False
|
||||
group_by_tag_keys = True
|
||||
group_by_tag_none = False
|
||||
group_by_vpc_id = False
|
||||
cache_path = {{ cache_dir }}
|
||||
cache_max_age = 300
|
||||
|
@ -1,12 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"GCE_CREDENTIALS_FILE_PATH": "{{ file_reference }}",
|
||||
"GCE_EMAIL": "fooo",
|
||||
"GCE_INI_PATH": "{{ file_reference_0 }}",
|
||||
"GCE_PROJECT": "fooo",
|
||||
"GCE_ZONE": "us-east4-a,us-west1-b",
|
||||
"GCP_AUTH_KIND": "serviceaccount",
|
||||
"GCP_ENV_TYPE": "tower",
|
||||
"GCP_PROJECT": "fooo",
|
||||
"GCP_SERVICE_ACCOUNT_FILE": "{{ file_reference }}"
|
||||
}
|
@ -1,7 +0,0 @@
|
||||
{
|
||||
"type": "service_account",
|
||||
"private_key": "{{private_key}}",
|
||||
"client_email": "fooo",
|
||||
"project_id": "fooo",
|
||||
"token_uri": "https://oauth2.googleapis.com/token"
|
||||
}
|
@ -1,3 +0,0 @@
|
||||
[cache]
|
||||
cache_max_age = 0
|
||||
|
@ -1,4 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"OS_CLIENT_CONFIG_FILE": "{{ file_reference }}"
|
||||
}
|
@ -1 +0,0 @@
|
||||
<directory>
|
@ -1,17 +0,0 @@
|
||||
ansible:
|
||||
expand_hostvars: true
|
||||
fail_on_errors: true
|
||||
use_hostnames: false
|
||||
cache:
|
||||
path: {{ cache_dir }}
|
||||
clouds:
|
||||
devstack:
|
||||
auth:
|
||||
auth_url: https://foo.invalid
|
||||
domain_name: fooo
|
||||
password: fooo
|
||||
project_domain_name: fooo
|
||||
project_name: fooo
|
||||
username: fooo
|
||||
private: false
|
||||
verify: false
|
@ -1,7 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"OVIRT_INI_PATH": "{{ file_reference }}",
|
||||
"OVIRT_PASSWORD": "fooo",
|
||||
"OVIRT_URL": "https://foo.invalid",
|
||||
"OVIRT_USERNAME": "fooo"
|
||||
}
|
@ -1,5 +0,0 @@
|
||||
[ovirt]
|
||||
ovirt_url=https://foo.invalid
|
||||
ovirt_username=fooo
|
||||
ovirt_password=fooo
|
||||
ovirt_ca_file=fooo
|
@ -1,4 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"FOREMAN_INI_PATH": "{{ file_reference }}"
|
||||
}
|
@ -1,19 +0,0 @@
|
||||
[foreman]
|
||||
base_source_var = value_of_var
|
||||
ssl_verify = False
|
||||
url = https://foo.invalid
|
||||
user = fooo
|
||||
password = fooo
|
||||
|
||||
[ansible]
|
||||
group_patterns = ["{app}-{tier}-{color}", "{app}-{color}"]
|
||||
want_facts = True
|
||||
want_hostcollections = True
|
||||
group_prefix = foo_group_prefix
|
||||
want_ansible_ssh_host = True
|
||||
rich_params = False
|
||||
|
||||
[cache]
|
||||
path = /tmp
|
||||
max_age = 0
|
||||
|
@ -1,9 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"TOWER_HOST": "https://foo.invalid",
|
||||
"TOWER_INVENTORY": "42",
|
||||
"TOWER_LICENSE_TYPE": "open",
|
||||
"TOWER_PASSWORD": "fooo",
|
||||
"TOWER_USERNAME": "fooo",
|
||||
"TOWER_VERIFY_SSL": "False"
|
||||
}
|
@ -1,8 +0,0 @@
|
||||
{
|
||||
"ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
|
||||
"VMWARE_HOST": "https://foo.invalid",
|
||||
"VMWARE_INI_PATH": "{{ file_reference }}",
|
||||
"VMWARE_PASSWORD": "fooo",
|
||||
"VMWARE_USER": "fooo",
|
||||
"VMWARE_VALIDATE_CERTS": "False"
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
[vmware]
|
||||
cache_max_age = 0
|
||||
validate_certs = False
|
||||
username = fooo
|
||||
password = fooo
|
||||
server = https://foo.invalid
|
||||
base_source_var = value_of_var
|
||||
alias_pattern = {{ config.foo }}
|
||||
host_filters = {{ config.zoo == "DC0_H0_VM0" }}
|
||||
groupby_patterns = {{ config.asdf }}
|
||||
|
@ -17,7 +17,6 @@ from awx.main.models import (
|
||||
Job
|
||||
)
|
||||
from awx.main.constants import CLOUD_PROVIDERS
|
||||
from awx.main.models.inventory import PluginFileInjector
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
|
||||
|
||||
@ -227,13 +226,6 @@ class TestSCMClean:
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestInventorySourceInjectors:
|
||||
def test_should_use_plugin(self):
|
||||
class foo(PluginFileInjector):
|
||||
plugin_name = 'foo_compute'
|
||||
initial_version = '2.7.8'
|
||||
assert not foo('2.7.7').should_use_plugin()
|
||||
assert foo('2.8').should_use_plugin()
|
||||
|
||||
def test_extra_credentials(self, project, credential):
|
||||
inventory_source = InventorySource.objects.create(
|
||||
name='foo', source='custom', source_project=project
|
||||
@ -266,18 +258,6 @@ class TestInventorySourceInjectors:
|
||||
injector = InventorySource.injectors[source]('2.7.7')
|
||||
assert injector.filename == filename
|
||||
|
||||
@pytest.mark.parametrize('source,script_name', [
|
||||
('ec2', 'ec2.py'),
|
||||
('rhv', 'ovirt4.py'),
|
||||
('satellite6', 'foreman.py'),
|
||||
('openstack', 'openstack_inventory.py')
|
||||
], ids=['ec2', 'rhv', 'satellite6', 'openstack'])
|
||||
def test_script_filenames(self, source, script_name):
|
||||
"""Ansible has several exceptions in naming of scripts
|
||||
"""
|
||||
injector = InventorySource.injectors[source]('2.7.7')
|
||||
assert injector.script_name == script_name
|
||||
|
||||
def test_group_by_azure(self):
|
||||
injector = InventorySource.injectors['azure_rm']('2.9')
|
||||
inv_src = InventorySource(
|
||||
|
@ -68,15 +68,6 @@ INI_TEST_VARS = {
|
||||
'satellite6_want_hostcollections': True,
|
||||
'satellite6_want_ansible_ssh_host': True,
|
||||
'satellite6_want_facts': True
|
||||
|
||||
},
|
||||
'cloudforms': {
|
||||
'version': '2.4',
|
||||
'purge_actions': 'maybe',
|
||||
'clean_group_keys': 'this_key',
|
||||
'nest_tags': 'yes',
|
||||
'suffix': '.ppt',
|
||||
'prefer_ipv4': 'yes'
|
||||
},
|
||||
'rhv': { # options specific to the plugin
|
||||
'ovirt_insecure': False,
|
||||
@ -250,8 +241,7 @@ def create_reference_data(source_dir, env, content):
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('this_kind', CLOUD_PROVIDERS)
|
||||
@pytest.mark.parametrize('script_or_plugin', ['scripts', 'plugins'])
|
||||
def test_inventory_update_injected_content(this_kind, script_or_plugin, inventory, fake_credential_factory):
|
||||
def test_inventory_update_injected_content(this_kind, inventory, fake_credential_factory):
|
||||
src_vars = dict(base_source_var='value_of_var')
|
||||
if this_kind in INI_TEST_VARS:
|
||||
src_vars.update(INI_TEST_VARS[this_kind])
|
||||
@ -268,8 +258,7 @@ def test_inventory_update_injected_content(this_kind, script_or_plugin, inventor
|
||||
inventory_update = inventory_source.create_unified_job()
|
||||
task = RunInventoryUpdate()
|
||||
|
||||
use_plugin = bool(script_or_plugin == 'plugins')
|
||||
if use_plugin and InventorySource.injectors[this_kind].plugin_name is None:
|
||||
if InventorySource.injectors[this_kind].plugin_name is None:
|
||||
pytest.skip('Use of inventory plugin is not enabled for this source')
|
||||
|
||||
def substitute_run(envvars=None, **_kw):
|
||||
@ -279,11 +268,11 @@ def test_inventory_update_injected_content(this_kind, script_or_plugin, inventor
|
||||
If MAKE_INVENTORY_REFERENCE_FILES is set, it will produce reference files
|
||||
"""
|
||||
private_data_dir = envvars.pop('AWX_PRIVATE_DATA_DIR')
|
||||
assert envvars.pop('ANSIBLE_INVENTORY_ENABLED') == ('auto' if use_plugin else 'script')
|
||||
assert envvars.pop('ANSIBLE_INVENTORY_ENABLED') == 'auto'
|
||||
set_files = bool(os.getenv("MAKE_INVENTORY_REFERENCE_FILES", 'false').lower()[0] not in ['f', '0'])
|
||||
env, content = read_content(private_data_dir, envvars, inventory_update)
|
||||
env.pop('ANSIBLE_COLLECTIONS_PATHS', None) # collection paths not relevant to this test
|
||||
base_dir = os.path.join(DATA, script_or_plugin)
|
||||
base_dir = os.path.join(DATA, 'plugins')
|
||||
if not os.path.exists(base_dir):
|
||||
os.mkdir(base_dir)
|
||||
source_dir = os.path.join(base_dir, this_kind) # this_kind is a global
|
||||
@ -317,21 +306,13 @@ def test_inventory_update_injected_content(this_kind, script_or_plugin, inventor
|
||||
Res = namedtuple('Result', ['status', 'rc'])
|
||||
return Res('successful', 0)
|
||||
|
||||
mock_licenser = mock.Mock(return_value=mock.Mock(
|
||||
validate=mock.Mock(return_value={'license_type': 'open'})
|
||||
))
|
||||
|
||||
# Mock this so that it will not send events to the callback receiver
|
||||
# because doing so in pytest land creates large explosions
|
||||
with mock.patch('awx.main.queue.CallbackQueueDispatcher.dispatch', lambda self, obj: None):
|
||||
# Force the update to use the script injector
|
||||
with mock.patch('awx.main.models.inventory.PluginFileInjector.should_use_plugin', return_value=use_plugin):
|
||||
# Also do not send websocket status updates
|
||||
with mock.patch.object(UnifiedJob, 'websocket_emit_status', mock.Mock()):
|
||||
with mock.patch.object(task, 'get_ansible_version', return_value='2.13'):
|
||||
# The point of this test is that we replace run with assertions
|
||||
with mock.patch('awx.main.tasks.ansible_runner.interface.run', substitute_run):
|
||||
# mocking the licenser is necessary for the tower source
|
||||
with mock.patch('awx.main.models.inventory.get_licenser', mock_licenser):
|
||||
# so this sets up everything for a run and then yields control over to substitute_run
|
||||
task.run(inventory_update.pk)
|
||||
# Also do not send websocket status updates
|
||||
with mock.patch.object(UnifiedJob, 'websocket_emit_status', mock.Mock()):
|
||||
with mock.patch.object(task, 'get_ansible_version', return_value='2.13'):
|
||||
# The point of this test is that we replace run with assertions
|
||||
with mock.patch('awx.main.tasks.ansible_runner.interface.run', substitute_run):
|
||||
# so this sets up everything for a run and then yields control over to substitute_run
|
||||
task.run(inventory_update.pk)
|
||||
|
@@ -5,6 +5,8 @@ from awx.main.migrations import _inventory_source as invsrc

from django.apps import apps

+from awx.main.models import InventorySource
+

@pytest.mark.parametrize('vars,id_var,result', [
    ({'foo': {'bar': '1234'}}, 'foo.bar', '1234'),

@@ -37,3 +39,19 @@ def test_apply_new_instance_id(inventory_source):
    host2.refresh_from_db()
    assert host1.instance_id == ''
    assert host2.instance_id == 'bad_user'
+
+
+@pytest.mark.django_db
+def test_replacement_scm_sources(inventory):
+    inv_source = InventorySource.objects.create(
+        name='test',
+        inventory=inventory,
+        organization=inventory.organization,
+        source='ec2'
+    )
+    invsrc.create_scm_script_substitute(apps, 'ec2')
+    inv_source.refresh_from_db()
+    assert inv_source.source == 'scm'
+    assert inv_source.source_project
+    project = inv_source.source_project
+    assert 'Replacement project for' in project.name
@ -1835,6 +1835,13 @@ class TestProjectUpdateCredentials(TestJobExecution):
|
||||
assert env['FOO'] == 'BAR'
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_ansible_version():
|
||||
with mock.patch('awx.main.tasks._get_ansible_version', mock.MagicMock(return_value='2.10')) as _fixture:
|
||||
yield _fixture
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("mock_ansible_version")
|
||||
class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
@pytest.fixture
|
||||
def inventory_update(self):
|
||||
@ -1852,17 +1859,11 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
inventory_update.get_cloud_credential = mocker.Mock(return_value=None)
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
assert 'AWS_ACCESS_KEY_ID' not in env
|
||||
assert 'AWS_SECRET_ACCESS_KEY' not in env
|
||||
assert 'EC2_INI_PATH' in env
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['EC2_INI_PATH'])
|
||||
assert 'ec2' in config.sections()
|
||||
|
||||
@pytest.mark.parametrize('with_credential', [True, False])
|
||||
def test_custom_source(self, with_credential, mocker, inventory_update, private_data_dir):
|
||||
@ -1928,20 +1929,13 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
# force test to use the ec2 script injection logic, as opposed to plugin
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
safe_env = build_safe_env(env)
|
||||
|
||||
assert env['AWS_ACCESS_KEY_ID'] == 'bob'
|
||||
assert env['AWS_SECRET_ACCESS_KEY'] == 'secret'
|
||||
assert 'EC2_INI_PATH' in env
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['EC2_INI_PATH'])
|
||||
assert 'ec2' in config.sections()
|
||||
|
||||
assert safe_env['AWS_SECRET_ACCESS_KEY'] == tasks.HIDDEN_PASSWORD
|
||||
|
||||
@ -1961,9 +1955,8 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
safe_env = {}
|
||||
credentials = task.build_credentials_list(inventory_update)
|
||||
@ -1973,11 +1966,10 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
credential, env, safe_env, [], private_data_dir
|
||||
)
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['VMWARE_INI_PATH'])
|
||||
assert config.get('vmware', 'username') == 'bob'
|
||||
assert config.get('vmware', 'password') == 'secret'
|
||||
assert config.get('vmware', 'server') == 'https://example.org'
|
||||
env["VMWARE_USER"] == "bob",
|
||||
env["VMWARE_PASSWORD"] == "secret",
|
||||
env["VMWARE_HOST"] == "https://example.org",
|
||||
env["VMWARE_VALIDATE_CERTS"] == "False",
|
||||
|
||||
def test_azure_rm_source_with_tenant(self, private_data_dir, inventory_update, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
@ -2005,10 +1997,8 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
'group_by_resource_group': 'no'
|
||||
}
|
||||
|
||||
# force azure_rm inventory to use script injection logic, as opposed to plugin
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
safe_env = build_safe_env(env)
|
||||
|
||||
@ -2018,15 +2008,6 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
assert env['AZURE_SUBSCRIPTION_ID'] == 'some-subscription'
|
||||
assert env['AZURE_CLOUD_ENVIRONMENT'] == 'foobar'
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['AZURE_INI_PATH'])
|
||||
assert config.get('azure', 'include_powerstate') == 'yes'
|
||||
assert config.get('azure', 'group_by_resource_group') == 'no'
|
||||
assert config.get('azure', 'group_by_location') == 'yes'
|
||||
assert 'group_by_security_group' not in config.items('azure')
|
||||
assert config.get('azure', 'group_by_tag') == 'yes'
|
||||
assert config.get('azure', 'locations') == 'north,south,east,west'
|
||||
|
||||
assert safe_env['AZURE_SECRET'] == tasks.HIDDEN_PASSWORD
|
||||
|
||||
def test_azure_rm_source_with_password(self, private_data_dir, inventory_update, mocker):
|
||||
@ -2055,10 +2036,8 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
'group_by_security_group': 'no'
|
||||
}
|
||||
|
||||
# force azure_rm inventory to use script injection logic, as opposed to plugin
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
safe_env = build_safe_env(env)
|
||||
|
||||
@ -2067,14 +2046,6 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
assert env['AZURE_PASSWORD'] == 'secret'
|
||||
assert env['AZURE_CLOUD_ENVIRONMENT'] == 'foobar'
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['AZURE_INI_PATH'])
|
||||
assert config.get('azure', 'include_powerstate') == 'yes'
|
||||
assert config.get('azure', 'group_by_resource_group') == 'no'
|
||||
assert config.get('azure', 'group_by_location') == 'yes'
|
||||
assert config.get('azure', 'group_by_security_group') == 'no'
|
||||
assert config.get('azure', 'group_by_tag') == 'yes'
|
||||
assert 'locations' not in config.items('azure')
|
||||
assert safe_env['AZURE_PASSWORD'] == tasks.HIDDEN_PASSWORD
|
||||
|
||||
def test_gce_source(self, inventory_update, private_data_dir, mocker):
|
||||
@ -2118,18 +2089,6 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
assert json_data['client_email'] == 'bob'
|
||||
assert json_data['project_id'] == 'some-project'
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['GCE_INI_PATH'])
|
||||
assert 'cache' in config.sections()
|
||||
assert config.getint('cache', 'cache_max_age') == 0
|
||||
|
||||
# Change the initial version of the inventory plugin to force use of script
|
||||
with mock.patch('awx.main.models.inventory.gce.initial_version', None):
|
||||
run('')
|
||||
|
||||
inventory_update.source_regions = 'us-east-4'
|
||||
run('us-east-4')
|
||||
|
||||
def test_openstack_source(self, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
openstack = CredentialType.defaults['openstack']()
|
||||
@ -2200,60 +2159,12 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
'satellite6_want_facts': False
|
||||
}
|
||||
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['FOREMAN_INI_PATH'])
|
||||
assert config.get('foreman', 'url') == 'https://example.org'
|
||||
assert config.get('foreman', 'user') == 'bob'
|
||||
assert config.get('foreman', 'password') == 'secret'
|
||||
assert config.get('ansible', 'group_patterns') == '[a,b,c]'
|
||||
assert config.get('ansible', 'group_prefix') == 'hey_'
|
||||
assert config.get('ansible', 'want_hostcollections') == 'True'
|
||||
assert config.get('ansible', 'want_ansible_ssh_host') == 'True'
|
||||
assert config.get('ansible', 'rich_params') == 'True'
|
||||
assert config.get('ansible', 'want_facts') == 'False'
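# Rough shape of the foreman.ini implied by the assertions above (illustration
# only; the values simply mirror the test credential and source_vars):
#
#   [foreman]
#   url = https://example.org
#   user = bob
#   password = secret
#
#   [ansible]
#   group_patterns = [a,b,c]
#   group_prefix = hey_
#   want_hostcollections = True
#   want_ansible_ssh_host = True
#   rich_params = True
#   want_facts = False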
|
||||
|
||||
def test_cloudforms_source(self, inventory_update, private_data_dir, mocker):
|
||||
task = tasks.RunInventoryUpdate()
|
||||
cloudforms = CredentialType.defaults['cloudforms']()
|
||||
inventory_update.source = 'cloudforms'
|
||||
|
||||
def get_cred():
|
||||
cred = Credential(
|
||||
pk=1,
|
||||
credential_type=cloudforms,
|
||||
inputs = {
|
||||
'username': 'bob',
|
||||
'password': 'secret',
|
||||
'host': 'https://example.org'
|
||||
}
|
||||
)
|
||||
cred.inputs['password'] = encrypt_field(
|
||||
cred, 'password'
|
||||
)
|
||||
return cred
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
inventory_update.source_vars = '{"prefer_ipv4": True}'
|
||||
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
config = configparser.ConfigParser()
|
||||
config.read(env['CLOUDFORMS_INI_PATH'])
|
||||
assert config.get('cloudforms', 'url') == 'https://example.org'
|
||||
assert config.get('cloudforms', 'username') == 'bob'
|
||||
assert config.get('cloudforms', 'password') == 'secret'
|
||||
assert config.get('cloudforms', 'ssl_verify') == 'false'
|
||||
assert config.get('cloudforms', 'prefer_ipv4') == 'True'
|
||||
|
||||
cache_path = config.get('cache', 'path')
|
||||
assert cache_path.startswith(env['AWX_PRIVATE_DATA_DIR'])
|
||||
assert os.path.isdir(cache_path)
|
||||
env["FOREMAN_SERVER"] == "https://example.org",
|
||||
env["FOREMAN_USER"] == "bob",
|
||||
env["FOREMAN_PASSWORD"] == "secret",
|
||||
|
||||
@pytest.mark.parametrize('verify', [True, False])
|
||||
def test_tower_source(self, verify, inventory_update, private_data_dir, mocker):
|
||||
@ -2275,16 +2186,13 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
inventory_update.get_cloud_credential = get_cred
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
|
||||
# force tower inventory source to use script injection logic, as opposed to plugin
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
env = task.build_env(inventory_update, private_data_dir, False)
|
||||
env = task.build_env(inventory_update, private_data_dir, False)
|
||||
|
||||
safe_env = build_safe_env(env)
|
||||
|
||||
assert env['TOWER_HOST'] == 'https://tower.example.org'
|
||||
assert env['TOWER_USERNAME'] == 'bob'
|
||||
assert env['TOWER_PASSWORD'] == 'secret'
|
||||
assert env['TOWER_INVENTORY'] == '12345'
|
||||
if verify:
|
||||
assert env['TOWER_VERIFY_SSL'] == 'True'
|
||||
else:
|
||||
@ -2339,9 +2247,8 @@ class TestInventoryUpdateCredentials(TestJobExecution):
|
||||
inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
|
||||
settings.AWX_TASK_ENV = {'FOO': 'BAR'}
|
||||
|
||||
with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
|
||||
env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
|
||||
|
||||
assert env['FOO'] == 'BAR'
|
||||
|
||||
|
@ -1,22 +0,0 @@
|
||||
#
|
||||
# Configuration file for azure_rm.py
|
||||
#
|
||||
[azure]
|
||||
# Control which resource groups are included. By default all resources groups are included.
|
||||
# Set resource_groups to a comma separated list of resource groups names.
|
||||
#resource_groups=
|
||||
|
||||
# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
|
||||
#tags=
|
||||
|
||||
# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
|
||||
#locations=
|
||||
|
||||
# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
|
||||
include_powerstate=yes
|
||||
|
||||
# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
|
||||
group_by_resource_group=yes
|
||||
group_by_location=yes
|
||||
group_by_security_group=yes
|
||||
group_by_tag=yes
|
@ -1,973 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
|
||||
# Chris Houseknecht, <house@redhat.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
'''
|
||||
Azure External Inventory Script
|
||||
===============================
|
||||
Generates dynamic inventory by making API requests to the Azure Resource
|
||||
Manager using the Azure Python SDK. For instruction on installing the
|
||||
Azure Python SDK see https://azure-sdk-for-python.readthedocs.io/
|
||||
|
||||
Authentication
|
||||
--------------
|
||||
The order of precedence is command line arguments, environment variables,
|
||||
and finally the [default] profile found in ~/.azure/credentials.
|
||||
|
||||
If using a credentials file, it should be an ini formatted file with one or
|
||||
more sections, which we refer to as profiles. The script looks for a
|
||||
[default] section, if a profile is not specified either on the command line
|
||||
or with an environment variable. The keys in a profile will match the
|
||||
list of command line arguments below.
|
||||
|
||||
For command line arguments and environment variables specify a profile found
|
||||
in your ~/.azure/credentials file, or a service principal or Active Directory
|
||||
user.
|
||||
|
||||
Command line arguments:
|
||||
- profile
|
||||
- client_id
|
||||
- secret
|
||||
- subscription_id
|
||||
- tenant
|
||||
- ad_user
|
||||
- password
|
||||
- cloud_environment
|
||||
- adfs_authority_url
|
||||
|
||||
Environment variables:
|
||||
- AZURE_PROFILE
|
||||
- AZURE_CLIENT_ID
|
||||
- AZURE_SECRET
|
||||
- AZURE_SUBSCRIPTION_ID
|
||||
- AZURE_TENANT
|
||||
- AZURE_AD_USER
|
||||
- AZURE_PASSWORD
|
||||
- AZURE_CLOUD_ENVIRONMENT
|
||||
- AZURE_ADFS_AUTHORITY_URL
|
||||
|
||||
Run for Specific Host
|
||||
-----------------------
|
||||
When run for a specific host using the --host option, a resource group is
|
||||
required. For a specific host, this script returns the following variables:
|
||||
|
||||
{
|
||||
"ansible_host": "XXX.XXX.XXX.XXX",
|
||||
"computer_name": "computer_name2",
|
||||
"fqdn": null,
|
||||
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
|
||||
"image": {
|
||||
"offer": "CentOS",
|
||||
"publisher": "OpenLogic",
|
||||
"sku": "7.1",
|
||||
"version": "latest"
|
||||
},
|
||||
"location": "westus",
|
||||
"mac_address": "00-00-5E-00-53-FE",
|
||||
"name": "object-name",
|
||||
"network_interface": "interface-name",
|
||||
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
|
||||
"network_security_group": null,
|
||||
"network_security_group_id": null,
|
||||
"os_disk": {
|
||||
"name": "object-name",
|
||||
"operating_system_type": "Linux"
|
||||
},
|
||||
"plan": null,
|
||||
"powerstate": "running",
|
||||
"private_ip": "172.26.3.6",
|
||||
"private_ip_alloc_method": "Static",
|
||||
"provisioning_state": "Succeeded",
|
||||
"public_ip": "XXX.XXX.XXX.XXX",
|
||||
"public_ip_alloc_method": "Static",
|
||||
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
|
||||
"public_ip_name": "object-name",
|
||||
"resource_group": "galaxy-production",
|
||||
"security_group": "object-name",
|
||||
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
|
||||
"tags": {
|
||||
"db": "database"
|
||||
},
|
||||
"type": "Microsoft.Compute/virtualMachines",
|
||||
"virtual_machine_size": "Standard_DS4"
|
||||
}
|
||||
|
||||
Groups
|
||||
------
|
||||
When run in --list mode, instances are grouped by the following categories:
|
||||
- azure
|
||||
- location
|
||||
- resource_group
|
||||
- security_group
|
||||
- tag key
|
||||
- tag key_value
|
||||
|
||||
Control groups using azure_rm.ini or set environment variables:
|
||||
|
||||
AZURE_GROUP_BY_RESOURCE_GROUP=yes
|
||||
AZURE_GROUP_BY_LOCATION=yes
|
||||
AZURE_GROUP_BY_SECURITY_GROUP=yes
|
||||
AZURE_GROUP_BY_TAG=yes
|
||||
|
||||
Select hosts within specific resource groups by assigning a comma separated list to:
|
||||
|
||||
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
|
||||
|
||||
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
|
||||
|
||||
AZURE_TAGS=key1,key2,key3
|
||||
|
||||
Select hosts for specific locations:
|
||||
|
||||
AZURE_LOCATIONS=eastus,westus,eastus2
|
||||
|
||||
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
|
||||
|
||||
AZURE_TAGS=key1:value1,key2:value2
|
||||
|
||||
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
|
||||
AZURE_INCLUDE_POWERSTATE=no
|
||||
|
||||
azure_rm.ini
|
||||
------------
|
||||
As mentioned above, you can control execution using environment variables or a .ini file. A sample
|
||||
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
|
||||
'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
|
||||
a different path for the .ini file, define the AZURE_INI_PATH environment variable:
|
||||
|
||||
export AZURE_INI_PATH=/path/to/custom.ini
|
||||
|
||||
Powerstate:
|
||||
-----------
|
||||
The powerstate attribute indicates whether a host is running. If the value is 'running', the machine is up;
any other value means the machine is down and will be unreachable.
|
||||
|
||||
Examples:
|
||||
---------
|
||||
Execute /bin/uname on all instances in the galaxy-qa resource group
|
||||
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
|
||||
|
||||
Use the inventory script to print instance specific information
|
||||
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
|
||||
|
||||
Use with a playbook
|
||||
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
|
||||
|
||||
|
||||
Insecure Platform Warning
|
||||
-------------------------
|
||||
If you receive InsecurePlatformWarning from urllib3, install the
|
||||
requests security packages:
|
||||
|
||||
pip install requests[security]
|
||||
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
- Matt Davis (@nitzmahone)
|
||||
|
||||
Company: Ansible by Red Hat
|
||||
|
||||
Version: 1.0.0
|
||||
'''
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import inspect
|
||||
|
||||
try:
|
||||
# python2
|
||||
import ConfigParser as cp
|
||||
except ImportError:
|
||||
# python3
|
||||
import configparser as cp
|
||||
|
||||
from os.path import expanduser
|
||||
import ansible.module_utils.six.moves.urllib.parse as urlparse
|
||||
|
||||
HAS_AZURE = True
|
||||
HAS_AZURE_EXC = None
|
||||
HAS_AZURE_CLI_CORE = True
|
||||
CLIError = None
|
||||
|
||||
try:
|
||||
from msrestazure.azure_active_directory import AADTokenCredentials
|
||||
from msrestazure.azure_exceptions import CloudError
|
||||
from msrestazure.azure_active_directory import MSIAuthentication
|
||||
from msrestazure import azure_cloud
|
||||
from azure.mgmt.compute import __version__ as azure_compute_version
|
||||
from azure.common import AzureMissingResourceHttpError, AzureHttpError
|
||||
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
|
||||
from azure.mgmt.network import NetworkManagementClient
|
||||
from azure.mgmt.resource.resources import ResourceManagementClient
|
||||
from azure.mgmt.resource.subscriptions import SubscriptionClient
|
||||
from azure.mgmt.compute import ComputeManagementClient
|
||||
from adal.authentication_context import AuthenticationContext
|
||||
except ImportError as exc:
|
||||
HAS_AZURE_EXC = exc
|
||||
HAS_AZURE = False
|
||||
|
||||
try:
|
||||
from azure.cli.core.util import CLIError
|
||||
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
|
||||
from azure.common.cloud import get_cli_active_cloud
|
||||
except ImportError:
|
||||
HAS_AZURE_CLI_CORE = False
|
||||
CLIError = Exception
|
||||
|
||||
try:
|
||||
from ansible.release import __version__ as ansible_version
|
||||
except ImportError:
|
||||
ansible_version = 'unknown'
|
||||
|
||||
AZURE_CREDENTIAL_ENV_MAPPING = dict(
|
||||
profile='AZURE_PROFILE',
|
||||
subscription_id='AZURE_SUBSCRIPTION_ID',
|
||||
client_id='AZURE_CLIENT_ID',
|
||||
secret='AZURE_SECRET',
|
||||
tenant='AZURE_TENANT',
|
||||
ad_user='AZURE_AD_USER',
|
||||
password='AZURE_PASSWORD',
|
||||
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
|
||||
adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
|
||||
)
|
||||
|
||||
AZURE_CONFIG_SETTINGS = dict(
|
||||
resource_groups='AZURE_RESOURCE_GROUPS',
|
||||
tags='AZURE_TAGS',
|
||||
locations='AZURE_LOCATIONS',
|
||||
include_powerstate='AZURE_INCLUDE_POWERSTATE',
|
||||
group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
|
||||
group_by_location='AZURE_GROUP_BY_LOCATION',
|
||||
group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
|
||||
group_by_tag='AZURE_GROUP_BY_TAG',
|
||||
group_by_os_family='AZURE_GROUP_BY_OS_FAMILY',
|
||||
use_private_ip='AZURE_USE_PRIVATE_IP'
|
||||
)
|
||||
|
||||
AZURE_MIN_VERSION = "2.0.0"
|
||||
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
|
||||
|
||||
|
||||
def azure_id_to_dict(id):
|
||||
pieces = re.sub(r'^\/', '', id).split('/')
|
||||
result = {}
|
||||
index = 0
|
||||
while index < len(pieces) - 1:
|
||||
result[pieces[index]] = pieces[index + 1]
|
||||
index += 1
|
||||
return result
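# For illustration: the loop above maps every path segment to the segment that
# follows it, so for a typical virtual machine id such as
#   '/subscriptions/xxx/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name'
# the result contains 'resourceGroups': 'galaxy-production' and
# 'virtualMachines': 'object-name' (along with the overlapping intermediate pairs).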
|
||||
|
||||
|
||||
class AzureRM(object):
|
||||
|
||||
def __init__(self, args):
|
||||
self._args = args
|
||||
self._cloud_environment = None
|
||||
self._compute_client = None
|
||||
self._resource_client = None
|
||||
self._network_client = None
|
||||
self._adfs_authority_url = None
|
||||
self._resource = None
|
||||
|
||||
self.debug = False
|
||||
if args.debug:
|
||||
self.debug = True
|
||||
|
||||
self.credentials = self._get_credentials(args)
|
||||
if not self.credentials:
|
||||
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
|
||||
"or define a profile in ~/.azure/credentials.")
|
||||
|
||||
# if cloud_environment specified, look up/build Cloud object
|
||||
raw_cloud_env = self.credentials.get('cloud_environment')
|
||||
if not raw_cloud_env:
|
||||
self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
|
||||
else:
|
||||
# try to look up "well-known" values via the name attribute on azure_cloud members
|
||||
all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
|
||||
matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
|
||||
if len(matched_clouds) == 1:
|
||||
self._cloud_environment = matched_clouds[0]
|
||||
elif len(matched_clouds) > 1:
|
||||
self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
|
||||
else:
|
||||
if not urlparse.urlparse(raw_cloud_env).scheme:
|
||||
self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
|
||||
try:
|
||||
self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
|
||||
except Exception as e:
|
||||
self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message))
|
||||
|
||||
if self.credentials.get('subscription_id', None) is None:
|
||||
self.fail("Credentials did not include a subscription_id value.")
|
||||
self.log("setting subscription_id")
|
||||
self.subscription_id = self.credentials['subscription_id']
|
||||
|
||||
# get authentication authority
|
||||
# for adfs, user could pass in authority or not.
|
||||
# for others, use default authority from cloud environment
|
||||
if self.credentials.get('adfs_authority_url'):
|
||||
self._adfs_authority_url = self.credentials.get('adfs_authority_url')
|
||||
else:
|
||||
self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
|
||||
|
||||
# get resource from cloud environment
|
||||
self._resource = self._cloud_environment.endpoints.active_directory_resource_id
|
||||
|
||||
if self.credentials.get('credentials'):
|
||||
self.azure_credentials = self.credentials.get('credentials')
|
||||
elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
|
||||
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
|
||||
secret=self.credentials['secret'],
|
||||
tenant=self.credentials['tenant'],
|
||||
cloud_environment=self._cloud_environment)
|
||||
|
||||
elif self.credentials.get('ad_user') is not None and \
|
||||
self.credentials.get('password') is not None and \
|
||||
self.credentials.get('client_id') is not None and \
|
||||
self.credentials.get('tenant') is not None:
|
||||
|
||||
self.azure_credentials = self.acquire_token_with_username_password(
|
||||
self._adfs_authority_url,
|
||||
self._resource,
|
||||
self.credentials['ad_user'],
|
||||
self.credentials['password'],
|
||||
self.credentials['client_id'],
|
||||
self.credentials['tenant'])
|
||||
|
||||
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
|
||||
tenant = self.credentials.get('tenant')
|
||||
if not tenant:
|
||||
tenant = 'common'
|
||||
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
|
||||
self.credentials['password'],
|
||||
tenant=tenant,
|
||||
cloud_environment=self._cloud_environment)
|
||||
|
||||
else:
|
||||
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
|
||||
"Credentials must include client_id, secret and tenant or ad_user and password, or "
|
||||
"ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
|
||||
"be logged in using AzureCLI.")
|
||||
|
||||
def log(self, msg):
|
||||
if self.debug:
|
||||
print(msg + u'\n')
|
||||
|
||||
def fail(self, msg):
|
||||
raise Exception(msg)
|
||||
|
||||
def _get_profile(self, profile="default"):
|
||||
path = expanduser("~")
|
||||
path += "/.azure/credentials"
|
||||
try:
|
||||
config = cp.ConfigParser()
|
||||
config.read(path)
|
||||
except Exception as exc:
|
||||
self.fail("Failed to access {0}. Check that the file exists and you have read "
|
||||
"access. {1}".format(path, str(exc)))
|
||||
credentials = dict()
|
||||
for key in AZURE_CREDENTIAL_ENV_MAPPING:
|
||||
try:
|
||||
credentials[key] = config.get(profile, key, raw=True)
|
||||
except:
|
||||
pass
|
||||
|
||||
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
|
||||
return credentials
|
||||
|
||||
return None
|
||||
|
||||
def _get_env_credentials(self):
|
||||
env_credentials = dict()
|
||||
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
|
||||
env_credentials[attribute] = os.environ.get(env_variable, None)
|
||||
|
||||
if env_credentials['profile'] is not None:
|
||||
credentials = self._get_profile(env_credentials['profile'])
|
||||
return credentials
|
||||
|
||||
if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
|
||||
return env_credentials
|
||||
|
||||
return None
|
||||
|
||||
def _get_azure_cli_credentials(self):
|
||||
credentials, subscription_id = get_azure_cli_credentials()
|
||||
cloud_environment = get_cli_active_cloud()
|
||||
|
||||
cli_credentials = {
|
||||
'credentials': credentials,
|
||||
'subscription_id': subscription_id,
|
||||
'cloud_environment': cloud_environment
|
||||
}
|
||||
return cli_credentials
|
||||
|
||||
def _get_msi_credentials(self, subscription_id_param=None):
|
||||
credentials = MSIAuthentication()
|
||||
subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
|
||||
try:
|
||||
# try to get the subscription in MSI to test whether MSI is enabled
|
||||
subscription_client = SubscriptionClient(credentials)
|
||||
subscription = next(subscription_client.subscriptions.list())
|
||||
subscription_id = str(subscription.subscription_id)
|
||||
return {
|
||||
'credentials': credentials,
|
||||
'subscription_id': subscription_id_param or subscription_id
|
||||
}
|
||||
except Exception as exc:
|
||||
return None
|
||||
|
||||
def _get_credentials(self, params):
|
||||
# Get authentication credentials.
|
||||
# Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
|
||||
|
||||
self.log('Getting credentials')
|
||||
|
||||
arg_credentials = dict()
|
||||
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
|
||||
arg_credentials[attribute] = getattr(params, attribute)
|
||||
|
||||
# try module params
|
||||
if arg_credentials['profile'] is not None:
|
||||
self.log('Retrieving credentials with profile parameter.')
|
||||
credentials = self._get_profile(arg_credentials['profile'])
|
||||
return credentials
|
||||
|
||||
if arg_credentials['client_id'] is not None:
|
||||
self.log('Received credentials from parameters.')
|
||||
return arg_credentials
|
||||
|
||||
if arg_credentials['ad_user'] is not None:
|
||||
self.log('Received credentials from parameters.')
|
||||
return arg_credentials
|
||||
|
||||
# try environment
|
||||
env_credentials = self._get_env_credentials()
|
||||
if env_credentials:
|
||||
self.log('Received credentials from env.')
|
||||
return env_credentials
|
||||
|
||||
# try default profile from ~./azure/credentials
|
||||
default_credentials = self._get_profile()
|
||||
if default_credentials:
|
||||
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
|
||||
return default_credentials
|
||||
|
||||
msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
|
||||
if msi_credentials:
|
||||
self.log('Retrieved credentials from MSI.')
|
||||
return msi_credentials
|
||||
|
||||
try:
|
||||
if HAS_AZURE_CLI_CORE:
|
||||
self.log('Retrieving credentials from AzureCLI profile')
|
||||
cli_credentials = self._get_azure_cli_credentials()
|
||||
return cli_credentials
|
||||
except CLIError as ce:
|
||||
self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
|
||||
|
||||
return None
|
||||
|
||||
def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
|
||||
authority_uri = authority
|
||||
|
||||
if tenant is not None:
|
||||
authority_uri = authority + '/' + tenant
|
||||
|
||||
context = AuthenticationContext(authority_uri)
|
||||
token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
|
||||
return AADTokenCredentials(token_response)
|
||||
|
||||
def _register(self, key):
|
||||
try:
|
||||
# We have to perform the one-time registration here. Otherwise, we receive an error the first
|
||||
# time we attempt to use the requested client.
|
||||
resource_client = self.rm_client
|
||||
resource_client.providers.register(key)
|
||||
except Exception as exc:
|
||||
self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
|
||||
self.log("You might need to register {0} using an admin account".format(key))
|
||||
self.log(("To register a provider using the Python CLI: "
|
||||
"https://docs.microsoft.com/azure/azure-resource-manager/"
|
||||
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
|
||||
|
||||
def get_mgmt_svc_client(self, client_type, base_url, api_version):
|
||||
client = client_type(self.azure_credentials,
|
||||
self.subscription_id,
|
||||
base_url=base_url,
|
||||
api_version=api_version)
|
||||
client.config.add_user_agent(ANSIBLE_USER_AGENT)
|
||||
return client
|
||||
|
||||
@property
|
||||
def network_client(self):
|
||||
self.log('Getting network client')
|
||||
if not self._network_client:
|
||||
self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
|
||||
self._cloud_environment.endpoints.resource_manager,
|
||||
'2017-06-01')
|
||||
self._register('Microsoft.Network')
|
||||
return self._network_client
|
||||
|
||||
@property
|
||||
def rm_client(self):
|
||||
self.log('Getting resource manager client')
|
||||
if not self._resource_client:
|
||||
self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
|
||||
self._cloud_environment.endpoints.resource_manager,
|
||||
'2017-05-10')
|
||||
return self._resource_client
|
||||
|
||||
@property
|
||||
def compute_client(self):
|
||||
self.log('Getting compute client')
|
||||
if not self._compute_client:
|
||||
self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
|
||||
self._cloud_environment.endpoints.resource_manager,
|
||||
'2017-03-30')
|
||||
self._register('Microsoft.Compute')
|
||||
return self._compute_client
|
||||
|
||||
|
||||
class AzureInventory(object):
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self._args = self._parse_cli_args()
|
||||
|
||||
try:
|
||||
rm = AzureRM(self._args)
|
||||
except Exception as e:
|
||||
sys.exit("{0}".format(str(e)))
|
||||
|
||||
self._compute_client = rm.compute_client
|
||||
self._network_client = rm.network_client
|
||||
self._resource_client = rm.rm_client
|
||||
self._security_groups = None
|
||||
|
||||
self.resource_groups = []
|
||||
self.tags = None
|
||||
self.locations = None
|
||||
self.replace_dash_in_groups = False
|
||||
self.group_by_resource_group = True
|
||||
self.group_by_location = True
|
||||
self.group_by_os_family = True
|
||||
self.group_by_security_group = True
|
||||
self.group_by_tag = True
|
||||
self.include_powerstate = True
|
||||
self.use_private_ip = False
|
||||
|
||||
self._inventory = dict(
|
||||
_meta=dict(
|
||||
hostvars=dict()
|
||||
),
|
||||
azure=[]
|
||||
)
|
||||
|
||||
self._get_settings()
|
||||
|
||||
if self._args.resource_groups:
|
||||
self.resource_groups = self._args.resource_groups.split(',')
|
||||
|
||||
if self._args.tags:
|
||||
self.tags = self._args.tags.split(',')
|
||||
|
||||
if self._args.locations:
|
||||
self.locations = self._args.locations.split(',')
|
||||
|
||||
if self._args.no_powerstate:
|
||||
self.include_powerstate = False
|
||||
|
||||
self.get_inventory()
|
||||
print(self._json_format_dict(pretty=self._args.pretty))
|
||||
sys.exit(0)
|
||||
|
||||
def _parse_cli_args(self):
|
||||
# Parse command line arguments
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Produce an Ansible Inventory file for an Azure subscription')
|
||||
parser.add_argument('--list', action='store_true', default=True,
|
||||
help='List instances (default: True)')
|
||||
parser.add_argument('--debug', action='store_true', default=False,
|
||||
help='Send debug messages to STDOUT')
|
||||
parser.add_argument('--host', action='store',
|
||||
help='Get all information about an instance')
|
||||
parser.add_argument('--pretty', action='store_true', default=False,
|
||||
help='Pretty print JSON output(default: False)')
|
||||
parser.add_argument('--profile', action='store',
|
||||
help='Azure profile contained in ~/.azure/credentials')
|
||||
parser.add_argument('--subscription_id', action='store',
|
||||
help='Azure Subscription Id')
|
||||
parser.add_argument('--client_id', action='store',
|
||||
help='Azure Client Id ')
|
||||
parser.add_argument('--secret', action='store',
|
||||
help='Azure Client Secret')
|
||||
parser.add_argument('--tenant', action='store',
|
||||
help='Azure Tenant Id')
|
||||
parser.add_argument('--ad_user', action='store',
|
||||
help='Active Directory User')
|
||||
parser.add_argument('--password', action='store',
|
||||
help='password')
|
||||
parser.add_argument('--adfs_authority_url', action='store',
|
||||
help='Azure ADFS authority url')
|
||||
parser.add_argument('--cloud_environment', action='store',
|
||||
help='Azure Cloud Environment name or metadata discovery URL')
|
||||
parser.add_argument('--resource-groups', action='store',
|
||||
help='Return inventory for comma separated list of resource group names')
|
||||
parser.add_argument('--tags', action='store',
|
||||
help='Return inventory for comma separated list of tag key:value pairs')
|
||||
parser.add_argument('--locations', action='store',
|
||||
help='Return inventory for comma separated list of locations')
|
||||
parser.add_argument('--no-powerstate', action='store_true', default=False,
|
||||
help='Do not include the power state of each virtual host')
|
||||
return parser.parse_args()
|
||||
|
||||
def get_inventory(self):
|
||||
if len(self.resource_groups) > 0:
|
||||
# get VMs for requested resource groups
|
||||
for resource_group in self.resource_groups:
|
||||
try:
|
||||
virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower())
|
||||
except Exception as exc:
|
||||
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc)))
|
||||
if self._args.host or self.tags:
|
||||
selected_machines = self._selected_machines(virtual_machines)
|
||||
self._load_machines(selected_machines)
|
||||
else:
|
||||
self._load_machines(virtual_machines)
|
||||
else:
|
||||
# get all VMs within the subscription
|
||||
try:
|
||||
virtual_machines = self._compute_client.virtual_machines.list_all()
|
||||
except Exception as exc:
|
||||
sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
|
||||
|
||||
if self._args.host or self.tags or self.locations:
|
||||
selected_machines = self._selected_machines(virtual_machines)
|
||||
self._load_machines(selected_machines)
|
||||
else:
|
||||
self._load_machines(virtual_machines)
|
||||
|
||||
def _load_machines(self, machines):
|
||||
for machine in machines:
|
||||
id_dict = azure_id_to_dict(machine.id)
|
||||
|
||||
# TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
|
||||
# fixed, we should remove the .lower(). Opened Issue
|
||||
# #574: https://github.com/Azure/azure-sdk-for-python/issues/574
|
||||
resource_group = id_dict['resourceGroups'].lower()
|
||||
|
||||
if self.group_by_security_group:
|
||||
self._get_security_groups(resource_group)
|
||||
|
||||
host_vars = dict(
|
||||
ansible_host=None,
|
||||
private_ip=None,
|
||||
private_ip_alloc_method=None,
|
||||
public_ip=None,
|
||||
public_ip_name=None,
|
||||
public_ip_id=None,
|
||||
public_ip_alloc_method=None,
|
||||
fqdn=None,
|
||||
location=machine.location,
|
||||
name=machine.name,
|
||||
type=machine.type,
|
||||
id=machine.id,
|
||||
tags=machine.tags,
|
||||
network_interface_id=None,
|
||||
network_interface=None,
|
||||
resource_group=resource_group,
|
||||
mac_address=None,
|
||||
plan=(machine.plan.name if machine.plan else None),
|
||||
virtual_machine_size=machine.hardware_profile.vm_size,
|
||||
computer_name=(machine.os_profile.computer_name if machine.os_profile else None),
|
||||
provisioning_state=machine.provisioning_state,
|
||||
)
|
||||
|
||||
host_vars['os_disk'] = dict(
|
||||
name=machine.storage_profile.os_disk.name,
|
||||
operating_system_type=machine.storage_profile.os_disk.os_type.value.lower()
|
||||
)
|
||||
|
||||
if self.include_powerstate:
|
||||
host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)
|
||||
|
||||
if machine.storage_profile.image_reference:
|
||||
host_vars['image'] = dict(
|
||||
offer=machine.storage_profile.image_reference.offer,
|
||||
publisher=machine.storage_profile.image_reference.publisher,
|
||||
sku=machine.storage_profile.image_reference.sku,
|
||||
version=machine.storage_profile.image_reference.version
|
||||
)
|
||||
|
||||
# Add windows details
|
||||
if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
|
||||
host_vars['ansible_connection'] = 'winrm'
|
||||
host_vars['windows_auto_updates_enabled'] = \
|
||||
machine.os_profile.windows_configuration.enable_automatic_updates
|
||||
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
|
||||
host_vars['windows_rm'] = None
|
||||
if machine.os_profile.windows_configuration.win_rm is not None:
|
||||
host_vars['windows_rm'] = dict(listeners=None)
|
||||
if machine.os_profile.windows_configuration.win_rm.listeners is not None:
|
||||
host_vars['windows_rm']['listeners'] = []
|
||||
for listener in machine.os_profile.windows_configuration.win_rm.listeners:
|
||||
host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol.name,
|
||||
certificate_url=listener.certificate_url))
|
||||
|
||||
for interface in machine.network_profile.network_interfaces:
|
||||
interface_reference = self._parse_ref_id(interface.id)
|
||||
network_interface = self._network_client.network_interfaces.get(
|
||||
interface_reference['resourceGroups'],
|
||||
interface_reference['networkInterfaces'])
|
||||
if network_interface.primary:
|
||||
if self.group_by_security_group and \
|
||||
self._security_groups[resource_group].get(network_interface.id, None):
|
||||
host_vars['security_group'] = \
|
||||
self._security_groups[resource_group][network_interface.id]['name']
|
||||
host_vars['security_group_id'] = \
|
||||
self._security_groups[resource_group][network_interface.id]['id']
|
||||
host_vars['network_interface'] = network_interface.name
|
||||
host_vars['network_interface_id'] = network_interface.id
|
||||
host_vars['mac_address'] = network_interface.mac_address
|
||||
for ip_config in network_interface.ip_configurations:
|
||||
host_vars['private_ip'] = ip_config.private_ip_address
|
||||
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
|
||||
if self.use_private_ip:
|
||||
host_vars['ansible_host'] = ip_config.private_ip_address
|
||||
if ip_config.public_ip_address:
|
||||
public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
|
||||
public_ip_address = self._network_client.public_ip_addresses.get(
|
||||
public_ip_reference['resourceGroups'],
|
||||
public_ip_reference['publicIPAddresses'])
|
||||
if not self.use_private_ip:
|
||||
host_vars['ansible_host'] = public_ip_address.ip_address
|
||||
host_vars['public_ip'] = public_ip_address.ip_address
|
||||
host_vars['public_ip_name'] = public_ip_address.name
|
||||
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
|
||||
host_vars['public_ip_id'] = public_ip_address.id
|
||||
if public_ip_address.dns_settings:
|
||||
host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
|
||||
|
||||
self._add_host(host_vars)
|
||||
|
||||
def _selected_machines(self, virtual_machines):
|
||||
selected_machines = []
|
||||
for machine in virtual_machines:
|
||||
if self._args.host and self._args.host == machine.name:
|
||||
selected_machines.append(machine)
|
||||
if self.tags and self._tags_match(machine.tags, self.tags):
|
||||
selected_machines.append(machine)
|
||||
if self.locations and machine.location in self.locations:
|
||||
selected_machines.append(machine)
|
||||
return selected_machines
|
||||
|
||||
def _get_security_groups(self, resource_group):
|
||||
''' For a given resource_group build a mapping of network_interface.id to security_group name '''
|
||||
if not self._security_groups:
|
||||
self._security_groups = dict()
|
||||
if not self._security_groups.get(resource_group):
|
||||
self._security_groups[resource_group] = dict()
|
||||
for group in self._network_client.network_security_groups.list(resource_group):
|
||||
if group.network_interfaces:
|
||||
for interface in group.network_interfaces:
|
||||
self._security_groups[resource_group][interface.id] = dict(
|
||||
name=group.name,
|
||||
id=group.id
|
||||
)
|
||||
|
||||
def _get_powerstate(self, resource_group, name):
|
||||
try:
|
||||
vm = self._compute_client.virtual_machines.get(resource_group,
|
||||
name,
|
||||
expand='instanceview')
|
||||
except Exception as exc:
|
||||
sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
|
||||
|
||||
return next((s.code.replace('PowerState/', '')
|
||||
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
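# For example, an instance-view status code of 'PowerState/running' yields
# 'running' here; if no PowerState status is present, None is returned.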
|
||||
|
||||
def _add_host(self, vars):
|
||||
|
||||
host_name = self._to_safe(vars['name'])
|
||||
resource_group = self._to_safe(vars['resource_group'])
|
||||
operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower())
|
||||
security_group = None
|
||||
if vars.get('security_group'):
|
||||
security_group = self._to_safe(vars['security_group'])
|
||||
|
||||
if self.group_by_os_family:
|
||||
if not self._inventory.get(operating_system_type):
|
||||
self._inventory[operating_system_type] = []
|
||||
self._inventory[operating_system_type].append(host_name)
|
||||
|
||||
if self.group_by_resource_group:
|
||||
if not self._inventory.get(resource_group):
|
||||
self._inventory[resource_group] = []
|
||||
self._inventory[resource_group].append(host_name)
|
||||
|
||||
if self.group_by_location:
|
||||
if not self._inventory.get(vars['location']):
|
||||
self._inventory[vars['location']] = []
|
||||
self._inventory[vars['location']].append(host_name)
|
||||
|
||||
if self.group_by_security_group and security_group:
|
||||
if not self._inventory.get(security_group):
|
||||
self._inventory[security_group] = []
|
||||
self._inventory[security_group].append(host_name)
|
||||
|
||||
self._inventory['_meta']['hostvars'][host_name] = vars
|
||||
self._inventory['azure'].append(host_name)
|
||||
|
||||
if self.group_by_tag and vars.get('tags'):
|
||||
for key, value in vars['tags'].items():
|
||||
safe_key = self._to_safe(key)
|
||||
safe_value = safe_key + '_' + self._to_safe(value)
|
||||
if not self._inventory.get(safe_key):
|
||||
self._inventory[safe_key] = []
|
||||
if not self._inventory.get(safe_value):
|
||||
self._inventory[safe_value] = []
|
||||
self._inventory[safe_key].append(host_name)
|
||||
self._inventory[safe_value].append(host_name)
|
||||
|
||||
def _json_format_dict(self, pretty=False):
|
||||
# convert inventory to json
|
||||
if pretty:
|
||||
return json.dumps(self._inventory, sort_keys=True, indent=2)
|
||||
else:
|
||||
return json.dumps(self._inventory)
|
||||
|
||||
def _get_settings(self):
|
||||
# Load settings from the .ini, if it exists. Otherwise,
|
||||
# look for environment values.
|
||||
file_settings = self._load_settings()
|
||||
if file_settings:
|
||||
for key in AZURE_CONFIG_SETTINGS:
|
||||
if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
|
||||
values = file_settings.get(key).split(',')
|
||||
if len(values) > 0:
|
||||
setattr(self, key, values)
|
||||
elif file_settings.get(key):
|
||||
val = self._to_boolean(file_settings[key])
|
||||
setattr(self, key, val)
|
||||
else:
|
||||
env_settings = self._get_env_settings()
|
||||
for key in AZURE_CONFIG_SETTINGS:
|
||||
if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key):
|
||||
values = env_settings.get(key).split(',')
|
||||
if len(values) > 0:
|
||||
setattr(self, key, values)
|
||||
elif env_settings.get(key, None) is not None:
|
||||
val = self._to_boolean(env_settings[key])
|
||||
setattr(self, key, val)
|
||||
|
||||
def _parse_ref_id(self, reference):
|
||||
response = {}
|
||||
keys = reference.strip('/').split('/')
|
||||
for index in range(len(keys)):
|
||||
if index < len(keys) - 1 and index % 2 == 0:
|
||||
response[keys[index]] = keys[index + 1]
|
||||
return response
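# Unlike azure_id_to_dict above, only every other segment becomes a key, e.g.
# '/subscriptions/xxx/resourceGroups/rg1/providers/Microsoft.Network/networkInterfaces/nic0'
# parses to {'subscriptions': 'xxx', 'resourceGroups': 'rg1',
#            'providers': 'Microsoft.Network', 'networkInterfaces': 'nic0'}.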
|
||||
|
||||
def _to_boolean(self, value):
|
||||
if value in ['Yes', 'yes', 1, 'True', 'true', True]:
|
||||
result = True
|
||||
elif value in ['No', 'no', 0, 'False', 'false', False]:
|
||||
result = False
|
||||
else:
|
||||
result = True
|
||||
return result
|
||||
|
||||
def _get_env_settings(self):
|
||||
env_settings = dict()
|
||||
for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
|
||||
env_settings[attribute] = os.environ.get(env_variable, None)
|
||||
return env_settings
|
||||
|
||||
def _load_settings(self):
|
||||
basename = os.path.splitext(os.path.basename(__file__))[0]
|
||||
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
|
||||
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
|
||||
config = None
|
||||
settings = None
|
||||
try:
|
||||
config = cp.ConfigParser()
|
||||
config.read(path)
|
||||
except:
|
||||
pass
|
||||
|
||||
if config is not None:
|
||||
settings = dict()
|
||||
for key in AZURE_CONFIG_SETTINGS:
|
||||
try:
|
||||
settings[key] = config.get('azure', key, raw=True)
|
||||
except:
|
||||
pass
|
||||
|
||||
return settings
|
||||
|
||||
def _tags_match(self, tag_obj, tag_args):
|
||||
'''
|
||||
Return True if the tags object from a VM contains the requested tag values.
|
||||
|
||||
:param tag_obj: Dictionary of string:string pairs
|
||||
:param tag_args: List of strings in the form key=value
|
||||
:return: boolean
|
||||
'''
|
||||
|
||||
if not tag_obj:
|
||||
return False
|
||||
|
||||
matches = 0
|
||||
for arg in tag_args:
|
||||
arg_key = arg
|
||||
arg_value = None
|
||||
if re.search(r':', arg):
|
||||
arg_key, arg_value = arg.split(':')
|
||||
if arg_value and tag_obj.get(arg_key, None) == arg_value:
|
||||
matches += 1
|
||||
elif not arg_value and tag_obj.get(arg_key, None) is not None:
|
||||
matches += 1
|
||||
if matches == len(tag_args):
|
||||
return True
|
||||
return False
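# For example, tag_args=['env:prod', 'db'] matches a VM whose tags include
# {'env': 'prod', 'db': <anything>}: every requested key (and value, when given
# as key:value) must be present for the VM to be selected.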
|
||||
|
||||
def _to_safe(self, word):
|
||||
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
|
||||
regex = r"[^A-Za-z0-9\_"
|
||||
if not self.replace_dash_in_groups:
|
||||
regex += r"\-"
|
||||
return re.sub(regex + "]", "_", word)
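# For example, _to_safe('Web Servers (QA)') returns 'Web_Servers__QA_', while the
# dash in a name like 'galaxy-qa' is preserved unless replace_dash_in_groups is set.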
|
||||
|
||||
|
||||
def main():
|
||||
if not HAS_AZURE:
|
||||
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC))
|
||||
|
||||
AzureInventory()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,40 +0,0 @@
|
||||
[cloudforms]
|
||||
|
||||
# the version of CloudForms; currently not used, but this is the version the script was tested against
|
||||
version = 4.1
|
||||
|
||||
# This should be the URL of the CloudForms server
|
||||
url = https://cfme.example.com
|
||||
|
||||
# This will more than likely need to be a local CloudForms username
|
||||
username = <set your username here>
|
||||
|
||||
# The password for said username
|
||||
password = <set your password here>
|
||||
|
||||
# True = verify SSL certificate / False = trust anything
|
||||
ssl_verify = True
|
||||
|
||||
# limit the number of vms returned per request
|
||||
limit = 100
|
||||
|
||||
# purge the CloudForms actions from hosts
|
||||
purge_actions = True
|
||||
|
||||
# Clean up group names (from tags and other groupings) so Ansible doesn't complain
|
||||
clean_group_keys = True
|
||||
|
||||
# Explode tags into nested groups / subgroups
|
||||
nest_tags = False
|
||||
|
||||
# If set, ensure host names are suffixed with this value
|
||||
# Note: This suffix *must* include the leading '.' as it is appended to the hostname as is
|
||||
# suffix = .example.org
|
||||
|
||||
# If true, will try to use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list
|
||||
prefer_ipv4 = False
|
||||
|
||||
[cache]
|
||||
|
||||
# Maximum time to trust the cache in seconds
|
||||
max_age = 600
|
@ -1,485 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# vim: set fileencoding=utf-8 :
|
||||
#
|
||||
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>
|
||||
#
|
||||
# This script is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with it. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
# This is loosely based on the foreman inventory script
|
||||
# -- Josh Preston <jpreston@redhat.com>
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
import argparse
|
||||
from ansible.module_utils.six.moves import configparser as ConfigParser
|
||||
import os
|
||||
import re
|
||||
from time import time
|
||||
import requests
|
||||
from requests.auth import HTTPBasicAuth
|
||||
import warnings
|
||||
from ansible.errors import AnsibleError
|
||||
|
||||
try:
|
||||
import json
|
||||
except ImportError:
|
||||
import simplejson as json
|
||||
|
||||
|
||||
class CloudFormsInventory(object):
|
||||
def __init__(self):
|
||||
"""
|
||||
Main execution path
|
||||
"""
|
||||
self.inventory = dict() # A list of groups and the hosts in that group
|
||||
self.hosts = dict() # Details about hosts in the inventory
|
||||
|
||||
# Parse CLI arguments
|
||||
self.parse_cli_args()
|
||||
|
||||
# Read settings
|
||||
self.read_settings()
|
||||
|
||||
# Cache
|
||||
if self.args.refresh_cache or not self.is_cache_valid():
|
||||
self.update_cache()
|
||||
else:
|
||||
self.load_inventory_from_cache()
|
||||
self.load_hosts_from_cache()
|
||||
|
||||
data_to_print = ""
|
||||
|
||||
# Data to print
|
||||
if self.args.host:
|
||||
if self.args.debug:
|
||||
print("Fetching host [%s]" % self.args.host)
|
||||
data_to_print += self.get_host_info(self.args.host)
|
||||
else:
|
||||
self.inventory['_meta'] = {'hostvars': {}}
|
||||
for hostname in self.hosts:
|
||||
self.inventory['_meta']['hostvars'][hostname] = {
|
||||
'cloudforms': self.hosts[hostname],
|
||||
}
|
||||
# include the ansible_ssh_host in the top level
|
||||
if 'ansible_ssh_host' in self.hosts[hostname]:
|
||||
self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host']
|
||||
|
||||
data_to_print += self.json_format_dict(self.inventory, self.args.pretty)
|
||||
|
||||
print(data_to_print)
|
||||
|
||||
def is_cache_valid(self):
|
||||
"""
|
||||
Determines whether the cache files have expired or are still valid
|
||||
"""
|
||||
if self.args.debug:
|
||||
print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age))
|
||||
|
||||
if os.path.isfile(self.cache_path_hosts):
|
||||
mod_time = os.path.getmtime(self.cache_path_hosts)
|
||||
current_time = time()
|
||||
if (mod_time + self.cache_max_age) > current_time:
|
||||
if os.path.isfile(self.cache_path_inventory):
|
||||
if self.args.debug:
|
||||
print("Cache is still valid!")
|
||||
return True
|
||||
|
||||
if self.args.debug:
|
||||
print("Cache is stale or does not exist.")
|
||||
|
||||
return False
|
||||
|
||||
def read_settings(self):
|
||||
"""
|
||||
Reads the settings from the cloudforms.ini file
|
||||
"""
|
||||
config = ConfigParser.SafeConfigParser()
|
||||
config_paths = [
|
||||
os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini',
|
||||
"/etc/ansible/cloudforms.ini",
|
||||
]
|
||||
|
||||
env_value = os.environ.get('CLOUDFORMS_INI_PATH')
|
||||
if env_value is not None:
|
||||
config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
|
||||
|
||||
if self.args.debug:
|
||||
for config_path in config_paths:
|
||||
print("Reading from configuration file [%s]" % config_path)
|
||||
|
||||
config.read(config_paths)
|
||||
|
||||
# CloudForms API related
|
||||
if config.has_option('cloudforms', 'url'):
|
||||
self.cloudforms_url = config.get('cloudforms', 'url')
|
||||
else:
|
||||
self.cloudforms_url = None
|
||||
|
||||
if not self.cloudforms_url:
|
||||
warnings.warn("No url specified, expected something like 'https://cfme.example.com'")
|
||||
|
||||
if config.has_option('cloudforms', 'username'):
|
||||
self.cloudforms_username = config.get('cloudforms', 'username')
|
||||
else:
|
||||
self.cloudforms_username = None
|
||||
|
||||
if not self.cloudforms_username:
|
||||
warnings.warn("No username specified, you need to specify a CloudForms username.")
|
||||
|
||||
if config.has_option('cloudforms', 'password'):
|
||||
self.cloudforms_pw = config.get('cloudforms', 'password', raw=True)
|
||||
else:
|
||||
self.cloudforms_pw = None
|
||||
|
||||
if not self.cloudforms_pw:
|
||||
warnings.warn("No password specified, you need to specify a password for the CloudForms user.")
|
||||
|
||||
if config.has_option('cloudforms', 'ssl_verify'):
|
||||
self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify')
|
||||
else:
|
||||
self.cloudforms_ssl_verify = True
|
||||
|
||||
if config.has_option('cloudforms', 'version'):
|
||||
self.cloudforms_version = config.get('cloudforms', 'version')
|
||||
else:
|
||||
self.cloudforms_version = None
|
||||
|
||||
if config.has_option('cloudforms', 'limit'):
|
||||
self.cloudforms_limit = config.getint('cloudforms', 'limit')
|
||||
else:
|
||||
self.cloudforms_limit = 100
|
||||
|
||||
if config.has_option('cloudforms', 'purge_actions'):
|
||||
self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions')
|
||||
else:
|
||||
self.cloudforms_purge_actions = True
|
||||
|
||||
if config.has_option('cloudforms', 'clean_group_keys'):
|
||||
self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys')
|
||||
else:
|
||||
self.cloudforms_clean_group_keys = True
|
||||
|
||||
if config.has_option('cloudforms', 'nest_tags'):
|
||||
self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags')
|
||||
else:
|
||||
self.cloudforms_nest_tags = False
|
||||
|
||||
if config.has_option('cloudforms', 'suffix'):
|
||||
self.cloudforms_suffix = config.get('cloudforms', 'suffix')
|
||||
if self.cloudforms_suffix[0] != '.':
|
||||
raise AnsibleError('Leading fullstop is required for Cloudforms suffix')
|
||||
else:
|
||||
self.cloudforms_suffix = None
|
||||
|
||||
if config.has_option('cloudforms', 'prefer_ipv4'):
|
||||
self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4')
|
||||
else:
|
||||
self.cloudforms_prefer_ipv4 = False
|
||||
|
||||
# Ansible related
|
||||
try:
|
||||
group_patterns = config.get('ansible', 'group_patterns')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
group_patterns = "[]"
|
||||
|
||||
self.group_patterns = eval(group_patterns)
|
||||
|
||||
# Cache related
|
||||
try:
|
||||
cache_path = os.path.expanduser(config.get('cache', 'path'))
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
cache_path = '.'
|
||||
(script, ext) = os.path.splitext(os.path.basename(__file__))
|
||||
self.cache_path_hosts = cache_path + "/%s.hosts" % script
|
||||
self.cache_path_inventory = cache_path + "/%s.inventory" % script
|
||||
self.cache_max_age = config.getint('cache', 'max_age')
|
||||
|
||||
if self.args.debug:
|
||||
print("CloudForms settings:")
|
||||
print("cloudforms_url = %s" % self.cloudforms_url)
|
||||
print("cloudforms_username = %s" % self.cloudforms_username)
|
||||
print("cloudforms_pw = %s" % self.cloudforms_pw)
|
||||
print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify)
|
||||
print("cloudforms_version = %s" % self.cloudforms_version)
|
||||
print("cloudforms_limit = %s" % self.cloudforms_limit)
|
||||
print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions)
|
||||
print("Cache settings:")
|
||||
print("cache_max_age = %s" % self.cache_max_age)
|
||||
print("cache_path_hosts = %s" % self.cache_path_hosts)
|
||||
print("cache_path_inventory = %s" % self.cache_path_inventory)
|
||||
|
||||
def parse_cli_args(self):
|
||||
"""
|
||||
Command line argument processing
|
||||
"""
|
||||
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs')
|
||||
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
|
||||
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
|
||||
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)')
|
||||
parser.add_argument('--refresh-cache', action='store_true', default=False,
|
||||
help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)')
|
||||
parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)')
|
||||
self.args = parser.parse_args()
|
||||
|
||||
def _get_json(self, url):
|
||||
"""
|
||||
Make a request and return the JSON
|
||||
"""
|
||||
results = []
|
||||
|
||||
ret = requests.get(url,
|
||||
auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw),
|
||||
verify=self.cloudforms_ssl_verify)
|
||||
|
||||
ret.raise_for_status()
|
||||
|
||||
try:
|
||||
results = json.loads(ret.text)
|
||||
except ValueError:
|
||||
warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason))
|
||||
results = {}
|
||||
|
||||
if self.args.debug:
|
||||
print("=======================================================================")
|
||||
print("=======================================================================")
|
||||
print("=======================================================================")
|
||||
print(ret.text)
|
||||
print("=======================================================================")
|
||||
print("=======================================================================")
|
||||
print("=======================================================================")
|
||||
|
||||
return results
|
||||
|
||||
def _get_hosts(self):
|
||||
"""
|
||||
Get all hosts by paging through the results
|
||||
"""
|
||||
limit = self.cloudforms_limit
|
||||
|
||||
page = 0
|
||||
last_page = False
|
||||
|
||||
results = []
|
||||
|
||||
while not last_page:
|
||||
offset = page * limit
|
||||
ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts,&attributes=ipaddresses" % (self.cloudforms_url, offset, limit))
|
||||
results += ret['resources']
|
||||
if ret['subcount'] < limit:
|
||||
last_page = True
|
||||
page += 1
|
||||
|
||||
return results
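# Paging illustration: with the default limit of 100, requests are issued with
# offset=0, 100, 200, ... until a page reports 'subcount' < limit, so a
# 230-VM inventory is fetched in pages of 100, 100 and 30.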
|
||||
|
||||
def update_cache(self):
|
||||
"""
|
||||
Make calls to cloudforms and save the output in a cache
|
||||
"""
|
||||
self.groups = dict()
|
||||
self.hosts = dict()
|
||||
|
||||
if self.args.debug:
|
||||
print("Updating cache...")
|
||||
|
||||
for host in self._get_hosts():
|
||||
if self.cloudforms_suffix is not None and not host['name'].endswith(self.cloudforms_suffix):
|
||||
host['name'] = host['name'] + self.cloudforms_suffix
|
||||
|
||||
# Ignore VMs that are not powered on
|
||||
if host['power_state'] != 'on':
|
||||
if self.args.debug:
|
||||
print("Skipping %s because power_state = %s" % (host['name'], host['power_state']))
|
||||
continue
|
||||
|
||||
# purge actions
|
||||
if self.cloudforms_purge_actions and 'actions' in host:
|
||||
del host['actions']
|
||||
|
||||
# Create ansible groups for tags
|
||||
if 'tags' in host:
|
||||
|
||||
# Create top-level group
|
||||
if 'tags' not in self.inventory:
|
||||
self.inventory['tags'] = dict(children=[], vars={}, hosts=[])
|
||||
|
||||
if not self.cloudforms_nest_tags:
|
||||
# don't expand tags, just use them in a safe way
|
||||
for group in host['tags']:
|
||||
# Add sub-group, as a child of top-level
|
||||
safe_key = self.to_safe(group['name'])
|
||||
if safe_key:
|
||||
if self.args.debug:
|
||||
print("Adding sub-group '%s' to parent 'tags'" % safe_key)
|
||||
|
||||
if safe_key not in self.inventory['tags']['children']:
|
||||
self.push(self.inventory['tags'], 'children', safe_key)
|
||||
|
||||
self.push(self.inventory, safe_key, host['name'])
|
||||
|
||||
if self.args.debug:
|
||||
print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key))
|
||||
else:
|
||||
# expand the tags into nested groups / sub-groups
|
||||
# Create nested groups for tags
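# Illustrative example (tag names are hypothetical): a CloudForms tag named
# '/department/web/prod' yields nested groups 'department' -> 'web' -> 'prod'
# under the top-level 'tags' group, and the host is added to the innermost
# group after each segment is passed through to_safe().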
|
||||
safe_parent_tag_name = 'tags'
|
||||
for tag in host['tags']:
|
||||
tag_hierarchy = tag['name'][1:].split('/')
|
||||
|
||||
if self.args.debug:
|
||||
print("Working on list %s" % tag_hierarchy)
|
||||
|
||||
for tag_name in tag_hierarchy:
|
||||
if self.args.debug:
|
||||
print("Working on tag_name = %s" % tag_name)
|
||||
|
||||
safe_tag_name = self.to_safe(tag_name)
|
||||
if self.args.debug:
|
||||
print("Using sanitized name %s" % safe_tag_name)
|
||||
|
||||
# Create sub-group
|
||||
if safe_tag_name not in self.inventory:
|
||||
self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[])
|
||||
|
||||
# Add sub-group, as a child of top-level
|
||||
if safe_parent_tag_name:
|
||||
if self.args.debug:
|
||||
print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name))
|
||||
|
||||
if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']:
|
||||
self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name)
|
||||
|
||||
# Make sure the next one uses this one as its parent
|
||||
safe_parent_tag_name = safe_tag_name
|
||||
|
||||
# Add the host to the last tag
|
||||
self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name'])
|
||||
|
||||
# Set ansible_ssh_host to the first available ip address
|
||||
if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list):
|
||||
# If no preference for IPv4, just use the first entry
|
||||
if not self.cloudforms_prefer_ipv4:
|
||||
host['ansible_ssh_host'] = host['ipaddresses'][0]
|
||||
else:
|
||||
# Before we search for an IPv4 address, set using the first entry in case we don't find any
|
||||
host['ansible_ssh_host'] = host['ipaddresses'][0]
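# Note: any address containing a '.' counts as IPv4 here, so the last
# dotted address in the list ends up as ansible_ssh_host.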
|
||||
for currenthost in host['ipaddresses']:
|
||||
if '.' in currenthost:
|
||||
host['ansible_ssh_host'] = currenthost
|
||||
|
||||
# Create additional groups
|
||||
for key in ('location', 'type', 'vendor'):
|
||||
safe_key = self.to_safe(host[key])
|
||||
|
||||
# Create top-level group
|
||||
if key not in self.inventory:
|
||||
self.inventory[key] = dict(children=[], vars={}, hosts=[])
|
||||
|
||||
# Create sub-group
|
||||
if safe_key not in self.inventory:
|
||||
self.inventory[safe_key] = dict(children=[], vars={}, hosts=[])
|
||||
|
||||
# Add sub-group, as a child of top-level
|
||||
if safe_key not in self.inventory[key]['children']:
|
||||
self.push(self.inventory[key], 'children', safe_key)
|
||||
|
||||
if key in host:
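# Note: host[key] was already read above when building safe_key, so this
# membership check is effectively always true at this point.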
|
||||
# Add host to sub-group
|
||||
self.push(self.inventory[safe_key], 'hosts', host['name'])
|
||||
|
||||
self.hosts[host['name']] = host
|
||||
self.push(self.inventory, 'all', host['name'])
|
||||
|
||||
if self.args.debug:
|
||||
print("Saving cached data")
|
||||
|
||||
self.write_to_cache(self.hosts, self.cache_path_hosts)
|
||||
self.write_to_cache(self.inventory, self.cache_path_inventory)
|
||||
|
||||
def get_host_info(self, host):
|
||||
"""
|
||||
Get variables about a specific host
|
||||
"""
|
||||
if not self.hosts or len(self.hosts) == 0:
|
||||
# Need to load hosts from cache
|
||||
self.load_hosts_from_cache()
|
||||
|
||||
if host not in self.hosts:
|
||||
if self.args.debug:
|
||||
print("[%s] not found in cache." % host)
|
||||
|
||||
# try updating the cache
|
||||
self.update_cache()
|
||||
|
||||
if host not in self.hosts:
|
||||
if self.args.debug:
|
||||
print("[%s] does not exist after cache update." % host)
|
||||
# host might not exist anymore
|
||||
return self.json_format_dict({}, self.args.pretty)
|
||||
|
||||
return self.json_format_dict(self.hosts[host], self.args.pretty)
|
||||
|
||||
def push(self, d, k, v):
|
||||
"""
|
||||
Safely puts a new entry onto an array.
|
||||
"""
|
||||
if k in d:
|
||||
d[k].append(v)
|
||||
else:
|
||||
d[k] = [v]
|
||||
|
||||
def load_inventory_from_cache(self):
|
||||
"""
|
||||
Reads the inventory from the cache file and sets self.inventory
|
||||
"""
|
||||
cache = open(self.cache_path_inventory, 'r')
|
||||
json_inventory = cache.read()
|
||||
self.inventory = json.loads(json_inventory)
|
||||
|
||||
def load_hosts_from_cache(self):
|
||||
"""
|
||||
Reads the hosts from the cache file and sets self.hosts
|
||||
"""
|
||||
cache = open(self.cache_path_hosts, 'r')
|
||||
json_cache = cache.read()
|
||||
self.hosts = json.loads(json_cache)
|
||||
|
||||
def write_to_cache(self, data, filename):
|
||||
"""
|
||||
Writes data in JSON format to a file
|
||||
"""
|
||||
json_data = self.json_format_dict(data, True)
|
||||
cache = open(filename, 'w')
|
||||
cache.write(json_data)
|
||||
cache.close()
|
||||
|
||||
def to_safe(self, word):
|
||||
"""
|
||||
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
|
||||
"""
|
||||
if self.cloudforms_clean_group_keys:
|
||||
regex = r"[^A-Za-z0-9\_]"
|
||||
return re.sub(regex, "_", word.replace(" ", ""))
|
||||
else:
|
||||
return word
|
||||
|
||||
def json_format_dict(self, data, pretty=False):
|
||||
"""
|
||||
Converts a dict to a JSON object and dumps it as a formatted string
|
||||
"""
|
||||
if pretty:
|
||||
return json.dumps(data, sort_keys=True, indent=2)
|
||||
else:
|
||||
return json.dumps(data)
|
||||
|
||||
CloudFormsInventory()
|
@ -1,219 +0,0 @@
|
||||
# Ansible EC2 external inventory script settings
|
||||
#
|
||||
|
||||
[ec2]
|
||||
|
||||
# to talk to a private eucalyptus instance uncomment these lines
|
||||
# and edit eucalyptus_host to be the host name of your cloud controller
|
||||
#eucalyptus = True
|
||||
#eucalyptus_host = clc.cloud.domain.org
|
||||
|
||||
# AWS regions to make calls to. Set this to 'all' to make request to all regions
|
||||
# in AWS and merge the results together. Alternatively, set this to a comma
|
||||
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not
|
||||
# provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or
|
||||
# AWS_DEFAULT_REGION environment variable will be read to determine the region.
|
||||
regions = all
|
||||
regions_exclude = us-gov-west-1, cn-north-1
|
||||
|
||||
# When generating inventory, Ansible needs to know how to address a server.
|
||||
# Each EC2 instance has a lot of variables associated with it. Here is the list:
|
||||
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
|
||||
# Below are 2 variables that are used as the address of a server:
|
||||
# - destination_variable
|
||||
# - vpc_destination_variable
|
||||
|
||||
# This is the normal destination variable to use. If you are running Ansible
|
||||
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
|
||||
# running Ansible from within EC2, then perhaps you want to use the internal
|
||||
# address, and should set this to 'private_dns_name'. The key of an EC2 tag
|
||||
# may optionally be used; however the boto instance variables hold precedence
|
||||
# in the event of a collision.
|
||||
destination_variable = public_dns_name
|
||||
|
||||
# This allows you to override the inventory_name with an ec2 variable, instead
|
||||
# of using the destination_variable above. Addressing (aka ansible_ssh_host)
|
||||
# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
|
||||
#hostname_variable = tag_Name
|
||||
|
||||
# For server inside a VPC, using DNS names may not make sense. When an instance
|
||||
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
|
||||
# this to 'ip_address' will return the public IP address. For instances in a
|
||||
# private subnet, this should be set to 'private_ip_address', and Ansible must
|
||||
# be run from within EC2. The key of an EC2 tag may optionally be used; however
|
||||
# the boto instance variables hold precedence in the event of a collision.
|
||||
# WARNING: instances that are in a private VPC _without_ a public IP address
|
||||
# will not be listed in the inventory until you set:
|
||||
# vpc_destination_variable = private_ip_address
|
||||
vpc_destination_variable = ip_address
|
||||
|
||||
# The following two settings allow flexible ansible host naming based on a
|
||||
# python format string and a comma-separated list of ec2 tags. Note that:
|
||||
#
|
||||
# 1) If the tags referenced are not present for some instances, empty strings
|
||||
# will be substituted in the format string.
|
||||
# 2) This overrides both destination_variable and vpc_destination_variable.
|
||||
#
|
||||
#destination_format = {0}.{1}.example.com
|
||||
#destination_format_tags = Name,environment
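# Illustrative example (values are hypothetical): with
#   destination_format = {0}-{1}.example.com
#   destination_format_tags = Name,environment
# an instance tagged Name=web01 and environment=prod would appear in the
# inventory as web01-prod.example.com; a missing tag is substituted with an
# empty string.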
|
||||
|
||||
# To tag instances on EC2 with the resource records that point to them from
|
||||
# Route53, set 'route53' to True.
|
||||
route53 = False
|
||||
|
||||
# To use Route53 records as the inventory hostnames, uncomment and set
|
||||
# to equal the domain name you wish to use. You must also have 'route53' (above)
|
||||
# set to True.
|
||||
# route53_hostnames = .example.com
|
||||
|
||||
# To exclude RDS instances from the inventory, uncomment and set to False.
|
||||
#rds = False
|
||||
|
||||
# To exclude ElastiCache instances from the inventory, uncomment and set to False.
|
||||
#elasticache = False
|
||||
|
||||
# Additionally, you can specify the list of zones to exclude looking up in
|
||||
# 'route53_excluded_zones' as a comma-separated list.
|
||||
# route53_excluded_zones = samplezone1.com, samplezone2.com
|
||||
|
||||
# By default, only EC2 instances in the 'running' state are returned. Set
|
||||
# 'all_instances' to True to return all instances regardless of state.
|
||||
all_instances = False
|
||||
|
||||
# By default, only EC2 instances in the 'running' state are returned. Specify
|
||||
# EC2 instance states to return as a comma-separated list. This
|
||||
# option is overridden when 'all_instances' is True.
|
||||
# instance_states = pending, running, shutting-down, terminated, stopping, stopped
|
||||
|
||||
# By default, only RDS instances in the 'available' state are returned. Set
|
||||
# 'all_rds_instances' to True to return all RDS instances regardless of state.
|
||||
all_rds_instances = False
|
||||
|
||||
# Include RDS cluster information (Aurora etc.)
|
||||
include_rds_clusters = False
|
||||
|
||||
# By default, only ElastiCache clusters and nodes in the 'available' state
|
||||
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
|
||||
# to True to return all ElastiCache clusters and nodes, regardless of state.
|
||||
#
|
||||
# Note that all_elasticache_nodes only applies to listed clusters. That means
|
||||
# if you set all_elasticache_clusters to false, no nodes will be returned from
|
||||
# unavailable clusters, regardless of their state and of what you set for
|
||||
# all_elasticache_nodes.
|
||||
all_elasticache_replication_groups = False
|
||||
all_elasticache_clusters = False
|
||||
all_elasticache_nodes = False
|
||||
|
||||
# API calls to EC2 are slow. For this reason, we cache the results of an API
|
||||
# call. Set this to the path you want cache files to be written to. Two files
|
||||
# will be written to this directory:
|
||||
# - ansible-ec2.cache
|
||||
# - ansible-ec2.index
|
||||
cache_path = ~/.ansible/tmp
|
||||
|
||||
# The number of seconds a cache file is considered valid. After this many
|
||||
# seconds, a new API call will be made, and the cache file will be updated.
|
||||
# To disable the cache, set this value to 0
|
||||
cache_max_age = 300
|
||||
|
||||
# Organize groups into a nested/hierarchy instead of a flat namespace.
|
||||
nested_groups = False
|
||||
|
||||
# Replace - tags when creating groups to avoid issues with ansible
|
||||
replace_dash_in_groups = True
|
||||
|
||||
# If set to true, any tag of the form "a,b,c" is expanded into a list
|
||||
# and the results are used to create additional tag_* inventory groups.
|
||||
expand_csv_tags = False
|
||||
|
||||
# The EC2 inventory output can become very large. To manage its size,
|
||||
# configure which groups should be created.
|
||||
group_by_instance_id = True
|
||||
group_by_region = True
|
||||
group_by_availability_zone = True
|
||||
group_by_aws_account = False
|
||||
group_by_ami_id = True
|
||||
group_by_instance_type = True
|
||||
group_by_instance_state = False
|
||||
group_by_platform = True
|
||||
group_by_key_pair = True
|
||||
group_by_vpc_id = True
|
||||
group_by_security_group = True
|
||||
group_by_tag_keys = True
|
||||
group_by_tag_none = True
|
||||
group_by_route53_names = True
|
||||
group_by_rds_engine = True
|
||||
group_by_rds_parameter_group = True
|
||||
group_by_elasticache_engine = True
|
||||
group_by_elasticache_cluster = True
|
||||
group_by_elasticache_parameter_group = True
|
||||
group_by_elasticache_replication_group = True
|
||||
|
||||
# If you only want to include hosts that match a certain regular expression
|
||||
# pattern_include = staging-*
|
||||
|
||||
# If you want to exclude any hosts that match a certain regular expression
|
||||
# pattern_exclude = staging-*
|
||||
|
||||
# Instance filters can be used to control which instances are retrieved for
|
||||
# inventory. For the full list of possible filters, please read the EC2 API
|
||||
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
|
||||
# Filters are key/value pairs separated by '=', to list multiple filters use
|
||||
# a list separated by commas. To "AND" criteria together, use "&". Note that
|
||||
# the "AND" is not useful along with stack_filters and so such usage is not allowed.
|
||||
# See examples below.
|
||||
|
||||
# If you want to apply multiple filters simultaneously, set stack_filters to
|
||||
# True. Default behaviour is to combine the results of all filters. Stacking
|
||||
# allows the use of multiple conditions to filter down, for example by
|
||||
# environment and type of host.
|
||||
stack_filters = False
|
||||
|
||||
# Retrieve only instances with (key=value) env=staging tag
|
||||
# instance_filters = tag:env=staging
|
||||
|
||||
# Retrieve only instances with role=webservers OR role=dbservers tag
|
||||
# instance_filters = tag:role=webservers,tag:role=dbservers
|
||||
|
||||
# Retrieve only t1.micro instances OR instances with tag env=staging
|
||||
# instance_filters = instance-type=t1.micro,tag:env=staging
|
||||
|
||||
# You can use wildcards in filter values also. Below will list instances which
|
||||
# tag Name value matches webservers1*
|
||||
# (ex. webservers15, webservers1a, webservers123 etc)
|
||||
# instance_filters = tag:Name=webservers1*
|
||||
|
||||
# Retrieve only instances of type t1.micro that also have tag env=stage
|
||||
# instance_filters = instance-type=t1.micro&tag:env=stage
|
||||
|
||||
# Retrieve instances of type t1.micro AND tag env=stage, as well as any instance
|
||||
# that are of type m3.large, regardless of env tag
|
||||
# instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large
|
||||
|
||||
# An IAM role can be assumed, so all requests are run as that role.
|
||||
# This can be useful for connecting across different accounts, or to limit user
|
||||
# access
|
||||
# iam_role = role-arn
|
||||
|
||||
# A boto configuration profile may be used to separate out credentials
|
||||
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
|
||||
# boto_profile = some-boto-profile-name
|
||||
|
||||
|
||||
[credentials]
|
||||
|
||||
# The AWS credentials can optionally be specified here. Credentials specified
|
||||
# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
|
||||
# AWS_PROFILE is set, or if the boto_profile property above is set.
|
||||
#
|
||||
# Supplying AWS credentials here is not recommended, as it introduces
|
||||
# non-trivial security concerns. When going down this route, please make sure
|
||||
# to set access permissions for this file correctly, e.g. handle it the same
|
||||
# way as you would a private SSH key.
|
||||
#
|
||||
# Unlike the boto and AWS configure files, this section does not support
|
||||
# profiles.
|
||||
#
|
||||
# aws_access_key_id = AXXXXXXXXXXXXXX
|
||||
# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
|
||||
# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
File diff suppressed because it is too large
@ -1,199 +0,0 @@
|
||||
# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory)
|
||||
#
|
||||
# This script can be used as an Ansible dynamic inventory.
|
||||
# The connection parameters are set up via *foreman.ini*
|
||||
# This is how the script finds the configuration file, in
|
||||
# order of discovery.
|
||||
#
|
||||
# * `/etc/ansible/foreman.ini`
|
||||
# * Current directory of your inventory script.
|
||||
# * `FOREMAN_INI_PATH` environment variable.
|
||||
#
|
||||
# ## Variables and Parameters
|
||||
#
|
||||
# The data returned from Foreman for each host is stored in a foreman
|
||||
# hash so they're available as *host_vars* along with the parameters
|
||||
# of the host and its hostgroups:
|
||||
#
|
||||
# "foo.example.com": {
|
||||
# "foreman": {
|
||||
# "architecture_id": 1,
|
||||
# "architecture_name": "x86_64",
|
||||
# "build": false,
|
||||
# "build_status": 0,
|
||||
# "build_status_label": "Installed",
|
||||
# "capabilities": [
|
||||
# "build",
|
||||
# "image"
|
||||
# ],
|
||||
# "compute_profile_id": 4,
|
||||
# "hostgroup_name": "webtier/myapp",
|
||||
# "id": 70,
|
||||
# "image_name": "debian8.1",
|
||||
# ...
|
||||
# "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77"
|
||||
# },
|
||||
# "foreman_params": {
|
||||
# "testparam1": "foobar",
|
||||
# "testparam2": "small",
|
||||
# ...
|
||||
# }
|
||||
#
|
||||
# and could therefore be used in Ansible like:
|
||||
#
|
||||
# - debug: msg="From Foreman host {{ foreman['uuid'] }}"
|
||||
#
|
||||
# Which yields
|
||||
#
|
||||
# TASK [test_foreman : debug] ****************************************************
|
||||
# ok: [foo.example.com] => {
|
||||
# "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf"
|
||||
# }
|
||||
#
|
||||
# ## Automatic Ansible groups
|
||||
#
|
||||
# The inventory will provide a set of groups, by default prefixed by
|
||||
# 'foreman_'. If you want to customize this prefix, change the
|
||||
# group_prefix option in /etc/ansible/foreman.ini. The rest of this
|
||||
# guide will assume the default prefix of 'foreman'
|
||||
#
|
||||
# The hostgroup, location, organization, content view, and lifecycle
|
||||
# environment of each host are created as Ansible groups with a
|
||||
# foreman_<grouptype> prefix, all lowercase and problematic parameters
|
||||
# removed. So e.g. the foreman hostgroup
|
||||
#
|
||||
# myapp / webtier / datacenter1
|
||||
#
|
||||
# would turn into the Ansible group:
|
||||
#
|
||||
# foreman_hostgroup_myapp_webtier_datacenter1
|
||||
#
|
||||
# If the parameter want_hostcollections is set to true, the
|
||||
# collections each host is in are created as Ansible groups with a
|
||||
# foreman_hostcollection prefix, all lowercase and problematic
|
||||
# parameters removed. So e.g. the Foreman host collection
|
||||
#
|
||||
# Patch Window Thursday
|
||||
#
|
||||
# would turn into the Ansible group:
|
||||
#
|
||||
# foreman_hostcollection_patchwindowthursday
|
||||
#
|
||||
# If the parameter host_filters is set, it will be used as the
|
||||
# "search" parameter for the /api/v2/hosts call. This can be used to
|
||||
# restrict the list of returned hosts, as shown below.
|
||||
#
|
||||
# Furthermore Ansible groups can be created on the fly using the
|
||||
# *group_patterns* variable in *foreman.ini* so that you can build up
|
||||
# hierarchies using parameters on the hostgroup and host variables.
|
||||
#
|
||||
# Let's assume you have a host that is built using this nested hostgroup:
|
||||
#
|
||||
# myapp / webtier / datacenter1
|
||||
#
|
||||
# and each of the hostgroups defines a parameter, respectively:
|
||||
#
|
||||
# myapp: app_param = myapp
|
||||
# webtier: tier_param = webtier
|
||||
# datacenter1: dc_param = datacenter1
|
||||
#
|
||||
# If the host is also in a subnet called "mysubnet" and provisioned via an image,
|
||||
# then *group_patterns* like:
|
||||
#
|
||||
# [ansible]
|
||||
# group_patterns = ["{app_param}-{tier_param}-{dc_param}",
|
||||
# "{app_param}-{tier_param}",
|
||||
# "{app_param}",
|
||||
# "{subnet_name}-{provision_method}"]
|
||||
#
|
||||
# would put the host into the additional Ansible groups:
|
||||
#
|
||||
# - myapp-webtier-datacenter1
|
||||
# - myapp-webtier
|
||||
# - myapp
|
||||
# - mysubnet-image
|
||||
#
|
||||
# by recursively resolving the hostgroups, getting the parameter keys
|
||||
# and values and doing a Python *string.format()* like replacement on
|
||||
# it.
|
||||
#
|
||||
[foreman]
|
||||
url = http://localhost:3000/
|
||||
user = foreman
|
||||
password = secret
|
||||
ssl_verify = True
|
||||
|
||||
# Foreman 1.24 introduces a new reports API to improve performance of the inventory script.
|
||||
# Note: This requires the foreman_ansible plugin to be installed.
|
||||
# Set to False if you want to use the old API. Defaults to True.
|
||||
|
||||
use_reports_api = True
|
||||
|
||||
# Retrieve only hosts from the organization "Web Engineering".
|
||||
# host_filters = organization="Web Engineering"
|
||||
|
||||
# Retrieve only hosts from the organization "Web Engineering" that are
|
||||
# also in the host collection "Apache Servers".
|
||||
# host_filters = organization="Web Engineering" and host_collection="Apache Servers"
|
||||
|
||||
|
||||
# Foreman Inventory report related configuration options.
|
||||
# Configs that default to True :
|
||||
# want_organization, want_location, want_ipv4, want_host_group, want_subnet, want_smart_proxies, want_facts
|
||||
# Configs that default to False :
|
||||
# want_ipv6, want_subnet_v6, want_content_facet_attributes, want_host_params
|
||||
|
||||
[report]
|
||||
want_organization = True
|
||||
want_location = True
|
||||
want_ipv4 = True
|
||||
want_ipv6 = False
|
||||
want_host_group = True
|
||||
want_subnet = True
|
||||
want_subnet_v6 = False
|
||||
want_smart_proxies = True
|
||||
want_content_facet_attributes = False
|
||||
want_host_params = False
|
||||
|
||||
# use this config to determine if facts are to be fetched in the report and stored on the hosts.
|
||||
# want_facts = False
|
||||
|
||||
# Upon receiving a request to return an inventory report, Foreman schedules a report generation job.
|
||||
# The script then polls the report_data endpoint repeatedly to check if the job is complete and retrieves data
|
||||
# poll_interval defines the interval between two consecutive calls to the report_data endpoint while polling.
|
||||
# Defaults to 10 seconds
|
||||
|
||||
poll_interval = 10
|
||||
|
||||
[ansible]
|
||||
group_patterns = ["{app}-{tier}-{color}",
|
||||
"{app}-{color}",
|
||||
"{app}",
|
||||
"{tier}"]
|
||||
|
||||
group_prefix = foreman_
|
||||
|
||||
# Whether to create Ansible groups for host collections. Only tested
|
||||
# with Katello (Red Hat Satellite). Disabled by default to not break
|
||||
# the script for stand-alone Foreman.
|
||||
want_hostcollections = False
|
||||
|
||||
# Whether to interpret global parameter values as JSON (if possible, else
|
||||
# take as is). Only tested with Katello (Red Hat Satellite).
|
||||
# This allows you to define lists and dictionaries (and more complicated structures)
|
||||
# as variables by entering them as JSON strings in Foreman parameters.
|
||||
# Disabled by default, as the change would otherwise not be backward compatible.
|
||||
rich_params = False
|
||||
|
||||
# Whether to populate the ansible_ssh_host variable to explicitly specify the
|
||||
# connection target. Only tested with Katello (Red Hat Satellite).
|
||||
# If the foreman 'ip' fact exists then the ansible_ssh_host variable is populated
|
||||
# to permit connections where DNS resolution fails.
|
||||
want_ansible_ssh_host = False
|
||||
|
||||
[cache]
|
||||
path = .
|
||||
max_age = 60
|
||||
|
||||
# Whether to scan foreman to add recently created hosts in inventory cache
|
||||
scan_new_hosts = True
|
@ -1,667 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# vim: set fileencoding=utf-8 :
|
||||
#
|
||||
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>,
|
||||
# Daniel Lobato Garcia <dlobatog@redhat.com>
|
||||
#
|
||||
# This script is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with it. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
# This is somewhat based on cobbler inventory
|
||||
|
||||
# Stdlib imports
|
||||
# __future__ imports must occur at the beginning of file
|
||||
from __future__ import print_function
|
||||
try:
|
||||
# Python 2 version
|
||||
import ConfigParser
|
||||
except ImportError:
|
||||
# Python 3 version
|
||||
import configparser as ConfigParser
|
||||
import json
|
||||
import argparse
|
||||
import copy
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from time import time, sleep
|
||||
from collections import defaultdict
|
||||
from distutils.version import LooseVersion, StrictVersion
|
||||
|
||||
# 3rd party imports
|
||||
import requests
|
||||
if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
|
||||
print('This script requires python-requests 1.1 as a minimum version')
|
||||
sys.exit(1)
|
||||
|
||||
from requests.auth import HTTPBasicAuth
|
||||
|
||||
from ansible.module_utils._text import to_text
|
||||
|
||||
|
||||
def json_format_dict(data, pretty=False):
|
||||
"""Converts a dict to a JSON object and dumps it as a formatted string"""
|
||||
|
||||
if pretty:
|
||||
return json.dumps(data, sort_keys=True, indent=2)
|
||||
else:
|
||||
return json.dumps(data)
|
||||
|
||||
|
||||
class ForemanInventory(object):
|
||||
|
||||
def __init__(self):
|
||||
self.inventory = defaultdict(list) # A list of groups and the hosts in that group
|
||||
self.cache = dict() # Details about hosts in the inventory
|
||||
self.params = dict() # Params of each host
|
||||
self.facts = dict() # Facts of each host
|
||||
self.hostgroups = dict() # host groups
|
||||
self.hostcollections = dict() # host collections
|
||||
self.session = None # Requests session
|
||||
self.config_paths = [
|
||||
"/etc/ansible/foreman.ini",
|
||||
os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini',
|
||||
]
|
||||
env_value = os.environ.get('FOREMAN_INI_PATH')
|
||||
if env_value is not None:
|
||||
self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
|
||||
|
||||
def read_settings(self):
|
||||
"""Reads the settings from the foreman.ini file"""
|
||||
|
||||
config = ConfigParser.SafeConfigParser()
|
||||
config.read(self.config_paths)
|
||||
|
||||
# Foreman API related
|
||||
try:
|
||||
self.foreman_url = config.get('foreman', 'url')
|
||||
self.foreman_user = config.get('foreman', 'user')
|
||||
self.foreman_pw = config.get('foreman', 'password', raw=True)
|
||||
self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
|
||||
print("Error parsing configuration: %s" % e, file=sys.stderr)
|
||||
return False
|
||||
|
||||
# Inventory Report Related
|
||||
try:
|
||||
self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.foreman_use_reports_api = True
|
||||
|
||||
try:
|
||||
self.want_organization = config.getboolean('report', 'want_organization')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_organization = True
|
||||
|
||||
try:
|
||||
self.want_location = config.getboolean('report', 'want_location')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_location = True
|
||||
|
||||
try:
|
||||
self.want_IPv4 = config.getboolean('report', 'want_ipv4')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_IPv4 = True
|
||||
|
||||
try:
|
||||
self.want_IPv6 = config.getboolean('report', 'want_ipv6')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_IPv6 = False
|
||||
|
||||
try:
|
||||
self.want_host_group = config.getboolean('report', 'want_host_group')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_host_group = True
|
||||
|
||||
try:
|
||||
self.want_host_params = config.getboolean('report', 'want_host_params')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_host_params = False
|
||||
|
||||
try:
|
||||
self.want_subnet = config.getboolean('report', 'want_subnet')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_subnet = True
|
||||
|
||||
try:
|
||||
self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_subnet_v6 = False
|
||||
|
||||
try:
|
||||
self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_smart_proxies = True
|
||||
|
||||
try:
|
||||
self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_content_facet_attributes = False
|
||||
|
||||
try:
|
||||
self.report_want_facts = config.getboolean('report', 'want_facts')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.report_want_facts = True
|
||||
|
||||
try:
|
||||
self.poll_interval = config.getint('report', 'poll_interval')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.poll_interval = 10
|
||||
|
||||
# Ansible related
|
||||
try:
|
||||
group_patterns = config.get('ansible', 'group_patterns')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
group_patterns = "[]"
|
||||
|
||||
self.group_patterns = json.loads(group_patterns)
|
||||
|
||||
try:
|
||||
self.group_prefix = config.get('ansible', 'group_prefix')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.group_prefix = "foreman_"
|
||||
|
||||
try:
|
||||
self.want_facts = config.getboolean('ansible', 'want_facts')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_facts = True
|
||||
|
||||
self.want_facts = self.want_facts and self.report_want_facts
|
||||
|
||||
try:
|
||||
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_hostcollections = False
|
||||
|
||||
try:
|
||||
self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.want_ansible_ssh_host = False
|
||||
|
||||
# Do we want parameters to be interpreted if possible as JSON? (no by default)
|
||||
try:
|
||||
self.rich_params = config.getboolean('ansible', 'rich_params')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.rich_params = False
|
||||
|
||||
try:
|
||||
self.host_filters = config.get('foreman', 'host_filters')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.host_filters = None
|
||||
|
||||
# Cache related
|
||||
try:
|
||||
cache_path = os.path.expanduser(config.get('cache', 'path'))
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
cache_path = '.'
|
||||
(script, ext) = os.path.splitext(os.path.basename(__file__))
|
||||
self.cache_path_cache = cache_path + "/%s.cache" % script
|
||||
self.cache_path_inventory = cache_path + "/%s.index" % script
|
||||
self.cache_path_params = cache_path + "/%s.params" % script
|
||||
self.cache_path_facts = cache_path + "/%s.facts" % script
|
||||
self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
|
||||
try:
|
||||
self.cache_max_age = config.getint('cache', 'max_age')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.cache_max_age = 60
|
||||
try:
|
||||
self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
|
||||
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
|
||||
self.scan_new_hosts = False
|
||||
|
||||
return True
|
||||
|
||||
def parse_cli_args(self):
|
||||
"""Command line argument processing"""
|
||||
|
||||
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman')
|
||||
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
|
||||
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
|
||||
parser.add_argument('--refresh-cache', action='store_true', default=False,
|
||||
help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
|
||||
self.args = parser.parse_args()
|
||||
|
||||
def _get_session(self):
|
||||
if not self.session:
|
||||
self.session = requests.session()
|
||||
self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw)
|
||||
self.session.verify = self.foreman_ssl_verify
|
||||
return self.session
|
||||
|
||||
def _get_json(self, url, ignore_errors=None, params=None):
|
||||
if params is None:
|
||||
params = {}
|
||||
params['per_page'] = 250
|
||||
|
||||
page = 1
|
||||
results = []
|
||||
s = self._get_session()
|
||||
while True:
|
||||
params['page'] = page
|
||||
ret = s.get(url, params=params)
|
||||
if ignore_errors and ret.status_code in ignore_errors:
|
||||
break
|
||||
ret.raise_for_status()
|
||||
json = ret.json()
|
||||
# /hosts/:id has no results key
|
||||
if 'results' not in json:
|
||||
return json
|
||||
# Facts are returned as a dict in results, not a list
|
||||
if isinstance(json['results'], dict):
|
||||
return json['results']
|
||||
# The list of all hosts is returned paginated
|
||||
results = results + json['results']
|
||||
if len(results) >= json['subtotal']:
|
||||
break
|
||||
page += 1
|
||||
if len(json['results']) == 0:
|
||||
print("Did not make any progress during loop. "
|
||||
"expected %d got %d" % (json['total'], len(results)),
|
||||
file=sys.stderr)
|
||||
break
|
||||
return results
|
||||
|
||||
def _use_inventory_report(self):
|
||||
if not self.foreman_use_reports_api:
|
||||
return False
|
||||
status_url = "%s/api/v2/status" % self.foreman_url
|
||||
result = self._get_json(status_url)
|
||||
foreman_version = (LooseVersion(result.get('version')) >= LooseVersion('1.24.0'))
|
||||
return foreman_version
|
||||
|
||||
def _fetch_params(self):
|
||||
options, params = ("no", "yes"), dict()
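# Indexing the ("no", "yes") tuple with a boolean maps False -> "no" and
# True -> "yes"; these are the values sent as report input parameters below.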
|
||||
params["Organization"] = options[self.want_organization]
|
||||
params["Location"] = options[self.want_location]
|
||||
params["IPv4"] = options[self.want_IPv4]
|
||||
params["IPv6"] = options[self.want_IPv6]
|
||||
params["Facts"] = options[self.want_facts]
|
||||
params["Host Group"] = options[self.want_host_group]
|
||||
params["Host Collections"] = options[self.want_hostcollections]
|
||||
params["Subnet"] = options[self.want_subnet]
|
||||
params["Subnet v6"] = options[self.want_subnet_v6]
|
||||
params["Smart Proxies"] = options[self.want_smart_proxies]
|
||||
params["Content Attributes"] = options[self.want_content_facet_attributes]
|
||||
params["Host Parameters"] = options[self.want_host_params]
|
||||
if self.host_filters:
|
||||
params["Hosts"] = self.host_filters
|
||||
return params
|
||||
|
||||
def _post_request(self):
|
||||
url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url
|
||||
session = self._get_session()
|
||||
params = {'input_values': self._fetch_params()}
|
||||
ret = session.post(url, json=params)
|
||||
if not ret:
|
||||
raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!")
|
||||
url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url'))
|
||||
response = session.get(url)
|
||||
while response:
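# A requests.Response is truthy for status codes below 400; HTTP 204 here
# means the report is not ready yet, so sleep and poll the data_url again.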
|
||||
if response.status_code != 204:
|
||||
break
|
||||
else:
|
||||
sleep(self.poll_interval)
|
||||
response = session.get(url)
|
||||
if not response:
|
||||
raise Exception("Error receiving inventory report from foreman. Please check foreman logs!")
|
||||
else:
|
||||
return response.json()
|
||||
|
||||
def _get_hosts(self):
|
||||
url = "%s/api/v2/hosts" % self.foreman_url
|
||||
|
||||
params = {}
|
||||
if self.host_filters:
|
||||
params['search'] = self.host_filters
|
||||
|
||||
return self._get_json(url, params=params)
|
||||
|
||||
def _get_host_data_by_id(self, hid):
|
||||
url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
|
||||
return self._get_json(url)
|
||||
|
||||
def _get_facts_by_id(self, hid):
|
||||
url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
|
||||
return self._get_json(url)
|
||||
|
||||
def _resolve_params(self, host_params):
|
||||
"""Convert host params to dict"""
|
||||
params = {}
|
||||
|
||||
for param in host_params:
|
||||
name = param['name']
|
||||
if self.rich_params:
|
||||
try:
|
||||
params[name] = json.loads(param['value'])
|
||||
except ValueError:
|
||||
params[name] = param['value']
|
||||
else:
|
||||
params[name] = param['value']
|
||||
|
||||
return params
|
||||
|
||||
def _get_facts(self, host):
|
||||
"""Fetch all host facts of the host"""
|
||||
if not self.want_facts:
|
||||
return {}
|
||||
|
||||
ret = self._get_facts_by_id(host['id'])
|
||||
if len(ret.values()) == 0:
|
||||
facts = {}
|
||||
elif len(ret.values()) == 1:
|
||||
facts = list(ret.values())[0]
|
||||
else:
|
||||
raise ValueError("More than one set of facts returned for '%s'" % host)
|
||||
return facts
|
||||
|
||||
def write_to_cache(self, data, filename):
|
||||
"""Write data in JSON format to a file"""
|
||||
json_data = json_format_dict(data, True)
|
||||
cache = open(filename, 'w')
|
||||
cache.write(json_data)
|
||||
cache.close()
|
||||
|
||||
def _write_cache(self):
|
||||
self.write_to_cache(self.cache, self.cache_path_cache)
|
||||
self.write_to_cache(self.inventory, self.cache_path_inventory)
|
||||
self.write_to_cache(self.params, self.cache_path_params)
|
||||
self.write_to_cache(self.facts, self.cache_path_facts)
|
||||
self.write_to_cache(self.hostcollections, self.cache_path_hostcollections)
|
||||
|
||||
def to_safe(self, word):
|
||||
'''Converts 'bad' characters in a string to underscores
|
||||
so they can be used as Ansible groups
|
||||
|
||||
>>> ForemanInventory.to_safe("foo-bar baz")
|
||||
'foo_barbaz'
|
||||
'''
|
||||
regex = r"[^A-Za-z0-9\_]"
|
||||
return re.sub(regex, "_", word.replace(" ", ""))
|
||||
|
||||
def update_cache(self, scan_only_new_hosts=False):
|
||||
"""Make calls to foreman and save the output in a cache"""
|
||||
use_inventory_report = self._use_inventory_report()
|
||||
if use_inventory_report:
|
||||
self._update_cache_inventory(scan_only_new_hosts)
|
||||
else:
|
||||
self._update_cache_host_api(scan_only_new_hosts)
|
||||
|
||||
def _update_cache_inventory(self, scan_only_new_hosts):
|
||||
self.groups = dict()
|
||||
self.hosts = dict()
|
||||
try:
|
||||
inventory_report_response = self._post_request()
|
||||
except Exception:
|
||||
self._update_cache_host_api(scan_only_new_hosts)
|
||||
return
|
||||
host_data = json.loads(inventory_report_response)
|
||||
for host in host_data:
|
||||
if not(host) or (host["name"] in self.cache.keys() and scan_only_new_hosts):
|
||||
continue
|
||||
dns_name = host['name']
|
||||
|
||||
host_params = host.pop('host_parameters', {})
|
||||
fact_list = host.pop('facts', {})
|
||||
content_facet_attributes = host.get('content_attributes', {}) or {}
|
||||
|
||||
# Create ansible groups for hostgroup
|
||||
group = 'host_group'
|
||||
val = host.get(group)
|
||||
if val:
|
||||
safe_key = self.to_safe('%s%s_%s' % (
|
||||
to_text(self.group_prefix),
|
||||
group,
|
||||
to_text(val).lower()
|
||||
))
|
||||
self.inventory[safe_key].append(dns_name)
|
||||
|
||||
# Create ansible groups for environment, location and organization
|
||||
for group in ['environment', 'location', 'organization']:
|
||||
val = host.get('%s' % group)
|
||||
if val:
|
||||
safe_key = self.to_safe('%s%s_%s' % (
|
||||
to_text(self.group_prefix),
|
||||
group,
|
||||
to_text(val).lower()
|
||||
))
|
||||
self.inventory[safe_key].append(dns_name)
|
||||
|
||||
for group in ['lifecycle_environment', 'content_view']:
|
||||
val = content_facet_attributes.get('%s_name' % group)
|
||||
if val:
|
||||
safe_key = self.to_safe('%s%s_%s' % (
|
||||
to_text(self.group_prefix),
|
||||
group,
|
||||
to_text(val).lower()
|
||||
))
|
||||
self.inventory[safe_key].append(dns_name)
|
||||
|
||||
params = host_params
|
||||
|
||||
# Ansible groups by parameters in host groups and Foreman host
|
||||
# attributes.
|
||||
groupby = dict()
|
||||
for k, v in params.items():
|
||||
groupby[k] = self.to_safe(to_text(v))
|
||||
|
||||
# The names of the ansible groups are given by group_patterns:
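# Illustrative example (names are hypothetical): with group_patterns set to
# ["{app_param}-{tier_param}"] and host params app_param=myapp,
# tier_param=webtier, the host is added to group 'myapp-webtier'. Hosts
# missing a referenced parameter are skipped via the KeyError handler below.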
|
||||
for pattern in self.group_patterns:
|
||||
try:
|
||||
key = pattern.format(**groupby)
|
||||
self.inventory[key].append(dns_name)
|
||||
except KeyError:
|
||||
pass # Host not part of this group
|
||||
|
||||
if self.want_hostcollections:
|
||||
hostcollections = host.get('host_collections')
|
||||
|
||||
if hostcollections:
|
||||
# Create Ansible groups for host collections
|
||||
for hostcollection in hostcollections:
|
||||
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower()))
|
||||
self.inventory[safe_key].append(dns_name)
|
||||
|
||||
self.hostcollections[dns_name] = hostcollections
|
||||
|
||||
self.cache[dns_name] = host
|
||||
self.params[dns_name] = params
|
||||
self.facts[dns_name] = fact_list
|
||||
self.inventory['all'].append(dns_name)
|
||||
self._write_cache()
|
||||
|
||||
def _update_cache_host_api(self, scan_only_new_hosts):
|
||||
"""Make calls to foreman and save the output in a cache"""
|
||||
|
||||
self.groups = dict()
|
||||
self.hosts = dict()
|
||||
|
||||
for host in self._get_hosts():
|
||||
if host['name'] in self.cache.keys() and scan_only_new_hosts:
|
||||
continue
|
||||
dns_name = host['name']
|
||||
|
||||
host_data = self._get_host_data_by_id(host['id'])
|
||||
host_params = host_data.get('all_parameters', {})
|
||||
|
||||
# Create ansible groups for hostgroup
|
||||
group = 'hostgroup'
|
||||
val = host.get('%s_title' % group) or host.get('%s_name' % group)
|
||||
if val:
|
||||
safe_key = self.to_safe('%s%s_%s' % (
|
||||
to_text(self.group_prefix),
|
||||
group,
|
||||
to_text(val).lower()
|
||||
))
|
||||
self.inventory[safe_key].append(dns_name)
|
||||
|
||||
# Create ansible groups for environment, location and organization
|
||||
for group in ['environment', 'location', 'organization']:
|
||||
val = host.get('%s_name' % group)
|
||||
if val:
|
||||
safe_key = self.to_safe('%s%s_%s' % (
|
||||
to_text(self.group_prefix),
|
||||
group,
|
||||
to_text(val).lower()
|
||||
))
|
||||
self.inventory[safe_key].append(dns_name)
|
||||
|
||||
for group in ['lifecycle_environment', 'content_view']:
|
||||
val = host.get('content_facet_attributes', {}).get('%s_name' % group)
|
||||
if val:
|
||||
safe_key = self.to_safe('%s%s_%s' % (
|
||||
to_text(self.group_prefix),
|
||||
group,
|
||||
to_text(val).lower()
|
||||
))
|
||||
self.inventory[safe_key].append(dns_name)
|
||||
|
||||
params = self._resolve_params(host_params)
|
||||
|
||||
# Ansible groups by parameters in host groups and Foreman host
|
||||
# attributes.
|
||||
groupby = dict()
|
||||
for k, v in params.items():
|
||||
groupby[k] = self.to_safe(to_text(v))
|
||||
|
||||
# The names of the ansible groups are given by group_patterns:
|
||||
for pattern in self.group_patterns:
|
||||
try:
|
||||
key = pattern.format(**groupby)
|
||||
self.inventory[key].append(dns_name)
|
||||
except KeyError:
|
||||
pass # Host not part of this group
|
||||
|
||||
if self.want_hostcollections:
|
||||
hostcollections = host_data.get('host_collections')
|
||||
|
||||
if hostcollections:
|
||||
# Create Ansible groups for host collections
|
||||
for hostcollection in hostcollections:
|
||||
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower()))
|
||||
self.inventory[safe_key].append(dns_name)
|
||||
|
||||
self.hostcollections[dns_name] = hostcollections
|
||||
|
||||
self.cache[dns_name] = host
|
||||
self.params[dns_name] = params
|
||||
self.facts[dns_name] = self._get_facts(host)
|
||||
self.inventory['all'].append(dns_name)
|
||||
self._write_cache()
|
||||
|
||||
def is_cache_valid(self):
|
||||
"""Determines if the cache is still valid"""
|
||||
if os.path.isfile(self.cache_path_cache):
|
||||
mod_time = os.path.getmtime(self.cache_path_cache)
|
||||
current_time = time()
|
||||
if (mod_time + self.cache_max_age) > current_time:
|
||||
if (os.path.isfile(self.cache_path_inventory) and
|
||||
os.path.isfile(self.cache_path_params) and
|
||||
os.path.isfile(self.cache_path_facts)):
|
||||
return True
|
||||
return False
|
||||
|
||||
def load_inventory_from_cache(self):
|
||||
"""Read the index from the cache file sets self.index"""
|
||||
|
||||
with open(self.cache_path_inventory, 'r') as fp:
|
||||
self.inventory = json.load(fp)
|
||||
|
||||
def load_params_from_cache(self):
|
||||
"""Read the index from the cache file sets self.index"""
|
||||
|
||||
with open(self.cache_path_params, 'r') as fp:
|
||||
self.params = json.load(fp)
|
||||
|
||||
def load_facts_from_cache(self):
|
||||
"""Read the index from the cache file sets self.facts"""
|
||||
|
||||
if not self.want_facts:
|
||||
return
|
||||
with open(self.cache_path_facts, 'r') as fp:
|
||||
self.facts = json.load(fp)
|
||||
|
||||
def load_hostcollections_from_cache(self):
|
||||
"""Read the index from the cache file sets self.hostcollections"""
|
||||
|
||||
if not self.want_hostcollections:
|
||||
return
|
||||
with open(self.cache_path_hostcollections, 'r') as fp:
|
||||
self.hostcollections = json.load(fp)
|
||||
|
||||
def load_cache_from_cache(self):
|
||||
"""Read the cache from the cache file sets self.cache"""
|
||||
|
||||
with open(self.cache_path_cache, 'r') as fp:
|
||||
self.cache = json.load(fp)
|
||||
|
||||
def get_inventory(self):
|
||||
if self.args.refresh_cache or not self.is_cache_valid():
|
||||
self.update_cache()
|
||||
else:
|
||||
self.load_inventory_from_cache()
|
||||
self.load_params_from_cache()
|
||||
self.load_facts_from_cache()
|
||||
self.load_hostcollections_from_cache()
|
||||
self.load_cache_from_cache()
|
||||
if self.scan_new_hosts:
|
||||
self.update_cache(True)
|
||||
|
||||
def get_host_info(self):
|
||||
"""Get variables about a specific host"""
|
||||
|
||||
if not self.cache or len(self.cache) == 0:
|
||||
# Need to load the cache from the cache file
|
||||
self.load_cache_from_cache()
|
||||
|
||||
if self.args.host not in self.cache:
|
||||
# try updating the cache
|
||||
self.update_cache()
|
||||
|
||||
if self.args.host not in self.cache:
|
||||
# host might not exist anymore
|
||||
return json_format_dict({}, True)
|
||||
|
||||
return json_format_dict(self.cache[self.args.host], True)
|
||||
|
||||
def _print_data(self):
|
||||
data_to_print = ""
|
||||
if self.args.host:
|
||||
data_to_print += self.get_host_info()
|
||||
else:
|
||||
self.inventory['_meta'] = {'hostvars': {}}
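# Populating '_meta.hostvars' in the --list output lets Ansible pick up
# per-host variables without calling the script with --host for every host.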
|
||||
for hostname in self.cache:
|
||||
self.inventory['_meta']['hostvars'][hostname] = {
|
||||
'foreman': self.cache[hostname],
|
||||
'foreman_params': self.params[hostname],
|
||||
}
|
||||
if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]:
|
||||
self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip']
|
||||
if self.want_facts:
|
||||
self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname]
|
||||
|
||||
data_to_print += json_format_dict(self.inventory, True)
|
||||
|
||||
print(data_to_print)
|
||||
|
||||
def run(self):
|
||||
# Read settings and parse CLI arguments
|
||||
if not self.read_settings():
|
||||
return False
|
||||
self.parse_cli_args()
|
||||
self.get_inventory()
|
||||
self._print_data()
|
||||
return True
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(not ForemanInventory().run())
|
@ -1,508 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2013 Google Inc.
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
GCE external inventory script
|
||||
=================================
|
||||
|
||||
Generates inventory that Ansible can understand by making API requests
|
||||
to Google Compute Engine via the libcloud library. Full install/configuration
|
||||
instructions for the gce* modules can be found in the comments of
|
||||
ansible/test/gce_tests.py.
|
||||
|
||||
When run against a specific host, this script returns the following variables
|
||||
based on the data obtained from the libcloud Node object:
|
||||
- gce_uuid
|
||||
- gce_id
|
||||
- gce_image
|
||||
- gce_machine_type
|
||||
- gce_private_ip
|
||||
- gce_public_ip
|
||||
- gce_name
|
||||
- gce_description
|
||||
- gce_status
|
||||
- gce_zone
|
||||
- gce_tags
|
||||
- gce_metadata
|
||||
- gce_network
|
||||
- gce_subnetwork
|
||||
|
||||
When run in --list mode, instances are grouped by the following categories:
|
||||
- zone:
|
||||
zone group name examples are us-central1-b, europe-west1-a, etc.
|
||||
- instance tags:
|
||||
An entry is created for each tag. For example, if you have two instances
|
||||
with a common tag called 'foo', they will both be grouped together under
|
||||
the 'tag_foo' name.
|
||||
- network name:
|
||||
the name of the network is appended to 'network_' (e.g. the 'default'
|
||||
network will result in a group named 'network_default')
|
||||
- machine type
|
||||
types follow a pattern like n1-standard-4, g1-small, etc.
|
||||
- running status:
|
||||
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
|
||||
- image:
|
||||
when using an ephemeral/scratch disk, this will be set to the image name
|
||||
used when creating the instance (e.g. debian-7-wheezy-v20130816). When
|
||||
your instance was created with a root persistent disk it will be set to
|
||||
'persistent_disk' since there is no current way to determine the image.
|
||||
|
||||
Examples:
|
||||
Execute uname on all instances in the us-central1-a zone
|
||||
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
|
||||
|
||||
Use the GCE inventory script to print out instance specific information
|
||||
$ contrib/inventory/gce.py --host my_instance
|
||||
|
||||
Author: Eric Johnson <erjohnso@google.com>
|
||||
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
|
||||
Version: 0.0.3
|
||||
'''
|
||||
|
||||
try:
|
||||
import pkg_resources
|
||||
except ImportError:
|
||||
# Use pkg_resources to find the correct versions of libraries and set
|
||||
# sys.path appropriately when there are multiversion installs. We don't
|
||||
# fail here as there is code that better expresses the errors where the
|
||||
# library is used.
|
||||
pass
|
||||
|
||||
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
|
||||
USER_AGENT_VERSION = "v2"
|
||||
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
|
||||
from time import time
|
||||
|
||||
if sys.version_info >= (3, 0):
|
||||
import configparser
|
||||
else:
|
||||
import ConfigParser as configparser
|
||||
|
||||
import logging
|
||||
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
|
||||
|
||||
try:
|
||||
import json
|
||||
except ImportError:
|
||||
import simplejson as json
|
||||
|
||||
try:
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.compute.providers import get_driver
|
||||
_ = Provider.GCE
|
||||
except:
|
||||
sys.exit("GCE inventory script requires libcloud >= 0.13")
|
||||
|
||||
|
||||
class CloudInventoryCache(object):
|
||||
def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
|
||||
cache_max_age=300):
|
||||
cache_dir = os.path.expanduser(cache_path)
|
||||
if not os.path.exists(cache_dir):
|
||||
os.makedirs(cache_dir)
|
||||
self.cache_path_cache = os.path.join(cache_dir, cache_name)
|
||||
|
||||
self.cache_max_age = cache_max_age
|
||||
|
||||
def is_valid(self, max_age=None):
|
||||
''' Determines if the cache file has expired or is still valid '''
|
||||
|
||||
if max_age is None:
|
||||
max_age = self.cache_max_age
|
||||
|
||||
if os.path.isfile(self.cache_path_cache):
|
||||
mod_time = os.path.getmtime(self.cache_path_cache)
|
||||
current_time = time()
|
||||
if (mod_time + max_age) > current_time:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_all_data_from_cache(self, filename=''):
|
||||
''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
|
||||
|
||||
data = ''
|
||||
if not filename:
|
||||
filename = self.cache_path_cache
|
||||
with open(filename, 'r') as cache:
|
||||
data = cache.read()
|
||||
return json.loads(data)
|
||||
|
||||
def write_to_cache(self, data, filename=''):
|
||||
''' Writes data to file as JSON. Returns True. '''
|
||||
if not filename:
|
||||
filename = self.cache_path_cache
|
||||
json_data = json.dumps(data)
|
||||
with open(filename, 'w') as cache:
|
||||
cache.write(json_data)
|
||||
return True
|
||||
|
||||
|
||||
class GceInventory(object):
|
||||
def __init__(self):
|
||||
# Cache object
|
||||
self.cache = None
|
||||
# dictionary containing inventory read from disk
|
||||
self.inventory = {}
|
||||
|
||||
# Read settings and parse CLI arguments
|
||||
self.parse_cli_args()
|
||||
self.config = self.get_config()
|
||||
self.driver = self.get_gce_driver()
|
||||
self.ip_type = self.get_inventory_options()
|
||||
if self.ip_type:
|
||||
self.ip_type = self.ip_type.lower()
|
||||
|
||||
# Cache management
|
||||
start_inventory_time = time()
|
||||
cache_used = False
|
||||
if self.args.refresh_cache or not self.cache.is_valid():
|
||||
self.do_api_calls_update_cache()
|
||||
else:
|
||||
self.load_inventory_from_cache()
|
||||
cache_used = True
|
||||
self.inventory['_meta']['stats'] = {'use_cache': True}
|
||||
self.inventory['_meta']['stats'] = {
|
||||
'inventory_load_time': time() - start_inventory_time,
|
||||
'cache_used': cache_used
|
||||
}
|
||||
|
||||
# Just display data for specific host
|
||||
if self.args.host:
|
||||
print(self.json_format_dict(
|
||||
self.inventory['_meta']['hostvars'][self.args.host],
|
||||
pretty=self.args.pretty))
|
||||
else:
|
||||
# Otherwise, assume user wants all instances grouped
|
||||
zones = self.parse_env_zones()
|
||||
print(self.json_format_dict(self.inventory,
|
||||
pretty=self.args.pretty))
|
||||
sys.exit(0)
|
||||
|
||||
def get_config(self):
|
||||
"""
|
||||
Reads the settings from the gce.ini file.
|
||||
|
||||
Populates a SafeConfigParser object with defaults and
|
||||
attempts to read an .ini-style configuration from the filename
|
||||
specified in GCE_INI_PATH. If the environment variable is
|
||||
not present, the filename defaults to gce.ini in the current
|
||||
working directory.
|
||||
"""
|
||||
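# Example gce.ini (illustrative only; all values are hypothetical, and every
# option can be omitted or overridden through the environment variables
# handled further below):
#
#     [gce]
#     gce_service_account_email_address = my-sa@my-project.iam.gserviceaccount.com
#     gce_service_account_pem_file_path = /path/to/key.pem
#     gce_project_id = my-project
#     gce_zone = us-central1-a
#     instance_states = RUNNING
#
#     [inventory]
#     inventory_ip_type = external
#
#     [cache]
#     cache_path = ~/.ansible/tmp
#     cache_max_age = 300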
gce_ini_default_path = os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
|
||||
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
|
||||
|
||||
# Create a ConfigParser.
|
||||
# This provides empty defaults to each key, so that environment
|
||||
# variable configuration (as opposed to INI configuration) is able
|
||||
# to work.
|
||||
config = configparser.SafeConfigParser(defaults={
|
||||
'gce_service_account_email_address': '',
|
||||
'gce_service_account_pem_file_path': '',
|
||||
'gce_project_id': '',
|
||||
'gce_zone': '',
|
||||
'libcloud_secrets': '',
|
||||
'inventory_ip_type': '',
|
||||
'cache_path': '~/.ansible/tmp',
|
||||
'cache_max_age': '300'
|
||||
})
|
||||
if 'gce' not in config.sections():
|
||||
config.add_section('gce')
|
||||
if 'inventory' not in config.sections():
|
||||
config.add_section('inventory')
|
||||
if 'cache' not in config.sections():
|
||||
config.add_section('cache')
|
||||
|
||||
config.read(gce_ini_path)
|
||||
|
||||
#########
|
||||
# Section added for processing ini settings
|
||||
#########
|
||||
|
||||
# Set the instance_states filter based on config file options
|
||||
self.instance_states = []
|
||||
if config.has_option('gce', 'instance_states'):
|
||||
states = config.get('gce', 'instance_states')
|
||||
# Ignore if instance_states is an empty string.
|
||||
if states:
|
||||
self.instance_states = states.split(',')
|
||||
|
||||
# Caching
|
||||
cache_path = config.get('cache', 'cache_path')
|
||||
cache_max_age = config.getint('cache', 'cache_max_age')
|
||||
# TODO(supertom): support project-specific caches
|
||||
cache_name = 'ansible-gce.cache'
|
||||
self.cache = CloudInventoryCache(cache_path=cache_path,
|
||||
cache_max_age=cache_max_age,
|
||||
cache_name=cache_name)
|
||||
return config
|
||||
|
||||
def get_inventory_options(self):
|
||||
"""Determine inventory options. Environment variables always
|
||||
take precedence over configuration files."""
|
||||
ip_type = self.config.get('inventory', 'inventory_ip_type')
|
||||
# If the appropriate environment variables are set, they override
|
||||
# other configuration
|
||||
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
|
||||
return ip_type
|
||||
|
||||
def get_gce_driver(self):
|
||||
"""Determine the GCE authorization settings and return a
|
||||
libcloud driver.
|
||||
"""
|
||||
# Attempt to get GCE params from a configuration file, if one
|
||||
# exists.
|
||||
secrets_path = self.config.get('gce', 'libcloud_secrets')
|
||||
secrets_found = False
|
||||
|
||||
try:
|
||||
import secrets
|
||||
args = list(secrets.GCE_PARAMS)
|
||||
kwargs = secrets.GCE_KEYWORD_PARAMS
|
||||
secrets_found = True
|
||||
except:
|
||||
pass
|
||||
|
||||
if not secrets_found and secrets_path:
|
||||
if not secrets_path.endswith('secrets.py'):
|
||||
err = "Must specify libcloud secrets file as "
|
||||
err += "/absolute/path/to/secrets.py"
|
||||
sys.exit(err)
|
||||
sys.path.append(os.path.dirname(secrets_path))
|
||||
try:
|
||||
import secrets
|
||||
args = list(getattr(secrets, 'GCE_PARAMS', []))
|
||||
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
|
||||
secrets_found = True
|
||||
except:
|
||||
pass
|
||||
|
||||
if not secrets_found:
|
||||
args = [
|
||||
self.config.get('gce', 'gce_service_account_email_address'),
|
||||
self.config.get('gce', 'gce_service_account_pem_file_path')
|
||||
]
|
||||
kwargs = {'project': self.config.get('gce', 'gce_project_id'),
|
||||
'datacenter': self.config.get('gce', 'gce_zone')}
|
||||
|
||||
# If the appropriate environment variables are set, they override
|
||||
# other configuration; process those into our args and kwargs.
|
||||
args[0] = os.environ.get('GCE_EMAIL', args[0])
|
||||
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
|
||||
args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
|
||||
|
||||
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
|
||||
kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
|
||||
|
||||
# Retrieve and return the GCE driver.
|
||||
gce = get_driver(Provider.GCE)(*args, **kwargs)
|
||||
gce.connection.user_agent_append(
|
||||
'%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
|
||||
)
|
||||
return gce
|
||||
|
||||
def parse_env_zones(self):
|
||||
'''Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
|
||||
If provided, this will be used to filter the results of the group_instances call'''
|
||||
import csv
|
||||
reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
|
||||
zones = [r for r in reader]
|
||||
return [z for z in zones[0]]
|
||||
|
||||
def parse_cli_args(self):
|
||||
''' Command line argument processing '''
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Produce an Ansible Inventory file based on GCE')
|
||||
parser.add_argument('--list', action='store_true', default=True,
|
||||
help='List instances (default: True)')
|
||||
parser.add_argument('--host', action='store',
|
||||
help='Get all information about an instance')
|
||||
parser.add_argument('--pretty', action='store_true', default=False,
|
||||
help='Pretty format (default: False)')
|
||||
parser.add_argument(
|
||||
'--refresh-cache', action='store_true', default=False,
|
||||
help='Force refresh of cache by making API requests (default: False - use cache files)')
|
||||
self.args = parser.parse_args()
|
||||
|
||||
def node_to_dict(self, inst):
|
||||
md = {}
|
||||
|
||||
if inst is None:
|
||||
return {}
|
||||
|
||||
if 'items' in inst.extra['metadata']:
|
||||
for entry in inst.extra['metadata']['items']:
|
||||
md[entry['key']] = entry['value']
|
||||
|
||||
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
|
||||
subnet = None
|
||||
if 'subnetwork' in inst.extra['networkInterfaces'][0]:
|
||||
subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
|
||||
# default to external IP unless the user has specified they prefer internal
|
||||
if self.ip_type == 'internal':
|
||||
ssh_host = inst.private_ips[0]
|
||||
else:
|
||||
ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
|
||||
|
||||
return {
|
||||
'gce_uuid': inst.uuid,
|
||||
'gce_id': inst.id,
|
||||
'gce_image': inst.image,
|
||||
'gce_machine_type': inst.size,
|
||||
'gce_private_ip': inst.private_ips[0],
|
||||
'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
|
||||
'gce_name': inst.name,
|
||||
'gce_description': inst.extra['description'],
|
||||
'gce_status': inst.extra['status'],
|
||||
'gce_zone': inst.extra['zone'].name,
|
||||
'gce_tags': inst.extra['tags'],
|
||||
'gce_metadata': md,
|
||||
'gce_network': net,
|
||||
'gce_subnetwork': subnet,
|
||||
# Hosts don't have a public name, so we add an IP
|
||||
'ansible_ssh_host': ssh_host
|
||||
}
|
||||
|
||||
def load_inventory_from_cache(self):
|
||||
''' Loads inventory from JSON on disk. '''
|
||||
|
||||
try:
|
||||
self.inventory = self.cache.get_all_data_from_cache()
|
||||
hosts = self.inventory['_meta']['hostvars']
|
||||
except Exception as e:
|
||||
print(
|
||||
"Invalid inventory file %s. Please rebuild with -refresh-cache option."
|
||||
% (self.cache.cache_path_cache))
|
||||
raise
|
||||
|
||||
def do_api_calls_update_cache(self):
|
||||
''' Do API calls and save data in cache. '''
|
||||
zones = self.parse_env_zones()
|
||||
data = self.group_instances(zones)
|
||||
self.cache.write_to_cache(data)
|
||||
self.inventory = data
|
||||
|
||||
def list_nodes(self):
|
||||
all_nodes = []
|
||||
params, more_results = {'maxResults': 500}, True
|
||||
while more_results:
|
||||
self.driver.connection.gce_params = params
|
||||
all_nodes.extend(self.driver.list_nodes())
|
||||
more_results = 'pageToken' in params
|
||||
return all_nodes
|
||||
|
||||
def group_instances(self, zones=None):
|
||||
'''Group all instances'''
|
||||
groups = {}
|
||||
meta = {}
|
||||
meta["hostvars"] = {}
|
||||
|
||||
for node in self.list_nodes():
|
||||
|
||||
# This check filters on the desired instance states defined in the
|
||||
# config file with the instance_states config option.
|
||||
#
|
||||
# If the instance_states list is _empty_ then _ALL_ states are returned.
|
||||
#
|
||||
# If the instance_states list is _populated_ then check the current
|
||||
# state against the instance_states list
|
||||
if self.instance_states and not node.extra['status'] in self.instance_states:
|
||||
continue
|
||||
|
||||
name = node.name
|
||||
|
||||
meta["hostvars"][name] = self.node_to_dict(node)
|
||||
|
||||
zone = node.extra['zone'].name
|
||||
|
||||
# To avoid making multiple requests per zone
|
||||
# we list all nodes and then filter the results
|
||||
if zones and zone not in zones:
|
||||
continue
|
||||
|
||||
if zone in groups:
|
||||
groups[zone].append(name)
|
||||
else:
|
||||
groups[zone] = [name]
|
||||
|
||||
tags = node.extra['tags']
|
||||
for t in tags:
|
||||
if t.startswith('group-'):
|
||||
tag = t[6:]
|
||||
else:
|
||||
tag = 'tag_%s' % t
|
||||
if tag in groups:
|
||||
groups[tag].append(name)
|
||||
else:
|
||||
groups[tag] = [name]
|
||||
|
||||
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
|
||||
net = 'network_%s' % net
|
||||
if net in groups:
|
||||
groups[net].append(name)
|
||||
else:
|
||||
groups[net] = [name]
|
||||
|
||||
machine_type = node.size
|
||||
if machine_type in groups:
|
||||
groups[machine_type].append(name)
|
||||
else:
|
||||
groups[machine_type] = [name]
|
||||
|
||||
image = node.image or 'persistent_disk'
|
||||
if image in groups:
|
||||
groups[image].append(name)
|
||||
else:
|
||||
groups[image] = [name]
|
||||
|
||||
status = node.extra['status']
|
||||
stat = 'status_%s' % status.lower()
|
||||
if stat in groups:
|
||||
groups[stat].append(name)
|
||||
else:
|
||||
groups[stat] = [name]
|
||||
|
||||
for private_ip in node.private_ips:
|
||||
groups[private_ip] = [name]
|
||||
|
||||
if len(node.public_ips) >= 1:
|
||||
for public_ip in node.public_ips:
|
||||
groups[public_ip] = [name]
|
||||
|
||||
groups["_meta"] = meta
|
||||
|
||||
return groups
|
||||
|
||||
def json_format_dict(self, data, pretty=False):
|
||||
''' Converts a dict to a JSON object and dumps it as a formatted
|
||||
string '''
|
||||
|
||||
if pretty:
|
||||
return json.dumps(data, sort_keys=True, indent=2)
|
||||
else:
|
||||
return json.dumps(data)
|
||||
|
||||
# Run the script
|
||||
if __name__ == '__main__':
|
||||
GceInventory()
|
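# Usage sketch (illustrative only; the values are hypothetical). The same
# settings can be supplied entirely through the environment, which takes
# precedence over gce.ini, before instantiating the inventory class:
#
#     import os
#     os.environ.update({
#         'GCE_INI_PATH': '/etc/ansible/gce.ini',
#         'GCE_EMAIL': 'my-sa@my-project.iam.gserviceaccount.com',
#         'GCE_PEM_FILE_PATH': '/path/to/key.pem',
#         'GCE_PROJECT': 'my-project',
#         'GCE_ZONE': 'us-central1-a',
#     })
#     GceInventory()  # prints the grouped inventory as JSON and exits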
@ -1,25 +0,0 @@
|
||||
---
|
||||
clouds:
|
||||
vexxhost:
|
||||
profile: vexxhost
|
||||
auth:
|
||||
project_name: 39e296b2-fc96-42bf-8091-cb742fa13da9
|
||||
username: fb886a9b-c37b-442a-9be3-964bed961e04
|
||||
password: fantastic-password1
|
||||
rax:
|
||||
cloud: rackspace
|
||||
auth:
|
||||
username: example
|
||||
password: spectacular-password
|
||||
project_id: 2352426
|
||||
region_name: DFW,ORD,IAD
|
||||
devstack:
|
||||
auth:
|
||||
auth_url: https://devstack.example.com
|
||||
username: stack
|
||||
password: stack
|
||||
project_name: stack
|
||||
ansible:
|
||||
use_hostnames: true
|
||||
expand_hostvars: false
|
||||
fail_on_errors: true
|
@ -1,272 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
|
||||
# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
|
||||
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
|
||||
# Copyright (c) 2016, Rackspace Australia
|
||||
#
|
||||
# This module is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This software is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# The OpenStack Inventory module uses os-client-config for configuration.
|
||||
# https://github.com/openstack/os-client-config
|
||||
# This means it will either:
|
||||
# - Respect normal OS_* environment variables like other OpenStack tools
|
||||
# - Read values from a clouds.yaml file.
|
||||
# If you want to configure via clouds.yaml, you can put the file in:
|
||||
# - Current directory
|
||||
# - ~/.config/openstack/clouds.yaml
|
||||
# - /etc/openstack/clouds.yaml
|
||||
# - /etc/ansible/openstack.yml
|
||||
# The clouds.yaml file can contain entries for multiple clouds and multiple
|
||||
# regions of those clouds. If it does, this inventory module will by default
|
||||
# connect to all of them and present them as one contiguous inventory. You
|
||||
# can limit to one cloud by passing the `--cloud` parameter, or use the
|
||||
# OS_CLOUD environment variable. If caching is enabled, and a cloud is
|
||||
# selected, then per-cloud cache folders will be used.
|
||||
#
|
||||
# See the adjacent openstack.yml file for an example config file
|
||||
# There are two ansible inventory specific options that can be set in
|
||||
# the inventory section.
|
||||
# expand_hostvars controls whether or not the inventory will make extra API
|
||||
# calls to fill out additional information about each server
|
||||
# use_hostnames changes the behavior from registering every host with its UUID
|
||||
# and making a group of its hostname to only doing this if the
|
||||
# hostname in question has more than one server
|
||||
# fail_on_errors causes the inventory to fail and return no hosts if one cloud
|
||||
# has failed (for example, bad credentials or being offline).
|
||||
# When set to False, the inventory will return hosts from
|
||||
# whichever other clouds it can contact. (Default: True)
|
||||
#
|
||||
# Also it is possible to pass the correct user by setting an ansible_user: $myuser
|
||||
# metadata attribute.
|
||||
|
||||
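# Illustrative invocations (the script name and cloud names are hypothetical
# and assume a matching clouds.yaml such as the example file above):
#
#     # merge every cloud defined in clouds.yaml into one inventory
#     openstack_inventory.py --list
#
#     # limit to a single cloud and bypass the cache
#     OS_CLOUD=devstack openstack_inventory.py --list --refresh
#
#     # hostvars for a single server
#     openstack_inventory.py --host <server-id>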
import argparse
|
||||
import collections
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from distutils.version import StrictVersion
|
||||
from io import StringIO
|
||||
|
||||
import json
|
||||
|
||||
import openstack as sdk
|
||||
from openstack.cloud import inventory as sdk_inventory
|
||||
from openstack.config import loader as cloud_config
|
||||
|
||||
CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
|
||||
|
||||
|
||||
def get_groups_from_server(server_vars, namegroup=True):
|
||||
groups = []
|
||||
|
||||
region = server_vars['region']
|
||||
cloud = server_vars['cloud']
|
||||
metadata = server_vars.get('metadata', {})
|
||||
|
||||
# Create a group for the cloud
|
||||
groups.append(cloud)
|
||||
|
||||
# Create a group on region
|
||||
if region:
|
||||
groups.append(region)
|
||||
|
||||
# And one by cloud_region
|
||||
groups.append("%s_%s" % (cloud, region))
|
||||
|
||||
# Check if the 'group' metadata key is in the server's metadata
|
||||
if 'group' in metadata:
|
||||
groups.append(metadata['group'])
|
||||
|
||||
for extra_group in metadata.get('groups', '').split(','):
|
||||
if extra_group:
|
||||
groups.append(extra_group.strip())
|
||||
|
||||
groups.append('instance-%s' % server_vars['id'])
|
||||
if namegroup:
|
||||
groups.append(server_vars['name'])
|
||||
|
||||
for key in ('flavor', 'image'):
|
||||
if 'name' in server_vars[key]:
|
||||
groups.append('%s-%s' % (key, server_vars[key]['name']))
|
||||
|
||||
for key, value in iter(metadata.items()):
|
||||
groups.append('meta-%s_%s' % (key, value))
|
||||
|
||||
az = server_vars.get('az', None)
|
||||
if az:
|
||||
# Make groups for az, region_az and cloud_region_az
|
||||
groups.append(az)
|
||||
groups.append('%s_%s' % (region, az))
|
||||
groups.append('%s_%s_%s' % (cloud, region, az))
|
||||
return groups
|
||||
|
||||
|
||||
def get_host_groups(inventory, refresh=False, cloud=None):
|
||||
(cache_file, cache_expiration_time) = get_cache_settings(cloud)
|
||||
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
|
||||
groups = to_json(get_host_groups_from_cloud(inventory))
|
||||
with open(cache_file, 'w') as f:
|
||||
f.write(groups)
|
||||
else:
|
||||
with open(cache_file, 'r') as f:
|
||||
groups = f.read()
|
||||
return groups
|
||||
|
||||
|
||||
def append_hostvars(hostvars, groups, key, server, namegroup=False):
|
||||
hostvars[key] = dict(
|
||||
ansible_ssh_host=server['interface_ip'],
|
||||
ansible_host=server['interface_ip'],
|
||||
openstack=server)
|
||||
|
||||
metadata = server.get('metadata', {})
|
||||
if 'ansible_user' in metadata:
|
||||
hostvars[key]['ansible_user'] = metadata['ansible_user']
|
||||
|
||||
for group in get_groups_from_server(server, namegroup=namegroup):
|
||||
groups[group].append(key)
|
||||
|
||||
|
||||
def get_host_groups_from_cloud(inventory):
|
||||
groups = collections.defaultdict(list)
|
||||
firstpass = collections.defaultdict(list)
|
||||
hostvars = {}
|
||||
list_args = {}
|
||||
if hasattr(inventory, 'extra_config'):
|
||||
use_hostnames = inventory.extra_config['use_hostnames']
|
||||
list_args['expand'] = inventory.extra_config['expand_hostvars']
|
||||
if StrictVersion(sdk.version.__version__) >= StrictVersion("0.13.0"):
|
||||
list_args['fail_on_cloud_config'] = \
|
||||
inventory.extra_config['fail_on_errors']
|
||||
else:
|
||||
use_hostnames = False
|
||||
|
||||
for server in inventory.list_hosts(**list_args):
|
||||
|
||||
if 'interface_ip' not in server:
|
||||
continue
|
||||
firstpass[server['name']].append(server)
|
||||
for name, servers in firstpass.items():
|
||||
if len(servers) == 1 and use_hostnames:
|
||||
append_hostvars(hostvars, groups, name, servers[0])
|
||||
else:
|
||||
server_ids = set()
|
||||
# Trap for duplicate results
|
||||
for server in servers:
|
||||
server_ids.add(server['id'])
|
||||
if len(server_ids) == 1 and use_hostnames:
|
||||
append_hostvars(hostvars, groups, name, servers[0])
|
||||
else:
|
||||
for server in servers:
|
||||
append_hostvars(
|
||||
hostvars, groups, server['id'], server,
|
||||
namegroup=True)
|
||||
groups['_meta'] = {'hostvars': hostvars}
|
||||
return groups
|
||||
|
||||
|
||||
def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
|
||||
''' Determines if the cache file has expired or is still valid '''
|
||||
if refresh:
|
||||
return True
|
||||
if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
|
||||
mod_time = os.path.getmtime(cache_file)
|
||||
current_time = time.time()
|
||||
if (mod_time + cache_expiration_time) > current_time:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def get_cache_settings(cloud=None):
|
||||
config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
|
||||
if cloud:
|
||||
config = cloud_config.OpenStackConfig(
|
||||
config_files=config_files).get_one(cloud=cloud)
|
||||
else:
|
||||
config = cloud_config.OpenStackConfig(
|
||||
config_files=config_files).get_all()[0]
|
||||
# For inventory-wide caching
|
||||
cache_expiration_time = config.get_cache_expiration_time()
|
||||
cache_path = config.get_cache_path()
|
||||
if cloud:
|
||||
cache_path = '{0}_{1}'.format(cache_path, cloud)
|
||||
if not os.path.exists(cache_path):
|
||||
os.makedirs(cache_path)
|
||||
cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
|
||||
return (cache_file, cache_expiration_time)
|
||||
|
||||
|
||||
def to_json(in_dict):
|
||||
return json.dumps(in_dict, sort_keys=True, indent=2)
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
|
||||
parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'),
|
||||
help='Cloud name (default: None)')
|
||||
parser.add_argument('--private',
|
||||
action='store_true',
|
||||
help='Use private address for ansible host')
|
||||
parser.add_argument('--refresh', action='store_true',
|
||||
help='Refresh cached information')
|
||||
parser.add_argument('--debug', action='store_true', default=False,
|
||||
help='Enable debug output')
|
||||
group = parser.add_mutually_exclusive_group(required=True)
|
||||
group.add_argument('--list', action='store_true',
|
||||
help='List active servers')
|
||||
group.add_argument('--host', help='List details about the specific host')
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
try:
|
||||
# openstacksdk library may write to stdout, so redirect this
|
||||
sys.stdout = StringIO()
|
||||
config_files = cloud_config.CONFIG_FILES + CONFIG_FILES
|
||||
sdk.enable_logging(debug=args.debug)
|
||||
inventory_args = dict(
|
||||
refresh=args.refresh,
|
||||
config_files=config_files,
|
||||
private=args.private,
|
||||
cloud=args.cloud,
|
||||
)
|
||||
if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
|
||||
inventory_args.update(dict(
|
||||
config_key='ansible',
|
||||
config_defaults={
|
||||
'use_hostnames': False,
|
||||
'expand_hostvars': True,
|
||||
'fail_on_errors': True,
|
||||
}
|
||||
))
|
||||
|
||||
inventory = sdk_inventory.OpenStackInventory(**inventory_args)
|
||||
|
||||
sys.stdout = sys.__stdout__
|
||||
if args.list:
|
||||
output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
|
||||
elif args.host:
|
||||
output = to_json(inventory.get_host(args.host))
|
||||
print(output)
|
||||
except sdk.exceptions.OpenStackCloudException as e:
|
||||
sys.stderr.write('%s\n' % e.message)
|
||||
sys.exit(1)
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,257 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2016 Red Hat, Inc.
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
"""
|
||||
oVirt dynamic inventory script
|
||||
=================================
|
||||
|
||||
Generates dynamic inventory file for oVirt.
|
||||
|
||||
The script will return the following attributes for each virtual machine:
|
||||
- id
|
||||
- name
|
||||
- host
|
||||
- cluster
|
||||
- status
|
||||
- description
|
||||
- fqdn
|
||||
- os_type
|
||||
- template
|
||||
- tags
|
||||
- statistics
|
||||
- devices
|
||||
|
||||
When run in --list mode, virtual machines are grouped by the following categories:
|
||||
- cluster
|
||||
- tag
|
||||
- status
|
||||
|
||||
Note: If a virtual machine has more than one tag, it will appear in each of those tag
|
||||
records.
|
||||
|
||||
Examples:
|
||||
# Execute update of system on webserver virtual machine:
|
||||
|
||||
$ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
|
||||
|
||||
# Get webserver virtual machine information:
|
||||
|
||||
$ contrib/inventory/ovirt4.py --host webserver
|
||||
|
||||
Author: Ondra Machacek (@machacekondra)
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
|
||||
from collections import defaultdict
|
||||
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
|
||||
import json
|
||||
|
||||
try:
|
||||
import ovirtsdk4 as sdk
|
||||
import ovirtsdk4.types as otypes
|
||||
except ImportError:
|
||||
print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""
|
||||
Create command line parser for oVirt dynamic inventory script.
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Ansible dynamic inventory script for oVirt.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--list',
|
||||
action='store_true',
|
||||
default=True,
|
||||
help='Get data of all virtual machines (default: True).',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--host',
|
||||
help='Get data for the virtual machine with the specified name.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--pretty',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Pretty format (default: False).',
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def create_connection():
|
||||
"""
|
||||
Create a connection to oVirt engine API.
|
||||
"""
|
||||
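# Example ovirt.ini (illustrative only; the values are hypothetical, and each
# option can instead be supplied via the OVIRT_* environment variables used
# as defaults below):
#
#     [ovirt]
#     ovirt_url = https://engine.example.com/ovirt-engine/api
#     ovirt_username = admin@internal
#     ovirt_password = secret
#     ovirt_ca_file = /etc/pki/ovirt-engine/ca.pem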
# Get the path of the configuration file, by default use
|
||||
# 'ovirt.ini' file in script directory:
|
||||
default_path = os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)),
|
||||
'ovirt.ini',
|
||||
)
|
||||
config_path = os.environ.get('OVIRT_INI_PATH', default_path)
|
||||
|
||||
# Create parser and add ovirt section if it doesn't exist:
|
||||
config = configparser.SafeConfigParser(
|
||||
defaults={
|
||||
'ovirt_url': os.environ.get('OVIRT_URL'),
|
||||
'ovirt_username': os.environ.get('OVIRT_USERNAME'),
|
||||
'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
|
||||
'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''),
|
||||
}
|
||||
)
|
||||
if not config.has_section('ovirt'):
|
||||
config.add_section('ovirt')
|
||||
config.read(config_path)
|
||||
|
||||
# Create a connection with options defined in ini file:
|
||||
return sdk.Connection(
|
||||
url=config.get('ovirt', 'ovirt_url'),
|
||||
username=config.get('ovirt', 'ovirt_username'),
|
||||
password=config.get('ovirt', 'ovirt_password', raw=True),
|
||||
ca_file=config.get('ovirt', 'ovirt_ca_file') or None,
|
||||
insecure=not config.get('ovirt', 'ovirt_ca_file'),
|
||||
)
|
||||
|
||||
|
||||
def get_dict_of_struct(connection, vm):
|
||||
"""
|
||||
Transform SDK Vm Struct type to Python dictionary.
|
||||
"""
|
||||
if vm is None:
|
||||
return dict()
|
||||
|
||||
vms_service = connection.system_service().vms_service()
|
||||
clusters_service = connection.system_service().clusters_service()
|
||||
vm_service = vms_service.vm_service(vm.id)
|
||||
devices = vm_service.reported_devices_service().list()
|
||||
tags = vm_service.tags_service().list()
|
||||
stats = vm_service.statistics_service().list()
|
||||
labels = vm_service.affinity_labels_service().list()
|
||||
groups = clusters_service.cluster_service(
|
||||
vm.cluster.id
|
||||
).affinity_groups_service().list()
|
||||
|
||||
return {
|
||||
'id': vm.id,
|
||||
'name': vm.name,
|
||||
'host': connection.follow_link(vm.host).name if vm.host else None,
|
||||
'cluster': connection.follow_link(vm.cluster).name,
|
||||
'status': str(vm.status),
|
||||
'description': vm.description,
|
||||
'fqdn': vm.fqdn,
|
||||
'os_type': vm.os.type,
|
||||
'template': connection.follow_link(vm.template).name,
|
||||
'tags': [tag.name for tag in tags],
|
||||
'affinity_labels': [label.name for label in labels],
|
||||
'affinity_groups': [
|
||||
group.name for group in groups
|
||||
if vm.name in [vm.name for vm in connection.follow_link(group.vms)]
|
||||
],
|
||||
'statistics': dict(
|
||||
(stat.name, stat.values[0].datum) for stat in stats if stat.values
|
||||
),
|
||||
'devices': dict(
|
||||
(device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
|
||||
),
|
||||
'ansible_host': next((device.ips[0].address for device in devices if device.ips), None)
|
||||
}
|
||||
|
||||
|
||||
def get_data(connection, vm_name=None):
|
||||
"""
|
||||
Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
|
||||
"""
|
||||
vms_service = connection.system_service().vms_service()
|
||||
clusters_service = connection.system_service().clusters_service()
|
||||
|
||||
if vm_name:
|
||||
vm = vms_service.list(search='name=%s' % vm_name) or [None]
|
||||
data = get_dict_of_struct(
|
||||
connection=connection,
|
||||
vm=vm[0],
|
||||
)
|
||||
else:
|
||||
vms = dict()
|
||||
data = defaultdict(list)
|
||||
for vm in vms_service.list():
|
||||
name = vm.name
|
||||
vm_service = vms_service.vm_service(vm.id)
|
||||
cluster_service = clusters_service.cluster_service(vm.cluster.id)
|
||||
|
||||
# Add vm to vms dict:
|
||||
vms[name] = get_dict_of_struct(connection, vm)
|
||||
|
||||
# Add vm to cluster group:
|
||||
cluster_name = connection.follow_link(vm.cluster).name
|
||||
data['cluster_%s' % cluster_name].append(name)
|
||||
|
||||
# Add vm to tag group:
|
||||
tags_service = vm_service.tags_service()
|
||||
for tag in tags_service.list():
|
||||
data['tag_%s' % tag.name].append(name)
|
||||
|
||||
# Add vm to status group:
|
||||
data['status_%s' % vm.status].append(name)
|
||||
|
||||
# Add vm to affinity group:
|
||||
for group in cluster_service.affinity_groups_service().list():
|
||||
if vm.name in [
|
||||
v.name for v in connection.follow_link(group.vms)
|
||||
]:
|
||||
data['affinity_group_%s' % group.name].append(vm.name)
|
||||
|
||||
# Add vm to affinity label group:
|
||||
affinity_labels_service = vm_service.affinity_labels_service()
|
||||
for label in affinity_labels_service.list():
|
||||
data['affinity_label_%s' % label.name].append(name)
|
||||
|
||||
data["_meta"] = {
|
||||
'hostvars': vms,
|
||||
}
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
connection = create_connection()
|
||||
|
||||
print(
|
||||
json.dumps(
|
||||
obj=get_data(
|
||||
connection=connection,
|
||||
vm_name=args.host,
|
||||
),
|
||||
sort_keys=args.pretty,
|
||||
indent=args.pretty * 2,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,145 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2016 Red Hat, Inc.
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
"""
|
||||
Ansible Tower/AWX dynamic inventory script
|
||||
==========================================
|
||||
|
||||
Generates dynamic inventory for Tower
|
||||
|
||||
Author: Matthew Jones (@matburt)
|
||||
"""
|
||||
|
||||
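# Configuration sketch (illustrative only; the values are hypothetical). All
# settings are taken from the environment by parse_configuration() below:
#
#     import os
#     os.environ.update({
#         'TOWER_HOST': 'https://tower.example.com',
#         'TOWER_USERNAME': 'admin',
#         'TOWER_PASSWORD': 'secret',
#         'TOWER_INVENTORY': '42',             # id of the inventory to mirror
#         'TOWER_LICENSE_TYPE': 'enterprise',  # use "open" to skip the license check
#         'TOWER_IGNORE_SSL': 'false',
#     })
#     main()  # prints the remote inventory as JSON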
import argparse
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import requests
|
||||
from requests.auth import HTTPBasicAuth
|
||||
|
||||
try:
|
||||
from urlparse import urljoin
|
||||
except ImportError:
|
||||
from urllib.parse import urljoin
|
||||
|
||||
|
||||
def parse_configuration():
|
||||
"""
|
||||
Create command line parser for the Ansible Tower dynamic inventory script.
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Ansible dynamic inventory script for Ansible Tower.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--list',
|
||||
action='store_true',
|
||||
default=True,
|
||||
help='Return all hosts known to Tower given a particular inventory',
|
||||
)
|
||||
parser.parse_args()
|
||||
host_name = os.environ.get("TOWER_HOST", None)
|
||||
username = os.environ.get("TOWER_USERNAME", None)
|
||||
password = os.environ.get("TOWER_PASSWORD", None)
|
||||
ignore_ssl = False
|
||||
ssl_negative_var = os.environ.get("TOWER_IGNORE_SSL", None)
|
||||
if ssl_negative_var:
|
||||
ignore_ssl = ssl_negative_var.lower() in ("1", "yes", "true")
|
||||
else:
|
||||
ssl_positive_var = os.environ.get("TOWER_VERIFY_SSL", None)
|
||||
if ssl_positive_var:
|
||||
ignore_ssl = ssl_positive_var.lower() not in ('true', '1', 't', 'y', 'yes')
|
||||
inventory = os.environ.get("TOWER_INVENTORY", None)
|
||||
license_type = os.environ.get("TOWER_LICENSE_TYPE", "enterprise")
|
||||
|
||||
errors = []
|
||||
if not host_name:
|
||||
errors.append("Missing TOWER_HOST in environment")
|
||||
if not username:
|
||||
errors.append("Missing TOWER_USERNAME in environment")
|
||||
if not password:
|
||||
errors.append("Missing TOWER_PASSWORD in environment")
|
||||
if not inventory:
|
||||
errors.append("Missing TOWER_INVENTORY in environment")
|
||||
if errors:
|
||||
raise RuntimeError("\n".join(errors))
|
||||
|
||||
return dict(tower_host=host_name,
|
||||
tower_user=username,
|
||||
tower_pass=password,
|
||||
tower_inventory=inventory,
|
||||
tower_license_type=license_type,
|
||||
ignore_ssl=ignore_ssl)
|
||||
|
||||
|
||||
def read_tower_inventory(tower_host, tower_user, tower_pass, inventory, license_type, ignore_ssl=False):
|
||||
if not re.match('(?:http|https)://', tower_host):
|
||||
tower_host = "https://{}".format(tower_host)
|
||||
inventory_url = urljoin(tower_host, "/api/v2/inventories/{}/script/?hostvars=1&towervars=1&all=1".format(inventory.replace('/', '')))
|
||||
config_url = urljoin(tower_host, "/api/v2/config/")
|
||||
try:
|
||||
if license_type != "open":
|
||||
config_response = requests.get(config_url,
|
||||
auth=HTTPBasicAuth(tower_user, tower_pass),
|
||||
verify=not ignore_ssl)
|
||||
if config_response.ok:
|
||||
source_type = config_response.json()['license_info']['license_type']
|
||||
if source_type != license_type:
|
||||
raise RuntimeError("Tower server licenses must match: source: {} local: {}".format(source_type,
|
||||
license_type))
|
||||
else:
|
||||
raise RuntimeError("Failed to validate the license of the remote Tower: {}".format(config_response))
|
||||
|
||||
response = requests.get(inventory_url,
|
||||
auth=HTTPBasicAuth(tower_user, tower_pass),
|
||||
verify=not ignore_ssl)
|
||||
if not response.ok:
|
||||
# If the GET /api/v2/inventories/N/script is not HTTP 200, print the error code
|
||||
msg = "Connection to remote host failed: {}".format(response)
|
||||
if response.text:
|
||||
msg += " with message: {}".format(response.text)
|
||||
raise RuntimeError(msg)
|
||||
try:
|
||||
# Attempt to parse JSON
|
||||
return response.json()
|
||||
except (ValueError, TypeError) as e:
|
||||
# If the JSON parse fails, print the ValueError
|
||||
raise RuntimeError("Failed to parse json from host: {}".format(e))
|
||||
except requests.ConnectionError as e:
|
||||
raise RuntimeError("Connection to remote host failed: {}".format(e))
|
||||
|
||||
|
||||
def main():
|
||||
config = parse_configuration()
|
||||
inventory_hosts = read_tower_inventory(config['tower_host'],
|
||||
config['tower_user'],
|
||||
config['tower_pass'],
|
||||
config['tower_inventory'],
|
||||
config['tower_license_type'],
|
||||
ignore_ssl=config['ignore_ssl'])
|
||||
print(
|
||||
json.dumps(
|
||||
inventory_hosts
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,793 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C): 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
# Requirements
|
||||
# - pyvmomi >= 6.0.0.2016.4
|
||||
|
||||
# TODO:
|
||||
# * more jq examples
|
||||
# * optional folder hierarchy
|
||||
|
||||
"""
|
||||
$ jq '._meta.hostvars[].config' data.json | head
|
||||
{
|
||||
"alternateguestname": "",
|
||||
"instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
|
||||
"memoryhotaddenabled": false,
|
||||
"guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
|
||||
"changeversion": "2016-05-16T18:43:14.977925Z",
|
||||
"uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
|
||||
"cpuhotremoveenabled": false,
|
||||
"vpmcenabled": false,
|
||||
"firmware": "bios",
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import atexit
|
||||
import datetime
|
||||
import itertools
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import ssl
|
||||
import sys
|
||||
import uuid
|
||||
from time import time
|
||||
|
||||
from jinja2 import Environment
|
||||
|
||||
from ansible.module_utils.six import integer_types, PY3
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
|
||||
try:
|
||||
import argparse
|
||||
except ImportError:
|
||||
sys.exit('Error: This inventory script requires the "argparse" Python module. Please install it or upgrade to python-2.7')
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
from pyVim.connect import SmartConnect, Disconnect
|
||||
except ImportError:
|
||||
sys.exit("ERROR: This inventory script required 'pyVmomi' Python module, it was not able to load it")
|
||||
|
||||
|
||||
def regex_match(s, pattern):
|
||||
'''Custom filter for regex matching'''
|
||||
reg = re.compile(pattern)
|
||||
if reg.match(s):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def select_chain_match(inlist, key, pattern):
|
||||
'''Get a key from a list of dicts, squash values to a single list, then filter'''
|
||||
outlist = [x[key] for x in inlist]
|
||||
outlist = list(itertools.chain(*outlist))
|
||||
outlist = [x for x in outlist if regex_match(x, pattern)]
|
||||
return outlist
|
||||
|
||||
|
||||
class VMwareMissingHostException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class VMWareInventory(object):
|
||||
__name__ = 'VMWareInventory'
|
||||
|
||||
guest_props = False
|
||||
instances = []
|
||||
debug = False
|
||||
load_dumpfile = None
|
||||
write_dumpfile = None
|
||||
maxlevel = 1
|
||||
lowerkeys = True
|
||||
config = None
|
||||
cache_max_age = None
|
||||
cache_path_cache = None
|
||||
cache_path_index = None
|
||||
cache_dir = None
|
||||
server = None
|
||||
port = None
|
||||
username = None
|
||||
password = None
|
||||
validate_certs = True
|
||||
host_filters = []
|
||||
skip_keys = []
|
||||
groupby_patterns = []
|
||||
groupby_custom_field_excludes = []
|
||||
|
||||
safe_types = [bool, str, float, None] + list(integer_types)
|
||||
iter_types = [dict, list]
|
||||
|
||||
bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
|
||||
|
||||
vimTableMaxDepth = {
|
||||
"vim.HostSystem": 2,
|
||||
"vim.VirtualMachine": 2,
|
||||
}
|
||||
|
||||
custom_fields = {}
|
||||
|
||||
# use jinja environments to allow for custom filters
|
||||
env = Environment()
|
||||
env.filters['regex_match'] = regex_match
|
||||
env.filters['select_chain_match'] = select_chain_match
|
||||
|
||||
# translation table for attributes to fetch for known vim types
|
||||
|
||||
vimTable = {
|
||||
vim.Datastore: ['_moId', 'name'],
|
||||
vim.ResourcePool: ['_moId', 'name'],
|
||||
vim.HostSystem: ['_moId', 'name'],
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _empty_inventory():
|
||||
return {"_meta": {"hostvars": {}}}
|
||||
|
||||
def __init__(self, load=True):
|
||||
self.inventory = VMWareInventory._empty_inventory()
|
||||
|
||||
if load:
|
||||
# Read settings and parse CLI arguments
|
||||
self.parse_cli_args()
|
||||
self.read_settings()
|
||||
|
||||
# Check the cache
|
||||
cache_valid = self.is_cache_valid()
|
||||
|
||||
# Handle Cache
|
||||
if self.args.refresh_cache or not cache_valid:
|
||||
self.do_api_calls_update_cache()
|
||||
else:
|
||||
self.debugl('loading inventory from cache')
|
||||
self.inventory = self.get_inventory_from_cache()
|
||||
|
||||
def debugl(self, text):
|
||||
if self.args.debug:
|
||||
try:
|
||||
text = str(text)
|
||||
except UnicodeEncodeError:
|
||||
text = text.encode('utf-8')
|
||||
print('%s %s' % (datetime.datetime.now(), text))
|
||||
|
||||
def show(self):
|
||||
# Data to print
|
||||
self.debugl('dumping results')
|
||||
data_to_print = None
|
||||
if self.args.host:
|
||||
data_to_print = self.get_host_info(self.args.host)
|
||||
elif self.args.list:
|
||||
# Display list of instances for inventory
|
||||
data_to_print = self.inventory
|
||||
return json.dumps(data_to_print, indent=2)
|
||||
|
||||
def is_cache_valid(self):
|
||||
''' Determines if the cache file has expired or is still valid '''
|
||||
|
||||
valid = False
|
||||
|
||||
if os.path.isfile(self.cache_path_cache):
|
||||
mod_time = os.path.getmtime(self.cache_path_cache)
|
||||
current_time = time()
|
||||
if (mod_time + self.cache_max_age) > current_time:
|
||||
valid = True
|
||||
|
||||
return valid
|
||||
|
||||
def do_api_calls_update_cache(self):
|
||||
''' Get instances and cache the data '''
|
||||
self.inventory = self.instances_to_inventory(self.get_instances())
|
||||
self.write_to_cache(self.inventory)
|
||||
|
||||
def write_to_cache(self, data):
|
||||
''' Dump inventory to json file '''
|
||||
with open(self.cache_path_cache, 'w') as f:
|
||||
f.write(json.dumps(data, indent=2))
|
||||
|
||||
def get_inventory_from_cache(self):
|
||||
''' Read in jsonified inventory '''
|
||||
|
||||
jdata = None
|
||||
with open(self.cache_path_cache, 'r') as f:
|
||||
jdata = f.read()
|
||||
return json.loads(jdata)
|
||||
|
||||
def read_settings(self):
|
||||
''' Reads the settings from the vmware_inventory.ini file '''
|
||||
|
||||
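# Example vmware_inventory.ini (illustrative only; the values are hypothetical
# and the option names mirror the defaults defined just below):
#
#     [vmware]
#     server = vcenter.example.com
#     username = administrator@vsphere.local
#     password = secret
#     validate_certs = False
#     host_filters = {{ runtime.powerstate == "poweredOn" }}
#     groupby_patterns = {{ guest.guestid }},{{ "templates" if config.template else "guests" }}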
scriptbasename = __file__
|
||||
scriptbasename = os.path.basename(scriptbasename)
|
||||
scriptbasename = scriptbasename.replace('.py', '')
|
||||
|
||||
defaults = {'vmware': {
|
||||
'server': '',
|
||||
'port': 443,
|
||||
'username': '',
|
||||
'password': '',
|
||||
'validate_certs': True,
|
||||
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
|
||||
'cache_name': 'ansible-vmware',
|
||||
'cache_path': '~/.ansible/tmp',
|
||||
'cache_max_age': 3600,
|
||||
'max_object_level': 1,
|
||||
'skip_keys': 'declaredalarmstate,'
|
||||
'disabledmethod,'
|
||||
'dynamicproperty,'
|
||||
'dynamictype,'
|
||||
'environmentbrowser,'
|
||||
'managedby,'
|
||||
'parent,'
|
||||
'childtype,'
|
||||
'resourceconfig',
|
||||
'alias_pattern': '{{ config.name + "_" + config.uuid }}',
|
||||
'host_pattern': '{{ guest.ipaddress }}',
|
||||
'host_filters': '{{ runtime.powerstate == "poweredOn" }}',
|
||||
'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
|
||||
'lower_var_keys': True,
|
||||
'custom_field_group_prefix': 'vmware_tag_',
|
||||
'groupby_custom_field_excludes': '',
|
||||
'groupby_custom_field': False}
|
||||
}
|
||||
|
||||
if PY3:
|
||||
config = configparser.ConfigParser()
|
||||
else:
|
||||
config = configparser.SafeConfigParser()
|
||||
|
||||
# where is the config?
|
||||
vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
|
||||
vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
|
||||
config.read(vmware_ini_path)
|
||||
|
||||
if 'vmware' not in config.sections():
|
||||
config.add_section('vmware')
|
||||
|
||||
# apply defaults
|
||||
for k, v in defaults['vmware'].items():
|
||||
if not config.has_option('vmware', k):
|
||||
config.set('vmware', k, str(v))
|
||||
|
||||
# where is the cache?
|
||||
self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
|
||||
if self.cache_dir and not os.path.exists(self.cache_dir):
|
||||
os.makedirs(self.cache_dir)
|
||||
|
||||
# set the cache filename and max age
|
||||
cache_name = config.get('vmware', 'cache_name')
|
||||
self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
|
||||
self.debugl('cache path is %s' % self.cache_path_cache)
|
||||
self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))
|
||||
|
||||
# mark the connection info
|
||||
self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server'))
|
||||
self.debugl('server is %s' % self.server)
|
||||
self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
|
||||
self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username'))
|
||||
self.debugl('username is %s' % self.username)
|
||||
self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password', raw=True))
|
||||
self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
|
||||
if self.validate_certs in ['no', 'false', 'False', False]:
|
||||
self.validate_certs = False
|
||||
|
||||
self.debugl('cert validation is %s' % self.validate_certs)
|
||||
|
||||
# behavior control
|
||||
self.maxlevel = int(config.get('vmware', 'max_object_level'))
|
||||
self.debugl('max object level is %s' % self.maxlevel)
|
||||
self.lowerkeys = config.get('vmware', 'lower_var_keys')
|
||||
if type(self.lowerkeys) != bool:
|
||||
if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
|
||||
self.lowerkeys = True
|
||||
else:
|
||||
self.lowerkeys = False
|
||||
self.debugl('lower keys is %s' % self.lowerkeys)
|
||||
self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
|
||||
self.debugl('skip keys is %s' % self.skip_keys)
|
||||
temp_host_filters = list(config.get('vmware', 'host_filters').split('}},'))
|
||||
for host_filter in temp_host_filters:
|
||||
host_filter = host_filter.rstrip()
|
||||
if host_filter != "":
|
||||
if not host_filter.endswith("}}"):
|
||||
host_filter += "}}"
|
||||
self.host_filters.append(host_filter)
|
||||
self.debugl('host filters are %s' % self.host_filters)
|
||||
|
||||
temp_groupby_patterns = list(config.get('vmware', 'groupby_patterns').split('}},'))
|
||||
for groupby_pattern in temp_groupby_patterns:
|
||||
groupby_pattern = groupby_pattern.rstrip()
|
||||
if groupby_pattern != "":
|
||||
if not groupby_pattern.endswith("}}"):
|
||||
groupby_pattern += "}}"
|
||||
self.groupby_patterns.append(groupby_pattern)
|
||||
self.debugl('groupby patterns are %s' % self.groupby_patterns)
|
||||
temp_groupby_custom_field_excludes = config.get('vmware', 'groupby_custom_field_excludes')
|
||||
self.groupby_custom_field_excludes = [x.strip('"') for x in [y.strip("'") for y in temp_groupby_custom_field_excludes.split(",")]]
|
||||
self.debugl('groupby exclude strings are %s' % self.groupby_custom_field_excludes)
|
||||
|
||||
# Special feature to disable the brute force serialization of the
|
||||
# virtual machine objects. The key name for these properties does not
|
||||
# matter because the values are just items for a larger list.
|
||||
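# Example [properties] section (illustrative only; the key names are arbitrary
# and only the values -- dotted attribute paths consumed by facts_from_proplist
# below -- are collected):
#
#     [properties]
#     prop01 = name
#     prop02 = config.uuid
#     prop03 = guest.ipAddress
#     prop04 = runtime.powerState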
if config.has_section('properties'):
|
||||
self.guest_props = []
|
||||
for prop in config.items('properties'):
|
||||
self.guest_props.append(prop[1])
|
||||
|
||||
# save the config
|
||||
self.config = config
|
||||
|
||||
def parse_cli_args(self):
|
||||
''' Command line argument processing '''
|
||||
|
||||
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi')
|
||||
parser.add_argument('--debug', action='store_true', default=False,
|
||||
help='show debug info')
|
||||
parser.add_argument('--list', action='store_true', default=True,
|
||||
help='List instances (default: True)')
|
||||
parser.add_argument('--host', action='store',
|
||||
help='Get all the variables about a specific instance')
|
||||
parser.add_argument('--refresh-cache', action='store_true', default=False,
|
||||
help='Force refresh of cache by making API requests to VSphere (default: False - use cache files)')
|
||||
parser.add_argument('--max-instances', default=None, type=int,
|
||||
help='maximum number of instances to retrieve')
|
||||
self.args = parser.parse_args()
|
||||
|
||||
def get_instances(self):
|
||||
''' Get a list of vm instances with pyvmomi '''
|
||||
kwargs = {'host': self.server,
|
||||
'user': self.username,
|
||||
'pwd': self.password,
|
||||
'port': int(self.port)}
|
||||
|
||||
if self.validate_certs and hasattr(ssl, 'SSLContext'):
|
||||
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
|
||||
context.verify_mode = ssl.CERT_REQUIRED
|
||||
context.check_hostname = True
|
||||
kwargs['sslContext'] = context
|
||||
elif self.validate_certs and not hasattr(ssl, 'SSLContext'):
|
||||
sys.exit('pyVim does not support changing verification mode with python < 2.7.9. Either update '
|
||||
'python or use validate_certs=false.')
|
||||
elif not self.validate_certs and hasattr(ssl, 'SSLContext'):
|
||||
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
|
||||
context.verify_mode = ssl.CERT_NONE
|
||||
context.check_hostname = False
|
||||
kwargs['sslContext'] = context
|
||||
elif not self.validate_certs and not hasattr(ssl, 'SSLContext'):
|
||||
# Python < 2.7.9 or RHEL/CentOS < 7.4: ssl.SSLContext is not available
|
||||
pass
|
||||
|
||||
return self._get_instances(kwargs)
|
||||
|
||||
def _get_instances(self, inkwargs):
|
||||
''' Make API calls '''
|
||||
instances = []
|
||||
si = None
|
||||
try:
|
||||
si = SmartConnect(**inkwargs)
|
||||
except ssl.SSLError as connection_error:
|
||||
if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and self.validate_certs:
|
||||
sys.exit("Unable to connect to ESXi server due to %s, "
|
||||
"please specify validate_certs=False and try again" % connection_error)
|
||||
|
||||
except Exception as exc:
|
||||
self.debugl("Unable to connect to ESXi server due to %s" % exc)
|
||||
sys.exit("Unable to connect to ESXi server due to %s" % exc)
|
||||
|
||||
self.debugl('retrieving all instances')
|
||||
if not si:
|
||||
sys.exit("Could not connect to the specified host using specified "
|
||||
"username and password")
|
||||
atexit.register(Disconnect, si)
|
||||
content = si.RetrieveContent()
|
||||
|
||||
# Create a search container for virtualmachines
|
||||
self.debugl('creating containerview for virtualmachines')
|
||||
container = content.rootFolder
|
||||
viewType = [vim.VirtualMachine]
|
||||
recursive = True
|
||||
containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
|
||||
children = containerView.view
|
||||
for child in children:
|
||||
# If requested, limit the total number of instances
|
||||
if self.args.max_instances:
|
||||
if len(instances) >= self.args.max_instances:
|
||||
break
|
||||
instances.append(child)
|
||||
self.debugl("%s total instances in container view" % len(instances))
|
||||
|
||||
if self.args.host:
|
||||
instances = [x for x in instances if x.name == self.args.host]
|
||||
|
||||
instance_tuples = []
|
||||
for instance in instances:
|
||||
if self.guest_props:
|
||||
ifacts = self.facts_from_proplist(instance)
|
||||
else:
|
||||
ifacts = self.facts_from_vobj(instance)
|
||||
instance_tuples.append((instance, ifacts))
|
||||
self.debugl('facts collected for all instances')
|
||||
|
||||
try:
|
||||
cfm = content.customFieldsManager
|
||||
if cfm is not None and cfm.field:
|
||||
for f in cfm.field:
|
||||
if not f.managedObjectType or f.managedObjectType == vim.VirtualMachine:
|
||||
self.custom_fields[f.key] = f.name
|
||||
self.debugl('%d custom fields collected' % len(self.custom_fields))
|
||||
except vmodl.RuntimeFault as exc:
|
||||
self.debugl("Unable to gather custom fields due to %s" % exc.msg)
|
||||
except IndexError as exc:
|
||||
self.debugl("Unable to gather custom fields due to %s" % exc)
|
||||
|
||||
return instance_tuples
|
||||
|
||||
def instances_to_inventory(self, instances):
|
||||
''' Convert a list of vm objects into a json compliant inventory '''
|
||||
self.debugl('re-indexing instances based on ini settings')
|
||||
inventory = VMWareInventory._empty_inventory()
|
||||
inventory['all'] = {}
|
||||
inventory['all']['hosts'] = []
|
||||
for idx, instance in enumerate(instances):
|
||||
# make a unique id for this object to avoid vmware's
|
||||
# numerous UUIDs, which aren't all unique.
|
||||
thisid = str(uuid.uuid4())
|
||||
idata = instance[1]
|
||||
|
||||
# Put it in the inventory
|
||||
inventory['all']['hosts'].append(thisid)
|
||||
inventory['_meta']['hostvars'][thisid] = idata.copy()
|
||||
inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid
|
||||
|
||||
# Make a map of the uuid to the alias the user wants
|
||||
name_mapping = self.create_template_mapping(
|
||||
inventory,
|
||||
self.config.get('vmware', 'alias_pattern')
|
||||
)
|
||||
|
||||
# Make a map of the uuid to the ssh hostname the user wants
|
||||
host_mapping = self.create_template_mapping(
|
||||
inventory,
|
||||
self.config.get('vmware', 'host_pattern')
|
||||
)
|
||||
|
||||
# Reset the inventory keys
|
||||
for k, v in name_mapping.items():
|
||||
|
||||
if not host_mapping or k not in host_mapping:
|
||||
continue
|
||||
|
||||
# set ansible_host (2.x)
|
||||
try:
|
||||
inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
|
||||
# 1.9.x backwards compatibility
|
||||
inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k]
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
if k == v:
|
||||
continue
|
||||
|
||||
# add new key
|
||||
inventory['all']['hosts'].append(v)
|
||||
inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][k]
|
||||
|
||||
# cleanup old key
|
||||
inventory['all']['hosts'].remove(k)
|
||||
inventory['_meta']['hostvars'].pop(k, None)
|
||||
|
||||
self.debugl('pre-filtered hosts:')
|
||||
for i in inventory['all']['hosts']:
|
||||
self.debugl(' * %s' % i)
|
||||
# Apply host filters
|
||||
for hf in self.host_filters:
|
||||
if not hf:
|
||||
continue
|
||||
self.debugl('filter: %s' % hf)
|
||||
filter_map = self.create_template_mapping(inventory, hf, dtype='boolean')
|
||||
for k, v in filter_map.items():
|
||||
if not v:
|
||||
# delete this host
|
||||
inventory['all']['hosts'].remove(k)
|
||||
inventory['_meta']['hostvars'].pop(k, None)
|
||||
|
||||
self.debugl('post-filter hosts:')
|
||||
for i in inventory['all']['hosts']:
|
||||
self.debugl(' * %s' % i)
|
||||
|
||||
# Create groups
|
||||
for gbp in self.groupby_patterns:
|
||||
groupby_map = self.create_template_mapping(inventory, gbp)
|
||||
for k, v in groupby_map.items():
|
||||
if v not in inventory:
|
||||
inventory[v] = {}
|
||||
inventory[v]['hosts'] = []
|
||||
if k not in inventory[v]['hosts']:
|
||||
inventory[v]['hosts'].append(k)
|
||||
|
||||
if self.config.get('vmware', 'groupby_custom_field'):
|
||||
for k, v in inventory['_meta']['hostvars'].items():
|
||||
if 'customvalue' in v:
|
||||
for tv in v['customvalue']:
|
||||
newkey = None
|
||||
field_name = self.custom_fields[tv['key']] if tv['key'] in self.custom_fields else tv['key']
|
||||
if field_name in self.groupby_custom_field_excludes:
|
||||
continue
|
||||
values = []
|
||||
keylist = map(lambda x: x.strip(), tv['value'].split(','))
|
||||
for kl in keylist:
|
||||
try:
|
||||
newkey = "%s%s_%s" % (self.config.get('vmware', 'custom_field_group_prefix'), str(field_name), kl)
|
||||
newkey = newkey.strip()
|
||||
except Exception as e:
|
||||
self.debugl(e)
|
||||
values.append(newkey)
|
||||
for tag in values:
|
||||
if not tag:
|
||||
continue
|
||||
if tag not in inventory:
|
||||
inventory[tag] = {}
|
||||
inventory[tag]['hosts'] = []
|
||||
if k not in inventory[tag]['hosts']:
|
||||
inventory[tag]['hosts'].append(k)
|
||||
|
||||
return inventory
|
||||
|
    def create_template_mapping(self, inventory, pattern, dtype='string'):
        ''' Return a hash of uuid to templated string from pattern '''
        mapping = {}
        for k, v in inventory['_meta']['hostvars'].items():
            t = self.env.from_string(pattern)
            newkey = None
            try:
                newkey = t.render(v)
                newkey = newkey.strip()
            except Exception as e:
                self.debugl(e)
            if not newkey:
                continue
            elif dtype == 'integer':
                newkey = int(newkey)
            elif dtype == 'boolean':
                if newkey.lower() == 'false':
                    newkey = False
                elif newkey.lower() == 'true':
                    newkey = True
            elif dtype == 'string':
                pass
            mapping[k] = newkey
        return mapping

    def facts_from_proplist(self, vm):
        '''Get specific properties instead of serializing everything'''

        rdata = {}
        for prop in self.guest_props:
            self.debugl('getting %s property for %s' % (prop, vm.name))
            key = prop
            if self.lowerkeys:
                key = key.lower()

            if '.' not in prop:
                # props without periods are direct attributes of the parent
                vm_property = getattr(vm, prop)
                if isinstance(vm_property, vim.CustomFieldsManager.Value.Array):
                    temp_vm_property = []
                    for vm_prop in vm_property:
                        temp_vm_property.append({'key': vm_prop.key,
                                                 'value': vm_prop.value})
                    rdata[key] = temp_vm_property
                else:
                    rdata[key] = vm_property
            else:
                # props with periods are subkeys of parent attributes
                parts = prop.split('.')
                total = len(parts) - 1

                # pointer to the current object
                val = None
                # pointer to the current result key
                lastref = rdata

                for idx, x in enumerate(parts):

                    if isinstance(val, dict):
                        if x in val:
                            val = val.get(x)
                        elif x.lower() in val:
                            val = val.get(x.lower())
                    else:
                        # if the val wasn't set yet, get it from the parent
                        if not val:
                            try:
                                val = getattr(vm, x)
                            except AttributeError as e:
                                self.debugl(e)
                        else:
                            # in a subkey, get the subprop from the previous attrib
                            try:
                                val = getattr(val, x)
                            except AttributeError as e:
                                self.debugl(e)

                        # make sure it serializes
                        val = self._process_object_types(val)

                    # lowercase keys if requested
                    if self.lowerkeys:
                        x = x.lower()

                    # change the pointer or set the final value
                    if idx != total:
                        if x not in lastref:
                            lastref[x] = {}
                        lastref = lastref[x]
                    else:
                        lastref[x] = val
        if self.args.debug:
            self.debugl("For %s" % vm.name)
            for key in list(rdata.keys()):
                if isinstance(rdata[key], dict):
                    for ikey in list(rdata[key].keys()):
                        self.debugl("Property '%s.%s' has value '%s'" % (key, ikey, rdata[key][ikey]))
                else:
                    self.debugl("Property '%s' has value '%s'" % (key, rdata[key]))
        return rdata

    def facts_from_vobj(self, vobj, level=0):
        ''' Traverse a VM object and return a json compliant data structure '''

        # pyvmomi objects are not yet serializable, but may be one day ...
        # https://github.com/vmware/pyvmomi/issues/21

        # WARNING:
        # Accessing an object attribute will trigger a SOAP call to the remote.
        # Increasing the attributes collected or the depth of recursion greatly
        # increases runtime duration and potentially memory+network utilization.

        if level == 0:
            try:
                self.debugl("get facts for %s" % vobj.name)
            except Exception as e:
                self.debugl(e)

        rdata = {}

        methods = dir(vobj)
        methods = [str(x) for x in methods if not x.startswith('_')]
        methods = [x for x in methods if x not in self.bad_types]
        methods = [x for x in methods if not x.lower() in self.skip_keys]
        methods = sorted(methods)

        for method in methods:
            # Attempt to get the method, skip on fail
            try:
                methodToCall = getattr(vobj, method)
            except Exception as e:
                continue

            # Skip callable methods
            if callable(methodToCall):
                continue

            if self.lowerkeys:
                method = method.lower()

            rdata[method] = self._process_object_types(
                methodToCall,
                thisvm=vobj,
                inkey=method,
            )

        return rdata

    def _process_object_types(self, vobj, thisvm=None, inkey='', level=0):
        ''' Serialize an object '''
        rdata = {}

        if type(vobj).__name__ in self.vimTableMaxDepth and level >= self.vimTableMaxDepth[type(vobj).__name__]:
            return rdata

        if vobj is None:
            rdata = None
        elif type(vobj) in self.vimTable:
            rdata = {}
            for key in self.vimTable[type(vobj)]:
                try:
                    rdata[key] = getattr(vobj, key)
                except Exception as e:
                    self.debugl(e)

        elif issubclass(type(vobj), str) or isinstance(vobj, str):
            if vobj.isalnum():
                rdata = vobj
            else:
                rdata = vobj.encode('utf-8').decode('utf-8')
        elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
            rdata = vobj
        elif issubclass(type(vobj), integer_types) or isinstance(vobj, integer_types):
            rdata = vobj
        elif issubclass(type(vobj), float) or isinstance(vobj, float):
            rdata = vobj
        elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
            rdata = []
            try:
                vobj = sorted(vobj)
            except Exception:
                pass

            for idv, vii in enumerate(vobj):
                if level + 1 <= self.maxlevel:
                    vid = self._process_object_types(
                        vii,
                        thisvm=thisvm,
                        inkey=inkey + '[' + str(idv) + ']',
                        level=(level + 1)
                    )

                    if vid:
                        rdata.append(vid)

        elif issubclass(type(vobj), dict):
            pass

        elif issubclass(type(vobj), object):
            methods = dir(vobj)
            methods = [str(x) for x in methods if not x.startswith('_')]
            methods = [x for x in methods if x not in self.bad_types]
            methods = [x for x in methods if not inkey + '.' + x.lower() in self.skip_keys]
            methods = sorted(methods)

            for method in methods:
                # Attempt to get the method, skip on fail
                try:
                    methodToCall = getattr(vobj, method)
                except Exception as e:
                    continue

                if callable(methodToCall):
                    continue

                if self.lowerkeys:
                    method = method.lower()
                if level + 1 <= self.maxlevel:
                    try:
                        rdata[method] = self._process_object_types(
                            methodToCall,
                            thisvm=thisvm,
                            inkey=inkey + '.' + method,
                            level=(level + 1)
                        )
                    except vim.fault.NoPermission:
                        self.debugl("Skipping method %s (NoPermission)" % method)
        else:
            pass

        return rdata

    def get_host_info(self, host):
        ''' Return hostvars for a single host '''

        if host in self.inventory['_meta']['hostvars']:
            return self.inventory['_meta']['hostvars'][host]
        elif self.args.host and self.inventory['_meta']['hostvars']:
            match = None
            for k, v in self.inventory['_meta']['hostvars'].items():
                if self.inventory['_meta']['hostvars'][k]['name'] == self.args.host:
                    match = k
                    break
            if match:
                return self.inventory['_meta']['hostvars'][match]
            else:
                raise VMwareMissingHostException('%s not found' % host)
        else:
            raise VMwareMissingHostException('%s not found' % host)


if __name__ == "__main__":
    # Run the script
    print(VMWareInventory().show())
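The removed script above builds the standard dynamic-inventory JSON: top-level group names mapping to `hosts` lists, plus a `_meta.hostvars` dict keyed by VM UUID, with groups derived from Jinja2 patterns rendered against each host's facts (see `create_template_mapping` and the groupby loop). The following is a minimal sketch of that idea only; it is not part of the removed file or of this commit. The UUID, facts, and groupby pattern are made up, and only the `jinja2` library is assumed to be installed.

# Illustrative sketch (not from the diff): render a Jinja2 groupby pattern
# against per-host facts and emit the inventory JSON shape the script produces.
import json
from jinja2 import Environment

hostvars = {
    '421a9b3c-0000-0000-0000-000000000001': {
        'name': 'web01',
        'ansible_host': '192.0.2.10',
        'config': {'guestid': 'rhel7_64Guest'},
    },
}

env = Environment()
pattern = "{{ 'vmware_guest_' + config.guestid }}"  # hypothetical groupby pattern

inventory = {'all': {'hosts': list(hostvars)}, '_meta': {'hostvars': hostvars}}
for uuid, facts in hostvars.items():
    # each rendered pattern value becomes a group containing that host's UUID
    group = env.from_string(pattern).render(facts).strip()
    inventory.setdefault(group, {'hosts': []})
    if uuid not in inventory[group]['hosts']:
        inventory[group]['hosts'].append(uuid)

print(json.dumps(inventory, indent=2))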
@ -865,16 +865,6 @@ SATELLITE6_EXCLUDE_EMPTY_GROUPS = True
SATELLITE6_INSTANCE_ID_VAR = 'foreman.id'
# SATELLITE6_GROUP_PREFIX and SATELLITE6_GROUP_PATTERNS defined in source vars

# ---------------------
# ----- CloudForms -----
# ---------------------
CLOUDFORMS_ENABLED_VAR = 'cloudforms.power_state'
CLOUDFORMS_ENABLED_VALUE = 'on'
CLOUDFORMS_GROUP_FILTER = r'^.+$'
CLOUDFORMS_HOST_FILTER = r'^.+$'
CLOUDFORMS_EXCLUDE_EMPTY_GROUPS = True
CLOUDFORMS_INSTANCE_ID_VAR = 'cloudforms.id'

# ---------------------
# ----- Custom -----
# ---------------------

@ -79,7 +79,6 @@ function InventorySourcesList({ i18n, nodeResource, onUpdateNodeResource }) {
    [`azure_rm`, i18n._(t`Microsoft Azure Resource Manager`)],
    [`vmware`, i18n._(t`VMware vCenter`)],
    [`satellite6`, i18n._(t`Red Hat Satellite 6`)],
    [`cloudforms`, i18n._(t`Red Hat CloudForms`)],
    [`openstack`, i18n._(t`OpenStack`)],
    [`rhv`, i18n._(t`Red Hat Virtualization`)],
    [`tower`, i18n._(t`Ansible Tower`)],

@ -43,7 +43,7 @@ options:
    source:
      description:
        - The source to use for this group.
      choices: [ "scm", "ec2", "gce", "azure_rm", "vmware", "satellite6", "cloudforms", "openstack", "rhv", "tower", "custom" ]
      choices: [ "scm", "ec2", "gce", "azure_rm", "vmware", "satellite6", "openstack", "rhv", "tower", "custom" ]
      type: str
    source_path:
      description:
@ -162,7 +162,7 @@ def main():
        # How do we handle manual and file? Tower does not seem to be able to activate them
        #
        source=dict(choices=["scm", "ec2", "gce",
                             "azure_rm", "vmware", "satellite6", "cloudforms",
                             "azure_rm", "vmware", "satellite6",
                             "openstack", "rhv", "tower", "custom"]),
        source_path=dict(),
        source_script=dict(),
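For reference, a minimal, hypothetical sketch of what the trimmed argument spec in the hunk above amounts to: "cloudforms" is simply no longer an accepted `source` value, so such a request fails parameter validation before anything else runs. Only the `source`, `source_path`, and `source_script` entries come from the diff; the surrounding module boilerplate here is assumed for illustration.

# Hypothetical, stripped-down module using the post-change choices list.
from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            source=dict(choices=["scm", "ec2", "gce",
                                 "azure_rm", "vmware", "satellite6",
                                 "openstack", "rhv", "tower", "custom"]),
            source_path=dict(),
            source_script=dict(),
        ),
    )
    # A task that passes source=cloudforms now fails validation at this point.
    module.exit_json(changed=False, source=module.params['source'])


if __name__ == '__main__':
    main()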