initial public version

This commit is contained in:
Sergey Bubnov 2018-10-02 10:12:23 +04:00
parent 1a8b2de4cf
commit abbd377dce
83 changed files with 4080 additions and 0 deletions

14
.gitignore vendored Normal file
View File

@ -0,0 +1,14 @@
.venv/
.tmp/
*.retry
*.log
*.pyc
.pass-*
vars/envs
vars/stacks
*~
*#
*.swp
*-apps.yml
.pytest_cache/
vars/conf/

10
.gitmodules vendored Normal file
View File

@ -0,0 +1,10 @@
[submodule "role-samba"]
path = roles/samba
url = https://github.com/altlinuxteam/ansible-samba.git
branch = master
[submodule "roles/remote-desktop"]
path = roles/remote-desktop
url = https://github.com/altlinuxteam/remote-desktop.git
[submodule "roles/tftp"]
path = roles/tftp
url = https://github.com/altlinuxteam/ansible-tftp.git

8
ansible.cfg Normal file
View File

@ -0,0 +1,8 @@
[defaults]
executable=bash
ansible_python_interpreter="python"
filter_plugins=plugins/
[ssh_connection]
ssh_args = -F .tmp/ssh_config
pipelining = True

23
docs/example_stack.org Normal file
View File

@ -0,0 +1,23 @@
* Generate example environment and stack
#+BEGIN_SRC sh
ansible-playbook make-example-stack.yml \
-e pve_address=10.64.0.6 \
-e pve_login="example@pve" \
-e pve_password='examplePass123$'\
`# parameters listed below are optional` \
`# default values will be used if omitted` \
-e stack_name="example-samba" \
-e domain_name="domain.alt" \
-e public_bridge="vmbr1" \
-e public_vlan="994" \
-e public_gw="10.64.84.1" \
-e public_dns="8.8.8.8" \
-e private_bridge="vmbr1" \
-e private_vlan="940" \
-e server_template="alt-srv-82-x64-latest" \
-e servers_public_ips="10.64.84.20/24" \
-e servers_private_ips="10.64.6.1/24" \
-e desktop_template="alt-workstation-82-x64-latest" \
-e desktops_public_ips="10.64.84.200/24" \
-e desktops_private_ips="10.64.6.100/24"
#+END_SRC

1130
library/proxmox_kvm.py Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,157 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Sergey Bubnov (@homgbebebe) <omg at altlinux.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: proxmox_qemu_agent
short_description: Execute qemu-agent commands on Qemu(KVM) Virtual Machines in Proxmox VE cluster.
description:
- Allows you to execute qemu-agent commands on virtual machines.
version_added: "2.6"
author: "Sergey Bubnov (@homgbebebe) <omg at altlinux.org>"
options:
api_host:
description:
- Specify the target host of the Proxmox VE cluster.
required: true
api_user:
description:
- Specify the user to authenticate with.
required: true
api_password:
description:
- Specify the password to authenticate with.
- You can use C(PROXMOX_PASSWORD) environment variable.
name:
description:
- Specifies the VM name.
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: 'no'
vmid:
description:
- Specifies the VM ID. Alternatively, use the I(name) parameter.
requirements: [ "proxmoxer", "requests" ]
'''
EXAMPLES = '''
# Ping VM via agent interface
- proxmox_qemu_agent:
api_user : root@pam
api_password: secret
api_host : helldorado
name : vm1
command : ping
'''
RETURN = '''
'''
import os
import re
import time
import traceback
try:
from proxmoxer import ProxmoxAPI
HAS_PROXMOXER = True
except ImportError:
HAS_PROXMOXER = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
VZ_TYPE = 'qemu'
def get_vmid(proxmox, name):
return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm['name'] == name]
def get_vm(proxmox, vmid):
return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
def agent_command(module, proxmox, vm, vmid, command):
try:
r = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).agent.post(command=command)
module.exit_json(changed=False, results=r['result'])
except Exception as e:
module.fail_json(msg='qemu-agent request failed with exception: %s' % e)
def main():
module = AnsibleModule(
argument_spec=dict(
api_host=dict(required=True),
api_user=dict(required=True),
api_password=dict(no_log=True),
name=dict(type='str'),
command=dict(default=None, choices=
['ping', 'get-time', 'info', 'fsfreeze-status', 'fsfreeze-freeze', 'fsfreeze-thaw', 'fstrim'
,'network-get-interfaces', 'get-vcpus', 'get-fsinfo', 'get-memory-blocks', 'get-memory-block-info'
,'suspend-hybrid', 'suspend-ram', 'suspend-disk', 'shutdown'
]),
validate_certs=dict(type='bool', default='no'),
vmid=dict(type='int', default=None),
),
required_one_of=[('name', 'vmid',)],
)
if not HAS_PROXMOXER:
module.fail_json(msg='proxmoxer required for this module')
api_user = module.params['api_user']
api_host = module.params['api_host']
api_password = module.params['api_password']
command = module.params['command']
name = module.params['name']
vmid = module.params['vmid']
validate_certs = module.params['validate_certs']
# If password not set get it from PROXMOX_PASSWORD env
if not api_password:
try:
api_password = os.environ['PROXMOX_PASSWORD']
except KeyError as e:
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
try:
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
global VZ_TYPE
global PVE_MAJOR_VERSION
PVE_MAJOR_VERSION = 3 if float(proxmox.version.get()['version']) < 4.0 else 4
except Exception as e:
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
# If vm name is set get the VM id from ProxmoxAPI
if not vmid:
try:
vmid = get_vmid(proxmox, name)[0]
if vmid:
vm = get_vm(proxmox, vmid)
if not vm:
module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
except Exception as e:
module.fail_json(msg="VM {} does not exist in cluster.".format(clone))
agent_command(module, proxmox, vm, vmid, command)
if __name__ == '__main__':
main()
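For orientation, here is a minimal standalone sketch of the proxmoxer calls this module wraps (a sketch only; the host, credentials and VM name are placeholders):

```python
# Minimal sketch of what proxmox_qemu_agent does, assuming proxmoxer is
# installed; host, credentials and VM name are placeholders.
from proxmoxer import ProxmoxAPI

proxmox = ProxmoxAPI("pve.example.org", user="root@pam",
                     password="secret", verify_ssl=False)

# Resolve the VM id by name, the same way get_vmid() does.
name = "vm1"
vmid = next(vm["vmid"] for vm in proxmox.cluster.resources.get(type="vm")
            if vm["name"] == name)

# Find the node hosting the VM and POST a qemu-agent command to it.
vm = next(vm for vm in proxmox.cluster.resources.get(type="vm")
          if vm["vmid"] == vmid)
print(proxmox.nodes(vm["node"]).qemu(vmid).agent.post(command="ping"))
```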

14
make-example-stack.yml Normal file
View File

@ -0,0 +1,14 @@
---
- name: make example stack
hosts: localhost
gather_facts: false
become: false
pre_tasks:
- fail: msg="{{ item }} should be set"
when: lookup('vars', item) is undefined
with_items:
- pve_address
- pve_login
- pve_password
roles:
- make-example-stack

118
plugins/filters.py Normal file
View File

@ -0,0 +1,118 @@
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
import collections
import re
import ipaddress
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def dict_merge(dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
def dict_inject(data, path, obj):
p = path.split('/')
p.remove('')
p.reverse()
c = obj
for n in p:
c = {n: c}
z = data.copy()
dict_merge(z, c)
return z
def to_nics_dict(src):
nics = {}
lo_pat = re.compile("^lo[0-9]{0,2}$")
for n in src:
if not lo_pat.match(n['name']):
addrs = []
if "ip-addresses" in n:
for a in n['ip-addresses']:
if a['ip-address-type'] != "ipv6":
addrs.append(u'%s/%s' % (a['ip-address'], a['prefix']))
nics[n['name']] = {"mac": n['hardware-address'],
"addrs": addrs}
return nics
def to_proxmox_net(src):
out = {}
ks = ['virtio', 'bridge', 'tag']
for k,v in src.items():
k = k.replace('eth','net')
[ v.pop(x, None) for x in set(v.keys()).difference(ks) ]
v.pop('ipv4', None) # remove unused key
if 'virtio' not in v:
res = "virtio,"
else:
res = ""
res = res + ','.join(map(lambda x:str('='.join(map(str,x))), v.items()))
out[k] = res
return out
def gen_nics_addrs(node, num):
res = node.copy()
for k, v in res['net'].items():
for i, a in enumerate(res['net'][k]['ipv4']):
interface = ipaddress.ip_interface(a)
addr = interface.ip + int(num)
cidr = interface.network.prefixlen
res['net'][k]['ipv4'][i] = '%s/%s' % (addr, cidr)
return res
def list_to_dict(src):
res = {}
for a in src:
k = list(a.keys())[0]
res[k] = a[k]
return res
def get_steps(node, steps_list):
res = []
for idx, ss in enumerate(steps_list):
for a in ss:
if node in a['binds']:
res.append("step%s" % idx)
return res
def filter_dict(src, pred):
p = eval(pred)
return { k: v for k, v in src.items() if p(v)}
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'dict_merge': dict_merge,
'dict_inject': dict_inject,
'to_nics_dict': to_nics_dict,
'to_proxmox_net': to_proxmox_net,
'gen_nics_addrs': gen_nics_addrs,
'list_to_dict': list_to_dict,
'get_steps': get_steps,
'filter_dict': filter_dict
}
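To make the behaviour of a few of these filters concrete, a small usage sketch follows (assuming Ansible is installed so the module imports cleanly; the sample data is made up):

```python
# Illustrative only: run from the repository root so plugins/filters.py is on
# the path; the NIC/stack data below is made up.
import sys
sys.path.insert(0, "plugins")
from filters import to_proxmox_net, gen_nics_addrs, list_to_dict

nics = {"eth0": {"bridge": "vmbr1", "tag": 994, "ipv4": ["10.64.84.20/24"]}}
print(to_proxmox_net(nics))
# -> {'net0': 'virtio,bridge=vmbr1,tag=994'}

node = {"net": {"eth0": {"ipv4": ["10.64.6.1/24"]}}}
print(gen_nics_addrs(node, 5))
# -> {'net': {'eth0': {'ipv4': ['10.64.6.6/24']}}}

print(list_to_dict([{"a": 1}, {"b": 2}]))
# -> {'a': 1, 'b': 2}
```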

34
provision.yml Normal file
View File

@ -0,0 +1,34 @@
---
- name: check input parameters
hosts: localhost
gather_facts: false
tags: always
tasks:
- fail: msg="env_name should be set"
when: env_name is not defined
- fail: msg="stack_name should be set"
when: stack_name is not defined
- name: provisioning PVE nodes
hosts: localhost
gather_facts: false
roles:
- {role: prepare-config, tags: [ ]}
- {role: pve, tags: [ ]}
- {role: lxc, tags: [ ]}
- {role: inventory, bootstrap: yes, tags: [ ]}
tags: [ provision ]
- name: prepare nodes
hosts: stack
gather_facts: false
pre_tasks:
- meta: end_play
when: destroy_all is defined and destroy_all
- name: gather facts
setup:
when: destroy_all is not defined or not destroy_all
roles:
- {role: prepare-config, tags: [ ]}
- {role: common, tags: [ ]}
tags: [ prepare ]

View File

@ -0,0 +1,7 @@
---
- debug: msg="{{stack}}"
- name: "install apps"
include_tasks: prepare.yml
with_items: "{{stack.apps[step]}}"
loop_control:
loop_var: app

View File

@ -0,0 +1,9 @@
---
- name: "prepare {{ app.name }} environment on {{ inventory_hostname_short }}"
set_fact: {"{{ item.key }}":"{{ item.value }}"}
with_dict: "{{ app.vars }}"
when: app.vars is defined
- name: "apply {{ app.name }} role on {{ inventory_hostname_short }}"
include_role: name="{{app.name}}"
when: inventory_hostname_short in app.binds

View File

@ -0,0 +1,2 @@
---
- debug: msg="dummy role"

19
roles/bind-role/.gitignore vendored Normal file
View File

@ -0,0 +1,19 @@
# .gitignore
# Hidden Vagrant-directory
.vagrant
# Backup files (e.g. Vim, Gedit, etc.)
*~
# Vagrant base boxes (you never know when someone puts one in the repository)
*.box
# Python artefacts
.ropeproject
*.pyc
# Ignore test directory
tests/
vagrant-tests/
docker-tests/

View File

@ -0,0 +1,37 @@
sudo: required
env:
matrix:
- DISTRIBUTION: centos
VERSION: 7
- DISTRIBUTION: ubuntu
VERSION: 16.04
services:
- docker
before_install:
# Install latest Git
- sudo apt-get update
- sudo apt-get install --only-upgrade git
# Allow fetching other branches than master
- git config remote.origin.fetch +refs/heads/*:refs/remotes/origin/*
# Fetch the branch with test code
- git fetch origin docker-tests
- git worktree add tests origin/docker-tests
script:
# Running the test script the first time will set up the master DNS server
# with IP 172.17.0.2. Running it the second time sets up the slave DNS
# server with IP 172.17.0.3.
- ./tests/docker-tests.sh
- ./tests/docker-tests.sh
# Run functional tests on both master and slave server
- SUT_IP=172.17.0.2 ./tests/functional-tests.sh
# Ensure the slave gets the chance to receive zone updates from the master
- sleep 6s
- SUT_IP=172.17.0.3 ./tests/functional-tests.sh
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@ -0,0 +1,175 @@
# Change log
This file contains all notable changes to the bind Ansible role.
This file adheres to the guidelines of [http://keepachangelog.com/](http://keepachangelog.com/). Versioning follows [Semantic Versioning](http://semver.org/). "GH-X" refers to the X'th issue/pull request on the Github project.
## 3.9.0 - 2017-11-21
### Added
- (GH-35) Role variable `bind_check_names`, which adds support for check-names (e.g. `check-names master ignore;`)
- (GH-36) Role variable `bind_allow_recursion`, which adds support for allow-recursion (credit: [Loic Dachary](https://github.com/dachary))
- (GH-39) Role variable `bind_zone_delegate`, which adds support for zone delegation / NS records (credit: [Loic Dachary](https://github.com/dachary))
- (GH-40) Role variables `bind_dnssec_enable` and `bind_dnssec_validation`, which makes DNSSEC validation configurable (credit: [Guillaume Darmont](https://github.com/gdarmont)).
### Changed
- (GH-38) Only append domain to MX if it does not end with a dot (credit: [Loic Dachary](https://github.com/dachary))
## 3.8.0 - 2017-07-12
This release adds support for multiple TXT entries and fixes some bugs.
### Added
- (GH-31) Support for multiple TXT entries for the same name (credit: [Rafael Bodill](https://github.com/rafi))
### Changed
- (GH-31) Fixed ipv6 reverse zone hash calculation for complete idempotency (credit: [Stuart Knight](https://github.com/blofeldthefish))
- (GH-32, GH-33) Fix for bug where CNAMEs and Multi-IP entries weren't working (credit: [Greg Cockburn](https://github.com/gergnz))
## 3.7.1 - 2017-07-03
### Changed
- (GH-29) Zone files are fully idempotent, so are only changed when actual content changes (credit: [@Stuart Knight](https://github.com/blofeldthefish))
## 3.7.0 - 2017-06-01
### Added
- (GH-10) Implement reverse IPv6 lookups
- (GH-28) Add option `bind_forwarders` and `bind_forward_only`, which allows BIND to be set up as a caching name server.
## 3.6.1 - 2017-06-01
### Changed
- Fixed a bug with generating the reverse zone names.
## 3.6.0 - 2017-06-01
### Changed
- (GH-25) Allow slave log file to be set with variable `bind_log` instead of a hard coded value (credit @kartone).
- The alignment of columns in the reverse zone file is improved.
### Added
- (GH-22, 23) Documentation improvements
- (GH-27) Allow dynamic updates (credit: @bverschueren)
### Removed
- The custom filter plugins were removed. The functionality has since been added to Ansible's built-in filter plugins. This does require `python-netaddr` to be installed on the management node.
## 3.5.2 - 2016-09-29
### Changed
* The call to `named-checkconf` was fixed. It had the full path to the binary, which is not the same on all distributions. (GH-20, credit @peterjanes)
## 3.5.1 - 2016-09-22
### Changed
* The check for master/slave server is improved (GH-19, credit @josetaas)
## 3.5.0 - 2016-07-28
### Added
* Introduced role variable `bind_log`, the path to the log file.
* Introduced role variable `bind_zone_also_notify`, a list of servers that will receive a notification when the master zone file is reloaded (GH-18, credit: Joanna Delaporte)
* Reverse zone files now handle the case with only a single host (GH-18, credit: Joanna Delaporte)
## 3.4.0 - 2016-05-26
### Added
* (GH-16) Support for service record (SRV) lookups
* Support for text record (TXT) lookups
### Changed
* Fixed Ansible 2.0 deprecation warnings
* Generating a serial is no longer considered a change
* Ensured that all role variables have a default value, e.g. empty list instead of undefined. This simplifies template logic (no `if defined` tests), and is considered [deprecated in playbooks within a *with_* loop](https://docs.ansible.com/ansible/porting_guide_2.0.html#deprecated).
## 3.3.1 - 2016-04-08
### Removed
* The `version:` field in `meta/main.yml`. This is an unofficial field that is used by a third-party tool for managing role dependencies (librarian-ansible). Custom meta fields are no longer accepted in Ansible 2.0. See [ansible/ansible#13496](https://github.com/ansible/ansible/issues/13496) for more info. Unfortunately, this will break support for librarian-ansible. As a workaround, until this issue is resolved upstream, use version 3.3.0 of this role.
## 3.3.0 - 2016-04-08
### Added
* Added role variable `bind_other_name_servers` for adding NS records for DNS servers outside of the domain. (GH-12)
* Re-added `bind_recursion`, as it is needed in some cases. (GH-14)
### Removed
## 3.2.1 - 2015-12-15
### Added
* The domain name can now also point to an IP address, enabling e.g. "http://example.com/" (GH-11)
## 3.2.0 - 2015-12-07
### Added
* Add support for multiple IP addresses per host (GH-9)
* Allow setting `rrset-order` (for DNS round robin)
* Add support for (multiple) IPv6 (AAAA) records (GH-2). For now, only forward lookups are supported.
### Changed
* Test code is put into a separate branch. This means that test code is no longer included when installing the role from Ansible Galaxy.
## 3.1.0 - 2015-12-04
### Added
* Add support for zone transfers (GH-8)
* Check whether `bind_zone_master_server_ip` was set (GH-7)
### Removed
* Role variable `bind_recursion` was removed. This role is explicitly only suitable for an authoritative DNS server, and in this case, recursion should be off.
## 3.0.0 - 2015-06-14
### Added
* You can now set up a master and slave DNS server.
* The variable `bind_zone_master_server_ip` was added. This is a **required** variable, which makes this release not backwards compatible.
* Automated acceptance tests for the test playbook
## 2.0.0 - 2015-06-10
### Added
* Added EL6 to supported platforms. Thanks to @rilindo for verifying this.
### Changed
* Recursion is turned off by default, which fits an authoritative name server. This change is not backwards compatible, as the behaviour of BIND is different from before when you do not set the variable `bind_recursion` explicitly.
### Removed
* Firewall settings. This should not be a concern of this role. Configuring the firewall is functionality offered by other roles (e.g. [bertvv.bind](https://github.com/bertvv/ansible-role-el7))
## 1.0.0 - 2015-04-22
First release!
### Added
- Functionality for master DNS server
- Multiple reverse lookup zones

View File

@ -0,0 +1,13 @@
# BSD License
Copyright (c) 2014, Bert Van Vreckem, (bert.vanvreckem@gmail.com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

315
roles/bind-role/README.md Normal file
View File

@ -0,0 +1,315 @@
# Ansible role `bind`
[![Build Status](https://travis-ci.org/bertvv/ansible-role-bind.svg?branch=master)](https://travis-ci.org/bertvv/ansible-role-bind)
An Ansible role for setting up ISC BIND as an authoritative DNS server for a single domain on EL7 or Ubuntu Server. Specifically, the responsibilities of this role are to:
- install BIND
- set up the main configuration file
- master server
- slave server
- set up forward and reverse lookup zone files
This role supports multiple reverse zones. IPv6 lookups are also supported, both forward and reverse.
Configuring the firewall is not a concern of this role, so you should do this using another role (e.g. [bertvv.rh-base](https://galaxy.ansible.com/bertvv/rh-base/)).
If you like/use this role, please consider giving it a star. Thanks!
## Requirements
- **The package `python-ipaddr` should be installed on the management node** (since v3.7.0)
## Role Variables
Variables are not required, unless specified.
| Variable | Default | Comments (type) |
| :--- | :--- | :--- |
| `bind_acls` | `[]` | A list of ACL definitions, which are dicts with fields `name` and `match_list`. See below for an example. |
| `bind_allow_query` | `['localhost']` | A list of hosts that are allowed to query this DNS server. Set to ['any'] to allow all hosts |
| `bind_allow_update` | `['none']` | A list of hosts that are allowed to dynamically update this DNS server. |
| `bind_check_names` | `[]` | Check host names for compliance with RFC 952 and RFC 1123 and take the defined action (e.g. `warn`, `ignore`, `fail`). |
| `bind_forwarders` | `[]` | A list of name servers to forward DNS requests to. |
| `bind_forward_only` | `false` | If `true`, BIND is set up as a caching name server |
| `bind_listen_ipv4` | `['127.0.0.1']` | A list of the IPv4 addresses of the network interface(s) to listen on. Set to ['any'] to listen on all interfaces. |
| `bind_listen_ipv6` | `['::1']` | A list of the IPv6 addresses of the network interface(s) to listen on. |
| `bind_log` | `data/named.run` | Path to the log file |
| `bind_other_name_servers` | `[]` | A list of nameservers outside of the domain. For each one, an NS record will be created. |
| `bind_recursion` | `false` | Determines whether requests for which the DNS server is not authoritative should be forwarded†. |
| `bind_allow_recursion` | `['any']` | Similar to bind_allow_query, this option applies to recursive queries. |
| `bind_rrset_order` | `random` | Defines order for DNS round robin (either `random` or `cyclic`) |
| `bind_dnssec_enable` | `true` | Is DNSSEC enabled |
| `bind_dnssec_validation` | `true` | Is DNSSEC validation enabled |
| `bind_zone_also_notify` | - | A list of servers that will receive a notification when the master zone file is reloaded. |
| `bind_zone_hostmaster_email` | `hostmaster` | The e-mail address of the system administrator |
| `bind_zone_hosts` | `[]` | Host definitions. See below this table for examples. |
| `bind_zone_delegate` | `[]` | Zone delegation. See below this table for examples. |
| `bind_zone_mail_servers` | `[{name: mail, preference: 10}]` | A list of dicts (with fields `name` and `preference`) specifying the mail servers for this domain. |
| `bind_zone_master_server_ip` | - | **(Required)** The IP address of the master DNS server. |
| `bind_zone_minimum_ttl` | `1D` | Minimum TTL field in the SOA record. |
| `bind_zone_name_servers` | `[ansible_hostname]` | A list of the DNS servers for this domain. |
| `bind_zone_name` | `example.com` | The domain name |
| `bind_zone_networks` | `['10.0.2']` | A list of the networks that are part of the domain |
| `bind_zone_ipv6_networks` | `[]` | A list of the IPv6 networks that are part of the domain, in CIDR notation (e.g. 2001:db8::/48) |
| `bind_zone_other_name_servers` | `[]` | A list of the DNS servers outside of this domain. |
| `bind_zone_services` | `[]` | A list of services to be advertized by SRV records |
| `bind_zone_text` | `[]` | A list of dicts with fields `name` and `text`, specifying TXT records. `text` can be a list or string. |
| `bind_zone_time_to_expire` | `1W` | Time to expire field in the SOA record. |
| `bind_zone_time_to_refresh` | `1D` | Time to refresh field in the SOA record. |
| `bind_zone_time_to_retry` | `1H` | Time to retry field in the SOA record. |
| `bind_zone_ttl` | `1W` | Time to Live field in the SOA record. |
† Best practice for an authoritative name server is to leave recursion turned off. However, [for some cases](http://www.zytrax.com/books/dns/ch7/queries.html#allow-query-cache) it may be necessary to have recursion turned on.
### Minimal variables for a working zone
Even though only the variable `bind_zone_master_server_ip` is required for the role to run without errors, this is not sufficient to get a working zone. In order to set up an authoritative name server that is available to clients, you should also at least define the following variables:
| Variable | Master | Slave |
| :--- | :---: | :---: |
| `bind_zone_name` | V | V |
| `bind_zone_networks` | V | V |
| `bind_zone_name_servers` | V | V |
| `bind_zone_hosts` | V | -- |
| `bind_listen_ipv4` | V | V |
| `bind_allow_query` | V | V |
### Host definitions
Host names that this DNS server should resolve can be specified with the variable `bind_zone_hosts` as a list of dicts with fields `name`, `ip` and `aliases`, e.g.:
```Yaml
bind_zone_hosts:
- name: pub01
ip: 192.0.2.1
ipv6: 2001:db8::1
aliases:
- ns
- name: '@'
ip:
- 192.0.2.2
- 192.0.2.3
ipv6:
- 2001:db8::2
- 2001:db8::3
aliases:
- www
- name: priv01
ip: 10.0.0.1
```
To allow browsing to http://example.com/, set the host name of your web server to `'@'` (must be quoted!). In BIND syntax, `@` indicates the domain name itself.
IP addresses (both IPv4 and IPv6) can be specified as a string or as a list. This results in a single or multiple A/AAAA records for the host, respectively. This enables [DNS round robin](http://www.zytrax.com/books/dns/ch9/rr.html), a simple load balancing technique. The order in which the IP addresses are returned can be configured with role variable `bind_rrset_order`.
As you can see, not all hosts are in the same network. This is perfectly acceptable, and supported by this role. All networks should be specified in `bind_zone_networks`, though, or the host will not get a PTR record for reverse lookup:
```Yaml
bind_zone_networks:
- '192.0.2'
- '10'
- '172.16'
```
Remark that only the network part should be specified here! When specifying a class B IP address (e.g. "172.16") in a variable file, it must be quoted. Otherwise, the Yaml parser will interpret it as a float.
Based on the idea and examples detailed at <https://linuxmonk.ch/wordpress/index.php/2016/managing-dns-zones-with-ansible/> for the gdnsd package, the zonefiles are fully idempotent, and thus only get updated if "real" content changes.
### Zone delegation
To delegate a zone to another DNS server, it is enough to create an `NS` record with:
```Yaml
bind_zone_delegate:
- zone: foo
dns: 192.0.2.1
```
which is the equivalent of:
```
foo IN NS 192.0.2.1
```
### Service records
Service (SRV) records can be added with the variable `bind_zone_services`, e.g.:
```Yaml
bind_zone_services:
- name: _ldap._tcp
weight: 100
port: 88
target: dc001
```
This is a list of dicts with mandatory fields `name` (service name), `target` (host providing the service), `port` (TCP/UDP port of the service) and optional fields `priority` (default = 0) and `weight` (default = 0).
### ACLs
ACLs can be defined like this:
```Yaml
bind_acls:
- name: acl1
match_list:
- 192.0.2.0/24
- 10.0.0.0/8
```
The names of the ACLs will be added to the `allow-transfer` clause in global options.
## Dependencies
No dependencies. If you want to configure the firewall, do this through another role (e.g. [bertvv.rh-base](https://galaxy.ansible.com/bertvv/rh-base)).
## Example Playbook
See the test playbook [test.yml](https://github.com/bertvv/ansible-role-bind/blob/docker-tests/test.yml) for an elaborate example that shows all features.
## Testing
There are two test environments for this role, one based on Vagrant, the other on Docker. The latter powers the Travis-CI tests. The tests are kept in a separate (orphan) branch so as not to clutter the actual code of the role. [git-worktree(1)](https://git-scm.com/docs/git-worktree) is used to include the test code into the working directory. Remark that this requires at least Git v2.5.0.
### Running Docker tests
1. Fetch the test branch: `git fetch origin docker-tests`
2. Create a Git worktree for the test code: `git worktree add docker-tests docker-tests`. This will create a directory `docker-tests/`
The script `docker-tests.sh` will create a Docker container, and apply this role from a playbook `test.yml`. The Docker images are configured for testing Ansible roles and are published at <https://hub.docker.com/r/bertvv/ansible-testing/>. There are images available for several distributions and versions. The distribution and version should be specified outside the script using environment variables:
```
DISTRIBUTION=centos VERSION=7 ./tests/docker-tests.sh
```
The specific combinations of distributions and versions that are supported by this role are specified in `.travis.yml`.
The first time the test script is run, a container will be created that is assigned the IP address 172.17.0.2. This will be the master DNS-server. The server is still running after the script finishes and can be queried from the command line, e.g.:
```
$ dig @172.17.0.2 www.acme-inc.com +short
srv001.acme-inc.com.
172.17.1.1
```
If you run the script again, a new container is launched with IP address 172.17.0.3 that will be set up as a slave DNS-server. After a few seconds, it will have received updates from the master server and can be queried as well.
```
$ dig @172.17.0.3 www.acme-inc.com +short
srv001.acme-inc.com.
172.17.1.1
```
The script `tests/functional-tests.sh` will run a [BATS](https://github.com/sstephenson/bats) test suite, `dns.bats` that performs a number of different queries. Specify the server IP address as the environment variable `${SUT_IP}` (short for System Under Test).
```
$ SUT_IP=172.17.0.2 ./tests/functional-tests.sh
### Using BATS executable at: /usr/local/bin/bats
### Running test /home/bert/CfgMgmt/roles/bind/tests/dns.bats
✓ Forward lookups public servers
✓ Reverse lookups
✓ Alias lookups public servers
✓ IPv6 forward lookups
✓ NS record lookup
✓ Mail server lookup
✓ Service record lookup
✓ TXT record lookup
8 tests, 0 failures
$ SUT_IP=172.17.0.3 ./tests/functional-tests.sh
[...]
```
### Running Vagrant tests
1. Fetch the tests branch: `git fetch origin vagrant-tests`
2. Create a Git worktree for the test code: `git worktree add vagrant-tests vagrant-tests`. This will create a directory `vagrant-tests/`.
3. `cd vagrant-tests/`
4. `vagrant up` will then create two VMs and apply a test playbook (`test.yml`).
The command `vagrant up` results in a setup with *two* DNS servers, a master and a slave, set up according to playbook `test.yml`.
| **Hostname** | **ip** |
| :--- | :--- |
| `testbindmaster` | 192.168.56.53 |
| `testbindslave` | 192.168.56.54 |
IP addresses are in the subnet of the default VirtualBox Host Only network interface (192.168.56.0/24). You should be able to query the servers from your host system. For example, to verify if the slave is updated correctly, you can do the following:
```ShellSession
$ dig @192.168.56.54 ns1.example.com +short
testbindmaster.example.com.
192.168.56.53
$ dig @192.168.56.54 example.com www.example.com +short
web.example.com.
192.168.56.20
192.168.56.21
$ dig @192.168.56.54 MX example.com +short
10 mail.example.com.
```
An automated acceptance test written in [BATS](https://github.com/sstephenson/bats.git) is provided that checks most settings specified in `tests/test.yml`. You can run it by executing the shell script `tests/runtests.sh`. The script can be run on either your host system (assuming you have a Bash shell), or one of the VMs. The script will download BATS if needed and run the test script `tests/dns.bats` on both the master and the slave DNS server.
```ShellSession
$ cd vagrant-tests
$ vagrant up
[...]
$ ./runtests.sh
Testing 192.168.56.53
✓ The `dig` command should be installed
✓ It should return the NS record(s)
✓ It should be able to resolve host names
✓ It should be able to resolve IPv6 addresses
✓ It should be able to do reverse lookups
✓ It should be able to resolve aliases
✓ It should return the MX record(s)
6 tests, 0 failures
Testing 192.168.56.54
✓ The `dig` command should be installed
✓ It should return the NS record(s)
✓ It should be able to resolve host names
✓ It should be able to resolve IPv6 addresses
✓ It should be able to do reverse lookups
✓ It should be able to resolve aliases
✓ It should return the MX record(s)
6 tests, 0 failures
```
Running from the VM:
```ShellSession
$ vagrant ssh testbindmaster
Last login: Sun Jun 14 18:52:35 2015 from 10.0.2.2
Welcome to your Packer-built virtual machine.
[vagrant@testbindmaster ~]$ /vagrant/runtests.sh
Testing 192.168.56.53
✓ The `dig` command should be installed
[...]
```
## License
BSD
## Contributors
Issues, feature requests, ideas, suggestions, etc. are appreciated and can be posted in the Issues section.
Pull requests are also very welcome. Please create a topic branch for your proposed changes. If you don't, this will create conflicts in your fork after the merge. Don't hesitate to add yourself to the contributor list below in your PR!
- [Bert Van Vreckem](https://github.com/bertvv/) (Maintainer)
- [B. Verschueren](https://github.com/bverschueren)
- [Greg Cockburn](https://github.com/gergnz)
- [Guillaume Darmont](https://github.com/gdarmont)
- [Joanna Delaporte](https://github.com/jdelaporte)
- [Jose Taas](https://github.com/josetaas)
- [Peter Janes](https://github.com/peterjanes)
- [Mario Ciccarelli](https://github.com/kartone)
- [Rafael Bodill](https://github.com/rafi)
- [Stuart Knight](https://github.com/blofeldthefish)
- [Loic Dachary](http://dachary.org)

View File

@ -0,0 +1,68 @@
# roles/bind/defaults/main.yml
---
bind_log: "data/named.run"
bind_zone_name: "example.com"
bind_zone_networks:
- "10.0.2"
bind_zone_ipv6_networks: []
# List of servers to be notified when the master zone is reloaded.
bind_zone_also_notify: []
# List of acls.
bind_acls: []
# List of IPv4 address of the network interface(s) to listen on. Set to "any"
# to listen on all interfaces
bind_listen_ipv4:
- "127.0.0.1"
# List of IPv6 address of the network interface(s) to listen on.
bind_listen_ipv6:
- "::1"
# List of hosts that are allowed to query this DNS server.
bind_allow_query:
- "localhost"
# List of hosts that are allowed to dynamically update this DNS server
bind_allow_update:
- "none"
# Determines whether recursion should be allowed. Typically, an authoritative
# name server should have recursion turned OFF.
bind_recursion: false
bind_allow_recursion:
- "any"
# Allows BIND to be set up as a caching name server
bind_forward_only: false
# List of name servers to forward DNS requests to.
bind_forwarders: []
# DNS round robin order (random or cyclic)
bind_rrset_order: "random"
# DNSSEC configuration
bind_dnssec_enable: true
bind_dnssec_validation: true
# SOA information
bind_zone_hostmaster_email: "hostmaster"
bind_zone_ttl: "1W"
bind_zone_time_to_refresh: "1D"
bind_zone_time_to_retry: "1H"
bind_zone_time_to_expire: "1W"
bind_zone_minimum_ttl: "1D"
# Zone Resource Records
bind_other_name_servers: []
bind_zone_hosts: []
bind_zone_delegate: []
bind_zone_mail_servers: []
bind_zone_name_servers: []
bind_zone_services: []
bind_zone_text: []

View File

@ -0,0 +1,7 @@
# roles/bind/handlers/main.yml
---
- name: restart bind
service:
name: "{{ bind_service }}"
state: restarted

View File

@ -0,0 +1,19 @@
---
galaxy_info:
author: Bert Van Vreckem
description: Sets up ISC BIND on RHEL/CentOS 6/7 or Ubuntu 16.04 LTS (Xenial) as an authoritative DNS server for a single domain (master and/or slave).
license: BSD
min_ansible_version: 2.0
platforms:
- name: EL
versions:
- 6
- 7
- name: Ubuntu
versions:
- xenial
galaxy_tags:
- networking
- system
- dns
dependencies: []

View File

@ -0,0 +1,71 @@
# roles/bind/tasks/main.yml
---
# Initialise distribution-specific variables
- name: Source specific variables
include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_distribution }}.yml"
- "{{ ansible_os_family }}.yml"
- name: Check whether `bind_zone_master_server_ip` was set
assert:
that: bind_zone_master_server_ip is defined
- name: Install BIND
apt_rpm:
pkg: "{{ item }}"
state: installed
with_items: "{{ bind_packages }}"
tags: bind
- name: Ensure runtime directories referenced in config exist
file:
path: "{{ item }}"
state: directory
owner: "{{ bind_owner }}"
group: "{{ bind_group }}"
mode: 0770
with_items:
- "{{ bind_chroot }}/{{ bind_dir }}/dynamic"
- "{{ bind_chroot }}/{{ bind_dir }}/data"
tags: bind
- name: Create serial, based on last two digits of year, month, day, and hour
command: date +%y%m%d%H
register: timestamp
changed_when: false
run_once: true
tags: bind
- name: Read forward zone hashes
shell: 'grep "^; Hash:" {{ bind_zone_dir }}/{{ bind_zone_name }} || true'
changed_when: false
check_mode: false
register: forward_hashes
- name: Read reverse ipv4 zone hashes
shell: "grep \"^; Hash:\" {{ bind_zone_dir }}/{{ ('.'.join(item.replace(item+'.','').split('.')[::-1])) }}.in-addr.arpa || true"
changed_when: false
check_mode: false
register: reverse_hashes
with_items: "{{ bind_zone_networks }}"
- name: Read reverse ipv6 zone hashes
shell: "grep \"^; Hash:\" {{bind_zone_dir}}/{{ (item | ipaddr('revdns'))[-(9+(item|regex_replace('^.*/','')|int)//2):-1] }} || true"
changed_when: false
check_mode: false
register: reverse_hashes_ipv6
with_items: "{{ bind_zone_ipv6_networks }}"
- include_tasks: master.yml
when: bind_zone_master_server_ip in ansible_all_ipv4_addresses
- include_tasks: slave.yml
when: bind_zone_master_server_ip not in ansible_all_ipv4_addresses
- name: Start BIND service
service:
name: "{{ bind_service }}"
state: started
enabled: yes
tags: bind
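The zone hash tasks and the reverse zone file names rely on the Jinja expression `('.'.join(item.replace(item+'.','').split('.')[::-1]))`, which turns a forward network prefix into its reverse-zone name. A short Python sketch of the same transformation (the `replace()` part is a no-op for plain prefixes):

```python
# Sketch of the reverse-zone name derived from a bind_zone_networks entry,
# mirroring the Jinja expression used in the tasks and templates.
def reverse_zone(network: str) -> str:
    # "10.0.2" -> "2.0.10.in-addr.arpa"
    return ".".join(reversed(network.split("."))) + ".in-addr.arpa"

assert reverse_zone("10.0.2") == "2.0.10.in-addr.arpa"
print(reverse_zone("172.16"))  # -> 16.172.in-addr.arpa
```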

View File

@ -0,0 +1,53 @@
# roles/bind/tasks/master.yml
# Set up a BIND master server
---
- name: Master | Main BIND config file (master)
template:
src: master_etc_named.conf.j2
dest: "{{ bind_config }}"
owner: "{{ bind_owner }}"
group: "{{ bind_group }}"
mode: '0640'
setype: named_conf_t
validate: 'named-checkconf -t / %s'
notify: restart bind
tags: bind
- name: Master | Create forward lookup zone file
template:
src: bind_zone.j2
dest: "{{ bind_chroot }}/{{ bind_zone_dir }}/{{ bind_zone_name }}"
owner: "{{ bind_owner }}"
group: "{{ bind_group }}"
mode: "0640"
setype: named_zone_t
validate: 'named-checkzone -d {{ bind_zone_name }} %s'
notify: restart bind
tags: bind
- name: Master | Create reverse lookup zone file
template:
src: reverse_zone.j2
dest: "{{ bind_chroot }}/{{ bind_zone_dir }}/{{ ('.'.join(item.replace(item+'.','').split('.')[::-1])) }}.in-addr.arpa"
owner: "{{ bind_owner }}"
group: "{{ bind_group }}"
mode: "0640"
setype: named_zone_t
validate: "named-checkzone {{ ('.'.join(item.replace(item+'.','').split('.')[::-1])) }}.in-addr.arpa %s"
with_items: "{{ bind_zone_networks }}"
notify: restart bind
tags: bind
- name: Master | Create reverse IPv6 lookup zone file
template:
src: reverse_zone_ipv6.j2
dest: "{{ bind_chroot }}/{{bind_zone_dir}}/{{ (item | ipaddr('revdns'))[-(9+(item|regex_replace('^.*/','')|int)//2):-1] }}"
owner: "{{ bind_owner }}"
group: "{{ bind_group }}"
mode: "0640"
setype: named_zone_t
#validate: "named-checkzone {{ (item | ipaddr('revdns'))[-(9+(item|regex_replace('^.*/','')|int)//2):] }} %s"
with_items: "{{ bind_zone_ipv6_networks }}"
#notify: restart bind
tags: bind
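The IPv6 reverse zone name comes from the slice `(item | ipaddr('revdns'))[-(9+(item|regex_replace('^.*/','')|int)//2):]`, which keeps one nibble label per four prefix bits plus the `ip6.arpa.` suffix. A sketch of the equivalent computation with the standard-library `ipaddress` module (the example network is a placeholder):

```python
# Sketch of the ip6.arpa zone name selected by the ipaddr('revdns') slice in
# the task above; reverse_pointer has no trailing dot, unlike the filter.
import ipaddress

def ipv6_reverse_zone(cidr: str) -> str:
    net = ipaddress.ip_network(cidr)
    nibbles = net.prefixlen // 4                 # one label per 4 bits
    labels = net.network_address.reverse_pointer.split(".")
    return ".".join(labels[32 - nibbles:])       # keep prefix labels + ip6.arpa

print(ipv6_reverse_zone("2001:db8::/48"))
# -> 0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa
```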

View File

@ -0,0 +1,16 @@
# roles/bind/tasks/slave.yml
# Set up a BIND slave server
---
- name: Slave | Main BIND config file (slave)
template:
src: slave_etc_named.conf.j2
dest: "{{ bind_config }}"
owner: "{{ bind_owner }}"
group: "{{ bind_group }}"
mode: '0640'
setype: named_conf_t
validate: 'named-checkconf -t / %s'
notify: restart bind
tags: bind

View File

@ -0,0 +1,116 @@
{#
# First create a dict holding the entire zone information and create a hash
# from it, so that it can be compared with subsequent task executions. In this
# way the serial will only be updated if there are some content changes.
#}
{% set _zone_data = {} %}
{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %}
{% set _ = _zone_data.update({'domain': bind_zone_name }) %}
{% set _ = _zone_data.update({'mname': bind_zone_name_servers}) %}
{% set _ = _zone_data.update({'aname': bind_other_name_servers}) %}
{% set _ = _zone_data.update({'mail': bind_zone_mail_servers}) %}
{% set _ = _zone_data.update({'rname': (( bind_zone_hostmaster_email)) + ('' if (bind_zone_hostmaster_email | search('\.')) else ('.' + _zone_data['domain']))}) %}
{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %}
{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %}
{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %}
{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %}
{% set _ = _zone_data.update({'hosts': bind_zone_hosts }) %}
{% set _ = _zone_data.update({'delegate': bind_zone_delegate }) %}
{% set _ = _zone_data.update({'services': bind_zone_services }) %}
{% set _ = _zone_data.update({'text': bind_zone_text }) %}
{#
# Compare the zone file hash with the current zone data hash and set serial
# accordingly
#}
{% set _zone = {'hash': _zone_data | string | hash('md5')} %}
{% set _hash_serial = forward_hashes.stdout.split(' ')[2:] %}
{% if _hash_serial and _hash_serial[0] == _zone['hash'] %}
{% set _ = _zone.update({'serial': _hash_serial[1]}) %}
{% else %}
{% set _ = _zone.update({'serial': timestamp.stdout}) %}
{% endif %}
{#
# Eventually output the zone data
#}
; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }}
; Zone file for {{ _zone_data['domain'] }}
; {{ ansible_managed }}
$ORIGIN {{ _zone_data['domain'] }}.
$TTL {{ _zone_data['ttl'] }}
{% if _zone_data['mname']|length > 0 %}
@ IN SOA {{ _zone_data['mname']|first }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
{% else %}
@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
{% endif %}
{{ _zone['serial'] }}
{{ _zone_data['refresh'] }}
{{ _zone_data['retry'] }}
{{ _zone_data['expire'] }}
{{ _zone_data['minimum'] }} )
{% if _zone_data['mname']|length > 0 %}
{% for ns in _zone_data['mname'] %}
IN NS {{ ns }}.{{ _zone_data['domain'] }}.
{% endfor %}
{% else %}
IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
{% endif %}
{% for ns in _zone_data['aname'] %}
IN NS {{ ns }}.
{% endfor %}
{% for mail in _zone_data['mail'] %}
{% if loop.first %}@{% else %} {% endif %} IN MX {{ mail.preference}} {{ mail.name }}{% if not mail.name.endswith('.') %}.{{ _zone_data['domain'] }}.{% endif %}
{% endfor %}
{% if _zone_data['delegate']|length > 0 %}
{% for host in _zone_data['delegate'] %}
{{ host.zone.ljust(20) }} IN NS {{ host.dns }}
{% endfor %}
{% endif %}
{% if _zone_data['hosts']|length > 0 %}
{% for host in _zone_data['hosts'] %}
{% if host.ip is defined %}
{% if host.ip is string %}
{{ host.name.ljust(20) }} IN A {{ host.ip }}
{% else %}
{% for ip in host.ip %}
{{ host.name.ljust(20) }} IN A {{ ip }}
{% endfor %}
{% endif %}
{% endif %}
{% if host.ipv6 is defined %}
{% if host.ipv6 is string %}
{{ host.name.ljust(20) }} IN AAAA {{ host.ipv6 }}
{% else %}
{% for ip6 in host.ipv6 %}
{{ host.name.ljust(20) }} IN AAAA {{ ip6 }}
{% endfor %}
{% endif %}
{% endif %}
{% if host.aliases is defined %}
{% for alias in host.aliases %}
{{ alias.ljust(20) }} IN CNAME {{ host.name }}
{% endfor %}
{% endif %}
{% endfor %}
{% else %}
{{ ansible_hostname.ljust(20) }} IN A {{ ansible_default_ipv4.address }}
{% endif %}
{% for service in _zone_data['services'] %}
{{ service.name.ljust(20) }} IN SRV {{ service.priority|default('0') }} {{ service.weight|default('0') }} {{ service.port }} {{ service.target }}
{% endfor %}
{% for text in _zone_data['text'] %}
{% if text.text is string %}
{{ text.name.ljust(20) }} IN TXT "{{ text.text }}"
{% else %}
{% for entry in text.text %}
{{ text.name.ljust(20) }} IN TXT "{{ entry }}"
{% endfor %}
{% endif %}
{% endfor %}
{# vim: ft=text
#}
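The `; Hash:` header above is what makes the zone file idempotent: the template hashes the assembled zone data, compares it with the hash stored in the existing file (read by the `Read forward zone hashes` task), and only takes a new timestamp serial when the content actually changed. A minimal Python sketch of that decision:

```python
# Sketch of the serial-reuse logic from the template header: keep the old
# serial while the zone content hash is unchanged, otherwise take a fresh
# time-based serial (like the `date +%y%m%d%H` task).
import hashlib
import time

def choose_serial(zone_data: dict, previous_hash_line: str):
    new_hash = hashlib.md5(str(zone_data).encode()).hexdigest()
    parts = previous_hash_line.split(" ")[2:]    # ["<old hash>", "<old serial>"]
    if parts and parts[0] == new_hash:
        return new_hash, parts[1]                # unchanged: keep serial
    return new_hash, time.strftime("%y%m%d%H")   # changed: new serial

zone = {"domain": "example.com", "hosts": [{"name": "www", "ip": "192.0.2.1"}]}
print(choose_serial(zone, "; Hash: deadbeef 18100112"))
```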

View File

@ -0,0 +1,112 @@
//
// named.conf
//
// {{ ansible_managed }}
//
{% for file in bind_default_zone_files %}
include "{{ file }}";
{% endfor %}
{% for acl in bind_acls %}
acl "{{ acl.name }}" {
{% for match in acl.match_list %}
{{ match }};
{% endfor %}
};
{% endfor %}
options {
listen-on port 53 { {{ bind_listen_ipv4|join(';') }}; };
listen-on-v6 port 53 { {{ bind_listen_ipv6|join(';') }}; };
directory "/etc/bind/zone";
pid-file "";
dump-file "/var/run/named_dump.db";
statistics-file "/var/run/named.stats";
recursing-file "/var/run/recursing";
// directory "{{ bind_zone_dir }}";
// dump-file "/var/run/named_dump.db";
// statistics-file "/var/run/named.stats";
// memstatistics-file "/var/run/named_mem.stats";
allow-query { {{ bind_allow_query|join(';') }}; };
{% if bind_acls|length != 0 %}
allow-transfer { {% for acl in bind_acls %}"{{ acl.name }}"; {% endfor %}};
{% endif %}
{% if bind_check_names is defined %}
check-names {{ bind_check_names }};
{% endif %}
recursion {% if bind_recursion %}yes{% else %}no{% endif %};
{% if bind_recursion %}
allow-recursion { {{ bind_allow_recursion|join('; ') }}; };
{% endif %}
{% if bind_forwarders|length > 0 %}forwarders { {{ bind_forwarders|join('; ') }}; };{% endif %}
{% if bind_forward_only %}forward only;{% endif %}
rrset-order { order {{ bind_rrset_order }}; };
dnssec-enable {{ bind_dnssec_enable }};
dnssec-validation {{ bind_dnssec_validation }};
dnssec-lookaside auto;
/* Path to ISC DLV key */
// bindkeys-file "/etc/named.iscdlv.key";
// managed-keys-directory "{{ bind_dir }}/dynamic";
// pid-file "";
// session-keyfile "/run/named/session.key";
};
logging {
/*
* ALT Linux: if enabled, this fills log uselessly
*/
category lame-servers {null;};
};
/*
logging {
channel default_debug {
file "{{ bind_log }}";
severity dynamic;
print-time yes;
};
};
*/
zone "{{ bind_zone_name }}" IN {
type master;
file "{{ bind_zone_name }}";
notify yes;
{% if bind_zone_also_notify|length > 0 %}
also-notify { {{ bind_zone_also_notify|join(';') }}; };
{% endif %}
allow-update { {{ bind_allow_update|join(';') }}; };
};
{% if bind_zone_networks is defined %}
{% for network in bind_zone_networks %}
zone "{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa" IN {
type master;
file "{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa";
notify yes;
{% if bind_zone_also_notify|length > 0 %}
also-notify { {{ bind_zone_also_notify|join(';') }}; };
{% endif %}
allow-update { {{ bind_allow_update|join(';') }}; };
};
{% endfor %}
{% endif %}
{% if bind_zone_ipv6_networks is defined %}
{% for network in bind_zone_ipv6_networks %}
zone "{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):] }}" IN {
type master;
file "{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):-1] }}";
notify yes;
{% if bind_zone_also_notify|length > 0 %}
also-notify { {{ bind_zone_also_notify|join(';') }}; };
{% endif %}
allow-update { {{ bind_allow_update|join(';') }}; };
};
{% endfor %}
{% endif %}

View File

@ -0,0 +1,96 @@
{#
# First create a dict holding the entire zone information and create a hash
# from it, so that it can be compared with subsequent task executions. In this
# way the serial will only be updated if there are some content changes.
#}
{% set _zone_data = {} %}
{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %}
{% set _ = _zone_data.update({'domain': bind_zone_name }) %}
{% set _ = _zone_data.update({'mname': bind_zone_name_servers}) %}
{% set _ = _zone_data.update({'aname': bind_other_name_servers}) %}
{% set _ = _zone_data.update({'rname': (( bind_zone_hostmaster_email)) + ('' if (bind_zone_hostmaster_email | search('\.')) else ('.' + _zone_data['domain']))}) %}
{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %}
{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %}
{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %}
{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %}
{% set _ = _zone_data.update({'hosts': bind_zone_hosts | selectattr('ip', 'defined') | selectattr('ip', 'string') | selectattr('ip', 'search', '^'+item) | list }) %}
{% set _ = _zone_data.update({'revip': ('.'.join(item.replace(item+'.','').split('.')[::-1])) }) %}
{#
# Compare the zone file hash with the current zone data hash and set serial
# accordingly
#}
{% set _zone = {'hash': _zone_data | string | hash('md5')} %}
{% for _result in reverse_hashes.results %}
{% if (item in _result.item ) %}
{% set _hash_serial = _result.stdout.split(' ')[2:] %}
{% if _hash_serial and _hash_serial[0] == _zone['hash'] %}
{% set _ = _zone.update({'serial': _hash_serial[1]}) %}
{% else %}
{% set _ = _zone.update({'serial': timestamp.stdout}) %}
{% endif %}
{% endif %}
{% endfor %}
{#
# Eventually output the zone data
#}
; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }}
; Reverse zone file for {{ _zone_data['domain'] }}
; {{ ansible_managed }}
; vi: ft=bindzone
$TTL {{ _zone_data['ttl'] }}
$ORIGIN {{ ('.'.join(item.replace(item+'.','').split('.')[::-1])) }}.in-addr.arpa.
{% if _zone_data['mname']|length > 0 %}
@ IN SOA {{ _zone_data['mname']|first }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
{% else %}
@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
{% endif %}
{{ _zone['serial'] }}
{{ _zone_data['refresh'] }}
{{ _zone_data['retry'] }}
{{ _zone_data['expire'] }}
{{ _zone_data['minimum'] }} )
{% if _zone_data['mname']|length > 0 %}
{% for ns in _zone_data['mname'] %}
IN NS {{ ns }}.{{ _zone_data['domain'] }}.
{% endfor %}
{% else %}
IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
{% endif %}
{% for ns in _zone_data['aname'] %}
IN NS {{ ns }}.
{% endfor %}
{% if _zone_data['hosts']|length > 0 %}
{% for host in _zone_data['hosts'] %}
{% if host.ip is defined %}
{% if host.ip == item %}
@ IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
{% else %}
{% if host.ip is string and host.ip.startswith(item) %}
{% if host.name == '@' %}
{{ ('.'.join(host.ip.replace(item+'.','').split('.')[::-1])).ljust(16) }} IN PTR {{ _zone_data['domain'] }}.
{% else %}
{{ ('.'.join(host.ip.replace(item+'.','').split('.')[::-1])).ljust(16) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
{% endif %}
{% else %}
{% for ip in host.ip %}
{% if ip.startswith(item) %}
{{ ('.'.join(ip.replace(item+'.','').split('.')[::-1])).ljust(16) }} IN PTR {{ _zone_data['domain'] }}.
{% if host.name == '@' %}
{% else %}
{{ ('.'.join(ip.replace(item+'.','').split('.')[::-1])).ljust(16) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
{% else %}
{{ ('.'.join(ansible_default_ipv4.address.replace(item+'.','').split('.')[::-1])).ljust(16) }} IN PTR {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
{% endif %}
{# vim: ft=text
#}

View File

@ -0,0 +1,96 @@
{#
# First create a dict holding the entire zone information and create a hash
# from it, so that it can be compared with subsequent task executions. In this
# way the serial will only be updated if there are some content changes.
#}
{% set _zone_data = {} %}
{% set _ = _zone_data.update({'ttl': bind_zone_ttl}) %}
{% set _ = _zone_data.update({'domain': bind_zone_name }) %}
{% set _ = _zone_data.update({'mname': bind_zone_name_servers}) %}
{% set _ = _zone_data.update({'aname': bind_other_name_servers}) %}
{% set _ = _zone_data.update({'rname': (( bind_zone_hostmaster_email)) + ('' if (bind_zone_hostmaster_email | search('\.')) else ('.' + _zone_data['domain']))}) %}
{% set _ = _zone_data.update({'refresh': bind_zone_time_to_refresh}) %}
{% set _ = _zone_data.update({'retry': bind_zone_time_to_retry}) %}
{% set _ = _zone_data.update({'expire': bind_zone_time_to_expire}) %}
{% set _ = _zone_data.update({'minimum': bind_zone_minimum_ttl}) %}
{% set _ = _zone_data.update({'hosts': bind_zone_hosts | selectattr('ipv6','defined') | selectattr('ipv6', 'search', '^'+item|regex_replace('/.*$','')) | list }) %}
{% set _ = _zone_data.update({'revip': (item | ipaddr('revdns'))[-(9+(item|regex_replace('^.*/','')|int)//2):] }) %}
{#
# Compare the zone file hash with the current zone data hash and set serial
# accordingly
#}
{% set _zone = {'hash': _zone_data | string | hash('md5')} %}
{% for _result in reverse_hashes_ipv6.results %}
{% if (item in _result.item ) %}
{% set _hash_serial = _result.stdout.split(' ')[2:] %}
{% if _hash_serial and _hash_serial[0] == _zone['hash'] %}
{% set _ = _zone.update({'serial': _hash_serial[1]}) %}
{% else %}
{% set _ = _zone.update({'serial': timestamp.stdout}) %}
{% endif %}
{% endif %}
{% endfor %}
{#
# Eventually output the zone data
#}
; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }}
; Reverse zone file for {{ _zone_data['domain'] }}
; {{ ansible_managed }}
; vi: ft=bindzone
$TTL {{ _zone_data['ttl'] }}
$ORIGIN {{ (item | ipaddr('revdns'))[-(9+(item|regex_replace('^.*/','')|int)//2):] }}
{% if _zone_data['mname']|length > 0 %}
@ IN SOA {{ _zone_data['mname']|first }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
{% else %}
@ IN SOA {{ ansible_hostname }}.{{ _zone_data['domain'] }}. {{ _zone_data['rname'] }}. (
{% endif %}
{{ _zone['serial'] }}
{{ _zone_data['refresh'] }}
{{ _zone_data['retry'] }}
{{ _zone_data['expire'] }}
{{ _zone_data['minimum'] }} )
{% if _zone_data['mname']|length > 0 %}
{% for ns in _zone_data['mname'] %}
IN NS {{ ns }}.{{ _zone_data['domain'] }}.
{% endfor %}
{% else %}
IN NS {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
{% endif %}
{% for ns in _zone_data['aname'] %}
IN NS {{ ns }}.
{% endfor %}
{% if _zone_data['hosts']|length > 0 %}
{% for host in _zone_data['hosts'] %}
{% if host.ipv6 is defined %}
{% if host.ipv6 == item %}
@ IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
{% else %}
{% if host.ipv6 is string and host.ipv6.startswith(item|regex_replace('/.*$','')) %}
{% if host.name == '@' %}
{{ host.ipv6 | ipaddr('revdns') }} IN PTR {{ _zone_data['domain'] }}.
{% else %}
{{ host.ipv6 | ipaddr('revdns') }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
{% endif %}
{% else %}
{% for ip in host.ipv6 %}
{% if ip.startswith(item|regex_replace('/.*$','')) %}
{{ ip | ipaddr('revdns') }} IN PTR {{ _zone_data['domain'] }}.
{% if host.name == '@' %}
{% else %}
{{ ip | ipaddr('revdns') }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}.
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
{% else %}
{{ ansible_default_ipv6.address | ipaddr('revdns') }} IN PTR {{ ansible_hostname }}.{{ _zone_data['domain'] }}.
{% endif %}
{# vim: ft=text
#}

View File

@ -0,0 +1,94 @@
//
// named.conf
//
// {{ ansible_managed }}
//
{% for file in bind_default_zone_files %}
include "{{ file }}";
{% endfor %}
{% for acl in bind_acls %}
acl "{{ acl.name }}" {
{% for match in acl.match_list %}
{{ match }};
{% endfor %}
};
{% endfor %}
options {
listen-on port 53 { {{ bind_listen_ipv4|join(';') }}; };
listen-on-v6 port 53 { {{ bind_listen_ipv6|join(';') }}; };
directory "/etc/bind/zone";
pid-file "";
dump-file "/var/run/named_dump.db";
statistics-file "/var/run/named.stats";
recursing-file "/var/run/recursing";
// directory "{{ bind_zone_dir }}";
// dump-file "/var/run/named_dump.db";
// statistics-file "/var/run/named.stats";
// memstatistics-file "/var/run/named_mem.stats";
allow-query { {{ bind_allow_query|join(';') }}; };
{% if bind_acls|length != 0 %}
allow-transfer { {% for acl in bind_acls %}"{{ acl.name }}"; {% endfor %}};
{% endif %}
{% if bind_check_names is defined %}
check-names {{ bind_check_names }};
{% endif %}
recursion {% if bind_recursion %}yes{% else %}no{% endif %};
{% if bind_recursion %}
allow-recursion { {{ bind_allow_recursion|join('; ') }}; };
{% endif %}
{% if bind_forwarders|length > 0 %}forwarders { {{ bind_forwarders|join('; ') }}; };{% endif %}
{% if bind_forward_only %}forward only;{% endif %}
rrset-order { order {{ bind_rrset_order }}; };
dnssec-enable {{ bind_dnssec_enable }};
dnssec-validation {{ bind_dnssec_validation }};
dnssec-lookaside auto;
/* Path to ISC DLV key */
// bindkeys-file "/etc/named.iscdlv.key";
// managed-keys-directory "{{ bind_dir }}/dynamic";
// pid-file "";
// session-keyfile "/run/named/session.key";
};
logging {
/*
* ALT Linux: if enabled, this fills log uselessly
*/
category lame-servers {null;};
};
zone "{{ bind_zone_name }}" IN {
type slave;
masters { {{ bind_zone_master_server_ip }}; };
file "slaves/{{ bind_zone_name }}";
};
{% if bind_zone_networks is defined %}
{% for network in bind_zone_networks %}
zone "{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa" IN {
type slave;
masters { {{ bind_zone_master_server_ip }}; };
file "slaves/{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa";
};
{% endfor %}
{% endif %}
{% if bind_zone_ipv6_networks is defined %}
{% for network in bind_zone_ipv6_networks %}
zone "{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):] }}" IN {
type slave;
masters { {{ bind_zone_master_server_ip }}; };
file "slaves/{{ (network | ipaddr('revdns'))[-(9+(network|regex_replace('^.*/','')|int)//2):-1] }}";
};
{% endfor %}
{% endif %}
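For orientation, this slave config template consumes variables along these lines; the names come from the template itself, while every value below is purely illustrative:
---
# bind_default_zone_files is supplied by the per-distro vars file (e.g. vars/ALT.yml)
bind_listen_ipv4: [ "any" ]
bind_listen_ipv6: [ "none" ]
bind_allow_query: [ "any" ]
bind_acls:
  - name: internal
    match_list: [ "10.64.6.0/24" ]
bind_recursion: false
bind_forwarders: []
bind_forward_only: false
bind_rrset_order: random
bind_dnssec_enable: "no"
bind_dnssec_validation: "no"
bind_zone_name: domain.alt
bind_zone_master_server_ip: 10.64.6.1
bind_zone_networks: [ "10.64.6" ]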

View File

@ -0,0 +1,22 @@
# roles/bind/vars/Debian.yml
---
bind_packages:
- python-netaddr
- bind9
- bind9utils
bind_service: bind9
# Main config file
bind_config: /etc/bind/named.conf
# Localhost zone
bind_default_zone_files:
- /etc/bind/named.conf.default-zones
# Directory with run-time stuff
bind_dir: /var/cache/bind
bind_owner: root
bind_group: bind

View File

@ -0,0 +1,23 @@
# roles/bind/vars/RedHat.yml
---
bind_packages:
- python-netaddr
- bind
- bind-utils
bind_service: named
# Main config file
bind_config: /etc/named.conf
# Zone files included in the installation
bind_default_zone_files:
- /etc/named.root.key
- /etc/named.rfc1912.zones
# Directory with run-time stuff
bind_dir: /var/named
bind_owner: root
bind_group: named

15
roles/bind/tasks/main.yml Normal file
View File

@ -0,0 +1,15 @@
---
- name: prepare config for role
block:
- include_vars:
file: "conf/stacks/{{stack_name}}/apps/bind.yml"
name: bind
- set_fact: {'bind_{{item.key}}': '{{item.value}}'}
with_dict: "{{bind}}"
- set_fact:
ansible_distribution: "ALT"
ansible_os_family: "ALT"
- name: execute actual role
import_role: name=bind-role
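A hypothetical conf/stacks/<stack_name>/apps/bind.yml for the prefixing loop above could look like this; every key is simply re-exported as bind_<key> before bind-role runs, and the keys and values shown are only examples, not a definitive schema:
---
zone_name: domain.alt
zone_master_server_ip: 10.64.6.1
listen_ipv4: [ "any" ]
forwarders: [ "8.8.8.8" ]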

View File

@ -0,0 +1,5 @@
---
- name: restart network
service:
name: network
state: restarted

View File

@ -0,0 +1,30 @@
---
- name: "{{nic.key}} | set default route"
shell: egrep -q "^default via {{item|ipaddr('address')}}$" /etc/net/ifaces/"{{nic.key}}"/ipv4route || { alterator-cmdline -d /net-eth action write name "{{nic.key}}" ipv 4 configuration "static" default "{{item|ipaddr('address')}}" ipv_enabled "#t"; echo default_changed; }
register: result
changed_when: '"default_changed" in result.stdout'
notify: restart network
with_items: "{{nic.value.default}}"
when: nic.value.default is defined
- name: "{{nic.key}} | set nameservers"
shell: awk '($1 ~ /nameserver/){ list=list" "$2; }END{print list;}' /etc/net/ifaces/"{{nic.key}}"/resolv.conf | egrep -q "^{{nic.value.nameservers|join(' ')}}$" || { alterator-cmdline -d /net-eth action write name "{{nic.key}}" ipv 4 configuration "static" dns "{{nic.value.nameservers|join(' ')}}" ipv_enabled "#t"; echo dns_changed; }
register: result
changed_when: '"dns_changed" in result.stdout'
notify: restart network
when: nic.value.nameservers is defined
- name: "{{nic.key}} | set search domains"
shell: egrep -q "^search {{nic.value.search|join(' ')}}$" /etc/net/ifaces/"{{nic.key}}"/resolv.conf || { alterator-cmdline -d /net-eth action write name "{{nic.key}}" ipv 4 configuration "static" search "{{nic.value.search|join(' ')}}" ipv_enabled "#t"; echo search_changed; }
register: result
changed_when: '"search_changed" in result.stdout'
notify: restart network
when: nic.value.search is defined
- name: "{{nic.key}} | set IP addresses (async)"
shell: egrep -q "^{{item|ipaddr('address')}}/{{item|ipaddr('prefix')}}$" /etc/net/ifaces/"{{nic.key}}"/ipv4address || { alterator-cmdline -d /net-eth action add_iface_address name "{{nic.key}}" addip "{{item|ipaddr('address')}}" addmask "{{item|ipaddr('prefix')}}" ipv 4 configuration "static" ipv_enabled "#t"; echo addr_changed; }
register: result
changed_when: result.stdout is defined and '"addr_changed" in result.stdout'
notify: restart network
with_items: "{{nic.value.ipv4}}"
when: nic.value.ipv4 is defined
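Each nic loop variable handled above is one key/value pair from node.net; a minimal sketch of the shape these tasks expect (addresses are illustrative):
eth0:
  ipv4: [ "10.64.84.10/24" ]
  default: [ "10.64.84.1" ]
  nameservers: [ "8.8.8.8" ]
  search: [ "domain.alt" ]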

139
roles/common/tasks/main.yml Normal file
View File

@ -0,0 +1,139 @@
---
- set_fact: ansible_pkg_mgr=apt_rpm
- set_fact: node="{{inventory_hostname_short}}"
- set_fact: node="{{stack.nodes[inventory_hostname_short]}}"
- name: wait for nodes to become reachable
wait_for_connection:
delay: 3
timeout: 60
- name: disable apt-cache in tmpfs
block:
- lineinfile:
path: /etc/rc.d/rc.local
regexp: '^/etc/rc\.d/scripts/apt-cache-on-tmp.*'
state: absent
backrefs: yes
- file:
path: /etc/apt/apt.conf.d/tmp.cache.conf
state: absent
- name: purge apt.sources
shell: find /etc/apt/sources.list.d/ -name '*.list' -delete
changed_when: false
- name: set apt sources
template:
src: "apt.list.j2"
dest: "/etc/apt/sources.list.d/{{item}}.list"
with_items: "{{env.apt.sources}}"
- name: update packages list
shell: apt-get update
- name: include additional apt repos from tasks
template:
src: "apt.list.tasks.j2"
dest: "/etc/apt/sources.list.d/tasks.list"
when: stack.apt_sources_from_tasks is defined
- name: allow apt to downgrade packages
template:
src: "downgrade.j2"
dest: "/etc/apt/preferences.d/downgrade"
when: node.arch == 'e2k'
- name: install common packages
apt_rpm:
pkg: ntpdate,curl
state: installed
update_cache: yes
- name: install Alterator's modules
apt_rpm:
pkg: alterator,alterator-net-eth
# pkg: alterator,alterator-alternatives,alterator-audit,alterator-bind,alterator-datetime,alterator-dhcp,alterator-groups,alterator-mirror,alterator-mkimage,alterator-net-bond,alterator-net-bridge,alterator-net-domain,alterator-net-eth,alterator-net-functions,alterator-net-iptables,alterator-net-routing,alterator-net-vlan,alterator-packages,alterator-services,alterator-sshd,alterator-sysconfig,alterator-users,alterator-vsftpd,alterator-xinetd
state: installed
# FIX: don't know how to switch from dhcp to static via alterator
# just remove this line from options file for now
- name: remove options for eth0
lineinfile:
dest: /etc/net/ifaces/eth0/options
regexp: '^BOOTPROTO=dhcp.*'
backrefs: yes
state: absent
- name: disable IPv6
sysctl: name={{ item }} value=1 state=present
with_items:
- net.ipv6.conf.all.disable_ipv6
- net.ipv6.conf.default.disable_ipv6
- net.ipv6.conf.lo.disable_ipv6
- name: blacklist IPv6 module
template:
src: ipv6-disable.conf.j2
dest: /etc/modprobe.d/options-local.conf
- name: configure NICs
include_tasks: configure_nics.yml
with_dict: "{{node.net}}"
loop_control:
loop_var: nic
# when: node.net is defined
- name: apply NICs config
command: alterator-cmdline /net-eth action write commit "#t"
changed_when: false
async: 100
poll: 0
- name: update .tmp/ssh_config after NICs reconfiguration
include_role: name="inventory"
- name: wait for the host's NIC to come back
wait_for_connection:
delay: 2
timeout: 300
# TODO: need to make it idempotent!
- name: set hostname
shell: '[[ $(hostname -s) == "{{inventory_hostname_short}}" ]] || { alterator-cmdline -d /net-eth action write computer_name "{{inventory_hostname_short}}" commit "#t"; echo changed; }'
register: result
notify: restart network
changed_when: '"changed" in result.stdout'
- name: set domainname
# shell: '[[ $(domainname) == "{{stack.domain}}" ]] || { alterator-cmdline -d /net-domain action write domain "{{stack.domain}}" domain_type "dns" commit "#t"; echo changed; }'
shell: '[[ $(domainname) == "{{stack.domain}}" ]] || { domainname "{{stack.domain}}"; echo changed; }'
register: result
notify: restart network
changed_when: '"changed" in result.stdout'
- name: delete .bash_logout
file:
path: /root/.bash_logout
state: absent
- meta: flush_handlers
- setup:
- name: register users keys
template:
dest: /etc/openssh/authorized_keys/root
src: authorized_keys
owner: root
group: root
mode: 0644
- name: mount nfs shares
mount:
src: "{{item.from}}"
path: "{{item.to}}"
fstype: "{{item.fstype}}"
state: mounted
with_items: "{{node.mounts}}"
when: node.mounts is defined
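The optional node.mounts list consumed by the last task is expected to carry from/to/fstype triples; a sketch with made-up paths:
mounts:
  - from: "10.64.6.1:/srv/share"
    to: /mnt/share
    fstype: nfs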

View File

@ -0,0 +1,14 @@
# {{ansible_managed}}
{% if node.platform is defined %}
rpm [{{node.platform}}] {{env.apt.sources[item].url}} {{node.platform}}/branch/{{node.arch}} classic
rpm [{{node.platform}}] {{env.apt.sources[item].url}} {{node.platform}}/branch/noarch classic
{% else %}
rpm http://ftp.altlinux.org/pub/distributions/ALTLinux/Sisyphus x86_64 classic
rpm http://ftp.altlinux.org/pub/distributions/ALTLinux/Sisyphus noarch classic
{% if node.arch == 'e2k' %}
rpm {{env.apt.sources[item].url}}/e2k/deb e2k alien
rpm {{env.apt.sources[item].url}}/e2k e2k hasher
rpm {{env.apt.sources[item].url}}/e2k/misc noarch hasher
rpm {{env.apt.sources[item].url}}/e2k/lcc-1.21.24 e2k alien
{% endif %}
{% endif %}

View File

@ -0,0 +1,10 @@
# {{ansible_managed}}
{% for t in stack.apt_sources_from_tasks %}
{% if t is mapping %}
{% if inventory_hostname_short in t.binds %}
rpm http://git.altlinux.org repo/{{t.id}}/{{node.arch}} task
{% endif %}
{% else %}
rpm http://git.altlinux.org repo/{{t}}/{{node.arch}} task
{% endif %}
{% endfor %}

View File

@ -0,0 +1,4 @@
{% for u, v in env.users.items() %}
# {{u}}
{{v['ssh-key']}}
{% endfor %}

View File

@ -0,0 +1,3 @@
Package: *
Pin: release c=hasher
Pin-Priority: 1001

View File

@ -0,0 +1 @@
options ipv6 disable=1

View File

@ -0,0 +1,2 @@
---
#- name:

View File

@ -0,0 +1,5 @@
---
- name: restart dhcpd
service:
name: dhcpd
state: restarted

25
roles/dhcp/tasks/main.yml Normal file
View File

@ -0,0 +1,25 @@
---
- name: include dhcp variables
include_vars:
file: "conf/stacks/{{stack.name}}/apps/dhcp.yml"
name: dhcp
- name: install service package
apt_rpm:
pkg: dhcp-server
state: installed
tags: dhcp
- name: generate service config
template:
src: dhcpd.conf
dest: /etc/dhcp/dhcpd.conf
tags: dhcp
notify: restart dhcpd
- name: enable service
service:
name: dhcpd
enabled: yes
state: started
tags: dhcp

View File

@ -0,0 +1,51 @@
option domain-name "{{stack.domain}}";
option domain-name-servers {% for ns in stack.nameservers %}{{ns}}{% if not loop.last%},{%endif%}{%endfor%};
default-lease-time 600;
max-lease-time 7200;
ddns-update-style none;
authoritative;
class "vms" {
match if substring (option vendor-class-identifier, 0, 4) = "SUNW";
}
class "pxe" {
match if substring (option vendor-class-identifier, 0, 9) = "PXEClient";
}
class "ipph" {
match if (option vendor-class-identifier = "ccp.avaya.com"
or option vendor-class-identifier = "ATA186"
or substring (option vendor-class-identifier, 0, 28) = "Cisco Systems, Inc. IP Phone");
}
option option-66 code 66 = text;
{% for addr,net in dhcp.networks.iteritems() %}
{% if net.hosts is defined %}
{% for h in net.hosts %}
host {{net.vlan}}-{{h.name}} {
hardware ethernet {{h.mac}};
fixed-address {{h.ipv4}};
}
{% endfor %}
{% endif %}
subnet {{addr.split('/')[0]}} netmask {{addr|ipaddr('netmask')}} {
option routers {{net.default}};
option domain-name-servers {{net.nameservers|join(',')}};
{% if "bootp" in net.keys() %}
option option-66 "{{net.bootp.tftp}}/yealink/";
{% endif %}
{% if "pxe" in net.keys() %}
next-server {{net.pxe['next-server']}};
filename "{{net.pxe.filename}}";
{% endif %}
pool {
range {{net.range}};
}
}
{% endfor %}
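The template iterates over dhcp.networks keyed by CIDR; an illustrative entry (all addresses and names are made up) that exercises the host, pxe and pool branches:
networks:
  "10.64.6.0/24":
    vlan: 940
    default: 10.64.6.1
    nameservers: [ 10.64.6.1 ]
    range: 10.64.6.150 10.64.6.200
    hosts:
      - name: dc0
        mac: "52:54:00:12:34:56"
        ipv4: 10.64.6.10
    pxe:
      next-server: 10.64.6.1
      filename: pxelinux.0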

View File

@ -0,0 +1,9 @@
---
pass_secrets:
- id: "ansible/vault-pass/{{ env_name }}"
dest: "{{ lookup('env', 'PWD') }}/.pass-{{ env_name }}"
comment: ansible vault password
- id: "infra/robot_key"
dest: "{{ lookup('env', 'PWD') }}/.tmp/robot_key"
comment: ssh key for the deployed nodes

View File

@ -0,0 +1,37 @@
---
# extract secret from the pass(1) storage
- name: "check if the secret {{ item.id }} exists"
stat:
path: "{{ lookup('env', 'PASSWORD_STORE_DIR') }}/{{ item.id }}.gpg"
get_checksum: false
get_mime: false
register: st_secret
- name: "check if the plaintext destination {{ item.dest }} exists"
stat:
path: "{{ item.dest }}"
get_checksum: false
get_mime: false
register: plaintext_st_secret
failed_when: false
- name: create a directory for the plaintext secret
file:
path: "{{ item.dest|dirname }}"
state: directory
mode: 0700
- set_fact:
secret_up2date: true
when: plaintext_st_secret.stat.exists|bool and plaintext_st_secret.stat.mtime > st_secret.stat.mtime
- name: "decrypt the secret {{ item.id }}"
shell: >
set -e &&
umask 0077 &&
pass {{ item.id }} > {{ item.dest }}.tmp &&
mv {{ item.dest }}.tmp {{ item.dest }}
when: not(secret_up2date|default('false')|bool)

View File

@ -0,0 +1,23 @@
- set_fact:
required_packages:
- python-module-jmespath
- python-module-netaddr
when: ansible_os_family == 'ALT'
# XXX: apt_rpm requires root privileges even if the packages are already installed
- name: check if required packages are installed
command: >
rpm -q {{ required_packages|join(" ") }}
register: required_packages_installed
failed_when: false
when: ansible_os_family == 'ALT'
- name: install required packages on ALT Linux
apt_rpm: pkg={{ item }} state=present
with_items: "{{ required_packages }}"
when: ansible_os_family == 'ALT' and required_packages_installed.rc != 0
become: true
- include: ./extract_secret.yml
with_items: "{{ pass_secrets }}"

View File

@ -0,0 +1,43 @@
---
- name: generate ssh_config for bootstrap
template: src=ssh_config-bootstrap dest=.tmp/ssh_config
delegate_to: localhost
when: bootstrap is defined
- name: generate stack-specific ssh_config for bootstrap
template: src=ssh_config-bootstrap dest=".tmp/ssh_config-{{stack.name}}"
delegate_to: localhost
when: bootstrap is defined
- name: generate ssh_config
template: src=ssh_config dest=.tmp/ssh_config
delegate_to: localhost
when: bootstrap is not defined
- name: generate stack-specific ssh_config
template: src=ssh_config dest=".tmp/ssh_config-{{stack.name}}"
delegate_to: localhost
when: bootstrap is not defined
- name: populate dynamic inventory
add_host:
name: "{{tmp_node}}"
groups: "{{['stack'] + tmp_node|get_steps(stack.apps)}}"
ansible_ssh_host: "{{tmp_node}}.{{stack.domain}}"
ansible_ssh_port: 22
with_items: "{{stack.nodes}}"
loop_control:
loop_var: tmp_node
changed_when: false
- name: save inventory to file
template:
src: ansible_hosts.j2
dest: ".tmp/ansible_hosts"
delegate_to: localhost
- name: generate apps playbook
template:
src: apps.yml.j2
dest: "./{{env_name}}-{{stack_name}}-apps.yml"
delegate_to: localhost

View File

@ -0,0 +1,6 @@
{% for g in groups %}
[{{g}}]
{% for h in groups[g] %}
{{h}}
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,14 @@
---
{% for s in stack.apps %}
- name: executing step{{loop.index-1}}
hosts: step{{loop.index-1}}
strategy: free
gather_facts: True
roles:
- {role: prepare-config}
- {role: apps}
vars:
- step: {{loop.index-1}}
tags: [ "apps" ]
{% endfor %}
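With the three-step apps list from the example stack, this template would render to something like the following playbook (rendered output shown only as a reading aid):
---
- name: executing step0
  hosts: step0
  strategy: free
  gather_facts: True
  roles:
    - {role: prepare-config}
    - {role: apps}
  vars:
    - step: 0
  tags: [ "apps" ]
# ...and likewise for step1 and step2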

View File

@ -0,0 +1,11 @@
{% for node_name, node in stack.nodes.iteritems() %}
Host {{node_name}}.{{stack.domain}} {{node_name}}
HostName {{node.net.eth0.ipv4[0]|ipaddr('address')}}
User root
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
{% if node.net.eth0.ssh_proxy is defined %}
ProxyJump {{node.net.eth0.ssh_proxy}}
{% endif %}
{% endfor %}

View File

@ -0,0 +1,15 @@
{% for node_name, node in stack.nodes.iteritems() %}
Host {{node_name}}.{{stack.domain}} {{node_name}}
{% if node.nics is defined %}
HostName {{node.nics.eth0.addrs[0]|ipaddr('address')}}
{% else %}
HostName {{node.net.eth0.ipv4[0]|ipaddr('address')}}
{% endif %}
User root
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
{% if node.net.eth0.ssh_proxy is defined %}
ProxyJump {{node.net.eth0.ssh_proxy}}
{% endif %}
{% endfor %}

86
roles/lxc/tasks/main.yml Normal file
View File

@ -0,0 +1,86 @@
---
- name: register LXC nodes
set_fact:
lxc_nodes: "{{stack.nodes | filter_dict(\"lambda x: x['type']=='lxc'\")}}"
- name: destroy containers
block:
- shell: if lxc-info -n "{{cont_name}}" | grep -q RUNNING ; then lxc-stop -t2 -n "{{cont_name}}"; else true; fi
delegate_to: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['hostname']}}"
remote_user: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['username']}}"
with_items: "{{lxc_nodes}}"
vars:
cont_name: "{{env_name}}-{{stack_name}}-{{tmp_node}}"
loop_control:
loop_var: tmp_node
- shell: rm -rf /mnt/osl/var/lib/lxc/{{cont_name}}
delegate_to: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['hostname']}}"
remote_user: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['username']}}"
with_items: "{{lxc_nodes}}"
vars:
cont_name: "{{env_name}}-{{stack_name}}-{{tmp_node}}"
loop_control:
loop_var: tmp_node
- shell: rm -rf "/var/lib/lxc/{{cont_name}}"
delegate_to: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['hostname']}}"
remote_user: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['username']}}"
with_items: "{{lxc_nodes}}"
vars:
cont_name: "{{env_name}}-{{stack_name}}-{{tmp_node}}"
loop_control:
loop_var: tmp_node
when: redeploy_all is defined and redeploy_all|bool == true
or destroy_all is defined and destroy_all|bool == true
#- meta: end_play
# when: destroy_all is defined and destroy_all|bool == true
- name: "create container config"
block:
- file:
path: "/var/lib/lxc/{{cont_name}}/"
state: directory
delegate_to: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['hostname']}}"
remote_user: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['username']}}"
with_items: "{{lxc_nodes}}"
vars:
cont_name: "{{env_name}}-{{stack_name}}-{{tmp_node}}"
loop_control:
loop_var: tmp_node
- template:
src: config.j2
dest: "/var/lib/lxc/{{cont_name}}/config"
delegate_to: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['hostname']}}"
remote_user: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['username']}}"
with_items: "{{lxc_nodes}}"
vars:
cont_name: "{{env_name}}-{{stack_name}}-{{tmp_node}}"
loop_control:
loop_var: tmp_node
- shell: mkdir -p /mnt/osl/var/lib/lxc/{{cont_name}}; [[ ! -d /mnt/osl/var/lib/lxc/{{cont_name}}/rootfs ]] && rsync -a /mnt/osl/var/lib/lxc/{{stack['nodes'][tmp_node]['template']}}/rootfs /mnt/osl/var/lib/lxc/{{cont_name}}/ || true
delegate_to: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['hostname']}}"
remote_user: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['username']}}"
with_items: "{{lxc_nodes}}"
vars:
cont_name: "{{env_name}}-{{stack_name}}-{{tmp_node}}"
loop_control:
loop_var: tmp_node
- lxc_container:
name: "{{cont_name}}"
state: started
container_command: |
apt-get update
apt-get install -y openssh-server python-modules python-modules-json python-module-distutils-extra python-module-setuptools
chkconfig sshd on
service sshd start
delegate_to: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['hostname']}}"
remote_user: "{{env['lxc'][stack['nodes'][tmp_node]['arch']]['username']}}"
with_items: "{{lxc_nodes}}"
vars:
cont_name: "{{env_name}}-{{stack_name}}-{{tmp_node}}"
loop_control:
loop_var: tmp_node
when: destroy_all is not defined or destroy_all|bool == false
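These tasks resolve the LXC host per node architecture from the environment definition; the env.lxc section they expect looks roughly like this (hostnames are placeholders):
lxc:
  x86_64:
    hostname: lxc-x86.example.test
    username: root
  e2k:
    hostname: lxc-e2k.example.test
    username: root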

View File

@ -0,0 +1,46 @@
lxc.rootfs = /mnt/osl/var/lib/lxc/{{cont_name}}/rootfs
lxc.utsname = {{cont_name}}
lxc.tty = 4
lxc.pts = 1024
#lxc.id_map = u 0 100000 65536
#lxc.id_map = g 0 100000 65536
lxc.cap.drop = sys_module mac_admin mac_override sys_time
# When using LXC with apparmor, uncomment the next line to run unconfined:
#lxc.aa_profile = unconfined
{% for k,v in stack['nodes'][tmp_node]['net'].iteritems() %}
#networking for {{k}}
lxc.network.type = veth
lxc.network.flags = up
lxc.network.link = {{v.bridge}}
lxc.network.name = {{k}}
lxc.network.mtu = 1500
{% for a in v.ipv4 %}
lxc.network.ipv4 = {{a}}
{% endfor %}
{% if v.default is defined %}
lxc.network.ipv4.gateway = {{v.default}}
{% endif %}
{% endfor %}
#cgroups
lxc.cgroup.devices.deny = a
# /dev/null and zero
lxc.cgroup.devices.allow = c 1:3 rwm
lxc.cgroup.devices.allow = c 1:5 rwm
# consoles
lxc.cgroup.devices.allow = c 5:1 rwm
lxc.cgroup.devices.allow = c 5:0 rwm
lxc.cgroup.devices.allow = c 4:0 rwm
lxc.cgroup.devices.allow = c 4:1 rwm
# /dev/{,u}random
lxc.cgroup.devices.allow = c 1:9 rwm
lxc.cgroup.devices.allow = c 1:8 rwm
lxc.cgroup.devices.allow = c 136:* rwm
lxc.cgroup.devices.allow = c 5:2 rwm
# rtc
lxc.cgroup.devices.allow = c 10:135 rwm
lxc.mount.auto = cgroup:mixed proc:mixed sys:mixed

View File

@ -0,0 +1,35 @@
---
- name: generate ansible vault password
set_fact:
vault_pass: "{{ lookup('password', '.tmp/example-env-pass length=6 chars=ascii_letters') }}"
- name: encrypt PVE password with Ansible Vault
command: ansible-vault encrypt_string --vault-id .tmp/example-env-pass '{{ pve_password }}' --name 'password'
register: res
- name: register encrypted password
set_fact:
pve_pass_encrypted: "{{ res.stdout }}"
- name: make directories
file:
path: "vars/conf/{{ item }}"
state: directory
with_items:
- envs
- stacks/example
- name: generate example environment
template:
src: env.yml
dest: vars/conf/envs/example.yml
- name: generate example stack
template:
src: stack.yml
dest: vars/conf/stacks/example/stack.yml
- name: generate example apps
template:
src: apps.yml
dest: vars/conf/stacks/example/apps.yml

View File

@ -0,0 +1,40 @@
---
samba_parameters: &samba_params
samba_realm: "domain.alt"
samba_domain: "domain"
samba_admin_pass: "myPass123$"
samba_dns_forward: 8.8.8.8
samba_dns_backend: SAMBA_INTERNAL
samba_master_hostname: dc0
samba_master_address: 10.64.6.1
samba_network: 10.64.6.0/24
apps:
- # step1
- name: samba
vars:
<<: *samba_params
samba_flavor: master
binds: [ "dc0" ]
- #step2
- name: samba
vars:
<<: *samba_params
samba_flavor: replica
binds: [ "dc1", "dc2" ]
- name: samba
vars:
<<: *samba_params
samba_flavor: client
binds: [ "cl0", "cl1" ]
- #step3
- name: samba
vars:
<<: *samba_params
samba_flavor: gen_test_env
samba_generate_domain_config: true
samba_domain_config_output: ".tmp/domain_config"
binds: [ "dc0" ]

View File

@ -0,0 +1,16 @@
---
apt:
sources:
altlinux:
enabled: yes
# url: "http://ftp.altlinux.org/pub/distributions/ALTLinux/"
url: "http://10.64.0.6"
pve:
api_url: "{{ pve_address }}"
username: "{{ pve_login }}"
{{ pve_pass_encrypted }}
storage: {{ pve_storage | default('local') }}
users:
robot:
ssh-key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOJSeRmDdbpn7axn0gJRRxmKXOygmpuR56Y3mP89nyIf robot@smb.basealt.ru"

View File

@ -0,0 +1,50 @@
---
name: {{ stack_name | default('example-samba') }}
domain: {{ domain_name | default('domain.alt') }}
x86_node: &x86_node
type: pve
arch: "x86_64"
platform: p8
cpus: 1
cores: 2
mem: 1024
net: &net
eth0: &eth0
bridge: {{ public_bridge | default('vmbr1') }}
tag: {{ public_vlan | default('994') }}
descr: "pub"
default: {{ public_gw | default('10.64.84.1') }}
nameservers: [ {{ public_dns | default('8.8.8.8') }} ]
eth1: &eth1
bridge: {{ private_bridge | default('vmbr1') }}
tag: {{ private_vlan | default('940') }}
descr: priv
search: [ {{ domain_name | default('domain.alt') }} ]
nodes:
dc:
<<: *x86_node
template: {{ server_template | default('alt-srv-82-x64') }}
count: 3
net:
<<: *net
eth0:
<<: *eth0
ipv4: [ {{ servers_public_ips | default('10.64.84.10/24') }} ]
eth1:
<<: *eth1
ipv4: [ {{ servers_private_ips | default('10.64.6.1/24') }} ]
cl:
<<: *x86_node
template: {{ desktop_template | default ('alt-workstation-82-x64') }}
count: 2
net:
<<: *net
eth0:
<<: *eth0
ipv4: [ {{ desktops_public_ips | default('10.64.84.100/24') }} ]
eth1:
<<: *eth1
ipv4: [ {{ desktops_private_ips | default('10.64.6.100/24') }} ]

View File

@ -0,0 +1,2 @@
---
- debug: msg="dummy role"

View File

@ -0,0 +1,24 @@
---
- block:
- name: "generate {{st.nodes[item].count}} nodes for {{item}}"
set_fact:
node: "{{ dict({item+n: st.nodes[item]|gen_nics_addrs(n)}) }}"
with_sequence: "start=0 end={{st.nodes[item].count-1}}"
loop_control:
loop_var: n
register: res
# when: st.nodes[item].provider != 'bare'
#- debug: msg="{{env}}"
#- set_fact:
#gen_nodes: "{{ dict({item: env['bare'][ st.nodes[item]['env_node'] ]}) }}"
#when: st.nodes[item].provider == 'bare'
#- debug: msg="{{gen_nodes}}"
- set_fact:
tmp_nodes: []
- set_fact:
gen_nodes: "{{ (res.results | map(attribute='ansible_facts.node') | list) + gen_nodes }}"
# when: st.nodes[item].provider != 'bare'

View File

@ -0,0 +1,29 @@
---
- name: include environment definition
include_vars:
file: "conf/envs/{{env_name}}.yml"
name: env
- set_fact:
env: "{{env}}"
- name: import config for stack
block:
- include_vars:
file: "conf/stacks/{{stack_name}}/apps.yml"
name: as
- include_vars:
file: "conf/stacks/{{stack_name}}/stack.yml"
name: st
- set_fact:
gen_nodes: []
- name: generate nodes list
include_tasks: generate_nodes.yml
with_items: "{{st.nodes}}"
- set_fact:
stack: "{{ {}|combine( st | combine({'nodes': gen_nodes|list_to_dict}) )|combine(as) }}"
# stack: "{{ {}|combine( st )|combine({'apps': as}) }}"
#- debug: msg="{{stack}}"

38
roles/provision/README.md Normal file
View File

@ -0,0 +1,38 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@ -0,0 +1,7 @@
---
provision_supported_providers:
- bare
- pve
- lxc
provision_nodes: []

View File

@ -0,0 +1,2 @@
---
# handlers file for provision

View File

@ -0,0 +1,57 @@
galaxy_info:
author: your name
description: your description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Some suggested licenses:
# - BSD (default)
# - MIT
# - GPLv2
# - GPLv3
# - Apache
# - CC-BY
license: license (GPLv2, CC-BY, etc)
min_ansible_version: 1.2
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
# Optionally specify the branch Galaxy will use when accessing the GitHub
# repo for this role. During role install, if no tags are available,
# Galaxy will use this branch. During import Galaxy will access files on
# this branch. If Travis integration is configured, only notifications for this
# branch will be accepted. Otherwise, in all cases, the repo's default branch
# (usually master) will be used.
#github_branch:
#
# platforms is a list of platforms, and each platform has a name and a list of versions.
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@ -0,0 +1,3 @@
---
- name: "provision bare node"
debug: msg="[dummy] {{ node.provider }} node {{ node.name }}"

View File

@ -0,0 +1,11 @@
---
- name: validate nodes providers
fail: msg="provider '{{item.provider}}' is not supported"
when: item.provider not in provision_supported_providers
with_items: "{{provision_nodes}}"
- name: provision nodes
include_tasks: "{{item.provider}}.yml"
vars:
node: "{{item}}"
with_items: "{{provision_nodes}}"

View File

@ -0,0 +1,2 @@
localhost

View File

@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- provision

View File

@ -0,0 +1,2 @@
---
# vars file for provision

View File

@ -0,0 +1,90 @@
---
- set_fact: node_name="{{tmp_node}}"
- set_fact: vm_name="{{node_name}}.{{stack.name}}"
- name: "[{{vm_name}}] clone VM"
proxmox_kvm:
node: srv
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
pool: "{{env_name}}"
clone: "{{stack.nodes[node_name].template}}"
full: no
# storage: "{{env.pve.storage}}"
timeout: 90
# state: present
- name: "[{{vm_name}}] get VM state"
proxmox_kvm:
node: srv
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
agent: yes
state: current
register: vm_status
until: vm_status is succeeded
retries: 30
delay: 1
ignore_errors: yes
- name: "[{{vm_name}}] configure VM"
proxmox_kvm:
node: srv
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
agent: yes
cpu: host
kvm: yes
cores: "{{stack.nodes[node_name].cores}}"
memory: "{{stack.nodes[node_name].mem}}"
# storage: "{{env.pve.storage}}"
net: "{{stack.nodes[node_name].net | to_proxmox_net}}"
update: yes
state: present
register: vm_status
until: vm_status is succeeded
retries: 30
delay: 1
ignore_errors: yes
- name: "[{{vm_name}}] start VM"
proxmox_kvm:
node: srv
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
state: started
- name: "[{{vm_name}}] wait for qemu-agent return addresses list"
proxmox_qemu_agent:
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
command: "network-get-interfaces"
register: res
until: res.results | json_query('[] | [?name!=`lo`]."ip-addresses" | [] | [?"ip-address-type"==`ipv4`] | []."ip-address"') != None
retries: 30
delay: 1
- name: "[{{vm_name}}] wait for any IP on any VM's interface"
proxmox_qemu_agent:
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
command: "network-get-interfaces"
register: res
until: res.results | json_query('[] | [?name!=`lo`]."ip-addresses" | [] | [?"ip-address-type"==`ipv4`] | []."ip-address"') | length > 0
retries: 30
delay: 1
- name: "[{{vm_name}}] register VM's interfaces info"
set_fact: stack="{{stack|dict_inject("/nodes/"+node_name+"/nics", res.results|to_nics_dict)}}"

View File

@ -0,0 +1,32 @@
- set_fact: node_name="{{tmp_node}}"
- set_fact: vm_name="{{node_name}}.{{stack.name}}"
- name: checking that vm exists
proxmox_kvm:
node: srv
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
state: current
ignore_errors: true
register: vm_exists
- block:
- name: stopping vm
proxmox_kvm:
node: srv
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
state: stopped
- name: destroying vm
proxmox_kvm:
node: srv
api_user: "{{env.pve.username}}"
api_password: "{{env.pve.password}}"
api_host: "{{env.pve.api_url}}"
name: "{{vm_name}}"
state: absent
when: vm_exists is succeeded

21
roles/pve/tasks/main.yml Normal file
View File

@ -0,0 +1,21 @@
---
- name: register PVE nodes
set_fact:
pve_nodes: "{{stack.nodes | filter_dict(\"lambda x: x['type']=='pve'\")}}"
- name: destroy VMs
include_tasks: destroy_vm.yml
with_items: "{{pve_nodes}}"
loop_control:
loop_var: tmp_node
when: redeploy_all is defined and redeploy_all|bool == true
or destroy_all is defined and destroy_all|bool == true
#- meta: end_play
- name: create VMs
include_tasks: create_vm.yml
with_items: "{{pve_nodes}}"
loop_control:
loop_var: tmp_node
when: destroy_all is not defined or destroy_all|bool == false

View File

@ -0,0 +1,2 @@
---
- debug: msg="dummy role"

View File

@ -0,0 +1,2 @@
---
- debug: msg="dummy role"

1
roles/remote-desktop Submodule

@ -0,0 +1 @@
Subproject commit 77b0fdcda534dcc658d1b0a2b32f84efef4c677d

1
roles/samba Submodule

@ -0,0 +1 @@
Subproject commit 9f2025b38a20cf0f3c330f51dbc7ae112a94844a

1
roles/tftp Submodule

@ -0,0 +1 @@
Subproject commit 3869737cc560fa51e36891c199c9153a12fe24be

View File

@ -0,0 +1,5 @@
---
- name: restart dm
service:
name: lightdm
state: restarted

View File

@ -0,0 +1,20 @@
---
- name: install packages
apt_rpm:
pkg: xorg-extension-vnc
state: installed
- name: enable vnc service
replace:
path: /etc/X11/xorg.conf.d/vnc.conf
regexp: '#(.*)'
replace: '\1'
notify: restart dm
- name: create directory
file:
path: /root/.vnc
state: directory
- name: set vnc password
shell: "vncpasswd -f <<< {{vnc_password}} > /root/.vnc/passwd"

View File

@ -0,0 +1,2 @@
---
- debug: msg="dummy role"

View File

@ -0,0 +1,11 @@
---
- name: install xinetd package
apt_rpm:
pkg: xinetd
state: installed
- name: enable xinetd service
service:
name: xinetd
enabled: yes
state: started

72
shell.nix Normal file
View File

@ -0,0 +1,72 @@
{ pkgs ? import <nixpkgs> {}
, windowsSupport ? false
}:
with pkgs;
let
myAnsible = pythonPackages.buildPythonPackage rec {
pname = "ansible";
version = "devel";
name = "${pname}-${version}";
src = fetchFromGitHub {
owner = "ansible";
repo = "ansible";
rev = "f0fd0f219de80a8f682b80e1ccdb83fd4988da64";
sha256 = "128847r4bc650lcpc2z1wxjgdnh07zhxfd9m2bi3wfl069dvhjk9";
};
prePatch = ''
sed -i "s,/usr/,$out," lib/ansible/constants.py
'';
doCheck = false;
dontStrip = true;
dontPatchELF = true;
dontPatchShebangs = false;
propagatedBuildInputs = with pythonPackages; [
pycrypto paramiko jinja2 pyyaml httplib2 boto six netaddr dnspython
] ++ stdenv.lib.optional windowsSupport pywinrm;
meta = with stdenv.lib; {
homepage = http://www.ansible.com;
description = "A simple automation tool";
license = with licenses; [ gpl3 ];
maintainers = with maintainers; [
jgeerds
joamaki
];
platforms = with platforms; linux ++ darwin;
};
};
myProxmoxer = python27.pkgs.buildPythonPackage rec {
pname = "proxmoxer";
version = "1.0.2";
src = python36.pkgs.fetchPypi {
inherit pname version;
sha256 = "0vpb3b1b8w4r4c28kfhyviw4q70s3vwwirkq6rywryl4wqc3fyra";
};
doCheck = false;
meta = {
homepage = "https://github.com/swayf/proxmoxer";
description = "Proxmoxer is a wrapper around the Proxmox REST API v2";
};
};
myPython = python27.withPackages (ps: with ps; [ myProxmoxer jmespath virtualenv pip hypothesis pytest_xdist pytest ]);
in
stdenv.mkDerivation {
name = "alt-infra-ansible";
buildInputs = [
openssh
myAnsible
myPython
pass
];
}

25
vars/ALT.yml Normal file
View File

@ -0,0 +1,25 @@
---
ansible_pkg_mgr: apt_rpm
bind_packages:
- python-module-netaddr
- bind
- bind-utils
bind_service: bind
# Main config file
bind_config: /etc/bind/named.conf
# Localhost zone
bind_default_zone_files:
- /etc/bind/rndc.conf
# - /etc/bind/local.conf
# Directory with run-time stuff
bind_chroot: /var/lib/bind
bind_zone_dir: /etc/bind/zone
bind_dir: /var/run
bind_owner: named
bind_group: named