
M #-: Various oneprovision/ansible updates (#3202)

- allow newer Ansible in provision
- remove unused files and roles
- simplify opennebula-node-kvm
- remove unused CentOS files
- rename yaml -> yml
- replace `include:` with `include_tasks:`
- don't restart frr on every run (restart via handler only on config change)
- bump Ceph version in HCI to Reef, adapt ceph-opennebula-mon
Jan Orel 2024-08-16 11:32:39 +02:00 committed by GitHub
parent a7da395d52
commit 92541b6da6
46 changed files with 69 additions and 553 deletions

View File

@ -4,12 +4,12 @@ deprecation_warnings = False
display_skipped_hosts = False
stdout_callback = yaml
host_key_checking = False
roles_path = <%= ans_loc %>/roles:/var/lib/one/.ansible/roles:/var/lib/one/.ansible/ceph-7.0/roles
action_plugins = /var/lib/one/.ansible/ceph-7.0/plugins/actions
callback_plugins = /var/lib/one/.ansible/ceph-7.0/plugins/callback
filter_plugins = /var/lib/one/.ansible/ceph-7.0/plugins/filter
library = /var/lib/one/.ansible/ceph-7.0/library
module_utils = /var/lib/one/.ansible/ceph-7.0/module_utils
roles_path = <%= ans_loc %>/roles:/var/lib/one/.ansible/roles:/var/lib/one/.ansible/ceph-8.0/roles
action_plugins = /var/lib/one/.ansible/ceph-8.0/plugins/actions
callback_plugins = /var/lib/one/.ansible/ceph-8.0/plugins/callback
filter_plugins = /var/lib/one/.ansible/ceph-8.0/plugins/filter
library = /var/lib/one/.ansible/ceph-8.0/library
module_utils = /var/lib/one/.ansible/ceph-8.0/module_utils
[privilege_escalation]
become = yes

View File

@ -1,16 +1,12 @@
---
# Ceph basics
ceph_origin: repository
ceph_repository: uca
ceph_repository: community
ceph_rhcs_version: 7
ceph_stable_release: quincy
ceph_stable_release_num: 17
ceph_stable_release: reef
ceph_stable_release_num: 18
dashboard_enabled: false
configure_firewall: false
ceph_stable_mapping:
'20': yoga
'22': bobcat
ceph_stable_openstack_release_uca: "{{ ceph_stable_mapping[ansible_distribution_major_version] }}"
# ceph overrides (applying in ceph.conf)
ceph_conf_overrides:
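For reference, a sketch of the visible portion of this group_vars block after the change (reconstructed from the hunk above): with `ceph_repository: community`, ceph-ansible pulls the Reef packages from the upstream community repository, so the UCA (Ubuntu Cloud Archive) release mapping that only mattered for `ceph_repository: uca` is dropped.

---
# Ceph basics
ceph_origin: repository
ceph_repository: community
ceph_rhcs_version: 7
ceph_stable_release: reef
ceph_stable_release_num: 18
dashboard_enabled: false
configure_firewall: false
# ceph overrides (applying in ceph.conf)
ceph_conf_overrides: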

View File

@ -1,13 +1,12 @@
---
- name: Set Ceph auth command init
set_fact: ceph_opennebula_mon_pools_auth=[]
- name: Create oneadmin keys
vars:
_profiles_with_pools: >-
{{ ceph_opennebula_mon_pools | map(attribute='name')
| map('regex_replace', '^', 'profile rbd pool=')
| join(',') }}
- name: Set Ceph auth command pools
set_fact: ceph_opennebula_mon_pools_auth="{{ ceph_opennebula_mon_pools_auth }} + [ 'profile rbd pool={{ item.name }}' ]"
with_items: "{{ ceph_opennebula_mon_pools }}"
- name: Create oneadmin keys for Ceph Luminous+
command: ceph auth get-or-create client.oneadmin mon 'profile rbd' osd '{{ ceph_opennebula_mon_pools_auth | join(",") }}'
command: ceph auth get-or-create client.oneadmin mon 'profile rbd' osd '{{ _profiles_with_pools }}'
changed_when: false
- name: get oneadmin key
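A worked example of the new `_profiles_with_pools` expression, using a hypothetical pool list (the real values come from `ceph_opennebula_mon_pools` in group_vars); it replaces the repeated `set_fact` list building with a single templated expression that yields the same osd capability string:

# Hypothetical input, for illustration only
ceph_opennebula_mon_pools:
  - { name: one }
  - { name: cephfs_data }

# map(attribute='name')                           -> ['one', 'cephfs_data']
# map('regex_replace', '^', 'profile rbd pool=')  -> ['profile rbd pool=one', 'profile rbd pool=cephfs_data']
# join(',')                                       -> 'profile rbd pool=one,profile rbd pool=cephfs_data'
#
# rendered command:
#   ceph auth get-or-create client.oneadmin mon 'profile rbd' osd 'profile rbd pool=one,profile rbd pool=cephfs_data'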

View File

@ -2,8 +2,8 @@
- name: open /etc/ceph
file: path=/etc/ceph mode=0755
- include: pool.yml
- include_tasks: pool.yml
- include: auth.yml
- include_tasks: auth.yml
- include: tunables.yml
- include_tasks: tunables.yml
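The bare `include:` keyword has been deprecated for a long time and is rejected by recent ansible-core releases, which is why these roles switch wholesale to `include_tasks:`; the replacement is one-for-one here, with the existing `when:` guards kept on the include line. A minimal sketch of the pattern:

# before (no longer accepted by newer ansible-core):
# - include: pool.yml
#   when: some_condition

# after (dynamic include, evaluated at runtime):
- include_tasks: pool.yml
  when: some_condition   # hypothetical guard, kept on the include line as in the hunks above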

View File

@ -25,7 +25,7 @@
# configure libvirt secretes
- name: Include libvirt configuration tasks
include: libvirt.yml
include_tasks: libvirt.yml
when: ceph_opennebula_osd_libvirt_enabled
# stop and disable docker on hypervisors

View File

@ -1,39 +0,0 @@
---
#TODO: fix just once
- name: Disable obsolete network configuration
shell: |
ip link >/dev/null || exit 1
CHANGED=''
for FILE in ifcfg-*; do
# skip interfaces disabled "on boot"
if grep -q -i '^ONBOOT=["'\'']no' ${FILE}; then
continue
fi
# get interface name from configuration or filename
IFACE=$(awk -F= 'toupper($1) ~ /(DEVICE|NAME)/ { gsub("['\''\"]", "", $2); print $2; exit }' ${FILE})
IFACE=${IFACE:-${FILE##ifcfg-}}
# if interface does not exist, disable configuration
if ! ip link show ${IFACE} >/dev/null 2>&1; then
CHANGED=yes
mv ${FILE} disabled-${FILE}
fi
done
# As a result of obsolete configuration, the network service
# could end up in the failed state. Restart of the networking
# might fail later, because only service start is triggered
# (without prior stop). We try to manually put the interfaces down,
# and restart the networking to fix the service state.
# https://github.com/OpenNebula/one/issues/3080
if [ -n "${CHANGED}" ] && systemctl is-failed network.service >/dev/null 2>&1; then
ifdown ifcfg-* || :
systemctl restart network.service
fi
args:
executable: /bin/bash
chdir: /etc/sysconfig/network-scripts
when: ansible_os_family == "RedHat"

View File

@ -13,5 +13,3 @@
masked: yes
state: stopped
ignore_errors: yes
- include: clean_netconfigs.yml

View File

@ -0,0 +1,4 @@
- name: restart frr
service:
name: frr
state: restarted

View File

@ -56,6 +56,7 @@
template:
src: bgpd_rr.conf.j2
dest: /etc/frr/bgpd.conf
notify: restart frr
when:
- vars['ansible_' + frr_iface].ipv4.address is defined
- vars['ansible_' + frr_iface].ipv4.address in rr_servers
@ -64,6 +65,7 @@
template:
src: bgpd.conf.j2
dest: /etc/frr/bgpd.conf
notify: restart frr
when:
- vars['ansible_' + frr_iface].ipv4.address is defined
- not vars['ansible_' + frr_iface].ipv4.address in rr_servers
@ -72,27 +74,31 @@
template:
src: zebra.conf.j2
dest: /etc/frr/zebra.conf
notify: restart frr
when: frr_zebra == true
- name: Configure Staticd
template:
src: staticd.conf.j2
dest: /etc/frr/staticd.conf
notify: restart frr
when: frr_zebra == true
- name: Remove frr.conf
file:
path: /etc/frr/frr.conf
state: absent
notify: restart frr
- name: Enable BGP daemon
replace:
path: /etc/frr/daemons
regexp: '^bgpd=no'
replace: 'bgpd=yes'
notify: restart frr
- name: Start FRR service
service:
name: frr
state: restarted
state: started
enabled: yes
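With the new `restart frr` handler above, each `notify: restart frr` queues a restart only when its template or file task actually reports a change, and Ansible then runs the handler at most once, after the tasks; the service task itself now only ensures FRR is started and enabled. A minimal sketch of the pattern, condensed from the hunks above:

# tasks (sketch)
- name: Configure BGP daemon
  template:
    src: bgpd.conf.j2
    dest: /etc/frr/bgpd.conf
  notify: restart frr          # queued only if the rendered file changed

- name: Start FRR service
  service:
    name: frr
    state: started             # no unconditional restart any more
    enabled: yes

# handlers/main.yml
- name: restart frr
  service:
    name: frr
    state: restarted           # runs once, at the end of the play, only if notified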

View File

@ -9,10 +9,8 @@
reload: yes
- block:
- include: redhat.yml
when: ansible_os_family == "RedHat"
- include: debian.yml
- include_tasks: debian.yml
when: ansible_os_family == "Debian"
when: iptables_manage_persistent
- include: rules.yml
- include_tasks: rules.yml

View File

@ -1,27 +0,0 @@
---
- name: Uninstall firewalld
package:
name: firewalld
state: absent
- name: Install iptables-services
package:
name: iptables-services
state: present
- name: Create persistent rules configurations
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
with_items:
- { src: 'rules.v4.j2', dest: '/etc/sysconfig/iptables' }
- { src: 'rules.v6.j2', dest: '/etc/sysconfig/ip6tables' }
- name: Enable iptables services
service:
name: iptables
enabled: true
with_items:
- iptables
- ip6tables

View File

@ -2,16 +2,8 @@
###############################################################################
# Valid defaults
###############################################################################
# Whether to use the ev package for kvm
opennebula_node_kvm_use_ev: False
# Enable nested KVM virtualization
opennebula_node_kvm_param_nested: False
# Enable KVM configuration
opennebula_node_kvm_manage_kvm: True
# SELinux booleans to configure
opennebula_node_selinux_booleans:
- { name: 'virt_use_nfs', state: 'yes' }

View File

@ -1,81 +0,0 @@
---
- block:
- name: Update centos-release package
yum: name=centos-release state=latest
register: yum_result
until: yum_result is succeeded
retries: 3
delay: 10
- name: Install qemu-ev repository
yum: name=centos-release-qemu-ev state=latest
register: yum_result
until: yum_result is succeeded
retries: 3
delay: 10
- name: Install qemu-kvm-ev
yum: name=qemu-kvm-ev state=present
register: yum_result
until: yum_result is succeeded
retries: 3
delay: 10
when:
- ansible_distribution == "CentOS"
- ansible_distribution_major_version == "7"
- opennebula_node_kvm_use_ev
- block:
- name: Enable RHEV repo
command: subscription-manager repos --enable {{ opennebula_node_kvm_rhev_repo }}
register: sm_result
until: sm_result is succeeded
retries: 3
delay: 10
- name: Instal RHEV
yum: name=qemu-kvm-rhev state=latest
until: yum_result is succeeded
retries: 3
delay: 10
when:
- ansible_distribution == "RedHat"
- ansible_distribution_major_version == "7"
- opennebula_node_kvm_use_ev
- name: Install qemu-kvm (base)
yum: name=qemu-kvm state=present
register: yum_result
until: yum_result is succeeded
retries: 3
delay: 10
when: not opennebula_node_kvm_use_ev or ansible_distribution_major_version != "7"
# Update to a fixed mkswap
# * Wed Jan 31 2018 Karel Zak <kzak@redhat.com> 2.23.2-51
# - fix #1538545 - SELinux error creating swap file
- name: Update util-linux
package:
name: util-linux
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
- name: Install libgcrypt
package:
name: libgcrypt
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
when:
- ansible_os_family == "RedHat"
- ansible_distribution_major_version == "8"
- name: Install OpenNebula node KVM package
yum: name=opennebula-node-kvm state=latest
register: yum_result
until: yum_result is succeeded
retries: 3
delay: 10

View File

@ -1,22 +0,0 @@
---
- name: Install OpenNebula node package
apt:
name: opennebula-node
state: latest
register: apt_result
until: apt_result is success
retries: 3
delay: 10
# RHEL / CentOS compatibility
- name: Create /usr/libexec
file:
path: '/usr/libexec'
state: directory
- name: Create symlink /usr/libexec/qemu-kvm
file:
src: /usr/bin/qemu-system-x86_64
dest: /usr/libexec/qemu-kvm
state: link

View File

@ -1,28 +0,0 @@
---
- name: Configure KVM module
template:
src: kvm.conf.j2
dest: "{{ '/etc/modprobe.d/kvm.conf' if ansible_os_family == 'RedHat' else '/etc/modprobe.d/qemu-system-x86.conf' }}"
register: template
- name: Unload KVM modules for reconfiguration
modprobe:
name: "{{ item }}"
state: absent
with_items:
- kvm_intel
- kvm_amd
register: modprobe_result
until: modprobe_result is success
retries: 3
delay: 10
when: template.changed
- name: Load KVM on Intel
modprobe: name=kvm_intel state=present
when: ansible_processor | intersect(["GenuineIntel"])
- name: Load KVM on AMD
modprobe: name=kvm_amd state=present
when: ansible_processor | intersect(["AuthenticAMD"])

View File

@ -1,7 +0,0 @@
---
- name: Restart Libvirtd (Ubuntu from 18.10)
service:
name: libvirtd
state: restarted
enabled: yes

View File

@ -1,28 +1,6 @@
---
- name: Restart Libvirtd (RedHat)
- name: Restart Libvirtd
service:
name: libvirtd
state: restarted
enabled: yes
when: ansible_os_family == "RedHat"
- name: Restart Libvirtd (Debian)
service:
name: libvirtd
state: restarted
enabled: yes
when: ansible_distribution == "Debian"
- name: Restart Libvirt-bin (Ubuntu up to 18.04)
service:
name: libvirt-bin
state: restarted
enabled: yes
when: ansible_distribution == "Ubuntu" and ansible_distribution_version|float < 18.10
- name: Restart Libvirtd (Ubuntu from 18.10)
service:
name: libvirtd
state: restarted
enabled: yes
when: ansible_distribution == "Ubuntu" and ansible_distribution_version|float >= 18.10
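The remaining supported platforms all expose the daemon under the single `libvirtd` service name (the `libvirt-bin` name only existed on Ubuntu up to 18.04), so the four distro-specific handlers collapse into one; reconstructed from the hunk above, the whole handler file after this change is simply:

---
- name: Restart Libvirtd
  service:
    name: libvirtd
    state: restarted
    enabled: yes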

View File

@ -1,18 +0,0 @@
---
- name: Install OpenNebula node-kvm package
apt:
name: opennebula-node-kvm
state: fixed
retries: 3
delay: 10
- include: kvm.yaml
when: opennebula_node_kvm_manage_kvm == True
- include: libvirt.yaml
- include: security.yaml
- name: "create datastore for ONE_LOCATION"
file: path={{ one_location }}/var/datastores owner=oneadmin group=oneadmin state=directory
when: one_location is defined

View File

@ -1,16 +1,18 @@
---
- name: Install OpenNebula node package
apt:
name: opennebula-node
state: latest
register: apt_result
until: apt_result is success
retries: 3
delay: 10
- include: centos.yml
when: ansible_os_family == "RedHat"
- include: debian.yml
when: ansible_os_family == "Debian"
- include: kvm.yml
- include_tasks: kvm.yml
when: opennebula_node_kvm_manage_kvm == True
- include: libvirt.yml
- include: security.yml
- include_tasks: libvirt.yml
- include_tasks: security.yml
- name: "create datastore for ONE_LOCATION"
file: path={{ one_location }}/var/datastores owner=oneadmin group=oneadmin state=directory

View File

@ -1,39 +0,0 @@
---
# SELinux
- block:
- name: Set SELinux booleans
seboolean:
name: '{{ item.name }}'
state: '{{ item.state }}'
persistent: yes
with_items: '{{ opennebula_node_selinux_booleans }}'
when:
- ansible_selinux.status == 'enabled'
- ansible_os_family == "RedHat"
# AppArmor
- name: Check if AppArmor configuration exists
stat: path=/etc/apparmor.d/abstractions/libvirt-qemu
register: apparmor_libvirt_qemu
- block:
- name: Add permissions to apparmor
lineinfile:
dest: /etc/apparmor.d/abstractions/libvirt-qemu
line: "{{ item }}"
with_items:
- " /srv/** rwk,"
- " /var/lib/one/datastores/** rwk,"
- name: Reload apparmor
service:
name: apparmor
state: reloaded
register: service_result
failed_when:
- service_result is failed
- "'find' not in service_result.msg and 'found' not in service_result.msg"
when:
- ansible_os_family == 'Debian'
- apparmor_libvirt_qemu.stat.exists == True

View File

@ -1,18 +1,8 @@
---
- name: Check if AppArmor configuration exists
stat: path=/etc/apparmor.d/abstractions/libvirt-qemu
register: apparmor_libvirt_qemu
# SELinux
- block:
- name: Set SELinux booleans
seboolean:
name: '{{ item.name }}'
state: '{{ item.state }}'
persistent: yes
with_items: '{{ opennebula_node_selinux_booleans }}'
when:
- ansible_selinux.status == 'enabled'
- ansible_os_family == "RedHat"
# AppArmor
- block:
- name: Add permissions to apparmor
lineinfile:
@ -27,5 +17,8 @@
name: apparmor
state: reloaded
register: service_result
failed_when: "service_result is failed and ('find' not in service_result.msg and 'found' not in service_result.msg)"
when: ansible_distribution == "Ubuntu"
failed_when:
- service_result is failed
- "'find' not in service_result.msg and 'found' not in service_result.msg"
when:
- apparmor_libvirt_qemu.stat.exists == True

View File

@ -1,8 +0,0 @@
---
###############################################################################
# Valid defaults
###############################################################################
# SELinux booleans to configure
opennebula_node_selinux_booleans:
- { name: 'virt_use_nfs', state: 'yes' }

View File

@ -1,29 +0,0 @@
---
- name: Update util-linux
package:
name: util-linux
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
- name: Install libgcrypt
package:
name: libgcrypt
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
when:
- ansible_os_family == "RedHat"
- ansible_distribution_major_version == "8"
- name: Install OpenNebula node LXC package
yum: name=opennebula-node-lxc state=latest
register: yum_result
until: yum_result is succeeded
retries: 3
delay: 10

View File

@ -1,10 +0,0 @@
---
- name: Install OpenNebula node LXC package
apt:
name: opennebula-node-lxc
state: latest
register: apt_result
until: apt_result is success
retries: 3
delay: 10

View File

@ -1,14 +1,4 @@
---
# Workaround for non-unified cgroups v2 not supported by Firecracker yet
# https://github.com/firecracker-microvm/firecracker/issues/841 ?
- name: Reconfigure cgroups
include_role:
name: cgroups1
when: |
(ansible_distribution == 'Fedora') or
(ansible_distribution == 'Debian' and ansible_distribution_version is version_compare('11', '>='))
- name: Install OpenNebula node LXC package
package:
name: opennebula-node-lxc

View File

@ -1,31 +0,0 @@
---
# SELinux
- block:
- name: Set SELinux booleans
seboolean:
name: '{{ item.name }}'
state: '{{ item.state }}'
persistent: yes
with_items: '{{ opennebula_node_selinux_booleans }}'
when:
- ansible_selinux.status == 'enabled'
- ansible_os_family == "RedHat"
# AppArmor
- block:
- name: Add permissions to apparmor
lineinfile:
dest: /etc/apparmor.d/abstractions/libvirt-qemu
line: "{{ item }}"
with_items:
- " /srv/** rwk,"
- " /var/lib/one/datastores/** rwk,"
- name: Reload apparmor
service:
name: apparmor
state: reloaded
register: service_result
failed_when: "service_result is failed and ('find' not in service_result.msg and 'found' not in service_result.msg)"
when: ansible_distribution == "Ubuntu"

View File

@ -9,9 +9,3 @@ opennebula_repository_version: '6.9'
# Repository of the OpenNebula packages
opennebula_repository_base: 'https://downloads.opennebula.io/repo/{{ opennebula_repository_version }}'
# Enable GPG check for the packages
opennebula_repository_gpgcheck: yes
# Enable GPG check for the repos (RHEL/CentOS only)
opennebula_repository_repo_gpgcheck: yes

View File

@ -1,13 +0,0 @@
---
- name: Clean APT metadata
command: apt-get clean
when: ansible_os_family == "Debian"
listen: "clean repository metadata"
- name: Clean YUM metadata
command: yum clean metadata
args:
warn: no
when: ansible_os_family == "RedHat"
listen: "clean repository metadata"

View File

@ -1,39 +0,0 @@
---
- name: Install EPEL in CentOS
yum: name=epel-release state=installed
when: ansible_distribution == "CentOS"
- name: Add repository GPG key for EPEL
rpm_key:
key: https://download-ib01.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
when: ansible_distribution == "RedHat"
- name: Install EPEL in RHEL
yum_repository:
name: epel
description: Extra Packages for Enterprise Linux 7
mirrorlist: https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=x86_64
failovermethod: priority
gpgcheck: yes
gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
when: ansible_distribution == "RedHat"
- name: Add repository GPG key for RPM
rpm_key: key=https://downloads.opennebula.io/repo/repo2.key
when: opennebula_repository_gpgcheck | bool
- name: Add OpenNebula repository
yum_repository:
name: opennebula
description: OpenNebula packages
baseurl: "{{ opennebula_repository_base }}/CentOS/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
gpgkey: https://downloads.opennebula.io/repo/repo2.key
gpgcheck: "{{ opennebula_repository_gpgcheck }}"
repo_gpgcheck: "{{ opennebula_repository_repo_gpgcheck }}"
notify: "clean repository metadata"
- name: Add repository GPG key for YUM
command: yum -q makecache -y --disablerepo=* --enablerepo=opennebula
args:
creates: "/var/lib/yum/repos/{{ ansible_architecture }}/{{ ansible_distribution_major_version}}/opennebula/gpgdir/pubring.gpg"
when: opennebula_repository_repo_gpgcheck | bool

View File

@ -1,9 +0,0 @@
---
- include: centos.yml
when: ansible_os_family == "RedHat"
- include: debian.yml
when: ansible_os_family == "Debian"
- name: Force all notified handler to run now
meta: flush_handlers

View File

@ -1,5 +1,4 @@
---
- name: Install apt-transport-https
apt:
name: apt-transport-https

View File

@ -8,11 +8,11 @@
mode: 0700
state: directory
- include: sshd.yml
- include_tasks: sshd.yml
when: opennebula_ssh_manage_sshd == True
- include: deploy_local.yml
- include_tasks: deploy_local.yml
when: opennebula_ssh_deploy_local == True
- include: root_authkeys.yml
- include_tasks: root_authkeys.yml
when: opennebula_ssh_keys_import_root_keys == True

View File

@ -1,17 +0,0 @@
# python
## Description
Installs default Python for Debians and Red Hats.
## Requirements
No special requirements.
## Variables
None
## Todo list
None

View File

@ -1,16 +0,0 @@
---
- name: Validate group_vars parameters
assert:
that:
- "{{ sys_ds_ids }} is defined"
- "{{ first_host }} is not none"
- name: Update replica host
local_action:
module: shell
cmd: onedatastore update {{ item }}
environment:
EDITOR: "sed -i -e 's/REPLICA_HOST=.*/REPLICA_HOST={{ first_host }}/'"
become: false
with_items: "{{ sys_ds_ids }}"

View File

@ -39,7 +39,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.8
ver_max: 2.13
ver_max: 2.17
playbook:
- vultr_metal

View File

@ -41,7 +41,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.12
ver_max: 2.13
ver_max: 2.17
playbook:
- aws
- ceph_hci/site

View File

@ -41,7 +41,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.8
ver_max: 2.13
ver_max: 2.17
playbook:
- aws

View File

@ -39,7 +39,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.8
ver_max: 2.13
ver_max: 2.17
playbook:
- digitalocean

View File

@ -39,7 +39,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.8
ver_max: 2.13
ver_max: 2.17
playbook:
- google

View File

@ -39,7 +39,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.8
ver_max: 2.13
ver_max: 2.17
playbook:
- vultr

View File

@ -41,7 +41,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.12
ver_max: 2.13
ver_max: 2.17
playbook:
- aws
- ceph_hci/site

View File

@ -41,7 +41,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.8
ver_max: 2.13
ver_max: 2.17
playbook:
- aws

View File

@ -40,7 +40,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.8
ver_max: 2.13
ver_max: 2.17
playbook:
- equinix

View File

@ -39,7 +39,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.12
ver_max: 2.13
ver_max: 2.17
playbook:
- aws
- ceph_hci/site

View File

@ -39,7 +39,7 @@ extends:
#-------------------------------------------------------------------------------
ansible:
ver_min: 2.8
ver_max: 2.13
ver_max: 2.17
playbook:
- onprem

View File

@ -44,8 +44,8 @@ CONFIG_DEFAULTS = {
ANSIBLE_ARGS = "--ssh-common-args='-o UserKnownHostsFile=/dev/null'"
ANSIBLE_INVENTORY_DEFAULT = 'default'
CEPH_ANSIBLE_URL = 'https://github.com/ceph/ceph-ansible.git'
CEPH_ANSIBLE_BRANCH = 'stable-7.0'
CEPH_ANSIBLE_DIR = '/var/lib/one/.ansible/ceph-7.0'
CEPH_ANSIBLE_BRANCH = 'stable-8.0'
CEPH_ANSIBLE_DIR = '/var/lib/one/.ansible/ceph-8.0'
module OneProvision