
F #5620: Add Ceph HCI to OneProvision (#1918)

This commit is contained in:
Jan Orel 2022-04-09 14:40:19 +02:00 committed by GitHub
parent 5f6a5cb71c
commit 72f1e5dee5
74 changed files with 1928 additions and 187 deletions

View File

@ -2,10 +2,12 @@
retry_files_enabled = False
deprecation_warnings = False
display_skipped_hosts = False
callback_whitelist =
stdout_callback = skippy
stdout_callback = yaml
host_key_checking = False
roles_path = <%= roles %>
roles_path = <%= ans_loc %>/roles:/var/lib/one/.ansible/roles:/var/lib/one/.ansible/ceph-6.0/roles
library = /var/lib/one/.ansible/ceph-6.0/library
action_plugins = /var/lib/one/.ansible/ceph-6.0/plugins/actions
module_utils = /var/lib/one/.ansible/ceph-6.0/module_utils
[privilege_escalation]
become = yes

View File

@ -8,14 +8,13 @@
- { role: opennebula-node-lxc, when: oneprovision_hypervisor == 'lxc' }
- opennebula-ssh
- role: iptables
iptables_base_rules_interface: "{{ ansible_default_ipv4.interface }}"
iptables_base_rules_services:
- { protocol: 'tcp', port: 22 }
# TCP/179 bgpd (TODO: only needed on Route Reflector(s))
- { protocol: 'tcp', port: 179 }
# UDP/8472 default VXLAN port on Linux (UDP/4789 IANA default)
- { protocol: 'udp', port: 8472 }
- update-replica
- role: frr
frr_iface: '{{ oneprovision_private_phydev }}'
# Use /16 for the internal management network address
frr_prefix_length: 16

View File

@ -0,0 +1,26 @@
---
# Ceph basics
ceph_origin: repository
ceph_repository: community
ceph_rhcs_version: 6
ceph_stable_release: pacific
ceph_stable_release_num: 16
dashboard_enabled: false
configure_firewall: false
# OSD
osd_objectstore: bluestore
osd_scenario: collocated
is_hci: true
# OpenNebula specifics
ceph_opennebula_mon_pools:
- name: one
pg_num: 128
type: replicated
set:
- name: size
value: 2
# ceph default vars
<%= vars %>
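The `<%= vars %>` placeholder above is filled with the provision's `ceph_vars` rendered as YAML (see the ERB handling in ansible.rb later in this commit). A sketch of how the rendered tail of this file could look for the AWS virtual HCI template; the disk size comes from the input default and the secret UUID is an assumed value:

```yaml
# illustrative rendering of the vars placeholder (values are assumptions)
ceph_hci: true
setup_eth1: true
devices: ["/dev/xvdb"]
monitor_interface: "eth1"
public_network: "10.1.0.0/16"
ceph_disk_size: "100"
ceph_secret_uuid: "9b1deb4d-3b7d-4bad-9bdd-2b0d7b3dcb6d"  # generated via SecureRandom.uuid
```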

View File

@ -0,0 +1,279 @@
---
- hosts: all
pre_tasks:
# eth0_ip = 10.0.X.Y, eth1_ip = 10.1.X.Y
- set_fact:
eth1_ip: "{{ ansible_default_ipv4.address.split('.')[0] }}.{{ ansible_default_ipv4.address.split('.')[1]|int + 1 }}.{{ ansible_default_ipv4.address.split('.')[2] }}.{{ ansible_default_ipv4.address.split('.')[3] }}"
roles:
- role: stackhpc.systemd_networkd
systemd_networkd_apply_config: true
systemd_networkd_network:
eth1:
- Match:
- Name: "{{ monitor_interface }}"
- Network:
- DHCP: "no"
- Address: "{{ (eth1_ip + '/' + ansible_default_ipv4.netmask) | ipaddr('host/prefix') }}"
when: setup_eth1 | default('false') | bool
- hosts:
- mons
- osds
- clients
- mgrs
gather_facts: false
any_errors_fatal: true
become: true
tags: always
vars:
delegate_facts_host: True
pre_tasks:
- name: gather facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when:
- not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- name: gather and delegate facts
setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
delegate_facts: True
with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}"
run_once: true
when: delegate_facts_host | bool
tasks:
- import_role:
name: ceph-slice
- import_role:
name: ceph-defaults
- import_role:
name: ceph-facts
- import_role:
name: ceph-opennebula-facts
- import_role:
name: ceph-handler
- import_role:
name: ceph-validate
- import_role:
name: ceph-infra
- import_role:
name: ceph-common
- hosts: mons
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph monitor install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_mon:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-mon
- import_role:
name: ceph-mgr
when: groups.get(mgr_group_name, []) | length == 0
- import_role:
name: ceph-opennebula-mon
post_tasks:
- name: set ceph monitor install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_mon:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: mgrs
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph manager install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_mgr:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-mgr
post_tasks:
- name: set ceph manager install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_mgr:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: osds
gather_facts: false
become: True
any_errors_fatal: true
pre_tasks:
- name: set ceph osd install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_osd:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-osd
- import_role:
name: ceph-opennebula-osd
vars:
ceph_opennebula_osd_libvirt_enabled: "{% if oneprovision_hypervisor == 'lxc' %}False{% else %}True{% endif %}"
- name: Install rbd-nbd ceph client
package:
name: rbd-nbd
state: latest
update_cache: yes
when: oneprovision_hypervisor == 'lxc'
post_tasks:
- name: set ceph osd install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_osd:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: clients
gather_facts: false
become: True
any_errors_fatal: true
tags: 'ceph_client'
pre_tasks:
- name: set ceph client install 'In Progress'
run_once: true
set_stats:
data:
installer_phase_ceph_client:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- import_role:
name: ceph-facts
tags: ['ceph_update_config']
- import_role:
name: ceph-handler
tags: ['ceph_update_config']
- import_role:
name: ceph-config
tags: ['ceph_update_config']
- import_role:
name: ceph-client
- import_role:
name: ceph-opennebula-osd
vars:
ceph_opennebula_osd_libvirt_enabled: "{% if oneprovision_hypervisor == 'lxc' %}False{% else %}True{% endif %}"
post_tasks:
- name: set ceph client install 'Complete'
run_once: true
set_stats:
data:
installer_phase_ceph_client:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
- hosts: mons
gather_facts: false
become: True
any_errors_fatal: true
tasks:
- import_role:
name: ceph-defaults
- name: get ceph status from the first monitor
command: ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: "show ceph status for cluster {{ cluster }}"
debug:
msg: "{{ ceph_status.stdout_lines }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when:
- ceph_status is not skipped
- ceph_status is successful
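This playbook expects the inventory to provide `mons`, `mgrs`, `osds` and `clients` groups; OneProvision generates such an inventory automatically (see `generate_ceph_ansible_configs` later in this commit). An illustrative `ceph_inventory.yml` for three full nodes and one client host, with hypothetical hostnames:

```yaml
mons:
  hosts:
    edge-vhost0:
    edge-vhost1:
    edge-vhost2:
mgrs:
  hosts:
    edge-vhost0:
    edge-vhost1:
    edge-vhost2:
osds:
  hosts:
    edge-vhost0:
    edge-vhost1:
    edge-vhost2:
clients:
  hosts:
    edge-vhost3:
  vars:
    copy_admin_key: true
```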

View File

@ -13,7 +13,6 @@
- { protocol: 'tcp', port: 179 }
# UDP/8472 default VXLAN port on Linux (UDP/4789 IANA default)
- { protocol: 'udp', port: 8472 }
- update-replica
- role: frr
frr_iface: 'eth1'
# Use /20 for the internal management network address

View File

@ -14,7 +14,6 @@
- { protocol: 'tcp', port: 179 }
# UDP/8472 default VXLAN port on Linux (UDP/4789 IANA default)
- { protocol: 'udp', port: 8472 }
- update-replica
- role: frr
#bond0_0 is attached to the project private network
frr_iface: 'bond0_0'

View File

@ -13,7 +13,6 @@
- { protocol: 'tcp', port: 179 }
# UDP/8472 default VXLAN port on Linux (UDP/4789 IANA default)
- { protocol: 'udp', port: 8472 }
- update-replica
- role: frr
frr_iface: 'ens4'
# Use /16 for the internal management network address

View File

@ -0,0 +1,2 @@
# enables and configure systemd-networkd
- src: stackhpc.systemd_networkd

View File

@ -7,7 +7,6 @@
- { role: opennebula-node-firecracker, when: oneprovision_hypervisor == 'firecracker' }
- { role: opennebula-node-lxc, when: oneprovision_hypervisor == 'lxc' }
- opennebula-ssh
- update-replica
- role: frr
#bond0_0 is attached to the project private network
frr_iface: '{{ oneprovision_private_phydev }}'

View File

@ -0,0 +1,14 @@
---
- name: get oneadmin key
shell: ceph auth get-key client.oneadmin
register: ceph_oneadmin_key
delegate_to: "{{ running_mon }}"
when:
- running_mon is defined
- name: get oneadmin keyring
shell: ceph auth export client.oneadmin
register: ceph_oneadmin_keyring
delegate_to: "{{ running_mon }}"
when:
- running_mon is defined

View File

@ -0,0 +1,19 @@
# ceph-opennebula-mon
## Description
Creates a new ceph pool for OpenNebula, generates the auth keys and applies the [crush tunables][1].
## Requirements
This role should be applied to nodes that already have the official Ceph mon role applied.
## Variables
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
## Todo list
None
[1]: http://docs.ceph.com/docs/master/rados/operations/crush-map/#tunables

View File

@ -0,0 +1,16 @@
---
###############################################################################
# Valid defaults
###############################################################################
# List of Ceph pools to create, each specified as a hash with parameters:
# - name: Pool name
# - pg_num: Number of placement groups (http://docs.ceph.com/docs/firefly/rados/operations/placement-groups/)
# - type: Pool type 'replicated' (default) or 'erasure'
ceph_opennebula_mon_pools:
- { name: one, pg_num: 128 }
# Crush tunables to apply:
# http://docs.ceph.com/docs/master/rados/operations/crush-map/#tunables
ceph_opennebula_mon_tunables: default
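Besides the replicated default above, the pool tasks in this role also handle erasure-coded pools (via `ec_k`/`ec_m`) and optional per-pool settings applied with `ceph osd pool set`. A hedged example combining both, with illustrative pool names and values:

```yaml
ceph_opennebula_mon_pools:
  - name: one
    pg_num: 128
    type: replicated
    set:
      - { name: size, value: 2 }
  # hypothetical erasure-coded pool
  - name: one-ec
    pg_num: 64
    type: erasure
    ec_k: 2
    ec_m: 1
```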

View File

@ -0,0 +1,19 @@
---
- name: Set Ceph auth command init
set_fact: ceph_opennebula_mon_pools_auth=[]
- name: Set Ceph auth command pools
set_fact: ceph_opennebula_mon_pools_auth="{{ ceph_opennebula_mon_pools_auth }} + [ 'profile rbd pool={{ item.name }}' ]"
with_items: "{{ ceph_opennebula_mon_pools }}"
- name: Create oneadmin keys for Ceph Luminous+
command: ceph auth get-or-create client.oneadmin mon 'profile rbd' osd '{{ ceph_opennebula_mon_pools_auth | join(",") }}'
changed_when: false
- name: get oneadmin key
shell: ceph auth get-key client.oneadmin
register: ceph_oneadmin_key
- name: get oneadmin keyring
shell: ceph auth export client.oneadmin
register: ceph_oneadmin_keyring

View File

@ -0,0 +1,9 @@
---
- name: open /etc/ceph
file: path=/etc/ceph mode=0755
- include: pool.yml
- include: auth.yml
- include: tunables.yml

View File

@ -0,0 +1,29 @@
---
# Create replicated pools
- name: Create Ceph replicated pools
command: ceph osd pool create {{ item.name }} {{ item.pg_num }} {{ item.type | default('replicated') }}
with_items: "{{ ceph_opennebula_mon_pools }}"
when: ( item.type | default('replicated') ) == 'replicated'
changed_when: false
# Create EC pools
- name: Create Ceph EC profiles
command: ceph osd erasure-code-profile set ec-profile-{{ item.name }} k={{ item.ec_k }} m={{ item.ec_m }}
with_items: "{{ ceph_opennebula_mon_pools }}"
when: ( item.type | default('replicated') ) == 'erasure'
changed_when: false
- name: Create Ceph EC pools
command: ceph osd pool create {{ item.name }} {{ item.pg_num }} {{ item.pg_num }} {{ item.type | default('replicated') }} ec-profile-{{ item.name }}
with_items: "{{ ceph_opennebula_mon_pools }}"
when: ( item.type | default('replicated') ) == 'erasure'
changed_when: false
# Configure pools
- name: Set Ceph pool params
command: ceph osd pool set {{ item.0.name }} {{ item.1.name }} {{ item.1.value }}
with_subelements:
- "{{ ceph_opennebula_mon_pools }}"
- set
- { skip_missing: true }

View File

@ -0,0 +1,3 @@
---
- name: Set Ceph OSD CRUSH tunables to {{ ceph_opennebula_mon_tunables }}
command: ceph osd crush tunables {{ ceph_opennebula_mon_tunables }}

View File

@ -0,0 +1,17 @@
# ceph-opennebula-osd
## Description
To be applied on hypervisor nodes running KVM. Creates the oneadmin keyring and defines the libvirt secret.
## Requirements
This role should be applied to nodes that already have the official Ceph osd role applied.
## Variables
* `ceph_secret_uuid` (mandatory): UUID of the Ceph secret to be registered in libvirt on all KVM nodes.
## Todo list
None

View File

@ -0,0 +1,7 @@
---
###############################################################################
# Valid defaults
###############################################################################
ceph_opennebula_osd_libvirt_enabled: True

View File

@ -0,0 +1,11 @@
---
- name: create secret.xml
template: src=secret.xml.j2 dest=/tmp/secret.xml
- name: define secret
command: virsh -c qemu:///system secret-define /tmp/secret.xml
- name: register libvirt secret key
command: virsh -c qemu:///system secret-set-value --secret {{ ceph_secret_uuid }} --base64 {{ ceph_oneadmin_key.stdout }}
changed_when: false

View File

@ -0,0 +1,48 @@
---
- name: open /etc/ceph
file: path=/etc/ceph mode=0755
- name: create oneadmin keyring
shell: echo "{{ ceph_oneadmin_keyring.stdout }}" > /etc/ceph/ceph.client.oneadmin.keyring
- name: configure rbd clients directories
file:
path: "{{ item }}"
state: directory
owner: oneadmin
group: oneadmin
mode: 0775
with_items:
- /var/log/rbd-clients/
- /var/run/ceph/rbd-clients
- name: Add rbd default features
blockinfile:
dest: /etc/ceph/ceph.conf
block: |
[client]
rbd default features = 1
# configure libvirt secretes
- name: Include libvirt configuration tasks
include: libvirt.yml
when: ceph_opennebula_osd_libvirt_enabled
# stop and disable docker on hypervisors
- name: Check /etc/docker exists
stat:
path: /etc/docker
register: docker_result
- block:
- name: stop and disable Docker
service:
name: docker
state: stopped
enabled: false
- name: Allow forwarding
shell: iptables -I DOCKER-USER -j ACCEPT
ignore_errors: yes
when: docker_result.stat.exists

View File

@ -0,0 +1,6 @@
<secret ephemeral='no' private='no'>
<uuid>{{ ceph_secret_uuid }}</uuid>
<usage type='ceph'>
<name>client.oneadmin secret</name>
</usage>
</secret>

View File

@ -0,0 +1,2 @@
- name: reload systemd
command: systemctl daemon-reload

View File

@ -0,0 +1,51 @@
---
- name: Create ceph slice
copy:
dest: /usr/lib/systemd/system/ceph.slice
content: |
[Unit]
Description=Ceph slice
Documentation=man:systemd.special(7)
DefaultDependencies=no
Before=slices.target
Requires=system.slice
After=system.slice
- name: Ceph ceph.slice setting override dir
file:
path: /etc/systemd/system/ceph.slice.d/
state: directory
- name: Ceph ceph.slice setting override
copy:
dest: /etc/systemd/system/ceph.slice.d/override.conf
content: |
[Slice]
MemoryAccounting=true
CPUAccounting=true
MemoryHigh=
AllowedCPUs=
- name: Ceph services override dirs
file:
path: /etc/systemd/system/{{ item }}@.service.d/
state: directory
with_items:
- ceph-mds
- ceph-mgr
- ceph-mon
- ceph-osd
- name: Ceph services overrides
copy:
dest: /etc/systemd/system/{{ item }}@.service.d/override.conf
content: |
[Service]
Slice=ceph.slice
with_items:
- ceph-mds
- ceph-mgr
- ceph-mon
- ceph-osd
notify:
- reload systemd

View File

@ -7,7 +7,7 @@ frr_frrver: 'frr-7'
frr_rr_num: 1
# Network interface name to route VXLAN traffic
frr_iface: 'eth0'
frr_iface: "{{ ansible_default_ipv4.interface }}"
# The AS number used for BGP
frr_as: 65000

View File

@ -14,7 +14,6 @@
- { protocol: 'tcp', port: 179 }
# UDP/8472 default VXLAN port on Linux (UDP/4789 IANA default)
- { protocol: 'udp', port: 8472 }
- update-replica
- role: frr
frr_iface: 'enp6s0'
# Use /20 for the internal management network address

View File

@ -14,4 +14,3 @@
- { protocol: 'tcp', port: 179 }
# UDP/8472 default VXLAN port on Linux (UDP/4789 IANA default)
- { protocol: 'udp', port: 8472 }
- update-replica

View File

@ -37,4 +37,4 @@ datastores:
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"
replica_host: "first-host"

View File

@ -0,0 +1,96 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# This is the canonical description file for a cluster build with 'AWS'
# resources using the KVM hypervisor.
# ------------------------------------------------------------------------------
name: 'aws-hci-cluster'
description: 'AWS hyper-convergent virtual Ceph cluster'
extends:
- common.d/defaults.yml
- common.d/resources.yml
- common.d/hosts-hci.yml
- aws.d/defaults.yml
- aws.d/datastores-hci.yml
- aws.d/fireedge.yml
- aws.d/inputs-hci.yml
- aws.d/networks.yml
#-------------------------------------------------------------------------------
# playbook: Ansible playbook used for hosts configuration. Check ansible/aws.yml
# for the specific roles applied.
#-------------------------------------------------------------------------------
playbook:
- aws
- ceph_hci/site
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
defaults:
provision:
provider_name: 'aws'
ami: "${input.aws_ami_image}"
instancetype: "${input.aws_instance_type}"
cloud_init: true
connection:
remote_user: 'ubuntu'
# Defaults ceph options
ceph_vars:
ceph_hci: true
setup_eth1: true
devices: [ "/dev/xvdb" ]
monitor_interface: "eth1"
public_network: "10.1.0.0/16"
ceph_disk_size: "${input.ceph_disk_size}"
#-------------------------------------------------------------------------------
# cluster: Parameters for the OpenNebula cluster. Applies to all the Hosts
#--------------------------------------------------------------------------------
# name: of the cluster
# description: Additional information
# reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#--------------------------------------------------------------------------------
cluster:
name: "${provision}"
description: 'AWS virtual edge cluster'
reserved_cpu: '0'
reserved_mem: '0'
datastores:
- 1
- 2
provision:
cidr: '10.0.0.0/16'
hci_cidr: '10.1.0.0/16'
#-------------------------------------------------------------------------------
# AWS provision parameters.
#-------------------------------------------------------------------------------
# This section is used by provision drivers. DO NOT MODIFY IT
#
# CIDR: Private IP block for the cluster. This value HAS TO MATCH that on
# cluster.
#-------------------------------------------------------------------------------
aws_configuration:
cidr: '10.0.0.0/16'
...

View File

@ -0,0 +1,48 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# datastores: Defines the storage area for the cluster using Ceph
# drivers. It creates the following datastores, using ceph:
# 1. Image datastore, ${cluster_name}-image
# 2. System datastore, ${cluster_name}-system
#
# Configuration/Input attributes:
# - bridge_list: list of ceph cluster nodes (ip addresses)
#-------------------------------------------------------------------------------
datastores:
- name: "${provision}-image"
type: 'image_ds'
ds_mad: 'ceph'
tm_mad: 'ceph'
bridge_list: "ceph-hosts-list"
safe_dirs: "/var/tmp /tmp"
ceph_secret: "ceph-secret"
ceph_user: "oneadmin"
disk_type: "rbd"
- name: "${provision}-system"
type: 'system_ds'
tm_mad: 'ceph'
safe_dirs: "/var/tmp /tmp"
allow_orphans: 'mixed'
bridge_list: "ceph-hosts-list"
ceph_secret: "ceph-secret"
ceph_user: "oneadmin"
disk_type: "rbd"
shared: "yes"

View File

@ -36,4 +36,4 @@ datastores:
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"
replica_host: "first-host"

View File

@ -20,6 +20,5 @@
#--------------------------------------------------------------------------------
defaults:
configuration:
configuration: {}
# Select the physical device for private network (VXLAN)
oneprovision_private_phydev: "${input.private_phydev}"

View File

@ -0,0 +1,66 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
inputs:
- name: 'number_hosts'
type: text
description: 'Total number of AWS instances to create'
default: '1'
- name: 'number_ceph_full_hosts'
type: text
description: 'Number of instances to install Ceph HCI (osd + mon), usually 3'
default: '1'
- name: 'number_ceph_osd_hosts'
type: text
description: 'Number of instances to install Ceph HCI (osd only)'
default: '1'
- name: 'number_public_ips'
type: text
description: 'Number of public IPs to get'
default: '1'
- name: 'dns'
type: text
description: 'Comma separated list of DNS servers for public network'
default: '1.1.1.1'
- name: 'aws_ami_image'
type: text
description: 'AWS ami image used for host deployments'
default: 'default'
- name: 'aws_instance_type'
type: text
description: 'AWS instance type, use virtual instances'
default: 't2.micro'
- name: 'ceph_disk_size'
type: text
description: 'Disk size of CEPH disk volume, in GB'
default: '100'
- name: 'one_hypervisor'
type: list
description: 'Virtualization technology for the cluster hosts'
default: 'lxc'
options:
- 'qemu'
- 'lxc'
...

View File

@ -48,9 +48,4 @@ inputs:
options:
- 'qemu'
- 'lxc'
- name: 'private_phydev'
type: text
description: 'Physical device to be used for private networking.'
default: 'eth0'
...

View File

@ -31,7 +31,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: 'eth0'
phydev: 'default-ipv4-nic'
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -21,6 +21,7 @@
# ------------------------------------------------------------------------------
name: 'aws-cluster'
description: 'AWS cluster'
extends:
- common.d/defaults.yml

View File

@ -0,0 +1,36 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# hosts: AWS, Digital Ocean or Google servers
# provision:
# - count: Number of servers to create
# - hostname: edge-vhost1, edge-vhost2 .... of the server
#
# You can define specific OpenNebula configuration attributes for all the hosts:
# - reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# - reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#-------------------------------------------------------------------------------
hosts:
- im_mad: "${input.one_hypervisor}"
vm_mad: "${input.one_hypervisor}"
provision:
count: "${input.number_hosts}"
ceph_full_count: "${input.number_ceph_full_hosts}"
ceph_osd_count: "${input.number_ceph_osd_hosts}"
hostname: "edge-vhost${index}"

View File

@ -36,4 +36,4 @@ datastores:
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"
replica_host: "first-host"

View File

@ -31,7 +31,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: 'eth1'
phydev: 'default-ipv4-nic'
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -36,4 +36,4 @@ datastores:
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"
replica_host: "first-host"

View File

@ -31,7 +31,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: 'ens4'
phydev: 'default-ipv4-nic'
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -37,4 +37,4 @@ datastores:
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"
replica_host: "first-host"

View File

@ -32,7 +32,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: 'enp6s0'
phydev: 'default-ipv4-nic'
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -0,0 +1,96 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# This is the canonical description file for a cluster build with 'AWS'
# resources using the KVM hypervisor.
# ------------------------------------------------------------------------------
name: 'aws-hci-cluster'
description: 'AWS hyper-convergent metal Ceph cluster'
extends:
- common.d/defaults.yml
- common.d/resources.yml
- common.d/hosts-hci.yml
- aws.d/defaults.yml
- aws.d/datastores-hci.yml
- aws.d/fireedge.yml
- aws.d/inputs-hci.yml
- aws.d/networks.yml
#-------------------------------------------------------------------------------
# playbook: Ansible playbook used for hosts configuration. Check ansible/aws.yml
# for the specific roles applied.
#-------------------------------------------------------------------------------
playbook:
- aws
- ceph_hci/site
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
defaults:
provision:
provider_name: 'aws'
ami: "${input.aws_ami_image}"
instancetype: "${input.aws_instance_type}"
cloud_init: true
connection:
remote_user: 'ubuntu'
# Defaults ceph options
ceph_vars:
ceph_hci: true
setup_eth1: true
devices: [ "/dev/nvme1n1" ]
monitor_interface: "ens1"
public_network: "10.1.0.0/16"
ceph_disk_size: "${input.ceph_disk_size}"
#-------------------------------------------------------------------------------
# cluster: Parameters for the OpenNebula cluster. Applies to all the Hosts
#--------------------------------------------------------------------------------
# name: of the cluster
# description: Additional information
# reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#--------------------------------------------------------------------------------
cluster:
name: "${provision}"
description: 'AWS edge cluster'
reserved_cpu: '0'
reserved_mem: '0'
datastores:
- 1
- 2
provision:
cidr: '10.0.0.0/16'
hci_cidr: '10.1.0.0/16'
#-------------------------------------------------------------------------------
# AWS provision parameters.
#-------------------------------------------------------------------------------
# This section is used by provision drivers. DO NOT MODIFY IT
#
# CIDR: Private IP block for the cluster. This value HAS TO MATCH that on
# cluster.
#-------------------------------------------------------------------------------
aws_configuration:
cidr: '10.0.0.0/16'
...

View File

@ -0,0 +1,48 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# datastores: Defines the storage area for the cluster using Ceph
# drivers. It creates the following datastores, using Ceph driver:
# 1. Image datastore, ${cluster_name}-image
# 2. System datastore, ${cluster_name}-system
#
# Configuration/Input attributes:
# - bridge_list: list of ceph cluster nodes (ip addresses)
#-------------------------------------------------------------------------------
datastores:
- name: "${provision}-image"
type: 'image_ds'
ds_mad: 'ceph'
tm_mad: 'ceph'
bridge_list: "ceph-hosts-list"
safe_dirs: "/var/tmp /tmp"
ceph_secret: "ceph-secret"
ceph_user: "oneadmin"
disk_type: "rbd"
- name: "${provision}-system"
type: 'system_ds'
tm_mad: 'ceph'
safe_dirs: "/var/tmp /tmp"
allow_orphans: 'mixed'
bridge_list: "ceph-hosts-list"
ceph_secret: "ceph-secret"
ceph_user: "oneadmin"
disk_type: "rbd"
shared: "yes"

View File

@ -37,4 +37,4 @@ datastores:
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"
replica_host: "first-host"

View File

@ -20,6 +20,4 @@
#--------------------------------------------------------------------------------
defaults:
configuration:
# Select the physical device for private network (VXLAN)
oneprovision_private_phydev: "${input.private_phydev}"
configuration: {}

View File

@ -0,0 +1,66 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
inputs:
- name: 'number_hosts'
type: text
description: 'Total number of AWS instances to create'
default: '1'
- name: 'number_ceph_full_hosts'
type: text
description: 'Number of instances to install Ceph HCI (osd + mon), usually 3'
default: '1'
- name: 'number_ceph_osd_hosts'
type: text
description: 'Number of instances to install Ceph HCI (osd only)'
default: '1'
- name: 'number_public_ips'
type: text
description: 'Number of public IPs to get'
default: '1'
- name: 'dns'
type: text
description: 'Comma separated list of DNS servers for public network'
default: '1.1.1.1'
- name: 'aws_ami_image'
type: text
description: 'AWS ami image used for host deployments'
default: 'default'
- name: 'aws_instance_type'
type: text
description: 'AWS instance type, use bare-metal instances'
default: 'c5.metal'
- name: 'ceph_disk_size'
type: text
description: 'Disk size of CEPH disk volume, in GB'
default: '100'
- name: 'one_hypervisor'
type: list
description: 'Virtualization technology for the cluster hosts'
default: 'kvm'
options:
- 'kvm'
- 'lxc'
...

View File

@ -49,9 +49,4 @@ inputs:
- 'kvm'
# - 'firecracker'
- 'lxc'
- name: 'private_phydev'
type: text
description: 'Physical device to be used for private networking.'
default: 'ens785'
...

View File

@ -31,7 +31,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: "${input.private_phydev}"
phydev: "default-ipv4-nic"
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -0,0 +1,37 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# hosts: Equinix or AWS metal servers
# provision:
# - count: Number of servers to create
# - hostname: kvm-host1, kvm-host2 .... of the server
#
# You can define specific OpenNebula configuration attributes for all the hosts:
# - reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# - reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#-------------------------------------------------------------------------------
hosts:
- im_mad: "${input.one_hypervisor}"
vm_mad: "${input.one_hypervisor}"
provision:
count: "${input.number_hosts}"
ceph_full_count: "${input.number_ceph_full_hosts}"
ceph_osd_count: "${input.number_ceph_osd_hosts}"
hostname: "edge-host${index}"
...

View File

@ -37,4 +37,4 @@ datastores:
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"
replica_host: "first-host"

View File

@ -32,7 +32,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: 'bond0'
phydev: 'default-ipv4-nic'
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -0,0 +1,75 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# This is the canonical description file for a cluster build On-Premise
# ------------------------------------------------------------------------------
name: 'onprem-hci-cluster'
description: 'On-premises hyper-convergent Ceph cluster'
extends:
- onprem.d/defaults.yml
- onprem.d/resources.yml
- onprem.d/hosts-hci.yml
- onprem.d/datastores-hci.yml
- onprem.d/fireedge.yml
- onprem.d/inputs-hci.yml
- onprem.d/networks.yml
#-------------------------------------------------------------------------------
# playbook: Ansible playbook used for hosts configuration. Check ansible/aws.yml
# for the specific roles applied.
#-------------------------------------------------------------------------------
playbook:
- onprem
- ceph_hci/site
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
defaults:
provision:
provider_name: 'onprem'
connection:
remote_user: 'root'
ceph_vars:
ceph_hci: true
devices: "${input.ceph_device}"
monitor_interface: "${input.ceph_monitor_interface}"
public_network: "${input.ceph_public_network}"
#-------------------------------------------------------------------------------
# cluster: Parameters for the OpenNebula cluster. Applies to all the Hosts
#--------------------------------------------------------------------------------
# name: of the cluster
# description: Additional information
# reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#--------------------------------------------------------------------------------
cluster:
name: "${provision}"
description: 'On-Premise edge cluster'
reserved_cpu: '0'
reserved_mem: '0'
datastores:
- 1
- 2
...

View File

@ -0,0 +1,48 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# datastores: Defines the storage area for the cluster using Ceph
# drivers. It creates the following datastores, using the Ceph driver:
# 1. Image datastore, ${cluster_name}-image
# 2. System datastore, ${cluster_name}-system
#
# Configuration/Input attributes:
# - bridge_list: list of ceph cluster nodes (ip addresses)
#-------------------------------------------------------------------------------
datastores:
- name: "${provision}-image"
type: 'image_ds'
ds_mad: 'ceph'
tm_mad: 'ceph'
bridge_list: "ceph-hosts-list"
safe_dirs: "/var/tmp /tmp"
ceph_secret: "ceph-secret"
ceph_user: "oneadmin"
disk_type: "rbd"
- name: "${provision}-system"
type: 'system_ds'
tm_mad: 'ceph'
safe_dirs: "/var/tmp /tmp"
allow_orphans: 'mixed'
bridge_list: "ceph-hosts-list"
ceph_secret: "ceph-secret"
ceph_user: "oneadmin"
disk_type: "rbd"
shared: "yes"

View File

@ -36,4 +36,4 @@ datastores:
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"
replica_host: "first-host"

View File

@ -0,0 +1,47 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# hosts: Physical servers
# provision:
# - count: Number of servers to create
# - hostname: kvm-host1, kvm-host2 .... of the server
#
# You can define specific OpenNebula configuration attributes for all the hosts:
# - reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# - reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#-------------------------------------------------------------------------------
hosts:
- im_mad: "${input.one_hypervisor}"
vm_mad: "${input.one_hypervisor}"
provision:
hostname: "${input.ceph_full_hosts_names}"
ceph_group: "osd,mon"
- im_mad: "${input.one_hypervisor}"
vm_mad: "${input.one_hypervisor}"
provision:
hostname: "${input.ceph_osd_hosts_names}"
ceph_group: "osd"
- im_mad: "${input.one_hypervisor}"
vm_mad: "${input.one_hypervisor}"
provision:
hostname: "${input.client_hosts_names}"
ceph_group: "clients"
...

View File

@ -0,0 +1,70 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
inputs:
- name: 'ceph_full_hosts_names'
type: array
description: 'Semicolon separated list of FQDNs or IP addresses of the Ceph full hosts to be added to the cluster (osd + mon)'
- name: 'ceph_osd_hosts_names'
type: array
description: 'Semicolon separated list of FQDNs or IP addresses of the Ceph osd hosts to be added to the cluster (osd only)'
- name: 'client_hosts_names'
type: array
description: 'Semicolon separated list of FQDNs or IP addresses of the non-Ceph hosts to be added to the cluster (ceph client)'
- name: 'one_hypervisor'
type: list
description: "Virtualization technology for the cluster hosts"
options:
- 'kvm'
- 'lxc'
- name: 'first_public_ip'
type: text
description: 'First public IP for the public IPs address range.'
- name: 'number_public_ips'
type: text
description: 'Number of public IPs to get'
default: '1'
- name: 'public_phydev'
type: text
description: 'Physical device to be used for public networking.'
- name: 'private_phydev'
type: text
description: 'Physical device to be used for private networking.'
- name: 'ceph_public_network'
type: text
description: 'Ceph public network in CIDR notation'
- name: 'ceph_device'
type: array
description: 'Semicolon separated list of block devices for Ceph OSD'
default: '/dev/sdb'
- name: 'ceph_monitor_interface'
type: text
description: 'Physical device to be used for Ceph.'
default: 'eth0'
...

View File

@ -20,6 +20,7 @@
# ------------------------------------------------------------------------------
name: 'onprem-cluster'
description: 'On-premises cluster'
extends:
- onprem.d/defaults.yml

View File

@ -236,6 +236,13 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
:format => Array
}
HOST_PARAMS = {
:name => 'host_params',
:large => '--host-params param=value',
:description => 'Extra param to pass to host, e.g.: ceph_group=osd',
:format => String
}
########################################################################
MODES = CommandParser::OPTIONS - [CommandParser::VERBOSE] +
@ -481,7 +488,8 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
return [-1, rc.message] if OpenNebula.is_error?(rc)
rc = provision.add_hosts(amount, options[:hostnames])
rc = provision.add_hosts(amount, options[:hostnames],
options[:host_params])
return [-1, rc.message] if OpenNebula.is_error?(rc)

View File

@ -256,7 +256,8 @@ CommandParser::CmdParser.new(ARGV) do
:provisionid,
:options => [OneProvisionHelper::MODES,
OneProvisionHelper::AMOUNT,
OneProvisionHelper::HOSTNAMES] do
OneProvisionHelper::HOSTNAMES,
OneProvisionHelper::HOST_PARAMS] do
helper.add_hosts(args[0], options)
end

View File

@ -22,6 +22,7 @@ require 'json'
require 'base64'
require 'erb'
require 'ostruct'
require 'fileutils'
if !ONE_LOCATION
ANSIBLE_LOCATION = '/usr/share/one/oneprovision/ansible'
@ -40,9 +41,12 @@ CONFIG_DEFAULTS = {
}
# Ansible params
ANSIBLE_VERSION = [Gem::Version.new('2.8'), Gem::Version.new('2.10')]
ANSIBLE_ARGS = "--ssh-common-args='-o UserKnownHostsFile=/dev/null'"
ANSIBLE_VERSION = [Gem::Version.new('2.8'), Gem::Version.new('2.10')]
ANSIBLE_ARGS = "--ssh-common-args='-o UserKnownHostsFile=/dev/null'"
ANSIBLE_INVENTORY_DEFAULT = 'default'
CEPH_ANSIBLE_URL = 'https://github.com/ceph/ceph-ansible.git'
CEPH_ANSIBLE_BRANCH = 'stable-6.0'
CEPH_ANSIBLE_DIR = '/var/lib/one/.ansible/ceph-6.0'
module OneProvision
@ -65,7 +69,7 @@ module OneProvision
class << self
# Checks ansible installed version
def check_ansible_version
def check_ansible_version(provision)
# Get just first line with Ansible version
version = `ansible --version`.split("\n")[0]
@ -78,38 +82,79 @@ module OneProvision
"must be >= #{ANSIBLE_VERSION[0]} " \
"and < #{ANSIBLE_VERSION[1]}")
end
return if provision.nil? || !provision.hci?
unless system('ansible-galaxy --version >/dev/null')
Utils.fail('Missing ansible-galaxy')
end
return if system('git --version >/dev/null')
Utils.fail('Missing git to checkout ceph-ansible')
end
def install_ansible_dependencies(provision)
return unless provision.hci?
unless File.directory?("#{CEPH_ANSIBLE_DIR}/roles")
ansible_dir = File.dirname(CEPH_ANSIBLE_DIR)
FileUtils.mkdir_p(ansible_dir) \
unless File.exist?(ansible_dir)
Driver.run('git clone --branch ' <<
"#{CEPH_ANSIBLE_BRANCH} " <<
"--depth 1 #{CEPH_ANSIBLE_URL} " <<
CEPH_ANSIBLE_DIR.to_s)
end
Driver.run('ansible-galaxy install -r ' <<
'/usr/share/one/oneprovision/ansible/' <<
'hci-requirements.yml')
end
# TODO: expect multiple hosts
# Configures host via ansible
#
# @param hosts [OpenNebula::Host Array] Hosts to configure
# @param hosts [OpenNebula::Datastore array] Datastores for vars
# @param provision [OpenNebula::Provision] Provision info
# @param ping [Boolean] True to check ping to hosts
def configure(hosts, datastores = nil, provision = nil, ping = true)
# @param hosts [OpenNebula::Host Array] Hosts to configure
# @param datastores [OpenNebula::Datastore array] Datastores for vars
# @param provision [OpenNebula::Provision] Provision info
# @param only_hosts [Array] Hostnames - limit configure to them
def configure(hosts, datastores = nil, provision = nil,
only_hosts = [])
return if hosts.nil? || hosts.empty?
Driver.retry_loop('Failed to configure hosts', provision) do
check_ansible_version
check_ansible_version(provision)
ansible_dir = generate_ansible_configs(hosts, datastores)
install_ansible_dependencies(provision)
try_ssh(ansible_dir) if ping
# sets @inventories, @group_vars, @playbooks
dir = generate_ansible_configs(hosts, datastores, provision)
# extends @inventories, @group_vars
if provision.hci?
generate_ceph_ansible_configs(dir, hosts, provision)
end
# try_ssh + gather facts
try_ssh_and_gather_facts(dir)
OneProvisionLogger.info('Configuring hosts')
@inventories.each do |i|
@playbooks.each do |playbook|
# build Ansible command
cmd = "ANSIBLE_CONFIG=#{ansible_dir}/ansible.cfg "
cmd = "ANSIBLE_CONFIG=#{dir}/ansible.cfg "
cmd << "ansible-playbook #{ANSIBLE_ARGS}"
cmd << " -i #{ansible_dir}/inventory"
cmd << " -e @#{ansible_dir}/group_vars.yml"
cmd << " #{ANSIBLE_LOCATION}/#{i}.yml"
@inventories.each {|i| cmd << " -i #{i}" }
@group_vars.each {|g| cmd << " -e @#{g}" }
cmd << " --limit #{only_hosts.join(',')}" if only_hosts
cmd << " #{ANSIBLE_LOCATION}/#{playbook}.yml"
o, _e, s = Driver.run(cmd, true)
if s && s.success? && i == @inventories.last
if s && s.success? && playbook == @playbooks.last
# enable configured ONE host back
OneProvisionLogger.debug(
'Enabling OpenNebula hosts'
@ -129,7 +174,7 @@ module OneProvision
end
end
0
[0, @facts]
rescue StandardError => e
raise e
end
@ -193,13 +238,15 @@ module OneProvision
# Checks ssh connection
#
# @param ansible_dir [Dir] Directory with ansible information
def try_ssh(ansible_dir)
def try_ssh_and_gather_facts(ansible_dir)
OneProvisionLogger.info('Checking working SSH connection')
return if retry_ssh(ansible_dir)
Driver.retry_loop 'SSH connection is failing' do
ansible_ssh(ansible_dir)
if retry_ssh(ansible_dir)
@facts = gather_facts(ansible_dir)
else
Driver.retry_loop 'SSH connection is failing' do
ansible_ssh(ansible_dir)
end
end
end
@ -247,15 +294,50 @@ module OneProvision
rtn.join("\n")
end
# After ping to ssh also gather some basics facts from hosts
# They are later reused to update OpenNebula resources:
# hosts and vnets
#
# @param ansible_dir [String] Ansible directory
#
# @return [Hash] facts
def gather_facts(ansible_dir)
cmd = "ANSIBLE_CONFIG=#{ansible_dir}"
cmd += '/ansible.cfg ANSIBLE_BECOME=false'
cmd << " ansible #{ANSIBLE_ARGS}"
cmd << " -i #{ansible_dir}/inventory"
cmd << ' --one-line'
cmd << " -m setup all -a 'gather_subset=network,hardware'"
o, _e, s = Driver.run(cmd)
raise OneProvisionLoopException if !s || !s.success?
# ansible output post-processing, remove " | SUCCESS " suffix
# create a hash like { "hostname" => { facts }, }
begin
facts = {}
o.each_line do |line|
hostname, host_facts = line.split(' | SUCCESS => ')
facts[hostname] = JSON.parse(host_facts)
end
rescue StandardError
raise OneProvisionLoopException
end
facts
end
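The returned hash maps each inventory hostname to the parsed output of the Ansible setup module for the network and hardware subsets; these facts are later reused to update OpenNebula hosts and vnets. A trimmed, hypothetical sketch of its shape, shown as YAML for readability (actual keys depend on the platform):

```yaml
edge-vhost0:
  ansible_facts:
    ansible_default_ipv4: { address: "10.0.0.10", interface: "eth0" }
    ansible_memtotal_mb: 7821
  changed: false
```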
# TODO: support different variables and
# connection parameters for each host
# Generates ansible configurations
#
# @param hosts [OpenNebula::Host array] Hosts to configure
# @param hosts [OpenNebula::Datastore array] Datastores for vars
# @param hosts [OpenNebula::Host array] Hosts to configure
# @param datastores [OpenNebula::Datastore array] Datastores for vars
# @param provision [OpenNebula::Provision] Provision for vars
#
# @return [Dir] Directory with Ansible information
def generate_ansible_configs(hosts, datastores)
def generate_ansible_configs(hosts, _datastores, _provision)
ansible_dir = Dir.mktmpdir
msg = "Generating Ansible configurations into #{ansible_dir}"
@ -294,24 +376,11 @@ module OneProvision
c << "ansible_port=#{conn['remote_port']}\n"
end
Driver.write_file_log("#{ansible_dir}/inventory", c)
# Generate "group_vars" file
group_vars = { 'sys_ds_ids' => [], 'first_host' => '' }
group_vars['first_host'] = hosts.first['name'] \
unless hosts.empty?
datastores.each do |d|
ds = Resource.object('datastores')
ds.info(d['id'])
next unless ds.one['TYPE'] == '1' # only system ds
group_vars['sys_ds_ids'] << d['id']
end unless datastores.nil?
c = YAML.dump(group_vars)
fname = "#{ansible_dir}/group_vars.yml"
fname = "#{ansible_dir}/inventory"
Driver.write_file_log(fname, c)
@inventories = [fname]
@group_vars = []
# Generate "host_vars" directory
Dir.mkdir("#{ansible_dir}/host_vars")
@ -331,21 +400,20 @@ module OneProvision
host = Resource.object('hosts')
host.info(hosts[0]['id'])
if host.one['TEMPLATE/ANSIBLE_PLAYBOOK']
@inventories = host.one['TEMPLATE/ANSIBLE_PLAYBOOK']
@inventories = @inventories.split(',')
if host.one['TEMPLATE/PROVISION/ANSIBLE_PLAYBOOK']
@playbooks = host.one['TEMPLATE/PROVISION/ANSIBLE_PLAYBOOK']
@playbooks = @playbooks.split(',')
else
@inventories = [ANSIBLE_INVENTORY_DEFAULT]
@playbooks = [ANSIBLE_INVENTORY_DEFAULT]
end
# Generate "ansible.cfg" file
# TODO: what if private_key isn't filename, but content
# TODO: store private key / equinix
# credentials securely in the ONE
roles = "#{ANSIBLE_LOCATION}/roles"
c = File.read("#{ANSIBLE_LOCATION}/ansible.cfg.erb")
c = ERBVal.render_from_hash(c, :roles => roles)
c = ERBVal.render_from_hash(c, :ans_loc => ANSIBLE_LOCATION)
Driver.write_file_log("#{ansible_dir}/ansible.cfg", c)
@ -356,6 +424,64 @@ module OneProvision
ansible_dir
end
# Generate ceph inventory based on hosts and their ceph_groups,
# add it to @inventories, also include ceph group_vars.yml to
# @group_vars array
#
# @param ansible_dir [String] Ansible tmp dir
# @param hosts [OpenNebula::Host array] Hosts to configure
# @param provision [OpenNebula::Provision] Provision vars
#
# @return nil
def generate_ceph_ansible_configs(ansible_dir, hosts, provision)
ceph_inventory = \
{
'mons' => { 'hosts' => {} },
'mgrs' => { 'hosts' => {} },
'osds' => { 'hosts' => {} },
'clients' => { 'hosts' => {},
'vars' => { 'copy_admin_key' => true } }
}
hosts.each do |h|
host = Resource.object('hosts')
host.info(h['id'])
ceph_group = host.one['TEMPLATE/PROVISION/CEPH_GROUP']
case ceph_group
when 'osd,mon'
ceph_inventory['mons']['hosts'][host.one['NAME']] = nil
ceph_inventory['mgrs']['hosts'][host.one['NAME']] = nil
ceph_inventory['osds']['hosts'][host.one['NAME']] = nil
when 'osd'
ceph_inventory['osds']['hosts'][host.one['NAME']] = nil
when 'clients'
ceph_inventory['clients']['hosts'][host.one['NAME']] =
nil
end
end
fname = "#{ansible_dir}/ceph_inventory.yml"
Driver.write_file_log(fname, YAML.dump(ceph_inventory))
@inventories << fname
# eval ceph group_vars template
ceph_vars = File.read(
"#{ANSIBLE_LOCATION}/ceph_hci/group_vars.yml.erb"
)
yaml = provision.body['ceph_vars'].to_yaml.gsub!("---\n", '')
ceph_vars = ERBVal.render_from_hash(
ceph_vars,
'vars' => yaml
)
fname = "#{ansible_dir}/ceph_group_vars.yml"
Driver.write_file_log(fname, ceph_vars)
@group_vars << fname
end
# Gets host connection options
#
# @param host [OpenNebula::Host] Host to get connections options

View File

@ -14,6 +14,7 @@
require 'opennebula/document_json'
require 'opennebula/wait_ext'
require 'securerandom'
module OneProvision
@ -78,7 +79,7 @@ module OneProvision
rc = to_json(template, provider)
return rc if OpenNebula.is_error?(rc)
rescue StandardError => e
rescue StandardError
return OpenNebula::Error.new(e)
end
@ -165,6 +166,13 @@ module OneProvision
infrastructure_objects['networks']
end
# Returns provision vnetemplates
def vntemplates
return unless resource_objects
resource_objects['vntemplates']
end
# Returns provision resources objects
def resource_objects
@body['provision']['resource']
@ -214,6 +222,16 @@ module OneProvision
@body['ar_template']
end
# Returns whether the provision is hyper-converged (HCI)
def hci?
@body['ceph_vars']
end
# Returns the Ceph vars hash
def ceph_vars
@body['ceph_vars']
end
# Get OpenNebula information for specific objects
#
# @param object [String] Object to check
@ -274,7 +292,7 @@ module OneProvision
#
# @return [Integer] Provision ID
def deploy(config, cleanup, timeout, skip, provider)
Ansible.check_ansible_version if skip == :none
Ansible.check_ansible_version(nil) if skip == :none
# Config contains
# :inputs -> array with user inputs values
@ -336,11 +354,15 @@ module OneProvision
# read provision file
cfg.parse(true)
puts
# @name is used for template evaluation
@name = cfg['name']
# copy ceph_vars and generate secret uuid
if cfg['ceph_vars']
@body['ceph_vars'] = cfg['ceph_vars']
@body['ceph_vars']['ceph_secret_uuid'] = SecureRandom.uuid
end
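# Illustrative example (assumption): with ceph_vars such as
# { 'ceph_hci' => true, 'ceph_disk_size' => 100 } in the template,
# the provision body also gets a randomly generated ceph_secret_uuid,
# later used for the datastore CEPH_SECRET.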
OneProvisionLogger.info('Creating provision objects')
rc = Driver.retry_loop('Failed to create cluster', self) do
@ -409,8 +431,10 @@ module OneProvision
#
# @param amount [Integer] Amount of hosts to add to the provision
# @param hostnames [Array] Array of hostnames to add. Works only in
# on premise provisions
def add_hosts(amount, hostnames)
# on premise provisions
# @param params [String] Extra params for hosts in format
# ceph_group=osd, ...
def add_hosts(amount, hostnames, params)
if !state || state != STATE['RUNNING']
return OpenNebula::Error.new(
"Can't add hosts to provision in #{STATE_STR[state]}"
@ -442,6 +466,11 @@ module OneProvision
host['PROVISION'].delete('DEPLOY_ID')
host['PROVISION'].delete('HOSTNAME')
params.split(',').each do |par_val|
param, value = par_val.split('=')
host['PROVISION'][param] = value
end if params
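# Illustrative example (assumption): params = 'ceph_group=osd'
# results in host['PROVISION']['ceph_group'] = 'osd' for every
# host added by this call.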
# Downcase to use create_deployment_file
host = host.transform_keys(&:downcase)
host.keys.each do |key|
@ -478,9 +507,7 @@ module OneProvision
h = Resource.object('hosts', @provider, host)
dfile = h.create_deployment_file
one_host = h.create(dfile.to_xml,
cid,
host['ansible_playbook'])
one_host = h.create(dfile.to_xml, cid)
obj = { 'id' => Integer(one_host['ID']),
'name' => one_host['NAME'] }
@ -495,16 +522,17 @@ module OneProvision
OneProvisionLogger.info('Deploying')
ips, ids, state, conf = Driver.tf_action(self, 'add_hosts', tf)
hostnames ? added_hosts = hostnames : added_hosts = ips.last(amount)
OneProvisionLogger.info('Monitoring hosts')
update_hosts(ips, ids)
update_hosts(ips, ids, {})
add_tf(state, conf) if state && conf
update
configure_resources
configure_resources(added_hosts)
end
# Adds more IPs to the existing virtual network
@ -878,13 +906,33 @@ module OneProvision
h['provision']['index'] = idx + global_idx
h['provision']['count'] = count
h['provision']['id'] = @id
h['provision']['ansible_playbook'] = playbooks
# if hci? then assign ceph_group
# - hosts 1 .. ceph_full -> osd,mon
# - hosts ceph_full .. ceph_full + ceph_osd -> osd
# - hosts ceph_full + ceph_osd .. count -> clients
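# Illustrative example (assumption): with count = 6,
# ceph_full_count = 3 and ceph_osd_count = 2, idx 0..2 become
# 'osd,mon', idx 3..4 become 'osd' and idx 5 becomes 'clients'.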
if hci? && h['provision']['ceph_full_count']
if idx < h['provision']['ceph_full_count'].to_i
h['provision']['ceph_group'] = 'osd,mon'
elsif idx < h['provision']['ceph_full_count'].to_i +
h['provision']['ceph_osd_count'].to_i
h['provision']['ceph_group'] = 'osd'
else
h['provision']['ceph_group'] = 'clients'
end
end
# create OpenNebula client, saves
host = Resource.object('hosts', @provider, h)
host.evaluate_rules(self)
dfile = host.create_deployment_file
host = host.create(dfile.to_xml, cid, playbooks)
host = host.create(dfile.to_xml, cid)
obj = { 'id' => Integer(host['ID']),
'name' => host['NAME'] }
@ -900,12 +948,16 @@ module OneProvision
end
# Configures provision resources
def configure_resources
def configure_resources(only_hosts = nil)
self.state = STATE['CONFIGURING']
update
rc = Ansible.configure(hosts, datastores, self)
rc, facts = Ansible.configure(hosts, datastores, self, only_hosts)
update_hosts(nil, nil, facts)
update_datastores
update_networks(facts)
if rc == 0
self.state = STATE['RUNNING']
@ -916,46 +968,208 @@ module OneProvision
update
end
# Updates provision hosts with new name
# Updates provision hosts with new name or facts
#
# @param ips [Array] IPs for each host
# @param ids [Array] IDs for each host
def update_hosts(ips, ids)
# @param ips [Array] IPs for each host
# @param ids [Array] IDs for each host
# @param facts [Hash] Facts, such as:
# { 'host1' => {
# 'ansible_facts' => {
# 'ansible_memtotal_mb' => ''}
# ...
# }
# }
#
def update_hosts(ips, ids, facts = {})
hosts.each do |h|
host = Resource.object('hosts', provider)
host.info(h['id'])
# Avoid existing hosts
if host.one['//TEMPLATE/PROVISION/DEPLOY_ID']
ips.shift
ids.shift
next
if ips
# Avoid existing hosts
if host.one['//TEMPLATE/PROVISION/DEPLOY_ID']
ips.shift
ids.shift
next
end
name = ips.shift
id = ids.shift if ids
# Rename using public IP address
host.one.rename(name)
h['name'] = name
# Add deployment ID
host.one.add_element('//TEMPLATE/PROVISION',
'DEPLOY_ID' => id)
Terraform.p_load
# Read private IP if any
terraform = Terraform.singleton(@provider, {})
if terraform.respond_to? :add_host_vars
terraform.add_host_vars(host)
end
end
name = ips.shift
id = ids.shift if ids
# Update TEMPLATE
if !facts.empty? && hci?
# Rename using public IP address
host.one.rename(name)
hostname = host.one['//NAME']
# Add deployment ID
host.one.add_element('//TEMPLATE/PROVISION', 'DEPLOY_ID' => id)
next unless facts[hostname]
Terraform.p_load
begin
host_mem = facts[hostname]['ansible_facts']\
['ansible_memtotal_mb']
host_cpu = facts[hostname]['ansible_facts']\
['ansible_processor_count']
# Read private IP if any
terraform = Terraform.singleton(@provider, {})
# Compute reserved CPU shares for host
res_cpu = 100 * case host_cpu
when 1..4
0 # looks like testing environment
when 5..10 then 1 # reserve 1 core
when 11..20 then 2 # 2 cores
else 3 # 3 cores
end
if terraform.respond_to? :add_host_vars
terraform.add_host_vars(host)
# Compute reserved MEMORY for host (in KB)
res_mem = 1024 * case host_mem
when 0..4000
0 # looks like testing environment
when 4001..6001 then 1000 # reserve 1GB
when 6001..10000 then 2000 # 2GB
when 10001..20000 then 4000 # 4GB
when 20001..40000 then 5000 # 5GB
when 40001..64000 then 8000 # 8GB
when 64001..128000 then 12000 # 12GB
else 16000 # 16GB
end
rescue StandardError
raise OneProvisionLoopException, \
"Missing facts for #{hostname}" \
end
host.one.delete_element('//TEMPLATE/RESERVED_MEM')
host.one.add_element('//TEMPLATE',
'RESERVED_MEM' => res_mem)
host.one.delete_element('//TEMPLATE/RESERVED_CPU')
host.one.add_element('//TEMPLATE',
'RESERVED_CPU' => res_cpu)
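# Illustrative example (assumption): a host reporting
# ansible_processor_count = 8 and ansible_memtotal_mb = 32000
# gets RESERVED_CPU = 100 (one core) and
# RESERVED_MEM = 5120000 KB (~5 GB).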
end
host.one.update(host.one.template_str)
h['name'] = name
end
end
# Updates datastores with ad-hoc changes:
# - replica_host <- replace by first host
# - bridge_list <- replace by ceph hosts list
# - ceph_secret <- replace by generated ceph secret
def update_datastores
datastores.each do |d|
datastore = Resource.object('datastores', provider)
datastore.info(d['id'])
if datastore.one['TEMPLATE/BRIDGE_LIST'] == 'ceph-hosts-list'
bridge_list = []
hosts.each do |h|
host = Resource.object('hosts', provider)
host.info(h['id'])
# add ceph hosts to bridge_list
ceph_group = host.one['TEMPLATE/PROVISION/CEPH_GROUP']
if ['osd', 'osd,mon'].include?(ceph_group)
bridge_list << host.one['NAME']
end
end
if bridge_list
datastore.one.delete_element('//TEMPLATE/BRIDGE_LIST')
datastore.one.add_element(
'//TEMPLATE',
'BRIDGE_LIST' => bridge_list.join(' ')
)
end
end
if datastore.one['TEMPLATE/REPLICA_HOST'] == 'first-host' \
&& hosts.first['name']
datastore.one.delete_element('//TEMPLATE/REPLICA_HOST')
datastore.one.add_element(
'//TEMPLATE',
'REPLICA_HOST' => hosts.first['name']
)
end
if datastore.one['TEMPLATE/CEPH_SECRET'] == 'ceph-secret'
datastore.one.delete_element('//TEMPLATE/CEPH_SECRET')
datastore.one.add_element(
'//TEMPLATE',
'CEPH_SECRET' => @body['ceph_vars']['ceph_secret_uuid']
)
end
datastore.one.update(datastore.one.template_str)
end
end
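# Illustrative example (assumption, hypothetical host names): a
# datastore defined with BRIDGE_LIST="ceph-hosts-list",
# REPLICA_HOST="first-host" and CEPH_SECRET="ceph-secret" ends up
# with BRIDGE_LIST="host0 host1 host2", REPLICA_HOST="host0" and
# CEPH_SECRET set to the generated ceph_secret_uuid.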
# Updates provision vnets & vnetmplates phydev from fact
#
# @param facts [Hash] Facts, such as:
#
# { 'host1' => { 'ansible_facts' => { 'ansible_default_ipv4' => '' } } }
def update_networks(facts)
networks.each do |net|
vnet = OpenNebula::VirtualNetwork.new_with_id(net['id'],
@client)
vnet.info
next unless vnet['//TEMPLATE/PHYDEV'] == 'default-ipv4-nic'
begin
# assume all hosts have the same default NIC, use the first
nic = facts[facts.keys[0]]['ansible_facts']\
['ansible_default_ipv4']['interface']
rescue StandardError
raise OneProvisionLoopException, 'Missing network facts'
end
vnet.delete_element('//TEMPLATE/PHYDEV')
vnet.add_element('//TEMPLATE', 'PHYDEV' => nic)
vnet.update(vnet.template_str)
end if networks
vntemplates.each do |vntemplate|
vntempl = OpenNebula::VNTemplate.new_with_id(vntemplate['id'],
@client)
vntempl.info
next unless vntempl['//TEMPLATE/PHYDEV'] == 'default-ipv4-nic'
begin
# assume all hosts have the same default NIC, use the first
nic = facts[facts.keys[0]]['ansible_facts']\
['ansible_default_ipv4']['interface']
rescue StandardError
raise OneProvisionLoopException, 'Missing network facts'
end
vntempl.delete_element('//TEMPLATE/PHYDEV')
vntempl.add_element('//TEMPLATE', 'PHYDEV' => nic)
vntempl.update(vntempl.template_str)
end if vntemplates
end
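# Illustrative example (assumption): a vnet or vntemplate defined
# with PHYDEV="default-ipv4-nic" is rewritten with the default
# interface reported by the first host's facts, e.g. PHYDEV="eth0".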
# Checks if provision has running VMs
#
# @return [Boolean] True if there are running VMs

View File

@ -150,6 +150,8 @@ module OneProvision
return unless eval_ui
eval_user_inputs
validate_user_inputs
rescue StandardError => e
Utils.fail("Failed to read configuration: #{e}")
end
@ -674,9 +676,11 @@ module OneProvision
case input['type']
when 'array'
value = []
value << i_value.split(';')
value.flatten!
if i_value.nil?
value = []
else
value = i_value.split(';')
end
else
value.gsub!("${#{match.join('.')}}", i_value.to_s)
end
@ -809,15 +813,11 @@ module OneProvision
answer = input['options'][0] if !answer || answer.empty?
end
when 'array'
answer = ''
print "Array `#{input['name']}` " \
"(default=#{input['default']}): "
until answer.match(/(\w+)(;\s*\w+)*/)
print "Array `#{input['name']}` " \
"(default=#{input['default']}): "
answer = STDIN.readline.chop
answer = input['default'] if !answer || answer.empty?
end
answer = STDIN.readline.chop
answer = input['default'] if answer.empty?
when 'fixed'
answer = input['default']
end
@ -825,6 +825,27 @@ module OneProvision
answer
end
# Validate user inputs
def validate_user_inputs
return unless @config['ceph_vars']
osd_full_hosts_count = 0
@config['hosts'].each do |h|
if h['provision'] && h['provision']['ceph_group'] == 'osd,mon'
osd_full_hosts_count += h['provision']['hostname'].length
elsif h['provision'] && h['provision']['ceph_full_count']
osd_full_hosts_count += \
h['provision']['ceph_full_count'].to_i
end
end
return if [3, 5].include? osd_full_hosts_count
Utils.warn('Recommended number of Mon+OSD Ceph hosts ' \
"is 3 or 5, given #{osd_full_hosts_count}")
end
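# Worked example (assumption): a template declaring
# ceph_full_count = 4 yields osd_full_hosts_count = 4, which is
# neither 3 nor 5, so the warning is shown and the user is asked
# whether to continue.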
end
end

View File

@ -133,10 +133,9 @@ module OneProvision
#
# @param dfile [String] XML with all the HOST information
# @param cluster [Integer] ID of the CLUSTER where
# @param playbooks [String] Ansible playbooks to configure host
#
# @return [OpenNebula::Host] The ONE HOST object
def create(dfile, cluster, playbooks)
def create(dfile, cluster)
xhost = OpenNebula::XMLElement.new
xhost.initialize_xml(dfile, 'HOST')
@ -153,10 +152,6 @@ module OneProvision
host.allocate(name, im, vm, cluster)
host.update(xhost.template_str, true)
unless playbooks.nil?
host.update("ANSIBLE_PLAYBOOK=\"#{playbooks}\"", true)
end
host.offline
host.info

View File

@ -85,6 +85,25 @@ module OneProvision
exit(code)
end
# Shows a warning and asks whether to continue
#
# @param text [String] Warning message
# @param code [Integer] Exit code used when the user aborts
def warn(text, code = -1)
STDOUT.puts "WARN: #{text}"
STDOUT.puts 'Continue? yes/no'
answer = STDIN.readline.chop
until %w[YES NO].include?(answer.upcase)
STDOUT.puts "Invalid #{answer}, put yes/no"
STDOUT.puts 'Continue?'
answer = STDIN.readline.chop
end
exit(code) if answer.upcase == 'NO'
end
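# Usage sketch (assumption):
#   Utils.warn('Recommended number of Mon+OSD Ceph hosts is 3 or 5, given 4')
# prints the warning, asks 'Continue? yes/no' and exits with
# code -1 when the user answers 'no'.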
# Checks if the return_code is error
def exception(return_code)
error = OpenNebula.is_error?(return_code)

View File

@ -23,6 +23,7 @@ resource "aws_vpc" "device_<%= obj['ID'] %>" {
}
}
resource "aws_subnet" "device_<%= obj['ID'] %>" {
vpc_id = aws_vpc.device_<%= obj['ID'] %>.id
cidr_block = "<%= provision['CIDR'] ? provision['CIDR'] : '10.0.0.0/16'%>"
@ -36,6 +37,28 @@ resource "aws_subnet" "device_<%= obj['ID'] %>" {
availability_zone = local.my_zones[0]
}
<% if ceph_vars['ceph_hci'] %>
<%# Additional CIDR block for HCI %>
resource "aws_vpc_ipv4_cidr_block_association" "hci_cidr_<%= obj['ID'] %>" {
vpc_id = aws_vpc.device_<%= obj['ID'] %>.id
cidr_block = "<%= provision['HCI_CIDR'] ? provision['HCI_CIDR'] : '10.1.0.0/16'%>"
}
resource "aws_subnet" "device_<%= obj['ID'] %>_ceph" {
vpc_id = aws_vpc.device_<%= obj['ID'] %>.id
cidr_block = "<%= provision['HCI_CIDR'] ? provision['HCI_CIDR'] : '10.1.0.0/16'%>"
tags = {
Name = "<%= obj['NAME'] %>_ceph_subnet"
}
availability_zone = local.my_zones[0]
depends_on = [ aws_vpc_ipv4_cidr_block_association.hci_cidr_<%= obj['ID'] %> ]
}
<% end %>
resource "aws_internet_gateway" "device_<%= obj['ID'] %>" {
vpc_id = aws_vpc.device_<%= obj['ID'] %>.id

View File

@ -26,11 +26,49 @@ resource "aws_instance" "device_<%= obj['ID'] %>" {
}
}
output "ip_<%= obj['ID'] %>" {
value = aws_instance.device_<%= obj['ID'] %>.public_ip
<% if ceph_vars['ceph_hci'] %>
<%# additional CEPH network interface %>
resource "aws_network_interface" "device_<%= obj['ID'] %>_ceph" {
subnet_id = aws_subnet.device_<%= c['ID'] %>_ceph.id
<% ecidr = c['TEMPLATE']['PROVISION']['HCI_CIDR'] || "10.1.0.0/16"
pref = ecidr.split('/').first.rpartition(".")[0]
ceph_ip = pref << '.' << ( provision['INDEX'].to_i + 4 ).to_s
%>
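<%# Worked example (assumption): with HCI_CIDR left at the default
    10.1.0.0/16 and provision INDEX 0, ceph_ip evaluates to 10.1.0.4;
    INDEX 1 gives 10.1.0.5, and so on. %>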
private_ips = ["<%= ceph_ip %>"]
security_groups = [ aws_security_group.device_<%= c['ID'] %>_all.id ]
attachment {
instance = aws_instance.device_<%= obj['ID'] %>.id
device_index = 1
}
tags = {
Name = "device_<%= obj['ID'] %>_ceph"
}
}
output "device_id_<%= obj['ID'] %>" {
<%# additional CEPH disk %>
resource "aws_ebs_volume" "device_<%= obj['ID'] %>_ceph_volume" {
availability_zone = local.my_zones[0]
size = <%= ceph_vars['ceph_disk_size'] %>
}
resource "aws_volume_attachment" "device_<%= obj['ID'] %>_ebs_att" {
device_name = "/dev/xvdb"
volume_id = aws_ebs_volume.device_<%= obj['ID'] %>_ceph_volume.id
instance_id = aws_instance.device_<%= obj['ID'] %>.id
stop_instance_before_detaching = true
}
<% end %>
output "device_<%= obj['ID'] %>_id" {
value = aws_instance.device_<%= obj['ID'] %>.id
}
output "device_<%= obj['ID'] %>_ip" {
value = aws_instance.device_<%= obj['ID'] %>.public_ip
}

View File

@ -57,10 +57,10 @@ resource "digitalocean_firewall" "device_<%= obj['ID'] %>" {
}
}
output "ip_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_ip" {
value = digitalocean_droplet.device_<%= obj['ID'] %>.ipv4_address
}
output "device_id_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_id" {
value = digitalocean_droplet.device_<%= obj['ID'] %>.id
}

View File

@ -9,11 +9,11 @@ resource "packet_device" "device_<%= obj['ID'] %>" {
tags = ["OpenNebula", "ONE_ID=<%= obj['ID'] %>"]
}
output "ip_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_ip" {
value = packet_device.device_<%= obj['ID'] %>.network[0].address
}
output "device_id_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_id" {
value = packet_device.device_<%= obj['ID'] %>.id
}

View File

@ -21,11 +21,11 @@ resource "google_compute_instance" "device_<%= obj['ID'] %>" {
}
}
output "ip_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_ip" {
value = google_compute_instance.device_<%= obj['ID'] %>.network_interface[0].access_config[0].nat_ip
}
output "device_id_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_id" {
value = google_compute_instance.device_<%= obj['ID'] %>.id
}

View File

@ -13,11 +13,11 @@ resource "vultr_bare_metal_server" "device_<%= obj['ID'] %>" {
activation_email = false
}
output "ip_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_ip" {
value = vultr_bare_metal_server.device_<%= obj['ID'] %>.main_ip
}
output "device_id_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_id" {
value = vultr_bare_metal_server.device_<%= obj['ID'] %>.id
}

View File

@ -14,11 +14,11 @@ resource "vultr_instance" "device_<%= obj['ID'] %>" {
private_network_ids = [vultr_private_network.device_<%= c['ID'] %>.id]
}
output "ip_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_ip" {
value = vultr_instance.device_<%= obj['ID'] %>.main_ip
}
output "device_id_<%= obj['ID'] %>" {
output "device_<%= obj['ID'] %>_id" {
value = vultr_instance.device_<%= obj['ID'] %>.id
}

View File

@ -18,6 +18,7 @@ require 'erb'
require 'ostruct'
require 'yaml'
require 'zlib'
require 'json'
if !ONE_LOCATION
PROVIDERS_LOCATION = '/usr/lib/one/oneprovision/lib/terraform/providers'
@ -232,22 +233,17 @@ module OneProvision
# Get IP information and deploy IDs
info = output(tempdir)
info.gsub!(' ', '')
info = info.split("\n")
info.map! {|val| val.split('=')[1] }
# Filter ids
hash_ids = info.select do |key, _value|
key.to_s.match(/^device_[0-9]*_id/)
end
ids = hash_ids.values.map {|h| h['value'] }
# rubocop:disable Style/StringLiterals
info.map! {|val| val.gsub("\"", '') }
# rubocop:enable Style/StringLiterals
# rubocop:disable Style/StringLiterals
info.map! {|val| val.gsub("\"", '') }
# rubocop:enable Style/StringLiterals
# From 0 to (size / 2) - 1 -> deploy IDS
# From (size / 2) until the end -> IPs
ids = info[0..(info.size / 2) - 1]
ips = info[(info.size / 2)..-1]
# Filter ips
hash_ips = info.select do |key, _value|
key.to_s.match(/^device_[0-9]*_ip/)
end
ips = hash_ips.values.map {|h| h['value'] }
conf = Base64.encode64(Zlib::Deflate.deflate(@conf))
state = Base64.encode64(Zlib::Deflate.deflate(@state))
@ -431,7 +427,8 @@ module OneProvision
c = ERBVal.render_from_hash(c,
:c => cluster,
:obj => obj,
:provision => p)
:provision => p,
:ceph_vars => provision.ceph_vars)
@conf << c
end
@ -517,13 +514,21 @@ module OneProvision
# @param tempdir [String] Path to temporal directory
# @param variable [String] Variable to check
#
# @return [String] Variable value
# @return [Hash] Variable value
#
# example:
# { "device_51_id" => {
# "sensitive" => false,
# "type" => "string",
# "value" => "i-02a4d0f8012392d83"
# }
def output(tempdir, variable = nil)
ret = nil
Driver.retry_loop "Driver action 'tf output' failed" do
ret, e, s = Driver.run(
"cd #{tempdir}; terraform output #{variable}"
"cd #{tempdir}; terraform output -json #{variable}"
)
unless s && s.success?
@ -531,7 +536,7 @@ module OneProvision
end
end
ret
JSON.parse(ret)
end
# Destroys an specific resource

View File

@ -79,18 +79,34 @@ class AWSProvider
instcs = @ec2.describe_instances({ :instance_ids => [@deploy_id] })
inst = instcs[0][0].instances[0]
nic_id = inst.network_interfaces[0].network_interface_id
# find NIC to which the IP belongs (avoid Ceph network)
nic_id = nil
inst.network_interfaces.each do |ec2_nic|
ec2_subnet = @ec2.describe_subnets(
{ :subnet_ids => [ec2_nic.subnet_id] }
)[0][0]
ip_range = IPAddr.new(ec2_subnet.cidr_block)
@ec2.assign_private_ip_addresses(
{ :network_interface_id => nic_id,
:private_ip_addresses => [ip] }
)
if ip_range.include?(ip)
nic_id = ec2_nic.network_interface_id
end
end
@ec2.associate_address(
{ :network_interface_id => nic_id,
:allocation_id => opts[:alloc_id],
:private_ip_address => ip }
)
if nic_id
@ec2.assign_private_ip_addresses(
{ :network_interface_id => nic_id,
:private_ip_addresses => [ip] }
)
@ec2.associate_address(
{ :network_interface_id => nic_id,
:allocation_id => opts[:alloc_id],
:private_ip_address => ip }
)
else
OpenNebula.log_error("Can not find any interface to assign #{ip}")
exit 1
end
0
rescue StandardError => e