mirror of https://github.com/OpenNebula/one.git synced 2025-01-08 21:17:43 +03:00

F #5593: implement OneProvision add host operation (#1721)

* Add new CLI command oneprovision host add
    * Add new CLI command oneprovision ip add
    * Fix minor issues with the destroy resources operation
    * Add a force parameter to the delete operation
    * Update all providers' base OS to Ubuntu 20.04
    * Update Ansible roles
Alejandro Huertas Herrero 2022-01-27 16:25:19 +01:00 committed by GitHub
parent 36bb3bbcc4
commit 341d5f806b
76 changed files with 957 additions and 456 deletions
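
The new and extended CLI operations can be exercised roughly as follows. This is a minimal usage sketch assembled from the options introduced in the diff below; the provision ID 0 is a placeholder, and the long form of the FORCE option is assumed to be --force as used elsewhere in the helper.

    # Provision and configure two additional hosts (defaults to 1 when --amount is omitted)
    oneprovision host add 0 --amount 2

    # Add three more address ranges to the provision's elastic network
    oneprovision ip add 0 --amount 3

    # Delete the provision, cleaning up VMs and images and ignoring per-object errors
    oneprovision delete 0 --cleanup --force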

View File

@ -22,6 +22,6 @@
- { protocol: 'udp', port: 8472 }
- update-replica
- role: frr
frr_iface: 'eth0'
frr_iface: '{{ oneprovision_private_phydev }}'
# Use /16 for the internal management network address
frr_prefix_length: 16

View File

@ -21,7 +21,7 @@
- { protocol: 'udp', port: 8472 }
- update-replica
- role: frr
frr_iface: 'eth0'
frr_iface: 'ens4'
# Use /16 for the internal management network address
frr_prefix_length: 16
frr_zebra: true

View File

@ -3,12 +3,14 @@
- name: Check supported platform
assert:
that: |
(ansible_distribution == 'Ubuntu' and ansible_distribution_version == '16.04') or
(ansible_distribution == 'Ubuntu' and ansible_distribution_version == '18.04') or
(ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7') or
(ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8') or
(ansible_distribution == 'RedHat' and ansible_distribution_major_version == '7') or
(ansible_distribution == 'RedHat' and ansible_distribution_major_version == '8')
ansible_distribution == 'Ubuntu' and ansible_distribution_version == '20.04'
msg: "Unsupported target OS"
- name: Disable ufw
service:
name: ufw
masked: yes
state: stopped
ignore_errors: yes
- include: clean_netconfigs.yml

View File

@ -1,81 +0,0 @@
---
- name: Add FRR RPM repository
yum:
name: https://rpm.frrouting.org/repo/{{ frr_frrver }}-repo-1-0.el{{ ansible_distribution_major_version }}.noarch.rpm
state: present
disable_gpg_check: True
- name: Install FRR
package:
name: frr
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
- name: Install FRR Python Tools
package:
name: frr-pythontools
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
- name: Install ipcalc
package:
name: ipcalc
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
when: frr_ipcalc == true
- name: Get network address
shell: ipcalc -bn "{{ ansible_default_ipv4.network }}/{{ frr_net_mask }}" | grep NETWORK | cut -d '=' -f2
register: frr_net
when: frr_ipcalc == true
- name: Configure BGP (RR)
template:
src: bgpd_rr.conf.j2
dest: /etc/frr/bgpd.conf
when: vars['ansible_' + frr_iface].ipv4.address in rr_servers
- name: Configure BGP
template:
src: bgpd.conf.j2
dest: /etc/frr/bgpd.conf
when: not vars['ansible_' + frr_iface].ipv4.address in rr_servers
- name: Configure Zebra
template:
src: zebra.conf.j2
dest: /etc/frr/zebra.conf
when: frr_zebra == true
- name: Configure Staticd
template:
src: staticd.conf.j2
dest: /etc/frr/staticd.conf
when: frr_zebra == true
- name: Remove frr.conf
file:
path: /etc/frr/frr.conf
state: absent
- name: Enable BGP daemon
replace:
path: /etc/frr/daemons
regexp: '^bgpd=no'
replace: 'bgpd=yes'
- name: Start FRR service
service:
name: frr
state: started
enabled: yes

View File

@ -8,8 +8,86 @@
rr_servers : "{{ rr_servers|default([]) + [ hostvars[item]['ansible_' + frr_iface].ipv4.address ] }}"
with_items: "{{ groups['all'][:frr_rr_num] }}"
- include: centos.yml
when: ansible_os_family == "RedHat"
- name: Add frr GPG key
ansible.builtin.apt_key:
url: https://deb.frrouting.org/frr/keys.asc
state: present
# - include: debian.yml
# when: ansible_os_family == "Debian"
- name: Add frr repository
ansible.builtin.apt_repository:
repo: deb https://deb.frrouting.org/frr focal frr-stable
state: present
- name: Install FRR
package:
name: frr
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
- name: Install FRR Python Tools
package:
name: frr-pythontools
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
- name: Install ipcalc
package:
name: ipcalc
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
when: frr_ipcalc == true
- name: Get network address
shell: ipcalc -bn "{{ ansible_default_ipv4.network }}/{{ frr_net_mask }}" | grep Network | cut -d ':' -f2 | sed 's/ //g'
register: frr_net
when: frr_ipcalc == true
- name: Configure BGP (RR)
template:
src: bgpd_rr.conf.j2
dest: /etc/frr/bgpd.conf
when: vars['ansible_' + frr_iface].ipv4.address in rr_servers
- name: Configure BGP
template:
src: bgpd.conf.j2
dest: /etc/frr/bgpd.conf
when: not vars['ansible_' + frr_iface].ipv4.address in rr_servers
- name: Configure Zebra
template:
src: zebra.conf.j2
dest: /etc/frr/zebra.conf
when: frr_zebra == true
- name: Configure Staticd
template:
src: staticd.conf.j2
dest: /etc/frr/staticd.conf
when: frr_zebra == true
- name: Remove frr.conf
file:
path: /etc/frr/frr.conf
state: absent
- name: Enable BGP daemon
replace:
path: /etc/frr/daemons
regexp: '^bgpd=no'
replace: 'bgpd=yes'
- name: Start FRR service
service:
name: frr
state: restarted
enabled: yes

View File

@ -1,2 +1,2 @@
hostname {{ ansible_nodename }}
ip route {{ frr_net.stdout }}/{{ frr_net_mask }} {{ ansible_default_ipv4.gateway }}
ip route {{ frr_net.stdout }} {{ ansible_default_ipv4.gateway }}

View File

@ -1,20 +0,0 @@
# networking
## Description
Configures static IP address for network interface.
These changes are persistent.
## Requirements
No special requirements.
## Variables
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
## Todo list
None

View File

@ -1,9 +0,0 @@
################################################################################
# Mandatory
################################################################################
# The network device to configure
networking_iface: 'eth1'
# Netmask of the interface
networking_prefix: 20

View File

@ -1,6 +0,0 @@
---
- name: restart network
service:
name: network
state: restarted

View File

@ -1,14 +0,0 @@
---
- name: Configuration for bridge
template:
src: ifcfg.j2
dest: /etc/sysconfig/network-scripts/ifcfg-{{ networking_iface }}
notify: restart network
when: private_ip is defined
- name: Force all notified handlers to run now
meta: flush_handlers
- name: Refresh facts
setup:

View File

@ -1,6 +0,0 @@
DEVICE={{ networking_iface }}
BOOTPROTO=static
ONBOOT=yes
NM_CONTROLLED=no
IPADDR={{ private_ip }}
PREFIX={{ networking_prefix }}

View File

@ -1,32 +1,19 @@
---
- include: centos.yml
when: ansible_os_family == "RedHat"
# Workaround for non-unified cgroups v2 not supported by Firecracker yet
# https://github.com/firecracker-microvm/firecracker/issues/841 ?
- name: Reconfigure cgroups
include_role:
name: cgroups1
when: |
(ansible_distribution == 'Fedora') or
(ansible_distribution == 'Debian' and ansible_distribution_version is version_compare('11', '>='))
- include: debian.yml
when: ansible_os_family == "Debian"
- name: "create datastore for ONE_LOCATION"
file: path={{ one_location }}/var/datastores owner=oneadmin group=oneadmin state=directory
when: one_location is defined
- name: 'Create hook subfolders into network driver folders'
become: no
local_action:
module: file
path: /var/lib/one/remotes/vnm/{{ item[0] }}/{{ item[1] }}.d
state: directory
with_nested:
- '{{ opennebula_node_firecracker_network_drivers }}'
- '{{ opennebula_node_firecracker_network_hook_types }}'
- name: 'Install firecracker hooks'
become: no
local_action:
module: copy
src: /var/lib/one/remotes/vnm/hooks/{{ item[1] }}/firecracker
dest: /var/lib/one/remotes/vnm/{{ item[0] }}/{{ item[1] }}.d/firecracker
mode: 'u+x,g+x'
with_nested:
- '{{ opennebula_node_firecracker_network_drivers }}'
- '{{ opennebula_node_firecracker_network_hook_types }}'
- name: Install OpenNebula node Firecracker package
package:
name: opennebula-node-firecracker
state: latest
register: result
until: result is success
retries: 3
delay: 10

View File

@ -10,7 +10,7 @@ No special requirements.
## Variables
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
All of the variables in this role are documented in the [defaults](defaults/main.yaml) file.
## Todo list

View File

@ -0,0 +1,17 @@
---
###############################################################################
# Valid defaults
###############################################################################
# Whether to use the ev package for kvm
opennebula_node_kvm_use_ev: False
# Enable nested KVM virtualization
opennebula_node_kvm_param_nested: False
# Enable KVM configuration
opennebula_node_kvm_manage_kvm: True
# SELinux booleans to configure
opennebula_node_selinux_booleans:
- { name: 'virt_use_nfs', state: 'yes' }

View File

@ -0,0 +1,28 @@
---
- name: Configure KVM module
template:
src: kvm.conf.j2
dest: "{{ '/etc/modprobe.d/kvm.conf' if ansible_os_family == 'RedHat' else '/etc/modprobe.d/qemu-system-x86.conf' }}"
register: template
- name: Unload KVM modules for reconfiguration
modprobe:
name: "{{ item }}"
state: absent
with_items:
- kvm_intel
- kvm_amd
register: modprobe_result
until: modprobe_result is success
retries: 3
delay: 10
when: template.changed
- name: Load KVM on Intel
modprobe: name=kvm_intel state=present
when: ansible_processor | intersect(["GenuineIntel"])
- name: Load KVM on AMD
modprobe: name=kvm_amd state=present
when: ansible_processor | intersect(["AuthenticAMD"])

View File

@ -0,0 +1,7 @@
---
- name: Restart Libvirtd (Ubuntu from 18.10)
service:
name: libvirtd
state: restarted
enabled: yes

View File

@ -0,0 +1,18 @@
---
- name: Install OpenNebula node-kvm package
apt:
name: opennebula-node-kvm
state: fixed
retries: 3
delay: 10
- include: kvm.yaml
when: opennebula_node_kvm_manage_kvm == True
- include: libvirt.yaml
- include: security.yaml
- name: "create datastore for ONE_LOCATION"
file: path={{ one_location }}/var/datastores owner=oneadmin group=oneadmin state=directory
when: one_location is defined

View File

@ -0,0 +1,39 @@
---
# SELinux
- block:
- name: Set SELinux booleans
seboolean:
name: '{{ item.name }}'
state: '{{ item.state }}'
persistent: yes
with_items: '{{ opennebula_node_selinux_booleans }}'
when:
- ansible_selinux.status == 'enabled'
- ansible_os_family == "RedHat"
# AppArmor
- name: Check if AppArmor configuration exists
stat: path=/etc/apparmor.d/abstractions/libvirt-qemu
register: apparmor_libvirt_qemu
- block:
- name: Add permissions to apparmor
lineinfile:
dest: /etc/apparmor.d/abstractions/libvirt-qemu
line: "{{ item }}"
with_items:
- " /srv/** rwk,"
- " /var/lib/one/datastores/** rwk,"
- name: Reload apparmor
service:
name: apparmor
state: reloaded
register: service_result
failed_when:
- service_result is failed
- "'find' not in service_result.msg and 'found' not in service_result.msg"
when:
- ansible_os_family == 'Debian'
- apparmor_libvirt_qemu.stat.exists == True

View File

@ -2,16 +2,8 @@
## Description
Installs the opennebula-node-lxc package.
Install the opennebula-node-lxc package.
## Requirements
No special requirements.
## Variables
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
## Todo list
None
OpenNebula repository must be configured before executing this role.

View File

@ -1,13 +1,19 @@
---
- include: centos.yml
when: ansible_os_family == "RedHat"
# Workaround for non-unified cgroups v2 not supported by Firecracker yet
# https://github.com/firecracker-microvm/firecracker/issues/841 ?
- name: Reconfigure cgroups
include_role:
name: cgroups1
when: |
(ansible_distribution == 'Fedora') or
(ansible_distribution == 'Debian' and ansible_distribution_version is version_compare('11', '>='))
- include: debian.yml
when: ansible_os_family == "Debian"
- include: security.yml
- name: "create datastore for ONE_LOCATION"
file: path={{ one_location }}/var/datastores owner=oneadmin group=oneadmin state=directory
when: one_location is defined
- name: Install OpenNebula node LXC package
package:
name: opennebula-node-lxc
state: latest
register: result
until: result is success
retries: 3
delay: 10

View File

@ -2,7 +2,7 @@
## Description
Installs python2 for Debian and Ubuntu.
Installs the default Python for Debian and RedHat family distributions.
## Requirements

View File

@ -1,17 +1,8 @@
---
- name: Install Python 2 (Debian/Ubuntu)
raw: >
if [ -e /etc/debian_version ]; then
python2 --version >/dev/null 2>&1 || \
( apt-get --version >/dev/null 2>&1 && \
sudo -n apt-get update && \
sudo -n apt-get install -y python-minimal
)
fi
- name: Install libselinux-python3 (CentOS 8)
raw: >
if grep -q 'release 8' /etc/redhat-release; then
dnf -y install python3-libselinux python3-libsemanage
fi
- name: Install Python 2 (Ubuntu)
raw: if [ -e /etc/debian_version ] && ! python --version >/dev/null 2>&1 && lsb_release -s -i | grep -i Ubuntu && lsb_release -s -r | grep -v '^2'; then sudo -n apt-get update; sudo -n apt-get install -y python-minimal; fi
register: result
until: result is succeeded
retries: 3
delay: 10

View File

@ -7,7 +7,6 @@
- hosts: nodes
roles:
- networking
- ddc
- opennebula-repository
- { role: opennebula-node-kvm, when: oneprovision_hypervisor == 'kvm' or oneprovision_hypervisor == 'qemu' }
@ -23,6 +22,6 @@
- { protocol: 'udp', port: 8472 }
- update-replica
- role: frr
frr_iface: 'eth1'
frr_iface: 'enp6s0'
# Use /20 for the internal management network address
frr_prefix_length: 20

View File

@ -12,7 +12,7 @@ inputs:
- name: 'aws_ami_image'
type: 'list'
options:
- 'ami-04c21037b3f953d37'
- 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: 'list'
options:

View File

@ -12,7 +12,7 @@ inputs:
- name: 'aws_ami_image'
type: 'list'
options:
- 'ami-08095fbc7037048f3'
- 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: 'list'
options:

View File

@ -12,7 +12,7 @@ inputs:
- name: 'aws_ami_image'
type: 'list'
options:
- 'ami-0d6e9a57f6259ba3a'
- 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: 'list'
options:

View File

@ -12,7 +12,7 @@ inputs:
- name: 'aws_ami_image'
type: 'list'
options:
- 'ami-08b547f0dcb46c4d3'
- 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: 'list'
options:

View File

@ -12,8 +12,10 @@ inputs:
- name: 'equinix_os'
type: 'list'
options:
- 'centos_8'
- 'ubuntu_20_04'
- name: 'equinix_plan'
type: 'list'
options:
- 'baremetal_0'
- 't1.small'
- 'c1.small'
- 'm1.xlarge'

View File

@ -12,7 +12,7 @@ inputs:
- name: 'equinix_os'
type: 'list'
options:
- 'centos_8'
- 'ubuntu_20_04'
- name: 'equinix_plan'
type: 'list'
options:

View File

@ -12,7 +12,7 @@ inputs:
- name: 'equinix_os'
type: 'list'
options:
- 'centos_8'
- 'ubuntu_20_04'
- name: 'equinix_plan'
type: 'list'
options:

View File

@ -12,7 +12,7 @@ inputs:
- name: 'equinix_os'
type: 'list'
options:
- 'centos_8'
- 'ubuntu_20_04'
- name: 'equinix_plan'
type: 'list'
options:

View File

@ -11,8 +11,8 @@ inputs:
- name: 'vultr_os'
type: 'list'
options:
- '362'
- '387'
- name: 'vultr_plan'
type: 'list'
options:
- 'vbm-8c-132gb'
- 'vbm-4c-32gb'

View File

@ -11,8 +11,8 @@ inputs:
- name: 'vultr_os'
type: 'list'
options:
- '362'
- '387'
- name: 'vultr_plan'
type: 'list'
options:
- 'vbm-8c-132gb'
- 'vbm-4c-32gb'

View File

@ -11,8 +11,8 @@ inputs:
- name: 'vultr_os'
type: 'list'
options:
- '362'
- '387'
- name: 'vultr_plan'
type: 'list'
options:
- 'vbm-8c-132gb'
- 'vbm-4c-32gb'

View File

@ -11,8 +11,8 @@ inputs:
- name: 'vultr_os'
type: 'list'
options:
- '362'
- '387'
- name: 'vultr_plan'
type: 'list'
options:
- 'vbm-8c-132gb'
- 'vbm-4c-32gb'

View File

@ -0,0 +1,31 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
defaults:
configuration:
# Select the hypervisor package to install
oneprovision_hypervisor: "${input.one_hypervisor}"
# required for copying recovery VM snapshots to the replica host
opennebula_ssh_deploy_private_key: true
# Select the physical device for private network (VXLAN)
oneprovision_private_phydev: "${input.private_phydev}"

View File

@ -33,21 +33,24 @@ inputs:
- name: 'aws_ami_image'
type: text
description: "AWS ami image used for host deployments"
default: ''
description: 'AWS ami image used for host deployments'
default: 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: text
description: "AWS instance type, use bare-metal instances"
default: ''
description: 'AWS instance type, use bare-metal instances'
default: 'c5.metal'
- name: 'one_hypervisor'
type: list
description: "Virtualization technology for the cluster hosts"
description: 'Virtualization technology for the cluster hosts'
default: 'kvm'
options:
- 'kvm'
- 'firecracker'
- 'lxc'
default: 'kvm'
- name: 'private_phydev'
type: text
description: 'Physical device to be used for private networking.'
...

View File

@ -31,7 +31,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: 'eth0'
phydev: "${input.private_phydev}"
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -23,9 +23,9 @@
name: 'aws-cluster'
extends:
- common.d/defaults.yml
- common.d/resources.yml
- common.d/hosts.yml
- aws.d/defaults.yml
- aws.d/datastores.yml
- aws.d/fireedge.yml
- aws.d/inputs.yml
@ -48,7 +48,7 @@ defaults:
instancetype: "${input.aws_instance_type}"
cloud_init: true
connection:
remote_user: 'centos'
remote_user: 'ubuntu'
#-------------------------------------------------------------------------------
# cluster: Parameters for the OpenNebula cluster. Applies to all the Hosts

View File

@ -18,7 +18,7 @@
inputs:
- name: 'number_hosts'
type: text
description: "Number of metal servers to create"
description: 'Number of metal servers to create'
default: '1'
- name: 'number_public_ips'
@ -33,21 +33,20 @@ inputs:
- name: 'equinix_plan'
type: text
description: "Equinix plan (device type)"
default: 'baremetal_0'
description: 'Equinix plan (device type)'
default: 't1.small'
- name: 'equinix_os'
type: text
description: "Equinix host operating system"
default: 'centos_8'
description: 'Equinix host operating system'
default: 'ubuntu_20_04'
- name: 'one_hypervisor'
type: list
description: "Virtualization technology for the cluster hosts"
description: 'Virtualization technology for the cluster hosts'
default: 'kvm'
options:
- 'kvm'
- 'firecracker'
- 'lxc'
default: 'kvm'
...

View File

@ -18,7 +18,7 @@
inputs:
- name: 'number_hosts'
type: text
description: "Number of metal servers to create"
description: 'Number of metal servers to create'
default: '1'
- name: 'number_public_ips'
@ -33,21 +33,20 @@ inputs:
- name: 'vultr_plan'
type: text
description: "Vultr plan (device type)"
default: 'vbm-8c-132gb'
description: 'Vultr plan (device type)'
default: 'vbm-4c-32gb'
- name: 'vultr_os'
type: text
description: "Vultr host operating system"
default: '362'
description: 'Vultr host operating system'
default: '387'
- name: 'one_hypervisor'
type: list
description: "Virtualization technology for the cluster hosts"
description: 'Virtualization technology for the cluster hosts'
default: 'kvm'
options:
- 'kvm'
- 'firecracker'
- 'lxc'
default: 'kvm'
...

View File

@ -12,7 +12,7 @@ inputs:
- name: 'aws_ami_image'
type: 'list'
options:
- 'ami-04c21037b3f953d37'
- 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: 'list'
options:

View File

@ -12,7 +12,7 @@ inputs:
- name: 'aws_ami_image'
type: 'list'
options:
- 'ami-08095fbc7037048f3'
- 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: 'list'
options:

View File

@ -12,7 +12,7 @@ inputs:
- name: 'aws_ami_image'
type: 'list'
options:
- 'ami-0d6e9a57f6259ba3a'
- 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: 'list'
options:

View File

@ -12,7 +12,7 @@ inputs:
- name: 'aws_ami_image'
type: 'list'
options:
- 'ami-08b547f0dcb46c4d3'
- 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: 'list'
options:

View File

@ -6,3 +6,27 @@ provider: 'digitalocean'
connection:
token: 'DigitalOcean token'
region: 'ams3'
inputs:
- name: 'digitalocean_image'
type: list
description: "Droplet host operating system"
options:
- 'ubuntu-20-04-x64'
- name: 'digitalocean_size'
type: list
description: "Size of droplet. Basic droplets start with s-, memory optimize with m- and CPU optimize are c-"
options:
- 's-1vcpu-1gb'
- 's-1vcpu-2gb'
- 's-1vcpu-3gb'
- 's-2vcpu-2gb'
- 's-2vcpu-4gb'
- 's-4vcpu-8gb'
- 's-8vcpu-16gb'
- 'm-2vcpu-16gb'
- 'm-8vcpu-64gb'
- 'c-2'
- 'c-4'
- 'c-8'

View File

@ -6,3 +6,27 @@ provider: 'digitalocean'
connection:
token: 'DigitalOcean token'
region: 'lon1'
inputs:
- name: 'digitalocean_image'
type: list
description: "Droplet host operating system"
options:
- 'ubuntu-20-04-x64'
- name: 'digitalocean_size'
type: list
description: "Size of droplet. Basic droplets start with s-, memory optimize with m- and CPU optimize are c-"
options:
- 's-1vcpu-1gb'
- 's-1vcpu-2gb'
- 's-1vcpu-3gb'
- 's-2vcpu-2gb'
- 's-2vcpu-4gb'
- 's-4vcpu-8gb'
- 's-8vcpu-16gb'
- 'm-2vcpu-16gb'
- 'm-8vcpu-64gb'
- 'c-2'
- 'c-4'
- 'c-8'

View File

@ -6,3 +6,27 @@ provider: 'digitalocean'
connection:
token: 'DigitalOcean token'
region: 'nyc3'
inputs:
- name: 'digitalocean_image'
type: list
description: "Droplet host operating system"
options:
- 'ubuntu-20-04-x64'
- name: 'digitalocean_size'
type: list
description: "Size of droplet. Basic droplets start with s-, memory optimize with m- and CPU optimize are c-"
options:
- 's-1vcpu-1gb'
- 's-1vcpu-2gb'
- 's-1vcpu-3gb'
- 's-2vcpu-2gb'
- 's-2vcpu-4gb'
- 's-4vcpu-8gb'
- 's-8vcpu-16gb'
- 'm-2vcpu-16gb'
- 'm-8vcpu-64gb'
- 'c-2'
- 'c-4'
- 'c-8'

View File

@ -6,3 +6,27 @@ provider: 'digitalocean'
connection:
token: 'DigitalOcean token'
region: 'sfo3'
inputs:
- name: 'digitalocean_image'
type: list
description: "Droplet host operating system"
options:
- 'ubuntu-20-04-x64'
- name: 'digitalocean_size'
type: list
description: "Size of droplet. Basic droplets start with s-, memory optimize with m- and CPU optimize are c-"
options:
- 's-1vcpu-1gb'
- 's-1vcpu-2gb'
- 's-1vcpu-3gb'
- 's-2vcpu-2gb'
- 's-2vcpu-4gb'
- 's-4vcpu-8gb'
- 's-8vcpu-16gb'
- 'm-2vcpu-16gb'
- 'm-8vcpu-64gb'
- 'c-2'
- 'c-4'
- 'c-8'

View File

@ -6,3 +6,27 @@ provider: 'digitalocean'
connection:
token: 'DigitalOcean token'
region: 'sgp1'
inputs:
- name: 'digitalocean_image'
type: list
description: "Droplet host operating system"
options:
- 'ubuntu-20-04-x64'
- name: 'digitalocean_size'
type: list
description: "Size of droplet. Basic droplets start with s-, memory optimize with m- and CPU optimize are c-"
options:
- 's-1vcpu-1gb'
- 's-1vcpu-2gb'
- 's-1vcpu-3gb'
- 's-2vcpu-2gb'
- 's-2vcpu-4gb'
- 's-4vcpu-8gb'
- 's-8vcpu-16gb'
- 'm-2vcpu-16gb'
- 'm-8vcpu-64gb'
- 'c-2'
- 'c-4'
- 'c-8'

View File

@ -13,7 +13,7 @@ inputs:
- name: 'google_image'
type: 'list'
options:
- 'centos-8-v20210316'
- 'ubuntu-2004-focal-v20220118'
- name: 'google_machine_type'
type: 'list'
options:

View File

@ -13,7 +13,7 @@ inputs:
- name: 'google_image'
type: 'list'
options:
- 'centos-8-v20210316'
- 'ubuntu-2004-focal-v20220118'
- name: 'google_machine_type'
type: 'list'
options:

View File

@ -13,7 +13,7 @@ inputs:
- name: 'google_image'
type: 'list'
options:
- 'centos-8-v20210316'
- 'ubuntu-2004-focal-v20220118'
- name: 'google_machine_type'
type: 'list'
options:

View File

@ -13,7 +13,7 @@ inputs:
- name: 'google_image'
type: 'list'
options:
- 'centos-8-v20210316'
- 'ubuntu-2004-focal-v20220118'
- name: 'google_machine_type'
type: 'list'
options:

View File

@ -0,0 +1,31 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
defaults:
configuration:
# Select the hypervisor package to install
oneprovision_hypervisor: "${input.one_hypervisor}"
# required for copying recovery VM snapshots to the replica host
opennebula_ssh_deploy_private_key: true
# Select the physical device for private network (VXLAN)
oneprovision_private_phydev: "${input.private_phydev}"

View File

@ -33,19 +33,23 @@ inputs:
- name: 'aws_ami_image'
type: text
description: "AWS ami image used for host deployments"
default: ''
description: 'AWS ami image used for host deployments'
default: 'ami-04505e74c0741db8d'
- name: 'aws_instance_type'
type: text
description: "AWS instance type, use virtual instances"
default: ''
description: 'AWS instance type, use virtual instances'
default: 't2.micro'
- name: 'one_hypervisor'
type: list
description: "Virtualization technology for the cluster hosts"
description: 'Virtualization technology for the cluster hosts'
default: 'lxc'
options:
- 'qemu'
- 'lxc'
- name: 'private_phydev'
type: text
description: 'Physical device to be used for private networking.'
...

View File

@ -23,9 +23,9 @@
name: 'aws-cluster'
extends:
- common.d/defaults.yml
- common.d/resources.yml
- common.d/hosts.yml
- aws.d/defaults.yml
- aws.d/datastores.yml
- aws.d/fireedge.yml
- aws.d/inputs.yml
@ -48,7 +48,7 @@ defaults:
instancetype: "${input.aws_instance_type}"
cloud_init: true
connection:
remote_user: 'centos'
remote_user: 'ubuntu'
#-------------------------------------------------------------------------------
# cluster: Parameters for the OpenNebula cluster. Applies to all the Hosts

View File

@ -18,37 +18,22 @@
inputs:
- name: 'number_hosts'
type: text
description: "Number of Droplets to create"
description: 'Number of Droplets to create'
default: '1'
- name: 'digitalocean_image'
type: list
description: "Droplet host operating system"
default: 'centos-8-x64'
options:
- 'centos-8-x64'
description: 'Droplet host operating system'
default: 'ubuntu-20-04-x64'
- name: 'digitalocean_size'
type: list
description: "Size of droplet. Basic droplets start with s-, memory optimize with m- and CPU optimize are c-"
description: 'Size of droplet. Basic droplets start with s-, memory optimize with m- and CPU optimize are c-'
default: 's-1vcpu-1gb'
options:
- 's-1vcpu-1gb'
- 's-1vcpu-2gb'
- 's-1vcpu-3gb'
- 's-2vcpu-2gb'
- 's-2vcpu-4gb'
- 's-4vcpu-8gb'
- 's-8vcpu-16gb'
- 'm-2vcpu-16gb'
- 'm-8vcpu-64gb'
- 'c-2'
- 'c-4'
- 'c-8'
- name: 'one_hypervisor'
type: list
description: "Virtualization technology for the cluster hosts"
description: 'Virtualization technology for the cluster hosts'
default: 'lxc'
options:
- 'qemu'

View File

@ -23,17 +23,17 @@ inputs:
- name: 'google_image'
type: text
description: "Google image used for host deployments"
default: ''
description: 'Google image used for host deployments'
default: 'ubuntu-2004-focal-v20220118'
- name: 'google_machine_type'
type: text
description: "Google instance type, use virtual instances"
default: ''
description: 'Google instance type, use virtual instances'
default: 'e2-standard-2'
- name: 'one_hypervisor'
type: list
description: "Virtualization technology for the cluster hosts"
description: 'Virtualization technology for the cluster hosts'
default: 'lxc'
options:
- 'qemu'

View File

@ -31,7 +31,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: 'eth0'
phydev: 'ens4'
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -18,7 +18,7 @@
inputs:
- name: 'number_hosts'
type: text
description: "Number of virtual servers to create"
description: 'Number of virtual servers to create'
default: '1'
- name: 'number_public_ips'
@ -33,20 +33,19 @@ inputs:
- name: 'vultr_plan'
type: text
description: "Vultr plan (device type)"
description: 'Vultr plan (device type)'
default: 'vc2-1c-1gb'
- name: 'vultr_os'
type: text
description: "Vultr host operating system"
default: '362'
description: 'Vultr host operating system'
default: '387'
- name: 'one_hypervisor'
type: list
description: "Virtualization technology for the cluster hosts"
description: 'Virtualization technology for the cluster hosts'
default: 'lxc'
options:
- 'qemu'
- 'lxc'
default: 'lxc'
...

View File

@ -32,7 +32,7 @@ networks:
vntemplates:
- name: "${provision}-private"
vn_mad: 'vxlan'
phydev: 'eth1'
phydev: 'enp6s0'
automatic_vlan_id: 'yes'
netrole: 'private'
vxlan_mode: 'evpn'

View File

@ -222,6 +222,13 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
:description => 'Dump the configuration file result.'
}
AMOUNT = {
:name => 'amount',
:large => '--amount amount',
:description => 'Amount of hosts to add to the provision',
:format => Integer
}
########################################################################
MODES = CommandParser::OPTIONS - [CommandParser::VERBOSE] +
@ -403,9 +410,9 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
if provider
provider = OneProvision::Provider.by_name(@client, provider)
return provider if OpenNebula.is_error?(provider)
return [-1, provider.message] if OpenNebula.is_error?(provider)
return OpenNebula::Error.new('Provider not found') unless provider
return [-1, 'Provider not found'] unless provider
end
provision.deploy(config, cleanup, timeout, skip, provider)
@ -417,12 +424,15 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
# @param force [Boolean] True to configure hosts anyway
def configure(id, force)
provision = OneProvision::Provision.new_with_id(id, @client)
rc = provision.info
rc = provision.info
return [-1, rc.message] if OpenNebula.is_error?(rc)
return rc if OpenNebula.is_error?(rc)
rc = provision.configure(force)
provision.configure(force)
return [-1, rc.message] if OpenNebula.is_error?(rc)
0
end
# Deletes an existing provision
@ -430,56 +440,103 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
# @param id [Integer] Provision ID
# @param cleanup [Boolean] True to delete VMs and images
# @param timeout [Integer] Timeout in seconds to wait in delete
def delete(id, cleanup, timeout)
# @param force [Boolean] Force provision deletion
def delete(id, cleanup, timeout, force)
provision = OneProvision::Provision.new_with_id(id, @client)
rc = provision.info
rc = provision.info
return [-1, rc.message] if OpenNebula.is_error?(rc)
return rc if OpenNebula.is_error?(rc)
provision.synchronize(3) do
provision.delete(cleanup, timeout)
rc = provision.synchronize(3) do
provision.delete(cleanup, timeout, force)
end
return [-1, rc.message] if OpenNebula.is_error?(rc)
0
end
#######################################################################
# Helper host functions
#######################################################################
# Adds a new hosts to the provision and configures them
#
# @param id [Integer] Provision ID
# @param options [Hash] User CLI options
def add_hosts(id, options)
parse_options(options)
options.key?(:amount) ? amount = options[:amount] : amount = 1
provision = OneProvision::Provision.new_with_id(id, @client)
rc = provision.info
return [-1, rc.message] if OpenNebula.is_error?(rc)
rc = provision.add_hosts(amount)
return [-1, rc.message] if OpenNebula.is_error?(rc)
0
end
# Executes an operation in a host
#
# @param host [OpenNebula::Host]
# @param operation [String] Operation to perform
# @param args [Array] Operation arguments
# @param operation [String] Operation to perform
# @param args [Array] Operation arguments
def host_operation(host, operation, args)
p_id = host['TEMPLATE/PROVISION/ID']
return OpenNebula::Error.new('No provision ID found') unless p_id
provision = OneProvision::Provision.new_with_id(p_id, @client)
rc = provision.info
rc = provision.info
return rc if OpenNebula.is_error?(rc)
return [-1, rc.message] if OpenNebula.is_error?(rc)
id = host['ID']
host = OneProvision::Host.new(provision.provider['NAME'])
host.info(id)
rc = nil
case operation[:operation]
when 'delete'
provision.update_objects('hosts', :remove, host.one['ID'])
rc = provision.update_objects('hosts', :remove, host.one['ID'])
when 'configure'
host.configure
rc = host.configure
when 'ssh'
host.ssh(args)
rc = host.ssh(args)
end
return [-1, rc.message] if OpenNebula.is_error?(rc)
0
end
#######################################################################
# Helper resource functions
#######################################################################
# Add more IPs to provision network
#
# @param id [Integer] Provision ID
# @param amount [Integer] Number of IPs to add
def add_ips(id, amount)
provision = OneProvision::Provision.new_with_id(id, @client)
rc = provision.info
return [-1, rc.message] if OpenNebula.is_error?(rc)
rc = provision.add_ips(amount.nil? ? 1 : amount)
return [-1, rc.message] if OpenNebula.is_error?(rc)
0
end
# Executes an operation in a resource
#
# @param args [Array] Operation arguments
@ -522,12 +579,15 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
end
provision = OneProvision::Provision.new_with_id(p_id, @client)
rc = provision.info
rc = provision.info
return [-1, rc.message] if OpenNebula.is_error?(rc)
provision.update_objects(type.downcase, :remove, obj['ID'])
rc = provision.update_objects(type.downcase, :remove, obj['ID'])
return [-1, rc.message] if OpenNebula.is_error?(rc)
0
end
end
end
@ -546,9 +606,9 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
end
pool = factory_pool(options)
rc = pool.info
rc = pool.info_all
return rc if OpenNebula.is_error?(rc)
return [-1, rc.message] if OpenNebula.is_error?(rc)
pool = pool.map do |e|
e.info(true)
@ -578,10 +638,7 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
helper = helper(type)
if OpenNebula.is_error?(helper)
STDERR.puts helper.message
exit(-1)
end
return [-1, helper.message] if OpenNebula.is_error?(helper)
helper.list_pool(options, top)

View File

@ -139,7 +139,7 @@ CommandParser::CmdParser.new(ARGV) do
if OpenNebula.is_error?(rc)
STDERR.puts rc.message
exit(-1)
elsif rc.to_i < 0
elsif rc[0].to_i < 0
rc
else
puts CLIHelper.green('Provision successfully created')
@ -209,14 +209,7 @@ CommandParser::CmdParser.new(ARGV) do
OneProvision::Utils.print_cmd('configure', options)
rc = helper.configure(args[0], options.key?(:force))
if OpenNebula.is_error?(rc)
STDERR.puts rc.message
exit(-1)
else
0
end
helper.configure(args[0], options.key?(:force))
end
###
@ -228,7 +221,8 @@ CommandParser::CmdParser.new(ARGV) do
command :delete,
provision_delete_desc,
:provisionid,
:options => [OneProvisionHelper::MODES,
:options => [OneProvisionHelper::FORCE,
OneProvisionHelper::MODES,
OneProvisionHelper::THREADS,
OneProvisionHelper::CLEANUP,
OneProvisionHelper::CLEANUP_TIMEOUT] +
@ -243,20 +237,30 @@ CommandParser::CmdParser.new(ARGV) do
timeout = options[:cleanup_timeout]
end
rc = helper.delete(args[0], (options.key? :cleanup), timeout)
if OpenNebula.is_error?(rc)
STDERR.puts rc.message
exit(-1)
else
0
end
helper.delete(args[0],
(options.key? :cleanup),
timeout,
(options.key? :force))
end
########################################################################
# Host Commands
########################################################################
host_add_desc = <<-EOT.unindent
Provisions and configures a new host
EOT
command [:host, :add],
host_add_desc,
:provisionid,
:options => [OneProvisionHelper::MODES,
OneProvisionHelper::AMOUNT] do
helper.add_hosts(args[0], options)
end
###
host_delete_desc = <<-EOT.unindent
Unprovisions and deletes the given Host
EOT
@ -267,12 +271,7 @@ CommandParser::CmdParser.new(ARGV) do
:options => [OneProvisionHelper::MODES] do
operation = { :operation => 'delete', :message => 'deleted' }
rc = helper.resources_operation(args, operation, options, 'HOSTS')
return 0 unless OpenNebula.is_error?(rc)
STDERR.puts rc.message
exit(-1)
helper.resources_operation(args, operation, options, 'HOSTS')
end
###
@ -287,12 +286,7 @@ CommandParser::CmdParser.new(ARGV) do
:options => [OneProvisionHelper::MODES] do
operation = { :operation => 'configure', :message => 'enabled' }
rc = helper.resources_operation(args, operation, options, 'HOSTS')
return 0 unless OpenNebula.is_error?(rc)
STDERR.puts rc.message
exit(-1)
helper.resources_operation(args, operation, options, 'HOSTS')
end
###
@ -307,12 +301,7 @@ CommandParser::CmdParser.new(ARGV) do
[:command, nil] do
operation = { :operation => 'ssh', :message => 'enabled' }
rc = helper.resources_operation(args, operation, options, 'HOSTS')
return 0 unless OpenNebula.is_error?(rc)
STDERR.puts rc.message
exit(-1)
helper.resources_operation(args, operation, options, 'HOSTS')
end
###
@ -344,6 +333,17 @@ CommandParser::CmdParser.new(ARGV) do
# Resources Commands
########################################################################
ip_add_desc = <<-EOT.unindent
Adds more IPs to the provision
EOT
command [:ip, :add],
ip_add_desc,
:provisionid,
:options => OneProvisionHelper::AMOUNT do
helper.add_ips(args[0], options[:amount])
end
(OneProvision::Provision::RESOURCES +
OneProvision::Provision::FULL_CLUSTER -
%w[hosts marketplaceapps flowtemplates]).each do |resource|
@ -373,19 +373,10 @@ CommandParser::CmdParser.new(ARGV) do
[:range, :id_list],
:options => [OneProvisionHelper::MODES,
OneProvisionHelper::FORCE] do
rc = helper.resources_operation(args,
{ :operation => 'delete' },
options,
resource.upcase)
if rc.is_a?(Array) && rc[0] == 0
0
elsif rc.is_a?(Array)
STDERR.puts rc[1]
exit(-1)
else
0
end
helper.resources_operation(args,
{ :operation => 'delete' },
options,
resource.upcase)
end
end
end

View File

@ -107,7 +107,7 @@ module OneProvision
cmd << " -e @#{ansible_dir}/group_vars.yml"
cmd << " #{ANSIBLE_LOCATION}/#{i}.yml"
o, _e, s = Driver.run(cmd)
o, _e, s = Driver.run(cmd, true)
if s && s.success? && i == @inventories.last
# enable configured ONE host back

View File

@ -104,7 +104,7 @@ module OneProvision
# @param block [Ruby Code]
#
# @return [Array] Output, Error and Value returned
def run(*cmd, &_block)
def run((*cmd), out = false)
OneProvisionLogger.debug("Command run: #{cmd.join(' ')}")
rtn = nil
@ -126,8 +126,8 @@ module OneProvision
e.binmode
end
out_reader = Thread.new { o.read }
err_reader = Thread.new { e.read }
out_reader = Thread.new { streamer(o, out) }
err_reader = Thread.new { streamer(e, out) }
begin
i.write stdin_data
@ -141,7 +141,9 @@ module OneProvision
raise OneProvisionLoopException, e.text
end
rtn = [out_reader.value, err_reader.value, t.value]
rtn = [out_reader.value.strip,
err_reader.value,
t.value]
end
@@mutex.synchronize do
@ -177,6 +179,27 @@ module OneProvision
rtn
end
# Reads lines from a stream until it is empty, optionally printing them
#
# @param str [IO] Stream to read lines from
# @param logstdout [Boolean] True to print each line to stdout
def streamer(str, logstdout)
full = ''
str.each do |l|
next if l.empty? || l == "\n"
full << l
next unless logstdout
print l
$stdout.flush
end
full
end
# TODO: handle exceptions?
#
# Writes content to file

View File

@ -99,6 +99,32 @@ module OneProvision
STATE_STR[state]
end
# Checks if the provision can be configured
#
# @param force [Boolean] Relax the state check
def can_configure?(force)
if force
case state
when STATE['PENDING'],
STATE['DEPLOYING'],
STATE['DELETING']
return OpenNebula::Error.new(
"Can't configure provision in #{state_str}"
)
else
0
end
else
unless state == STATE['ERROR']
return OpenNebula::Error.new(
"Can't configure provision in #{state_str}"
)
end
end
0
end
# Changes provision state
#
# @param state [Integer] New state
@ -132,6 +158,13 @@ module OneProvision
infrastructure_objects['datastores']
end
# Returns provision networks
def networks
return unless infrastructure_objects
infrastructure_objects['networks']
end
# Returns provision resources objects
def resource_objects
@body['provision']['resource']
@ -176,6 +209,11 @@ module OneProvision
@body['tf']['conf'] = conf
end
# Returns address range template to recreate it
def ar_template
@body['ar_template']
end
# Get OpenNebula information for specific objects
#
# @param object [String] Object to check
@ -349,7 +387,7 @@ module OneProvision
self['ID']
rescue OneProvisionCleanupException
delete(cleanup, timeout)
delete(cleanup, timeout, true)
-1
end
@ -360,42 +398,169 @@ module OneProvision
# @param force [Boolean] Force the configuration although provision
# is already configured
def configure(force = false)
unless [STATE['RUNNING'], STATE['ERROR']].include?(state)
rc = can_configure?(force)
return rc if OpenNebula.is_error?(rc)
configure_resources
end
# Provisions and configures new hosts
#
# @param amount [Integer] Amount of hosts to add to the provision
def add_hosts(amount)
if !state || state != STATE['RUNNING']
return OpenNebula::Error.new(
"Can't configure provision in #{STATE_STR[state]}"
"Can't add hosts to provision in #{STATE_STR[state]}"
)
end
if state == STATE['RUNNING'] && !force
return OpenNebula::Error.new('Provision already configured')
self.state = STATE['DEPLOYING']
update
OneProvisionLogger.info('Adding more hosts')
# ask user to be patient, mandatory for now
STDERR.puts 'WARNING: This operation can ' \
'take tens of minutes. Please be patient.'
# Get current host template to replicate it
cid = cluster['id']
host = hosts[0]
host = OpenNebula::Host.new_with_id(host['id'], @client)
rc = host.info
return rc if OpenNebula.is_error?(rc)
host = host.to_hash['HOST']['TEMPLATE']
# Delete host specific information
host.delete('ERROR')
host['PROVISION'].delete('DEPLOY_ID')
host['PROVISION'].delete('HOSTNAME')
# Downcase to use create_deployment_file
host = host.transform_keys(&:downcase)
host.keys.each do |key|
next unless host[key].is_a? Hash
host[key] = host[key].transform_keys(&:downcase)
end
host['connection'] = {}
%w[private_key public_key remote_port remote_user].each do |attr|
host['connection'][attr] = host['provision_connection'][attr]
end
# idx used to generate hostname
idx = hosts.size
# Allocate hosts in OpenNebula and add them to the provision
amount.times do
host['provision']['index'] = idx
host['provision']['hostname'] = ''
host['provision']['hostname'] = "edge-host#{idx}"
h = Resource.object('hosts', @provider, host)
dfile = h.create_deployment_file
one_host = h.create(dfile.to_xml,
cid,
host['ansible_playbook'])
obj = { 'id' => Integer(one_host['ID']),
'name' => one_host['NAME'] }
infrastructure_objects['hosts'] << obj
one_host.offline
update
idx += 1
end
OneProvisionLogger.info('Deploying')
ips, ids, state, conf = Driver.tf_action(self, 'add_hosts', tf)
OneProvisionLogger.info('Monitoring hosts')
update_hosts(ips, ids)
add_tf(state, conf) if state && conf
update
configure_resources
end
# Adds more IPs to the existing virtual network
#
# @param amount [Integer] Number of IPs to add
def add_ips(amount)
if !state || state != STATE['RUNNING']
return OpenNebula::Error.new(
"Can't add IPs to provision in #{STATE_STR[state]}"
)
end
if !networks || networks.empty?
return OpenNebula::Error.new('Provision has no networks')
end
v_id = networks[0]['id']
vnet = OpenNebula::VirtualNetwork.new_with_id(v_id, @client)
rc = vnet.info
return rc if OpenNebula.is_error?(rc)
unless vnet['VN_MAD'] == 'elastic'
return OpenNebula::Error.new(
"Can't add IPs to network, wrong VN_MAD '#{vnet['VN_MAD']}'"
)
end
OneProvisionLogger.info("Adding more IPs to network #{v_id}")
amount.times do
rc = vnet.add_ar(ar_template)
return rc if OpenNebula.is_error?(rc)
end
0
end
# Deletes provision objects
#
# @param cleanup [Boolean] True to delete running VMs and images
# @param timeout [Integer] Timeout for deleting running VMs
def delete(cleanup, timeout)
# @param force [Boolean] Force provision deletion
def delete(cleanup, timeout, force = false)
exist = true
if running_vms? && !cleanup
Utils.fail('Provision with running VMs can\'t be deleted')
end
unless force
if running_vms? && !cleanup
Utils.fail('Provision with running VMs can\'t be deleted')
end
if images? && !cleanup
Utils.fail('Provision with images can\'t be deleted')
if images? && !cleanup
Utils.fail('Provision with images can\'t be deleted')
end
self.state = STATE['DELETING']
update
delete_vms(timeout) if cleanup
delete_images(timeout) if cleanup
end
self.state = STATE['DELETING']
update
delete_vms(timeout) if cleanup
delete_images(timeout) if cleanup
OneProvisionLogger.info("Deleting provision #{self['ID']}")
if hosts && !hosts.empty? && tf_state && tf_conf
@ -409,7 +574,7 @@ module OneProvision
host = Host.new(provider)
host.info(id)
host.delete
host.delete(force, self)
end
end
end
@ -418,10 +583,14 @@ module OneProvision
OneProvisionLogger.info('Deleting provision objects')
# Marketapps are turned into images and VM templates
delete_objects(RESOURCES - ['marketplaceapps'], resource_objects)
delete_objects(RESOURCES - ['marketplaceapps'],
resource_objects,
force)
# Hosts are previously deleted
delete_objects(FULL_CLUSTER - ['hosts'], infrastructure_objects)
delete_objects(FULL_CLUSTER - ['hosts'],
infrastructure_objects,
force)
rc = super()
@ -459,7 +628,9 @@ module OneProvision
return [-1, rc.message] if OpenNebula.is_error?(rc)
rc = o.delete(FULL_CLUSTER.include?(object) ? tf : nil)
rc = o.delete(false,
self,
FULL_CLUSTER.include?(object) ? tf : nil)
return [-1, rc.message] if OpenNebula.is_error?(rc)
@ -591,6 +762,15 @@ module OneProvision
obj.template_chown(x)
obj.template_chmod(x)
next unless r == 'networks'
next unless x['ar']
@body['ar_template'] = {}
@body['ar_template'] = Utils.template_like_str(
'ar' => x['ar'][0]
)
end
update
@ -734,6 +914,13 @@ module OneProvision
host = Resource.object('hosts', provider)
host.info(h['id'])
# Skip hosts that are already deployed (have a DEPLOY_ID)
if host.one['//TEMPLATE/PROVISION/DEPLOY_ID']
ips.shift
ids.shift
next
end
name = ips.shift
id = ids.shift if ids
@ -921,9 +1108,10 @@ module OneProvision
# Deletes provision objects
#
# @param resources [Array] Resources names
# @param objects [Array] Objects information to delete
def delete_objects(resources, objects)
# @param resources [Array] Resources names
# @param objects [Array] Objects information to delete
# @param force [Boolean] Force object deletion
def delete_objects(resources, objects, force)
return unless objects
resources.each do |resource|
@ -938,7 +1126,11 @@ module OneProvision
o = Resource.object(resource)
o.info(obj['id'])
Utils.exception(o.delete)
if force
o.delete(force, self)
else
Utils.exception(o.delete(force, self))
end
end
true

View File

@ -67,17 +67,19 @@ module OneProvision
# Deletes the cluster
#
# @param force [Boolean] Force cluster deletion
# @param provision [OpenNebula::Provision] Provision information
# @param tf [Hash] Terraform :conf and :state
#
# @return [Array]
# - Terraform state in base64
# - Terraform config in base64
def delete(tf = nil)
def delete(force, provision, tf = nil)
if tf && !tf.empty?
Terraform.p_load
terraform = Terraform.singleton(@provider, tf)
state, conf = terraform.destroy_cluster(@one.id)
state, conf = terraform.destroy_cluster(provision, @one.id)
end
# Remove non-provision elements added to the cluster
@ -85,7 +87,11 @@ module OneProvision
@one.vnet_ids.each {|i| @one.delvnet(i) }
@one.host_ids.each {|i| @one.delhost(i) }
Utils.exception(@one.delete)
if force
@one.delete
else
Utils.exception(@one.delete)
end
if state && conf
[state, conf]

View File

@ -42,12 +42,13 @@ module OneProvision
# Destroy datastore in provider
#
# @param provision [OpenNebula::Provision] Provision information
# @param tf [Hash] Terraform configuration
def destroy(tf)
def destroy(provision, tf)
Terraform.p_load
terraform = Terraform.singleton(@provider, tf)
terraform.destroy_datastore(@one.id)
terraform.destroy_datastore(provision, @one.id)
end
private

View File

@ -44,7 +44,7 @@ module OneProvision
def create_deployment_file
ssh_key = Utils.try_read_file(
@p_template['connection']['public_key']
)
) if @p_template['connection']
config = Base64.strict_encode64(
@p_template['configuration'].to_yaml
)
@ -167,12 +167,14 @@ module OneProvision
# Deletes the HOST
#
# @param force [Boolean] Force host deletion
# @param provision [OpenNebula::Provision] Provision information
# @param tf [Hash] Terraform :conf and :state
#
# @return [Array]
# - Terraform state in base64
# - Terraform config in base64
def delete(tf = nil)
def delete(force, provision, tf = nil)
check
id = @one.id
@ -182,7 +184,11 @@ module OneProvision
OneProvisionLogger.debug("Offlining OpenNebula host: #{id}")
@@mutex.synchronize do
Utils.exception(@one.offline)
if force
@one.offline
else
Utils.exception(@one.offline)
end
end
end
@ -190,14 +196,18 @@ module OneProvision
Terraform.p_load
terraform = Terraform.singleton(@provider, tf)
state, conf = terraform.destroy_host(id)
state, conf = terraform.destroy_host(provision, id)
end
# delete ONE host
OneProvisionLogger.debug("Deleting OpenNebula host: #{id}")
@@mutex.synchronize do
Utils.exception(@one.delete)
if force
@one.delete
else
Utils.exception(@one.delete)
end
end
if state && conf

View File

@ -57,12 +57,13 @@ module OneProvision
# Destroy network in provider
#
# @param provision [OpenNebula::Provision] Provision information
# @param tf [Hash] Terraform configuration
def destroy(tf)
def destroy(provision, tf)
Terraform.p_load
terraform = Terraform.singleton(@provider, tf)
terraform.destroy_network(@one.id)
terraform.destroy_network(provision, @one.id)
end
private

View File

@ -54,15 +54,21 @@ module OneProvision
# Deletes the resource
#
# @param tf [Hash] Terraform :conf and :state
# @param force [Boolean] Force object deletion
# @param provision [Provision] Provision information
# @param tf [Hash] Terraform :conf and :state
#
# @return [Array]
# - Terraform state in base64
# - Terraform config in base64
def delete(tf = nil)
state, conf = destroy(tf) if tf && !tf.empty?
def delete(force, provision, tf = nil)
state, conf = destroy(provision, tf) if tf && !tf.empty?
Utils.exception(@one.delete)
if force
@one.delete
else
Utils.exception(@one.delete)
end
if state && conf
[state, conf]

View File

@ -152,7 +152,7 @@ module OneProvision
end
# Deletes the ONE object
def delete(_ = nil)
def delete(_ = nil, _ = nil, _ = nil)
@one.info
@one.delete
end

View File

@ -28,7 +28,7 @@ module OneProvision
SUPPORTED_MODES = %w[true false]
# Delete object
def delete(_ = nil)
def delete(_ = nil, _ = nil, _ = nil)
@one.info
id = @one['ID']

View File

@ -61,13 +61,6 @@ module OneProvision
user_data << "chmod 700 ~/.ssh\n"
user_data << "chmod 644 ~/.ssh/authorized_keys\n"
# Rename last NIC to eth1
user_data << 'NIC=$(ip --brief link show | '\
"tail -1 | awk '{print $1}')\n"
user_data << "ip link set down $NIC\n"
user_data << "ip link set $NIC name eth_one\n"
user_data << 'ip link set up $NIC'
Base64.strict_encode64(user_data)
end

View File

@ -253,6 +253,19 @@ module OneProvision
FileUtils.rm_r(tempdir) if tempdir && File.exist?(tempdir)
end
# Provisions and configures new hosts
#
# @param provision [OpenNebula::Provision] Provision information
def add_hosts(provision)
@conf = Zlib::Inflate.inflate(Base64.decode64(@conf))
@state = Zlib::Inflate.inflate(Base64.decode64(@state))
# Generate hosts Terraform configuration
host_info(provision)
deploy(provision)
end
# Get polling information from a host
#
# @param id [String] Host ID
@ -313,30 +326,34 @@ module OneProvision
# Destroys a cluster
#
# @param id [String] Host ID
def destroy_cluster(id)
destroy_resource(self.class::TYPES[:cluster], id)
# @param provision [OpenNebula::Provision] Provision information
# @param id [String] Host ID
def destroy_cluster(provision, id)
destroy_resource(self.class::TYPES[:cluster], provision, id)
end
# Destroys a host
#
# @param id [String] Host ID
def destroy_host(id)
destroy_resource(self.class::TYPES[:host], id)
# @param provision [OpenNebula::Provision] Provision information
# @param id [String] Host ID
def destroy_host(provision, id)
destroy_resource(self.class::TYPES[:host], provision, id)
end
# Destroys a datastore
#
# @param id [String] Datastore ID
def destroy_datastore(id)
destroy_resource(self.class::TYPES[:datastore], id)
# @param provision [OpenNebula::Provision] Provision information
# @param id [String] Datastore ID
def destroy_datastore(provision, id)
destroy_resource(self.class::TYPES[:datastore], provision, id)
end
# Destroys a network
#
# @param id [String] Network ID
def destroy_network(id)
destroy_resource(self.class::TYPES[:network], id)
# @param provision [OpenNebula::Provision] Provision information
# @param id [String] Network ID
def destroy_network(provision, id)
destroy_resource(self.class::TYPES[:network], provision, id)
end
private
@ -395,6 +412,8 @@ module OneProvision
next if !p || p.empty?
next if p['DEPLOY_ID'] # Already configured host
p = p.merge(@provider.connection)
yield(obj) if block_given?
@ -513,10 +532,11 @@ module OneProvision
# Destroys a specific resource
#
# @param type [String] Resource type
# @param id [String] Resource ID
def destroy_resource(type, id)
destroy("-target=#{type}.device_#{id}")
# @param type [String] Resource type
# @param provision [OpenNebula::Provision] Provision information
# @param id [String] Resource ID
def destroy_resource(type, provision, id)
destroy(provision, "-target=#{type}.device_#{id}")
end
end