
F #5214 #5075: New provision templates.

* Public IPs no longer use NAT
* Improved handling of host collections and indexes
* Updated Ansible roles and playbooks

Co-authored-by: Ruben S. Montero <rsmontero@opennebula.org>
Jan Orel 2020-12-15 10:07:06 +01:00 committed by GitHub
parent 3ae13b6ddc
commit 96068ef185
103 changed files with 1178 additions and 1446 deletions

View File

@ -429,7 +429,7 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/vnm/vcenter/pre.d \
$VAR_LOCATION/remotes/vnm/vcenter/post.d \
$VAR_LOCATION/remotes/vnm/vcenter/clean.d \
$VAR_LOCATION/remotes/vnm/alias_sdnat \
$VAR_LOCATION/remotes/vnm/elastic \
$VAR_LOCATION/remotes/vnm/hooks/pre \
$VAR_LOCATION/remotes/vnm/hooks/post \
$VAR_LOCATION/remotes/vnm/hooks/clean \
@ -473,7 +473,7 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/auth/dummy \
$VAR_LOCATION/remotes/ipam/dummy \
$VAR_LOCATION/remotes/ipam/packet \
$VAR_LOCATION/remotes/ipam/ec2"
$VAR_LOCATION/remotes/ipam/aws"
SUNSTONE_DIRS="$SUNSTONE_LOCATION/routes \
$SUNSTONE_LOCATION/models \
@ -667,7 +667,7 @@ INSTALL_FILES=(
MARKETPLACE_DRIVER_DH_SCRIPTS:$VAR_LOCATION/remotes/market/dockerhub
IPAM_DRIVER_DUMMY_SCRIPTS:$VAR_LOCATION/remotes/ipam/dummy
IPAM_DRIVER_PACKET_SCRIPTS:$VAR_LOCATION/remotes/ipam/packet
IPAM_DRIVER_EC2_SCRIPTS:$VAR_LOCATION/remotes/ipam/ec2
IPAM_DRIVER_EC2_SCRIPTS:$VAR_LOCATION/remotes/ipam/aws
NETWORK_FILES:$VAR_LOCATION/remotes/vnm
NETWORK_HOOKS_PRE_FILES:$VAR_LOCATION/remotes/vnm/hooks/pre
NETWORK_HOOKS_CLEAN_FILES:$VAR_LOCATION/remotes/vnm/hooks/clean
@ -681,7 +681,7 @@ INSTALL_FILES=(
NETWORK_OVSWITCH_FILES:$VAR_LOCATION/remotes/vnm/ovswitch
NETWORK_OVSWITCH_VXLAN_FILES:$VAR_LOCATION/remotes/vnm/ovswitch_vxlan
NETWORK_VCENTER_FILES:$VAR_LOCATION/remotes/vnm/vcenter
NETWORK_ALIAS_SDNAT_FILES:$VAR_LOCATION/remotes/vnm/alias_sdnat
NETWORK_ELASTIC_FILES:$VAR_LOCATION/remotes/vnm/elastic
EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples
EXAMPLE_HOST_HOOKS_SHARE_FILES:$SHARE_LOCATION/examples/host_hooks
LXD_NETWORK_HOOKS:$SHARE_LOCATION/examples/network_hooks
@ -737,7 +737,6 @@ INSTALL_ONEPROVISION_FILES=(
ONEPROVISION_CONF_FILES:$ETC_LOCATION/cli
ONEPROVISION_ANSIBLE_FILES:$SHARE_LOCATION/oneprovision
ONEPROVISION_TEMPLATES_FILES:$SHARE_LOCATION/oneprovision
ONEPROVISION_EXAMPLES_FILES:$SHARE_LOCATION/oneprovision
ONEPROVISION_LIB_FILES:$LIB_LOCATION/oneprovision/lib
ONEPROVISION_LIB_TF_FILES:$LIB_LOCATION/oneprovision/lib/terraform
ONEPROVISION_LIB_PROVIDERS_FILES:$LIB_LOCATION/oneprovision/lib/terraform/providers
@ -952,8 +951,8 @@ RUBY_LIB_FILES="src/mad/ruby/ActionManager.rb \
src/vmm_mad/remotes/ec2/ec2_driver.rb \
src/vmm_mad/remotes/one/opennebula_driver.rb \
src/vmm_mad/remotes/packet/packet_driver.rb \
src/vnm_mad/remotes/alias_sdnat/ec2_vnm.rb \
src/vnm_mad/remotes/alias_sdnat/packet_vnm.rb"
src/vnm_mad/remotes/elastic/aws_vnm.rb \
src/vnm_mad/remotes/elastic/packet_vnm.rb"
#-------------------------------------------------------------------------------
# Ruby auth library files, to be installed under $LIB_LOCATION/ruby/opennebula
@ -1587,11 +1586,11 @@ NETWORK_VCENTER_FILES="src/vnm_mad/remotes/vcenter/pre \
src/vnm_mad/remotes/vcenter/clean \
src/vnm_mad/remotes/vcenter/update_sg"
NETWORK_ALIAS_SDNAT_FILES="src/vnm_mad/remotes/alias_sdnat/AliasSDNAT.rb \
src/vnm_mad/remotes/alias_sdnat/clean \
src/vnm_mad/remotes/alias_sdnat/post \
src/vnm_mad/remotes/alias_sdnat/pre \
src/vnm_mad/remotes/alias_sdnat/update_sg "
NETWORK_ELASTIC_FILES="src/vnm_mad/remotes/elastic/Elastic.rb \
src/vnm_mad/remotes/elastic/clean \
src/vnm_mad/remotes/elastic/post \
src/vnm_mad/remotes/elastic/pre \
src/vnm_mad/remotes/elastic/update_sg "
#-------------------------------------------------------------------------------
# Virtual Network Manager drivers configuration to be installed under $REMOTES_LOCATION/etc/vnm
@ -1619,11 +1618,11 @@ IPAM_DRIVER_PACKET_SCRIPTS="src/ipamm_mad/remotes/packet/register_address_range
#-------------------------------------------------------------------------------
# IPAM EC2 drivers to be installed under $REMOTES_LOCATION/ipam
#-------------------------------------------------------------------------------
IPAM_DRIVER_EC2_SCRIPTS="src/ipamm_mad/remotes/ec2/register_address_range \
src/ipamm_mad/remotes/ec2/unregister_address_range \
src/ipamm_mad/remotes/ec2/allocate_address \
src/ipamm_mad/remotes/ec2/get_address \
src/ipamm_mad/remotes/ec2/free_address"
IPAM_DRIVER_EC2_SCRIPTS="src/ipamm_mad/remotes/aws/register_address_range \
src/ipamm_mad/remotes/aws/unregister_address_range \
src/ipamm_mad/remotes/aws/allocate_address \
src/ipamm_mad/remotes/aws/get_address \
src/ipamm_mad/remotes/aws/free_address"
#-------------------------------------------------------------------------------
# Transfer Manager commands, to be installed under $LIB_LOCATION/tm_commands
@ -2378,9 +2377,7 @@ ONEPROVISION_CONF_FILES="src/cli/etc/oneprovision.yaml \
ONEPROVISION_ANSIBLE_FILES="share/oneprovision/ansible"
ONEPROVISION_TEMPLATES_FILES="share/oneprovision/templates"
ONEPROVISION_EXAMPLES_FILES="share/oneprovision/examples"
ONEPROVISION_TEMPLATES_FILES="share/oneprovision/hybrid+"
ONEPROVISION_LIB_FILES="src/oneprovision/lib/oneprovision.rb \
src/oneprovision/lib/provision_element.rb"

View File

@ -675,7 +675,7 @@ MARKET_MAD = [
IPAM_MAD = [
EXECUTABLE = "one_ipam",
ARGUMENTS = "-t 1 -i dummy"
ARGUMENTS = "-t 1 -i dummy,aws,packet"
]
#*******************************************************************************
@ -1069,6 +1069,8 @@ INHERIT_VNET_ATTR = "BRIDGE_CONF"
INHERIT_VNET_ATTR = "OVS_BRIDGE_CONF"
INHERIT_VNET_ATTR = "IP_LINK_CONF"
INHERIT_VNET_ATTR = "EXTERNAL"
INHERIT_VNET_ATTR = "AWS_ALLOCATION_ID"
INHERIT_VNET_ATTR = "GATEWAY"
INHERIT_VNET_ATTR = "VCENTER_NET_REF"
INHERIT_VNET_ATTR = "VCENTER_SWITCH_NAME"
@ -1406,3 +1408,8 @@ VN_MAD_CONF = [
NAME = "bridge",
BRIDGE_TYPE = "linux"
]
VN_MAD_CONF = [
NAME = "elastic",
BRIDGE_TYPE = "linux"
]
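
The elastic driver registered above works together with the renamed aws IPAM and the newly inheritable AWS_ALLOCATION_ID, EXTERNAL and GATEWAY attributes: the IPAM fills those attributes into the address range and the NIC inherits them. As a rough hand-written sketch (name, bridge and CIDR are illustrative, not part of this commit), the virtual network described by the aws.d/networks.yml added later in this commit would expand to a template like:

    NAME     = "myprovision-public"
    VN_MAD   = "elastic"
    BRIDGE   = "br0"
    AR = [
        TYPE         = "IP4",
        SIZE         = "1",
        IPAM_MAD     = "aws",
        CIDR         = "10.0.0.0/16",
        PROVISION_ID = "<provision id>"
    ]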

View File

@ -11,6 +11,5 @@
- opennebula-repository
- opennebula-node-kvm
- opennebula-ssh
- tuntap
- bridged-networking
- iptables
- update-replica

View File

@ -1,16 +0,0 @@
---
- hosts: all
gather_facts: false
roles:
- python
- hosts: nodes
roles:
- ddc
- opennebula-repository
- opennebula-node-firecracker
- opennebula-ssh
- tuntap
- bridged-networking
- iptables

View File

@ -1,16 +0,0 @@
---
- hosts: all
gather_facts: false
roles:
- python
- hosts: nodes
roles:
- ddc
- opennebula-repository
- opennebula-node-lxd
- opennebula-ssh
- tuntap
- bridged-networking
- iptables

View File

@ -1,7 +0,0 @@
---
- hosts: all
gather_facts: false
tasks:
- name: Create a file in TMP dir
raw: touch /tmp/ddc_dummy

View File

@ -1,19 +0,0 @@
---
# Install qemu-ev on CentOS
opennebula_node_kvm_use_ev: true
# Create bridge with tap0 bridge port
bridged_networking_iface: tap0
# Tap0 is already managed by tuntap role
bridged_networking_iface_manage: false
# Configure static IP on bridge, works as gateway
bridged_networking_static_ip: 192.168.150.1
# Enable Masquerade
iptables_masquerade_enabled: true
# Disable iptables strict rules which drop all not matching the base rules
iptables_base_rules_strict: false

View File

@ -1,19 +0,0 @@
---
# Install qemu-ev on CentOS
opennebula_node_kvm_use_ev: true
# Create bridge with tap0 bridge port
bridged_networking_iface: tap0
# Tap0 is already managed by tuntap role
bridged_networking_iface_manage: false
# Configure static IP on bridge, works as gateway
bridged_networking_static_ip: 192.168.150.1
# Enable Masquerade
iptables_masquerade_enabled: true
# Disable iptables strict rules which drop all not matching the base rules
iptables_base_rules_strict: false

View File

@ -1,19 +0,0 @@
---
# Install qemu-ev on CentOS
opennebula_node_kvm_use_ev: true
# Create bridge with tap0 bridge port
bridged_networking_iface: tap0
# Tap0 is already managed by tuntap role
bridged_networking_iface_manage: false
# Configure static IP on bridge, works as gateway
bridged_networking_static_ip: 192.168.150.1
# Enable Masquerade
iptables_masquerade_enabled: true
# Disable iptables strict rules which drop all not matching the base rules
iptables_base_rules_strict: false

View File

@ -1,28 +0,0 @@
---
# Install qemu-ev on CentOS
opennebula_node_kvm_use_ev: true
# Create bridge with tap0 bridge port
bridged_networking_iface: tap0
# Tap0 is already managed by tuntap role
bridged_networking_iface_manage: false
# Configure static IP on bridge, works as gateway
bridged_networking_static_ip: 192.168.150.1
# Enable Masquerade
iptables_masquerade_enabled: true
# Disable iptables strict rules which drop all not matching the base rules
iptables_base_rules_strict: false
# Parameters for static VXLAN connections between provisioned hosts
opennebula_p2p_vxlan_bridge: vxbr100
opennebula_p2p_vxlan_phydev: "{{ ansible_interfaces | sort | select('in', ['bond0_0', 'eth0']) | first | replace('_', ':') }}"
ansible_phydev: "ansible_{{ ansible_interfaces | sort | select('in', ['bond0_0', 'eth0']) | first }}"
opennebula_p2p_vxlan_vxlan_vni: 100
opennebula_p2p_vxlan_vxlan_dev: vxlan100
opennebula_p2p_vxlan_vxlan_local_ip: '{{ ansible_facts[opennebula_p2p_vxlan_phydev | replace(":", "_")]["ipv4"]["address"] }}'
opennebula_p2p_vxlan_remotes: "{{ groups['nodes'] | map('extract', hostvars, [ansible_phydev, 'ipv4', 'address']) | list }}"

View File

@ -11,7 +11,5 @@
- opennebula-repository
- opennebula-node-kvm
- opennebula-ssh
- tuntap
- bridged-networking
- opennebula-p2p-vxlan
- iptables
- update-replica

View File

@ -1,21 +0,0 @@
# bridged-networking
## Description
Creates a new bridge (identified by `bridged_networking_bridge`) and connects the specified network interface (`bridged_networking_iface`) into.
These changes are persistent.
## Requirements
No special requirements.
## Variables
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
## Todo list
None

View File

@ -1,27 +0,0 @@
################################################################################
# Mandatory
################################################################################
# The bridge that will be created
bridged_networking_bridge: br0
# Manage configuration for bridge
bridged_networking_bridge_manage: True
# The network device connected to the bridge
bridged_networking_iface: eth1
# Manage configuration of interface connected to bridge
bridged_networking_iface_manage: True
# IP address of the bridge
bridged_networking_static_ip: NULL
# Netmask of the bridge
bridged_networking_static_netmask: '255.255.255.0'
# Gateway of the bridge
bridged_networking_static_gateway: NULL
# Name of interface to take the IP configuration for bridge, if bridged_networking_static_ip not defined
bridged_networking_ip_iface: eth1

View File

@ -1,11 +0,0 @@
---
- name: restart network
service:
name: network
state: restarted
- name: restart networking
service:
name: networking
state: restarted

View File

@ -1,63 +0,0 @@
---
- name: Install bridge-utils
package:
name: bridge-utils
state: present
- block:
- block:
- name: Install augeas-tools
package:
name: augeas-tools
state: present
- name: Stop bridge port interface
shell: |
ifdown {{ bridged_networking_iface }} || :
ip link set dev {{ bridged_networking_iface }} down || :
- name: Remove old configuration for bridge port interface (Debian like)
shell: '{{ item }}'
with_items: #TODO: fix repeated runs
- 'ifdown {{ bridged_networking_iface }}'
- 'augtool rm /files/etc/network/interfaces/auto[ * = \"{{ bridged_networking_iface }}\" ]'
- 'augtool rm /files/etc/network/interfaces/iface[ .=\"{{ bridged_networking_iface }}\" ]'
- name: Configuration for bridge port interface (Debian like)
blockinfile:
dest: /etc/network/interfaces
block: |
iface {{ bridged_networking_iface }} inet manual
marker: "# {mark} ANSIBLE MANAGED BLOCK {{ bridged_networking_iface }}"
when: bridged_networking_iface_manage
- name: Configuration for bridge (Debian like)
blockinfile:
dest: /etc/network/interfaces
block: |
auto {{ bridged_networking_bridge }}
iface {{ bridged_networking_bridge }} inet static
bridge_ports {{ bridged_networking_iface }}
{% if bridged_networking_static_ip %}
address {{ bridged_networking_static_ip }}
{% if bridged_networking_static_netmask %}netmask {{ bridged_networking_static_netmask }}{% endif %}
{% if bridged_networking_static_gateway %}gateway {{ bridged_networking_static_gateway }}{% endif %}
{% elif bridged_networking_ip_iface and bridged_networking_ip_iface.replace(':', '_') in ansible_interfaces and 'ipv4' in hostvars[inventory_hostname]["ansible_"+bridged_networking_ip_iface.replace(':', '_') ] %}
# IP configuration taken from '{{ bridged_networking_ip_iface }}'
address {{ hostvars[inventory_hostname]["ansible_"+bridged_networking_ip_iface.replace(':', '_') ].ipv4.address }}
netmask {{ hostvars[inventory_hostname]["ansible_"+bridged_networking_ip_iface.replace(':', '_') ].ipv4.netmask }}
{% if ansible_default_ipv4.interface == bridged_networking_ip_iface.replace(':', '_') %}
gateway {{ ansible_default_ipv4.gateway }}
{% endif %}
{% endif %}
marker: "# {mark} ANSIBLE MANAGED BLOCK {{ bridged_networking_bridge }}"
#notify: restart networking
when: bridged_networking_bridge_manage
- name: Start bridge (Debian like)
shell: |
ifup {{ bridged_networking_iface }}
ifup {{ bridged_networking_bridge }}
when: not bridged_networking_bridge in ansible_interfaces

View File

@ -1,22 +0,0 @@
---
- name: Remove OpenNebula contextualization
package: name=one-context state=absent
- include: redhat.yml
when: ansible_os_family == "RedHat"
- include: debian.yml
when: ansible_os_family == "Debian"
- name: Force all notified handlers to run now
meta: flush_handlers
- name: Refresh facts
setup:
- name: Validate availability of bridge and port
assert:
that:
- "bridged_networking_bridge.replace(':', '_') in ansible_interfaces"
- "bridged_networking_iface.replace(':', '_') in hostvars[inventory_hostname]['ansible_'+bridged_networking_bridge.replace(':', '_')]['interfaces']"

View File

@ -1,57 +0,0 @@
---
- name: Install bridge-utils
package:
name: bridge-utils
state: present
- block:
- name: Configuration for bridge
template:
src: ifcfg-br.j2
dest: /etc/sysconfig/network-scripts/ifcfg-{{ bridged_networking_bridge }}
notify: restart network
when: bridged_networking_bridge_manage
# - name: Remove configuration for IP interface
# file:
# path: /etc/sysconfig/network-scripts/ifcfg-{{ bridged_networking_ip_iface }}
# state: absent
# when: bridged_networking_ip_iface is defined
- name: Configuration for bridge port interface
template:
src: ifcfg.j2
dest: /etc/sysconfig/network-scripts/ifcfg-{{ bridged_networking_iface }}
notify: restart network
when: bridged_networking_iface_manage
# Note: Even though the 'bridged_networking_iface_manage' is set
# *NOT* to manage the configuration, in case of Red Hat we need to
# add at least BRIDGE attribute.
- name: Minimal configuration for bridge port interface
lineinfile:
dest: /etc/sysconfig/network-scripts/ifcfg-{{ bridged_networking_iface }}
regexp: "^BRIDGE"
line: "BRIDGE={{ bridged_networking_bridge }}"
notify: restart network
when: not bridged_networking_iface_manage
when: not bridged_networking_bridge in ansible_interfaces
## Put interfaces up
#- block:
# - name: Stop interfaces
# shell: |
# ifdown {{ bridged_networking_ip_iface }} || :
# ifdown {{ bridged_networking_iface }} || :
#
# - name: Remove configuration for IP interface
# file:
# path: /etc/sysconfig/network-scripts/ifcfg-{{ bridged_networking_ip_iface }}
# state: absent
# when: bridged_networking_ip_iface is defined
#
# - name: Start bridge
# shell: |
# ifup {{ bridged_networking_bridge }}
# when: ( conf_bridge | changed ) or ( conf_iface | changed )

View File

@ -1,20 +0,0 @@
# {{ ansible_managed }}
TYPE=Bridge
DEVICE={{ bridged_networking_bridge }}
ONBOOT=yes
BOOTPROTO=static
NM_CONTROLLED=no
DELAY=0
{% if bridged_networking_static_ip %}
IPADDR={{ bridged_networking_static_ip }}
{% if bridged_networking_static_netmask %}NETMASK={{ bridged_networking_static_netmask }}{% endif %}
{% if bridged_networking_static_gateway %}GATEWAY={{ bridged_networking_static_gateway }}{% endif %}
{% elif bridged_networking_ip_iface and bridged_networking_ip_iface.replace(':', '_') in ansible_interfaces and 'ipv4' in hostvars[inventory_hostname]["ansible_"+bridged_networking_ip_iface.replace(':', '_') ] %}
# IP configuration taken from '{{ bridged_networking_ip_iface }}'
IPADDR={{ hostvars[inventory_hostname]["ansible_"+bridged_networking_ip_iface.replace(':', '_') ].ipv4.address }}
NETMASK={{ hostvars[inventory_hostname]["ansible_"+bridged_networking_ip_iface.replace(':', '_') ].ipv4.netmask }}
{% if ansible_default_ipv4.interface == bridged_networking_ip_iface.replace(':', '_') %}
GATEWAY={{ ansible_default_ipv4.gateway }}
{% endif %}
{% endif %}

View File

@ -1,5 +0,0 @@
DEVICE={{ bridged_networking_iface }}
BOOTPROTO=static
ONBOOT=yes
NM_CONTROLLED=no
BRIDGE={{ bridged_networking_bridge }}

View File

@ -5,7 +5,8 @@
that: |
(ansible_distribution == 'Ubuntu' and ansible_distribution_version == '16.04') or
(ansible_distribution == 'Ubuntu' and ansible_distribution_version == '18.04') or
(ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7')
(ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7') or
(ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8')
msg: "Unsupported target OS"
- include: clean_netconfigs.yml

View File

@ -59,15 +59,6 @@
reject_with: icmp-host-prohibited
in_interface: "{{ iptables_base_rules_interface }}"
jump: REJECT
- name: Live iptables rule - reject all the rest in FORWARD chain
iptables:
state: "{% if iptables_base_rules_strict %}present{% else %}absent{% endif %}"
action: append
table: filter
chain: FORWARD
reject_with: icmp-host-prohibited
in_interface: "{{ iptables_base_rules_interface }}"
jump: REJECT
# default IPv6 rules
- name: Live ip6tables rule - accept related/established
@ -137,14 +128,4 @@
reject_with: icmp6-adm-prohibited
in_interface: "{{ iptables_base_rules_interface }}"
jump: REJECT
- name: Live ip6tables rule - reject all the rest in FORWARD chain
iptables:
state: "{% if iptables_base_rules_strict %}present{% else %}absent{% endif %}"
action: append
ip_version: ipv6
table: filter
chain: FORWARD
reject_with: icmp6-adm-prohibited
in_interface: "{{ iptables_base_rules_interface }}"
jump: REJECT
when: iptables_base_rules_enabled

View File

@ -21,7 +21,6 @@ COMMIT
{% endfor %}
{% if iptables_base_rules_strict %}
-A INPUT {% if iptables_base_rules_interface %}-i {{ iptables_base_rules_interface }}{% endif %} -j REJECT --reject-with icmp-host-prohibited
-A FORWARD {% if iptables_base_rules_interface %}-i {{ iptables_base_rules_interface }}{% endif %} -j REJECT --reject-with icmp-host-prohibited
{% endif %}
{% endif %}
COMMIT

View File

@ -13,7 +13,6 @@
-A INPUT {% if iptables_base_rules_interface %}-i {{ iptables_base_rules_interface }}{% endif %} -d fe80::/64 -p udp -m udp --dport 546 -m state --state NEW -j ACCEPT
{% if iptables_base_rules_strict %}
-A INPUT {% if iptables_base_rules_interface %}-i {{ iptables_base_rules_interface }}{% endif %} -j REJECT --reject-with icmp6-adm-prohibited
-A FORWARD {% if iptables_base_rules_interface %}-i {{ iptables_base_rules_interface }}{% endif %} -j REJECT --reject-with icmp6-adm-prohibited
{% endif %}
{% endif %}
COMMIT

View File

@ -1,9 +0,0 @@
# opennebula-node-firecracker
## Description
Install the opennebula-node-firecracker package.
## Requirements
OpenNebula repository must be configured before executing this role.

View File

@ -1,10 +0,0 @@
---
- name: Install OpenNebula node Firecracker package
package:
name: opennebula-node-firecracker
state: latest
register: result
until: result is success
retries: 3
delay: 10

View File

@ -13,7 +13,16 @@
until: yum_result is succeeded
retries: 3
delay: 10
when: ansible_distribution == "CentOS" and opennebula_node_kvm_use_ev
- name: Install qemu-kvm-ev
yum: name=qemu-kvm-ev state=present
register: yum_result
until: yum_result is succeeded
retries: 3
delay: 10
when:
- ansible_distribution == "CentOS"
- ansible_distribution_major_version == "7"
- opennebula_node_kvm_use_ev
- block:
- name: Enable RHEV repo
@ -27,15 +36,10 @@
until: yum_result is succeeded
retries: 3
delay: 10
when: ansible_distribution == "RedHat" and opennebula_node_kvm_use_ev
- name: Install qemu-kvm-ev
yum: name=qemu-kvm-ev state=present
register: yum_result
until: yum_result is succeeded
retries: 3
delay: 10
when: ansible_distribution == "CentOS" and opennebula_node_kvm_use_ev
when:
- ansible_distribution == "RedHat"
- ansible_distribution_major_version == "7"
- opennebula_node_kvm_use_ev
- name: Install qemu-kvm (base)
yum: name=qemu-kvm state=present
@ -43,7 +47,7 @@
until: yum_result is succeeded
retries: 3
delay: 10
when: not opennebula_node_kvm_use_ev
when: not opennebula_node_kvm_use_ev or ansible_distribution_major_version != "7"
# Update to a fixed mkswap
# * Wed Jan 31 2018 Karel Zak <kzak@redhat.com> 2.23.2-51
@ -57,6 +61,18 @@
retries: 3
delay: 10
- name: Install libgcrypt
package:
name: libgcrypt
state: latest
register: pkg_result
until: pkg_result is succeeded
retries: 3
delay: 10
when:
- ansible_os_family == "RedHat"
- ansible_distribution_major_version == "8"
- name: Install OpenNebula node KVM package
yum: name=opennebula-node-kvm state=latest
register: yum_result

View File

@ -1,17 +0,0 @@
# opennebula-node-lxd
## Description
Installs the opennebula-node-lxd package.
## Requirements
No special requirements.
## Variables
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
## Todo list
None

View File

@ -1,4 +0,0 @@
---
###############################################################################
# Valid defaults
###############################################################################

View File

@ -1,31 +0,0 @@
---
- name: Enable LXD/LXC from backports on Ubuntu 16.04
copy:
dest: /etc/apt/preferences.d/backports-lxd
content: |
Package: /lx[cd]/
Pin: release a={{ ansible_distribution_release }}-backports
Pin-Priority: 500
when:
- ansible_distribution == 'Ubuntu'
- ansible_distribution_version == '16.04'
- name: Install OpenNebula node LXD package
apt:
name: opennebula-node-lxd
state: latest
register: apt_result
until: apt_result is succeeded
retries: 3
delay: 10
# Test profile
- name: Check for profile test
command: lxc profile show test
register: lxc_profile_check
ignore_errors: yes
- name: Create test profile
command: lxc profile copy default test
when: lxc_profile_check is failed

View File

@ -1,12 +0,0 @@
---
- name: Include distribution specific tasks
include: "{{ ansible_os_family }}.yml"
- name: 'Create datastore on ONE_LOCATION'
file:
state: directory
path: "{{ one_location }}/var/datastores"
owner: oneadmin
group: oneadmin
when: one_location is defined

View File

@ -1,17 +0,0 @@
# opennebula-p2p-vxlan
## Description
Creates static VXLAN connections between several physical hosts. This allows to have the limited private networking in the infrastructures where VXLAN discovery over multicast isn't supported.
## Requirements
No special requirements.
## Variables
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
## Todo list
None

View File

@ -1,23 +0,0 @@
---
###############################################################################
# Valid defaults
###############################################################################
# Name of VXLAN bridge
opennebula_p2p_vxlan_bridge: NULL
# Name of VXLAN physical interface
opennebula_p2p_vxlan_phydev: NULL
# VXLAN ID (VNI)
opennebula_p2p_vxlan_vxlan_vni: NULL
# Name of VXLAN device
opennebula_p2p_vxlan_vxlan_dev: NULL
# Source IP address to use by VXLAN device
opennebula_p2p_vxlan_vxlan_local_ip: NULL
# List of all remote VXLAN endpoints
opennebula_p2p_vxlan_remotes: []

View File

@ -1,97 +0,0 @@
#!/bin/bash
CONFIG=/etc/default/opennebula-p2p-vxlan
set -e
if [ ! -e "${CONFIG}" ]; then
echo "Missing configuration '${CONFIG}'" >&2
exit 1
fi
source "${CONFIG}"
###
# delete VXLAN and bridge interfaces
function stop {
for D in ${ONE_P2P_VXLAN_DEV} ${ONE_P2P_BRIDGE}; do
if ip link show ${D} &>/dev/null; then
ip link del dev ${D}
fi
done
}
# create VXLAN and bridge interfaces
function start {
# create bridge
ip link add name "${ONE_P2P_BRIDGE}" type bridge
ip link set "${ONE_P2P_BRIDGE}" up
# create VXLAN interface
ip link add ${ONE_P2P_VXLAN_DEV} \
type vxlan id ${ONE_P2P_VXLAN_VNI} \
dev ${ONE_P2P_PHYDEV} \
dstport 4789 \
local ${ONE_P2P_VXLAN_LOCAL_IP}
ip link set "${ONE_P2P_VXLAN_DEV}" up
ip link set "${ONE_P2P_VXLAN_DEV}" master "${ONE_P2P_BRIDGE}"
# add FDB entries
refresh_fdb
}
function refresh_fdb {
for R in ${ONE_P2P_REMOTES}; do
if [ "${R}" = "${ONE_P2P_VXLAN_LOCAL_IP}" ]; then
continue
fi
bridge fdb append 00:00:00:00:00:00 \
dev "${ONE_P2P_VXLAN_DEV}" \
dst "${R}"
done
# compare list of remotes in existing FDB entries
# with list of required remotes from the configuration and
# remove obsolete FDB entires
local _REMOTES_LIVE=$(
bridge fdb show dev "${ONE_P2P_VXLAN_DEV}" | \
grep '^00:00:00:00:00:00 ' | \
cut -d' ' -f3
)
for R_LIVE in ${_REMOTES_LIVE}; do
for R_CONF in ${ONE_P2P_REMOTES}; do
[ "${R_LIVE}" = "${ONE_P2P_VXLAN_LOCAL_IP}" ] && break
[ "${R_LIVE}" = "${R_CONF}" ] && continue 2
done
bridge fdb del 00:00:00:00:00:00 \
dev "${ONE_P2P_VXLAN_DEV}" \
dst "${R_LIVE}"
done
}
###
case $1 in
start)
start
;;
stop)
stop
;;
reload)
refresh_fdb
;;
*)
if [ -z "${1}" ]; then
echo "Syntax: $0 [start|stop|reload]"
else
echo "ERROR: Invalid command '${1}'" >&2
fi
exit 1
esac

View File

@ -1,15 +0,0 @@
[Unit]
Description=OpenNebula Private VXLAN point-to-point network
After=network.target
Wants=network.target
[Service]
EnvironmentFile=/etc/default/opennebula-p2p-vxlan
Type=oneshot
RemainAfterExit=true
ExecStart=/usr/local/sbin/opennebula-p2p-vxlan start
ExecStop=/usr/local/sbin/opennebula-p2p-vxlan stop
ExecReload=/usr/local/sbin/opennebula-p2p-vxlan reload
[Install]
WantedBy=multi-user.target

View File

@ -1,7 +0,0 @@
---
- name: Reload service
service:
name: opennebula-p2p-vxlan
state: reloaded
listen: 'reload opennebula-p2p-vxlan'

View File

@ -1,37 +0,0 @@
---
- name: Validate configuration parameters
assert:
that:
- "{{ item }} is defined"
- "{{ item }} is not none"
with_items:
- opennebula_p2p_vxlan_bridge
- opennebula_p2p_vxlan_phydev
- opennebula_p2p_vxlan_vxlan_vni
- opennebula_p2p_vxlan_vxlan_dev
- opennebula_p2p_vxlan_vxlan_local_ip
- opennebula_p2p_vxlan_remotes
- name: Create configuration
template:
src: opennebula-p2p-vxlan.j2
dest: /etc/default/opennebula-p2p-vxlan
notify: 'reload opennebula-p2p-vxlan'
- name: Deploy script
copy:
src: opennebula-p2p-vxlan
dest: /usr/local/sbin/
mode: '0755'
- name: Deploy service script
copy:
src: opennebula-p2p-vxlan.service
dest: /etc/systemd/system/
- name: Start service
service:
name: opennebula-p2p-vxlan
state: started
enabled: yes

View File

@ -1,7 +0,0 @@
{{ ansible_managed | comment }}
ONE_P2P_BRIDGE={{ opennebula_p2p_vxlan_bridge | quote }}
ONE_P2P_PHYDEV={{ opennebula_p2p_vxlan_phydev | quote }}
ONE_P2P_VXLAN_VNI={{ opennebula_p2p_vxlan_vxlan_vni | quote }}
ONE_P2P_VXLAN_DEV={{ opennebula_p2p_vxlan_vxlan_dev | quote }}
ONE_P2P_VXLAN_LOCAL_IP={{ opennebula_p2p_vxlan_vxlan_local_ip | quote }}
ONE_P2P_REMOTES={{ opennebula_p2p_vxlan_remotes | join(' ') | quote }}

View File

@ -14,3 +14,6 @@ opennebula_ssh_deploy_local: True
# Deploy local oneadmin's SSH private key to remote host
opennebula_ssh_deploy_private_key: False
# Adds /root/.ssh/authorized_keys to /var/lib/one/.ssh/authorized_keys
opennebula_ssh_keys_import_root_keys: False

View File

@ -13,3 +13,6 @@
- include: deploy_local.yml
when: opennebula_ssh_deploy_local == True
- include: root_authkeys.yml
when: opennebula_ssh_keys_import_root_keys == True

View File

@ -0,0 +1,9 @@
---
- name: Get authorized_keys
shell: cat /root/.ssh/authorized_keys
register: root_authorized_keys
- name: Add authorized_keys
authorized_key: user=oneadmin key="{{ item }}"
with_items: "{{ root_authorized_keys.stdout_lines }}"

View File

@ -1,4 +1,17 @@
---
- name: Install Python 2 (Debian/Ubuntu)
raw: python2 --version >/dev/null 2>&1 || (apt-get --version >/dev/null 2>&1 && sudo -n apt-get install -y python-minimal)
raw: >
if [ -e /etc/debian_version ]; then
python2 --version >/dev/null 2>&1 || \
( apt-get --version >/dev/null 2>&1 && \
sudo -n apt-get update && \
sudo -n apt-get install -y python-minimal
)
fi
- name: Install libselinux-python3 (CentOS 8)
raw: >
if grep -q 'release 8' /etc/redhat-release; then
dnf -y install python3-libselinux python3-libsemanage
fi

View File

@ -1,21 +0,0 @@
# tuntap
## Description
The role creates a TUN/TAP interface with persistent configuration.
Used when [provisioning course environments][1]. It is applied in the baremetal machine by `ansible-course/site.yml`.
## Requirements
No special requirements.
## Variables
All of the variables in this role are documented in the [defaults](defaults/main.yml) file.
## Todo list
Find a better name for this role.
[1]: https://github.com/OpenNebula/infra/wiki/Creating-course-environments

View File

@ -1,11 +0,0 @@
---
###############################################################################
# Valid defaults
###############################################################################
# Name of interface
tuntap_name: 'tap0'
# Interface mode
tuntap_mode: 'tap'

View File

@ -1,11 +0,0 @@
---
- name: Configure TUN/TAP (Debian like)
blockinfile:
dest: /etc/network/interfaces
block: |
auto {{ tuntap_name }}
iface {{ tuntap_name }} inet manual
pre-up ip tuntap add {{ tuntap_name }} mode {{ tuntap_mode }}
post-down ip link del dev {{ tuntap_name }}
marker: "# {mark} ANSIBLE MANAGED BLOCK {{ tuntap_name }}"

View File

@ -1,14 +0,0 @@
---
- include: redhat.yml
when: ansible_os_family == "RedHat"
- include: debian.yml
when: ansible_os_family == "Debian"
- block:
- name: Activate interface
shell: ifup {{ tuntap_name }}
- name: Refresh facts
setup:
when: not tuntap_name in ansible_interfaces

View File

@ -1,6 +0,0 @@
---
- name: Configure TUN/TAP (RedHat like)
template:
src: ifcfg.j2
dest: /etc/sysconfig/network-scripts/ifcfg-{{ tuntap_name }}

View File

@ -1,6 +0,0 @@
# {{ ansible_managed }}
TYPE=Tap
DEVICE='{{ tuntap_name }}'
BOOTPROTO=static
ONBOOT=yes
NM_CONTROLLED=no

View File

@ -0,0 +1,16 @@
---
- name: Validate group_vars parameters
assert:
that:
- "{{ sys_ds_ids }} is defined"
- "{{ first_host }} is not none"
- name: Update replica host
local_action:
module: shell
cmd: onedatastore update {{ item }}
environment:
EDITOR: "sed -i -e 's/REPLICA_HOST=.*/REPLICA_HOST={{ first_host }}/'"
become: false
with_items: "{{ sys_ds_ids }}"
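
The local_action above drives the interactive `onedatastore update` through a scripted EDITOR, so REPLICA_HOST is rewritten on every system datastore without opening an editor. Run by hand, the equivalent command would look roughly like this (the datastore ID and host name are placeholders):

    EDITOR="sed -i -e 's/REPLICA_HOST=.*/REPLICA_HOST=kvm-host1/'" onedatastore update 101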

View File

@ -1,70 +0,0 @@
---
#############################################################
# WARNING: You need to create a provider with values with your
# own credentials for the particular provider. You need to
# uncomment and update list of hosts to deploy based
# on your requirements.
#############################################################
# Ansible playbook to configure hosts
playbook: "static_vxlan"
# Provision name to use in all resources
name: "AWSCluster"
# Defaults sections with information related with Packet
defaults:
provision:
provider: "aws"
instancetype: "i3.metal"
cloud_init: true
# Hosts to be deployed in Packet and created in OpenNebula
hosts:
# - reserved_cpu: "100"
# im_mad: "kvm"
# vm_mad: "kvm"
# provision:
# hostname: "centos-host"
# ami: "ami-66a7871c"
# - reserved_cpu: "100"
# im_mad: "kvm"
# vm_mad: "kvm"
# provision:
# hostname: "ubuntu-host"
# ami: "ami-759bc50a" # (Ubuntu 16.04)
# Datastores to be created in OpenNebula
datastores:
- name: "<%= @name %>-image"
ds_mad: "fs"
tm_mad: "ssh"
- name: "<%= @name %>-system"
type: "system_ds"
tm_mad: "ssh"
# Network to be created in OpenNebula
networks:
- name: "<%= @name %>-private-host-only-nat"
vn_mad: "dummy"
bridge: "br0"
dns: "8.8.8.8 8.8.4.4"
gateway: "192.168.150.1"
description: "Host-only private network with NAT"
ar:
- ip: "192.168.150.2"
size: "253"
type: "IP4"
- name: "<%= @name %>-private"
vn_mad: "dummy"
bridge: "vxbr100"
mtu: "1450"
description: "Private networking"
ar:
- ip: "192.168.160.2"
size: "253"
type: "IP4"

View File

@ -1,87 +0,0 @@
---
#############################################################
# WARNING: You need to replace ***** values with your
# own credentials for the particular provider. You need to
# uncomment and update list of hosts to deploy based
# on your requirements.
#
# You also need to create a provider with credentials and location
# information.
#############################################################
# Ansible playbook to configure hosts
playbook: "static_vxlan"
# Provision name to use in all resources
name: "PacketCluster"
# Defaults sections with information related with Packet
defaults:
provision:
provider: "packet"
plan: "baremetal_0"
os: "centos_7"
configuration:
iptables_masquerade_enabled: false # NAT breaks public networking
# Hosts to be deployed in Packet and created in OpenNebula
hosts:
# - reserved_cpu: "100"
# im_mad: "kvm"
# vm_mad: "kvm"
# provision:
# hostname: "centos-host"
# os: "centos_7"
# - reserved_cpu: "100"
# im_mad: "kvm"
# vm_mad: "kvm"
# provision:
# hostname: "ubuntu-host"
# os: "ubuntu_18_04"
# Datastores to be created in OpenNebula
datastores:
- name: "<%= @name %>-image"
ds_mad: "fs"
tm_mad: "ssh"
- name: "<%= @name %>-system"
type: "system_ds"
tm_mad: "ssh"
# Network to be created in OpenNebula
networks:
- name: "<%= @name %>-private-host-only"
vn_mad: "dummy"
bridge: "br0"
description: "Host-only private network"
gateway: "192.168.150.1"
ar:
- ip: "192.168.150.2"
size: "253"
type: "IP4"
- name: "<%= @name %>-private"
vn_mad: "dummy"
bridge: "vxbr100"
mtu: "1450"
description: "Private networking"
ar:
- ip: "192.168.160.2"
size: "253"
type: "IP4"
- name: "<%= @name %>-public"
vn_mad: "alias_sdnat"
external: "yes"
description: "Public networking"
ar:
- size: "4" # select number of public IPs
type: "IP4"
ipam_mad: "packet"
packet_ip_type: "public_ipv4"
facility: "ams1"
packet_token: "********************************"
packet_project: "******************************"

View File

@ -0,0 +1,39 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# datastores: Defines the storage area for the cluster using the SSH replication
# drivers. It creates the following datastores, using Replica driver:
# 1. Image datastore, ${cluster_name}-image
# 2. System datastore, ${cluster_name}-system
#
# Configuration/Input attributes:
# - replica_host: The host that will hold the cluster replicas and snapshots.
#-------------------------------------------------------------------------------
datastores:
- name: "${provision}-image"
type: 'image_ds'
ds_mad: 'fs'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
- name: "${provision}-system"
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"

View File

@ -0,0 +1,26 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
# configuration: Ansible role parameters.
#--------------------------------------------------------------------------------
# Check defaults/main.yml in each role for the available variables
#-------------------------------------------------------------------------------
...

View File

@ -0,0 +1,33 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
inputs:
- name: 'number_hosts'
type: text
description: 'Number of AWS instances to create'
default: '1'
- name: 'aws_ami_image'
type: text
description: "AWS ami image used for host deployments"
default: ''
- name: 'aws_instance_type'
type: text
description: "AWS instance type, use bare-metal instances, otherwise use QEMU"
default: ''
...

View File

@ -0,0 +1,27 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
networks:
- name: "${provision}-public"
vn_mad: 'elastic'
bridge: 'br0'
ar:
- provison_id: "${provision_id}"
size: '1'
packet_ip_type: 'public_ipv4'
ipam_mad: 'aws'
cidr: "${cluster.0.cidr}"

View File

@ -0,0 +1,72 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# This is the canonical description file for a cluster build with 'AWS'
# resources using the KVM hypervisor.
# ------------------------------------------------------------------------------
name: "aws-cluster"
extends:
- common.d/defaults.yml
- common.d/resources.yml
- common.d/kvm_hosts.yml
- aws.d/datastores.yml
- aws.d/defaults.yml
- aws.d/inputs.yml
- aws.d/networks.yml
#-------------------------------------------------------------------------------
# playbook: Ansible playbook used for host configuration. The aws playbook
# includes the following roles:
# - ddc
# - opennebula-repository
# - opennebula-node-kvm
# - opennebula-ssh
# - tuntap
# - bridged-networking
# - iptables
#-------------------------------------------------------------------------------
playbook:
- aws
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
defaults:
provision:
provider: 'aws'
ami: "${input.aws_ami_image}"
instancetype: "${input.aws_instance_type}"
cloud_init: true
#-------------------------------------------------------------------------------
# cluster: Parameters for the OpenNebula cluster. Applies to all the Hosts
#--------------------------------------------------------------------------------
# name: of the cluster
# description: Additional information
# reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#--------------------------------------------------------------------------------
cluster:
name: "${provision}"
description: 'AWS cluster'
reserved_cpu: '0'
reserved_mem: '0'
provision:
cidr: '10.0.0.0/16'

View File

@ -1,19 +1,24 @@
#!/bin/bash
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
exit 0
defaults:
configuration:
opennebula_node_kvm_use_ev: true

View File

@ -0,0 +1,35 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# hosts: Packet or AWS metal servers
# provision:
# - count: Number of servers to create
# - hostname: kvm-host1, kvm-host2 .... of the server
#
# You can define specific OpenNebula configuration attributes for all the hosts:
# - reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# - reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#-------------------------------------------------------------------------------
hosts:
- im_mad: 'kvm'
vm_mad: 'kvm'
provision:
count: "${input.number_hosts}"
hostname: "kvm-host${index}"
...

View File

@ -0,0 +1,38 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# Use this file to define custom resources associated to the cluster, example:
# - images
# - marketplaceapps
# - templates
# - vntemplates
# - flowtemplates
#
# Cross-references to other objects can be made:
# - by name, eg. ${datastore.images.id} to get the OpenNebula ID of the
# datastore with name "images"
# - by index, eg. ${datastore.0.id} to get the OpenNebula ID of the **first**
# datastore defined in the provision
#-------------------------------------------------------------------------------
marketplaceapps:
- appname: "Alpine Linux 3.10"
name: "alpine"
dsid: "${datastore.0.id}"
meta:
wait: true
wait_timeout: 60

View File

@ -0,0 +1,39 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# datastores: Defines the storage area for the cluster using the SSH replication
# drivers. It creates the following datastores, using Replica driver:
# 1. Image datastore, ${cluster_name}-image
# 2. System datastore, ${cluster_name}-system
#
# Configuration/Input attributes:
# - replica_host: The host that will hold the cluster replicas and snapshots.
#-------------------------------------------------------------------------------
datastores:
- name: "${provision}-image"
type: 'image_ds'
ds_mad: 'fs'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
- name: "${provision}-system"
type: 'system_ds'
tm_mad: 'ssh'
safe_dirs: "/var/tmp /tmp"
replica_host: "use-first-host"

View File

@ -0,0 +1,24 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
# configuration: Ansible role parameters.
#--------------------------------------------------------------------------------
# Check defaults/main.yml in each role for the available variables
#-------------------------------------------------------------------------------

View File

@ -0,0 +1,33 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
inputs:
- name: 'number_hosts'
type: text
description: "Number of metal servers to create"
default: '1'
- name: 'packet_plan'
type: text
description: "Packet plan (device type)"
default: 't1.small'
- name: 'packet_os'
type: text
description: "Packet host operating system"
default: 'centos_8'

View File

@ -0,0 +1,26 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
networks:
- name: "${provision}-public"
vn_mad: 'elastic'
bridge: 'br0'
ar:
- provison_id: "${provision_id}"
size: '1'
packet_ip_type: 'public_ipv4'
ipam_mad: 'packet'

View File

@ -0,0 +1,69 @@
---
# ---------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# This is the canonical description file for a cluster build with 'Packet'
# resources using the KVM hypervisor.
# ------------------------------------------------------------------------------
name: "packet-cluster"
extends:
- common.d/defaults.yml
- common.d/resources.yml
- common.d/kvm_hosts.yml
- packet.d/defaults.yml
- packet.d/datastores.yml
- packet.d/inputs.yml
- packet.d/networks.yml
#-------------------------------------------------------------------------------
# playbook: Ansible playbook used for host configuration. The packet playbook
# includes the following roles:
# - ddc
# - opennebula-repository
# - opennebula-node-kvm
# - opennebula-ssh
# - tuntap
# - bridged-networking
# - iptables
#-------------------------------------------------------------------------------
playbook:
- packet
#-------------------------------------------------------------------------------
# defaults: Common configuration attributes for provision objects
#--------------------------------------------------------------------------------
defaults:
provision:
provider: 'packet'
plan: "${input.packet_plan}"
os: "${input.packet_os}"
#-------------------------------------------------------------------------------
# cluster: Parameters for the OpenNebula cluster. Applies to all the Hosts
#--------------------------------------------------------------------------------
# name: of the cluster
# description: Additional information
# reserved_cpu: In percentage. It will be subtracted from the TOTAL CPU
# reserved_memory: In percentage. It will be subtracted from the TOTAL MEM
#--------------------------------------------------------------------------------
cluster:
name: "${provision}"
description: 'Packet cluster'
reserved_cpu: '0'
reserved_mem: '0'

View File

@ -1,14 +0,0 @@
---
playbook: default
networks:
- name: "nat"
vn_mad: dummy
bridge: br0
dns: "8.8.8.8 8.8.4.4"
gateway: "192.168.150.1"
description: "Host-only networking with NAT"
ar:
- ip: "192.168.150.2"
size: 253
type: IP4

View File

@ -1,24 +0,0 @@
---
playbook: static_vxlan
networks:
- name: "nat"
vn_mad: "dummy"
bridge: "br0"
dns: "8.8.8.8 8.8.4.4"
gateway: "192.168.150.1"
description: "Host-only networking with NAT"
ar:
- ip: "192.168.150.2"
size: "253"
type: "IP4"
- name: "private"
vn_mad: "dummy"
bridge: "vxbr100"
mtu: "1450"
description: "Private networking"
ar:
- ip: "192.168.160.2"
size: "253"
type: "IP4"

View File

@ -6,7 +6,7 @@ Cmnd_Alias ONE_FIRECRACKER = /usr/bin/jailer, /usr/bin/mount, /usr/sbin/one-clea
Cmnd_Alias ONE_HA = /usr/bin/systemctl start opennebula-flow, /usr/bin/systemctl stop opennebula-flow, /usr/bin/systemctl start opennebula-gate, /usr/bin/systemctl stop opennebula-gate, /usr/bin/systemctl start opennebula-hem, /usr/bin/systemctl stop opennebula-hem, /usr/bin/systemctl start opennebula-showback.timer, /usr/bin/systemctl stop opennebula-showback.timer, /usr/sbin/service opennebula-flow start, /usr/sbin/service opennebula-flow stop, /usr/sbin/service opennebula-gate start, /usr/sbin/service opennebula-gate stop, /usr/sbin/service opennebula-hem start, /usr/sbin/service opennebula-hem stop, /usr/sbin/arping, /usr/sbin/ip address *
Cmnd_Alias ONE_LVM = /usr/sbin/lvcreate, /usr/sbin/lvremove, /usr/sbin/lvs, /usr/sbin/vgdisplay, /usr/sbin/lvchange, /usr/sbin/lvscan, /usr/sbin/lvextend
Cmnd_Alias ONE_MARKET = /usr/lib/one/sh/create_container_image.sh, /usr/lib/one/sh/create_docker_image.sh
Cmnd_Alias ONE_NET = /usr/sbin/ebtables, /usr/sbin/iptables, /usr/sbin/ip6tables, /usr/sbin/ipset, /usr/sbin/ip link *, /usr/sbin/ip tuntap *
Cmnd_Alias ONE_NET = /usr/sbin/ebtables, /usr/sbin/iptables, /usr/sbin/ip6tables, /usr/sbin/ipset, /usr/sbin/ip link *, /usr/sbin/ip tuntap *, /usr/sbin/ip route *, /usr/sbin/ip neighbour *
Cmnd_Alias ONE_OVS = /usr/bin/ovs-ofctl, /usr/bin/ovs-vsctl
## Command aliases are enabled individually in dedicated

View File

@ -7,7 +7,7 @@ Cmnd_Alias ONE_HA = /bin/systemctl start opennebula-flow, /bin/systemctl stop op
Cmnd_Alias ONE_LVM = /sbin/lvcreate, /sbin/lvremove, /sbin/lvs, /sbin/vgdisplay, /sbin/lvchange, /sbin/lvscan, /sbin/lvextend
Cmnd_Alias ONE_LXD = /snap/bin/lxc, /usr/bin/catfstab, /bin/mount, /bin/umount, /bin/mkdir, /bin/lsblk, /sbin/losetup, /sbin/kpartx, /usr/bin/qemu-nbd, /sbin/blkid, /sbin/e2fsck, /sbin/resize2fs, /usr/sbin/xfs_growfs, /usr/bin/rbd-nbd, /usr/sbin/xfs_admin, /sbin/tune2fs
Cmnd_Alias ONE_MARKET = /usr/lib/one/sh/create_container_image.sh, /usr/lib/one/sh/create_docker_image.sh
Cmnd_Alias ONE_NET = /sbin/ebtables, /sbin/iptables, /sbin/ip6tables, /sbin/ipset, /sbin/ip link *, /sbin/ip tuntap *
Cmnd_Alias ONE_NET = /sbin/ebtables, /sbin/iptables, /sbin/ip6tables, /sbin/ipset, /sbin/ip link *, /sbin/ip tuntap *, /sbin/ip route *, /sbin/ip neighbour *
Cmnd_Alias ONE_OVS = /usr/bin/ovs-ofctl, /usr/bin/ovs-vsctl
## Command aliases are enabled individually in dedicated

View File

@ -47,7 +47,10 @@ void IPAMManager::_notify_request(unique_ptr<ipam_msg_t> msg)
}
else
{
notify_request(msg->oid(), false, msg->payload());
string buffer;
ssl_util::base64_decode(msg->payload(), buffer);
notify_request(msg->oid(), false, buffer);
}
return;

View File

@ -89,6 +89,19 @@ require 'nokogiri'
require 'aws-sdk-ec2'
require 'opennebula'
require 'oneprovision'
require 'ipaddr'
class IPAddr
attr_reader :addr
def ^(other)
return self.clone.set(@addr ^ other.to_i)
end
def <(other)
return @addr < other.addr
end
end
begin
data = Nokogiri::XML(Base64.decode64(STDIN.read))
@ -103,6 +116,20 @@ begin
exit(-1)
end
cidr_s = data.xpath('//AR/CIDR').text
if cidr_s.empty?
STDERR.puts 'Missing CIDR block in address range'
exit(-1)
end
cidr = IPAddr.new(cidr_s)
if cidr.prefix != 16
STDERR.puts 'Elastic CIDR block has to be /16'
exit(-1)
end
one = OpenNebula::Client.new
provision = OneProvision::Provision.new_with_id(provision_id, one)
rc = provision.info
@ -116,13 +143,16 @@ begin
connect = provider.body['connection']
options = {
:access_key_id => connect['aws_access'],
:secret_access_key => connect['aws_secret'],
:region => connect['aws_region']
:access_key_id => connect['access_key'],
:secret_access_key => connect['secret_key'],
:region => connect['region']
}
# --------------------------------------------------------------------------
# Connect to Packet and allocate a new IP
# Connect to AWS and allocate a new Elastic IP. The public IP is modeled as
# a 2-host (/31) network for the VM:
#   Gateway = IP ^ 1
#   Netmask = /31 (255.255.255.254)
# --------------------------------------------------------------------------
size = data.xpath('//AR/SIZE').text.to_i
@ -136,14 +166,31 @@ begin
ec2 = Aws::EC2::Resource.new.client
ip = ec2.allocate_address({ :domain => 'vpc' })
eip = IPAddr.new(ip.public_ip)
ipvm = (eip & 0x0000FFFF) | cidr.mask(16)
ipgw = ipvm ^ 1
first_ip = IPAddr.new('0.0.0.16') | cidr.mask(16)
if ipvm < first_ip
ec2.release_address({ :allocation_id => ip.allocation_id })
STDERR.puts 'Could not allocate Elastic IP'
exit(-1)
end
puts <<-EOF
AR = [
TYPE = "IP4",
IP = "#{ip.public_ip}",
SIZE = "1",
IPAM_MAD = "ec2",
TYPE = "IP4",
IP = "#{ipvm}",
EXTERNAL = "#{ip.public_ip}",
SIZE = "1",
IPAM_MAD = "aws",
GATEWAY = "#{ipgw}",
NETWORK_MASK = "255.255.255.254",
AWS_ALLOCATION_ID = "#{ip.allocation_id}",
PROVISION_ID = "#{provision_id}"
PROVISION_ID = "#{provision_id}"
]
EOF
rescue StandardError => e

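A worked example of the /31 address arithmetic used above, assuming a hypothetical Elastic IP of 34.243.100.53 and the Elastic CIDR block 172.16.0.0/16 (illustrative values only):

require 'ipaddr'

# Same helper the driver reopens IPAddr with.
class IPAddr
    def ^(other)
        self.clone.set(@addr ^ other.to_i)
    end
end

cidr = IPAddr.new('172.16.0.0/16')         # Elastic CIDR block (must be /16)
eip  = IPAddr.new('34.243.100.53')         # hypothetical AWS Elastic IP

ipvm = (eip & 0x0000FFFF) | cidr.mask(16)  # keep the low 16 bits, map into the CIDR
ipgw = ipvm ^ 1                            # the other address of the /31 is the gateway

puts ipvm  # => 172.16.100.53
puts ipgw  # => 172.16.100.52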
View File

@ -92,9 +92,9 @@ begin
connect = provider.body['connection']
options = {
:access_key_id => connect['aws_access'],
:secret_access_key => connect['aws_secret'],
:region => connect['aws_region']
:access_key_id => connect['access_key'],
:secret_access_key => connect['secret_key'],
:region => connect['region']
}
# --------------------------------------------------------------------------

View File

@ -93,9 +93,16 @@ require 'base64'
require 'nokogiri'
require 'opennebula'
require 'oneprovision'
require 'ipaddr'
IP_TYPE = %w[public_ipv4 global_ipv4]
class IPAddr
def ^(other)
return self.clone.set(@addr ^ other.to_i)
end
end
begin
data = Nokogiri::XML(Base64.decode64(STDIN.read))
@ -121,9 +128,9 @@ begin
provider = provision.provider
connect = provider.body['connection']
pk_token = connect['packet_token']
pk_project = connect['packet_project']
pk_facility = connect['packet_facility']
pk_token = connect['token']
pk_project = connect['project']
pk_facility = connect['facility']
# --------------------------------------------------------------------------
# Connect to Packet and allocate a new IP
@ -152,12 +159,16 @@ begin
packet.create_ip(ip)
ipgw = IPAddr.new(ip.address) ^ 1
puts <<-EOF
AR = [
TYPE = "IP4",
IP = "#{ip.address}",
SIZE = "#{ip.quantity}",
IPAM_MAD = "packet",
GATEWAY = "#{ipgw}",
NETWORK_MASK = "255.255.255.254",
PACKET_IP_ID = "#{ip.id}",
PROVISION_ID = "#{provision_id}"
]

View File

@ -85,7 +85,7 @@ begin
provider = provision.provider
connect = provider.body['connection']
pk_token = connect['packet_token']
pk_token = connect['token']
# --------------------------------------------------------------------------
# Connect to Packet and delete the IP

View File

@ -40,6 +40,8 @@ module OneProvision
rc = nil
begin
Terraform.check_connection(template)
rc = to_json(template)
return rc if OpenNebula.is_error?(rc)

View File

@ -28,8 +28,8 @@ CONFIG_DEFAULTS = {
'connection' => {
'remote_user' => 'root',
'remote_port' => 22,
'public_key' => '/var/lib/one/.ssh/ddc/id_rsa.pub',
'private_key' => '/var/lib/one/.ssh/ddc/id_rsa'
'public_key' => '/var/lib/one/.ssh-oneprovision/id_rsa.pub',
'private_key' => '/var/lib/one/.ssh-oneprovision/id_rsa'
}
}
@ -74,14 +74,15 @@ module OneProvision
# Configures host via ansible
#
# @param hosts [OpenNebula::Host Array] Hosts to configure
# @param datastores [OpenNebula::Datastore Array] Datastores for group vars
# @param ping [Boolean] True to check ping to hosts
def configure(hosts, ping = true)
def configure(hosts, datastores, ping = true)
return if hosts.nil? || hosts.empty?
Driver.retry_loop 'Failed to configure hosts' do
check_ansible_version
ansible_dir = generate_ansible_configs(hosts)
ansible_dir = generate_ansible_configs(hosts, datastores)
try_ssh(ansible_dir) if ping
@ -93,6 +94,7 @@ module OneProvision
cmd << "ansible-playbook #{ANSIBLE_ARGS}"
cmd << " -i #{ansible_dir}/inventory"
cmd << " -i #{ANSIBLE_LOCATION}/inventories/#{i}"
cmd << " -e @#{ansible_dir}/group_vars.yml"
cmd << " #{ANSIBLE_LOCATION}/#{i}.yml"
o, _e, s = Driver.run(cmd)
@ -240,9 +242,10 @@ module OneProvision
# Generates ansible configurations
#
# @param hosts [OpenNebula::Host array] Hosts to configure
# @param datastores [OpenNebula::Datastore Array] Datastores for group vars
#
# @return [Dir] Directory with Ansible information
def generate_ansible_configs(hosts)
def generate_ansible_configs(hosts, datastores)
ansible_dir = Dir.mktmpdir
msg = "Generating Ansible configurations into #{ansible_dir}"
@ -277,6 +280,21 @@ module OneProvision
Driver.write_file_log("#{ansible_dir}/inventory", c)
# Generate "group_vars" file
group_vars = { 'sys_ds_ids' => [], 'first_host' => ""}
group_vars['first_host'] = hosts.first['name'] \
unless hosts.empty?
datastores.each do |d|
ds = Resource.object('datastores')
ds.info(d['id'])
next unless ds.one['TYPE'] == '1' # only system ds
group_vars['sys_ds_ids'] << d['id']
end
c = YAML.dump(group_vars)
fname = "#{ansible_dir}/group_vars.yml"
Driver.write_file_log(fname, c)
# Generate "host_vars" directory
Dir.mkdir("#{ansible_dir}/host_vars")

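A minimal sketch of the group_vars.yml generation above, assuming one host named 'host0' and a single system datastore with ID 100 (illustrative values; a real run reads them from the provision objects):

require 'yaml'

hosts      = [{ 'name' => 'host0' }]           # illustrative host list
datastores = [{ 'id' => 100, 'TYPE' => '1' }]  # TYPE '1' == system datastore

group_vars = { 'sys_ds_ids' => [], 'first_host' => '' }
group_vars['first_host'] = hosts.first['name'] unless hosts.empty?

datastores.each do |d|
    next unless d['TYPE'] == '1'               # only system datastores
    group_vars['sys_ds_ids'] << d['id']
end

puts YAML.dump(group_vars)
# ---
# sys_ds_ids:
# - 100
# first_host: host0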
View File

@ -209,8 +209,6 @@ module OneProvision
cfg.validate
begin
@idx = nil
# Create configuration object
cfg = ProvisionConfig.new(config, inputs)
cfg.load
@ -335,7 +333,7 @@ module OneProvision
update
rc = Ansible.configure(hosts)
rc = Ansible.configure(hosts, datastores)
if rc == 0
self.state = STATE['RUNNING']
@ -619,19 +617,25 @@ module OneProvision
infrastructure_objects['hosts'] = []
cid = Integer(cluster['id'])
global_idx = -1
cfg['hosts'].each do |h|
h['count'].nil? ? count = 1 : count = Integer(h['count'])
# Multiple host definition setup
if h['provision']['hostname'].is_a? Array
count = h['provision']['hostname'].size
hostnames = h['provision']['hostname']
elsif h['count']
count = Integer(h['count'])
else
count = 1
end
# Get hostnames
host_names = h['provision']['hostname'] if count > 1
global_idx += 1
# Store original host template
h_bck = Marshal.load(Marshal.dump(h))
count.times.each do |idx|
@idx = idx
Driver.retry_loop('Failed to create some host', self) do
playbooks = cfg['playbook']
playbooks = playbooks.join(',') if playbooks.is_a? Array
@ -639,14 +643,13 @@ module OneProvision
h = Marshal.load(Marshal.dump(h_bck))
# Take hostname from array
if host_names
if host_names.is_a? Array
h['provision']['hostname'] = host_names.shift
else
h['provision']['hostname'] = host_names
end
if hostnames
h['provision']['hostname'] = hostnames.shift
end
h['provision']['index'] = idx + global_idx
h['provision']['count'] = count
host = Resource.object('hosts', @provider, h)
host.evaluate_rules(self)

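A short sketch of the host count / hostname resolution added above, for a hypothetical template entry that lists its hostnames explicitly (names are illustrative; the real loop also tracks a global index and stores it in the host provision):

# Hypothetical host entry from a provision template.
h = { 'provision' => { 'hostname' => ['kvm-0', 'kvm-1', 'kvm-2'] } }

if h['provision']['hostname'].is_a? Array
    hostnames = h['provision']['hostname']
    count     = hostnames.size
elsif h['count']
    count = Integer(h['count'])
else
    count = 1
end

count.times do |idx|
    name = hostnames ? hostnames.shift : h['provision']['hostname']
    puts "host #{idx}: #{name}"
end
# host 0: kvm-0
# host 1: kvm-1
# host 2: kvm-2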
View File

@ -240,12 +240,12 @@ module OneProvision
match = match.split('.')
if match.size == 1
index = @config['provision']['index'] if @config['provision']
value.gsub!('${provision}', provision.name.to_s)
value.gsub!('${provision_id}', provision.id.to_s)
if provision.idx
value.gsub!('${index}', provision.idx.to_s)
end
value.gsub!('${index}', index.to_s) if index
else
objects = provision.info_objects("#{match[0]}s")
@ -259,9 +259,13 @@ module OneProvision
end
end
key = match[2].upcase
object = object.to_hash
object = object[object.keys[0]]
replace = object[match[2].upcase]
replace = object[key]
replace = object['TEMPLATE'][key] unless replace
replace = object['TEMPLATE']['PROVISION'][key] unless replace
value.gsub!("${#{match.join('.')}}", replace)
end
@ -355,7 +359,7 @@ module OneProvision
# @return [Hash] Configuration content
def partial_load(name)
begin
yaml = YAML.load_file(name)
yaml = YAML.load_file(name) || {}
rescue StandardError => e
Utils.fail("Failed to read template: #{e}")
end

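A hedged sketch of the three-level attribute lookup introduced above, for a hypothetical reference like "${host.0.hostname}" (the object hash below is illustrative):

# Illustrative object as returned by to_hash on a provision resource.
object = {
    'NAME'     => 'kvm-0',
    'TEMPLATE' => {
        'HOSTNAME'  => 'kvm-0.example.com',
        'PROVISION' => { 'INDEX' => '0' }
    }
}

key = 'HOSTNAME'

# Try the top level first, then TEMPLATE, then TEMPLATE/PROVISION.
replace = object[key]
replace = object['TEMPLATE'][key] unless replace
replace = object['TEMPLATE']['PROVISION'][key] unless replace

puts replace  # => kvm-0.example.com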
View File

@ -49,7 +49,7 @@ module OneProvision
@p_template['configuration'].to_yaml
)
reject = %w[im_mad vm_mad provision connection configuration]
reject = %w[im_mad vm_mad provision connection configuration count]
Nokogiri::XML::Builder.new do |xml|
xml.HOST do

View File

@ -30,6 +30,7 @@ module OneProvision
:network => 'aws_subnet'
}
KEYS = %w[access_key secret_key region]
# Class constructor
#
# @param provider [Provider]

View File

@ -29,6 +29,8 @@ module OneProvision
:network => 'packet_reserved_ip_block'
}
KEYS = %w[project token facility]
# Class constructor
#
# @param provider [Provider]

View File

@ -1,12 +1,22 @@
<% if provision['CIDR'] && provision['CIDR'] != "" %>
resource "aws_vpc" "device_<%= obj['ID'] %>" {
cidr_block = "<%= provision['CIDR'] %>"
cidr_block = "<%= provision['CIDR'] ? provision['CIDR'] : '10.0.0.0/16'%>"
tags = {
Name = "<%= obj['NAME'] %>_vpc"
}
}
resource "aws_subnet" "device_<%= obj['ID'] %>" {
vpc_id = aws_vpc.device_<%= obj['ID'] %>.id
cidr_block = "<%= provision['CIDR'] ? provision['CIDR'] : '10.0.0.0/16'%>"
map_public_ip_on_launch = true
tags = {
Name = "<%= obj['NAME'] %>_subnet"
}
}
resource "aws_internet_gateway" "device_<%= obj['ID'] %>" {
vpc_id = aws_vpc.device_<%= obj['ID'] %>.id
@ -20,5 +30,29 @@ resource "aws_route" "device_<%= obj['ID'] %>" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.device_<%= obj['ID'] %>.id
}
<% end %>
resource "aws_security_group" "device_<%= obj['ID'] %>_ssh" {
name = "allow_ssh"
description = "Allow SSH inbound traffic"
vpc_id = aws_vpc.device_<%= c['ID'] %>.id
ingress {
description = "TLS from all"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "device_<%= obj['ID'] %>_ssh"
}
}

View File

@ -2,13 +2,16 @@ resource "aws_instance" "device_<%= obj['ID'] %>" {
ami = "<%= provision['AMI'] %>"
instance_type = "<%= provision['INSTANCETYPE'] %>"
<% if provision['SUBNETID'] && provision['SUBNETID'] != "" %>
subnet_id = "<%= provision['SUBNETID'] %>"
<% end %>
vpc_security_group_ids = [aws_security_group.device_<%= c['ID'] %>_ssh.id]
<% if provision['SECURITYGROUPSIDS'] && provision['SECURITYGROUPSIDS'] != "" %>
vpc_security_group_ids = ["<%= provision['SECURITYGROUPSIDS'] %>"]
<% end %>
subnet_id = aws_subnet.device_<%= c['ID'] %>.id
<% ecidr = c['TEMPLATE']['PROVISION']['CIDR'] || "10.0.0.0/16"
pref = ecidr.split('/').first.rpartition(".")[0]
ip = pref << '.' << ( provision['INDEX'].to_i + 4 ).to_s
%>
private_ip = "<%= ip %>"
user_data = "<%= obj['user_data'] %>"
tags = {

View File
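A quick sketch of the private-IP derivation in the template above, assuming the default CIDR and a host INDEX of 2 (illustrative values):

ecidr = '10.0.0.0/16'                              # default when the cluster defines no CIDR
index = 2                                          # PROVISION/INDEX of the host

pref = ecidr.split('/').first.rpartition('.')[0]   # => "10.0.0"
ip   = "#{pref}.#{index + 4}"                      # +4 presumably skips addresses AWS reserves

puts ip  # => 10.0.0.6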

@ -1,12 +0,0 @@
<% if provision['SUB_CIDR'] && provision['SUB_CIDR'] != "" %>
resource "aws_subnet" "device_<%= obj['ID'] %>" {
vpc_id = aws_vpc.device_<%= c['ID'] %>.id
cidr_block = "<%= provision['SUB_CIDR'] %>"
map_public_ip_on_launch = true
tags = {
Name = "<%= obj['NAME'] %>_subnet"
}
}
<% end %>

View File

@ -1,6 +1,6 @@
provider "aws" {
access_key = "<%= conn['AWS_ACCESS'] %>"
secret_key = "<%= conn['AWS_SECRET'] %>"
region = "<%= conn['AWS_REGION'] %>"
access_key = "<%= conn['ACCESS_KEY'] %>"
secret_key = "<%= conn['SECRET_KEY'] %>"
region = "<%= conn['REGION'] %>"
}

View File

@ -3,7 +3,7 @@ resource "packet_device" "device_<%= obj['ID'] %>" {
plan = "<%= provision['PLAN'] %>"
facilities = ["<%= provision['FACILITY'] %>"]
operating_system = "<%= provision['OS'] %>"
project_id = "<%= provision['PACKET_PROJECT']%>"
project_id = "<%= provision['PROJECT']%>"
billing_cycle = "hourly"
user_data = "<%= obj['user_data'] %>"
tags = ["OpenNebula", "ONE_ID=<%= obj['ID'] %>"]

View File

@ -1,4 +1,4 @@
provider "packet" {
auth_token = "<%= conn['PACKET_TOKEN'] %>"
auth_token = "<%= conn['TOKEN'] %>"
}

View File

@ -74,6 +74,31 @@ module OneProvision
tf_class.new(provider, tf[:state], tf[:conf])
end
# Check connection attributes of a provider template
#
# @param provider [Provider] Provider information
# @return true or raise exception
def self.check_connection(provider)
case provider['provider']
when 'packet'
keys = Packet::KEYS
when 'aws'
keys = AWS::KEYS
else
raise OneProvisionLoopException,
"Unknown provider: #{provider['provider']}"
end
keys.each do |k|
if !provider['connection'].has_key? k
raise OneProvisionLoopException,
"Missing provider connection attribute: '#{k}'"
end
end
true
end
# Generate Terraform deployment file
#
# @param provision [Provision] Provision information

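A standalone sketch of the connection check above, with illustrative (non-working) credentials and without the OneProvision classes:

KEYS = {
    'aws'    => %w[access_key secret_key region],
    'packet' => %w[project token facility]
}.freeze

provider = {
    'provider'   => 'aws',
    'connection' => { 'access_key' => 'AKIA...', 'secret_key' => 'secret' } # 'region' missing
}

missing = KEYS[provider['provider']] - provider['connection'].keys

if missing.empty?
    puts 'connection attributes OK'
else
    puts "Missing provider connection attribute: '#{missing.join("', '")}'"
end
# Missing provider connection attribute: 'region'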
View File

@ -19,4 +19,4 @@
LIVE_DISK_SNAPSHOTS="kvm-qcow2 kvm-ceph kvm-ssh"
# Space separated list VNM_MAD-ACTION pairs that run locally
VNMAD_LOCAL_ACTIONS="alias_sdnat-post alias_sdnat-clean"
VNMAD_LOCAL_ACTIONS="elastic-post elastic-clean"

View File

@ -1,259 +0,0 @@
# rubocop:disable Naming/FileName
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'vnmmad'
require 'opennebula'
require 'oneprovision'
require 'ssh_stream'
# Alias SDnat Driver
class AliasSDNATDriver < VNMMAD::VNMDriver
# Driver name
DRIVER = 'alias_sdnat'
# Filter to look for NICs managed by this diver
XPATH_FILTER = "TEMPLATE/NIC_ALIAS[VN_MAD='alias_sdnat'] | " \
'TEMPLATE/NIC[ALIAS_IDS=*]'
def initialize(vm, hostname, deploy_id = nil)
super(vm, XPATH_FILTER, deploy_id)
@locking = true
@ssh = SshStreamCommand.new(hostname, nil)
@mapping = {}
client = OpenNebula::Client.new
host_id = @vm['/VM/HISTORY_RECORDS/HISTORY/HID']
@host = OpenNebula::Host.new_with_id(host_id, client)
rc = @host.info(true)
raise rc if OpenNebula.is_error?(rc)
unless @host.has_elements?('TEMPLATE/PROVISION_ID')
OpenNebula.log_error("No PROVISION_ID for host #{host_id}")
exit 1
end
provision_id = @host['TEMPLATE/PROVISION_ID']
provision = OneProvision::Provision.new_with_id(provision_id, client)
provision.info
@provider = provision.provider
end
def self.from_base64(vm64, hostname, deploy_id = nil)
vm_xml = Base64.decode64(vm64)
new(vm_xml, hostname, deploy_id)
end
# Activate NAT rules on hypervisor
def activate
process_nat
0
end
# Clean NAT rules on hypervisor
def deactivate
attach_nic_alias_id = @vm['TEMPLATE/NIC_ALIAS[ATTACH="YES"]/NIC_ID']
process_nat(false, attach_nic_alias_id)
0
end
# @return [Bool] True if error, False otherwise
def assign
@mapping = {}
provider = AliasSDNATDriver.provider(@provider, @host)
return true if provider.nil?
mapped = []
rc = @vm.each_nic do |nic|
next if !nic[:alias_id] || !nic[:parent_id] || !nic[:ip]
map = provider.assign(nic[:ip])
break false if map.empty?
mapped << nic[:ip]
@mapping.merge!(map)
end
mapped.each {|ip| provider.unassign(ip) } unless rc # rollback
!rc
end
# Creates provider based on host template and unassign all nic IP aliases
def unassign
provider = AliasSDNATDriver.provider(@provider, @host)
return if provider.nil?
@vm.each_nic do |nic|
next if !nic[:alias_id] || !nic[:parent_id] || !nic[:ip]
provider.unassign(nic[:ip])
end
end
# Factory method to create a VNM provider for the host provision
# @param host [OpenNebula::Host]
# @return [AWSProvider, PacketProvider, nil] nil
def self.provider(provider, host)
case provider.body['provider']
when 'aws'
require 'ec2_vnm'
AWSProvider.new(provider, host)
when 'packet'
require 'packet_vnm'
PacketProvider.new(provider, host)
else
nil
end
rescue StandardError => e
OpenNebula.log_error(
"Error creating provider #{provider.body['provider']}:#{e.message}"
)
nil
end
private
# Run iptables command with given params on @ssh stream
# @param params [String]
# @return [String] command stdout
def iptables(params)
commands = VNMMAD::VNMNetwork::Commands.new
commands.add :iptables, params
commands.run_remote(@ssh)
end
# Defines iptables SNAT for IP pair
# @param parent_ip [String]
# @param alias_ip [String]
def nat_add(parent_ip, alias_ip)
iptables("-t nat -A POSTROUTING -s #{parent_ip} " \
"-j SNAT --to-source #{alias_ip}")
iptables("-t nat -A PREROUTING -d #{alias_ip} " \
"-j DNAT --to-destination #{parent_ip}")
end
# Cleans iptables SNAT for ip pair
# @param parent_ip [String]
# @param alias_ip [String]
# @param strict [Bool]
def nat_drop(parent_ip, alias_ip, strict = false)
iptables_s = iptables('-t nat -S')
# drop any line related to PRE/POSTROUTING of parent/alias IPs
iptables_s.each_line do |line|
line.chomp!
# matches for various rule parts
pre1 = line.match(%r{^-A PREROUTING -d #{alias_ip}/}i)
pre2 = line.match(/--to-destination #{parent_ip}$/i)
post1 = line.match(%r{^-A POSTROUTING -s #{parent_ip}/}i)
post2 = line.match(/--to-source #{alias_ip}$/i)
drop_rule = "-t nat #{line.sub('-A ', '-D ')}"
if strict && ((pre1 && pre2) || (post1 && post2))
iptables(drop_rule)
elsif !strict && (pre1 || pre2 || post1 || post2)
iptables(drop_rule)
end
# iptables("-t nat #{line.sub('-A ', '-D ')}") if
# line =~ /^-A PREROUTING -d #{alias_ip}\//i or
# line =~ /--to-destination #{parent_ip}$/i or
# line =~ /^-A POSTROUTING -s #{parent_ip}\//i or
# line =~ /--to-source #{alias_ip}$/i
end
end
# Replace IP using mapping created by provider
# For AWS: @mapping = { <elastic_ip> => <secondary_priv_ip>, }
# @param ip [String]
# @return ip [String]
def replace_ip(ip)
@mapping[ip] || ip
end
# Creates iptables SNAT rules for all nic aliases
def process_nat(activate = true, attach_nic_alias_id = nil)
lock
# create Alias IP <-> NIC IP mapping tables
nic_parents = {}
nic_aliases = {}
process do |nic|
if nic[:alias_id] && nic[:parent_id] && nic[:ip]
next if attach_nic_alias_id &&
attach_nic_alias_id != nic[:nic_id]
nic_aliases[replace_ip(nic[:ip])] = nic[:parent_id]
elsif nic[:alias_ids] && nic[:ip]
nic_parents[nic[:nic_id]] = nic[:ip]
else
STDERR.puts "Problem with NIC #{nic}"
exit 1
end
end
# cleanup any related mapping rules
nic_aliases.each do |alias_ip, parent_id|
parent_ip = nic_parents[parent_id]
if parent_ip
strict = !attach_nic_alias_id.nil?
nat_drop(parent_ip, alias_ip, strict)
else
STDERR.puts "Parent NIC/IP with NIC_ID #{parent_id}"
exit 1
end
end
if activate
# create mapping rules
# rubocop:disable Layout/LineLength
# iptables -t nat -A POSTROUTING -s 192.168.0.0/24 -j SNAT --to-source 10.0.0.41
# iptables -t nat -A PREROUTING -d 10.0.0.41 -j DNAT --to-destination 192.168.0.250
# rubocop:enable Layout/LineLength
nic_aliases.each do |alias_ip, parent_id|
parent_ip = nic_parents[parent_id]
nat_add(parent_ip, alias_ip) if parent_ip
end
end
unlock
end
end
# rubocop:enable Naming/FileName

View File

@ -0,0 +1,164 @@
# rubocop:disable Naming/FileName
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'vnmmad'
require 'opennebula'
require 'oneprovision'
require 'ssh_stream'
# Elastic Driver
class ElasticDriver < VNMMAD::VNMDriver
# Driver name
DRIVER = 'elastic'
# Filter to look for NICs managed by this driver
XPATH_FILTER = "TEMPLATE/NIC[VN_MAD='elastic']"
def initialize(vm, hostname, deploy_id = nil)
super(vm, XPATH_FILTER, deploy_id)
@locking = true
@ssh = SshStreamCommand.new(hostname, nil)
@mapping = {}
client = OpenNebula::Client.new
host_id = @vm['/VM/HISTORY_RECORDS/HISTORY/HID']
@host = OpenNebula::Host.new_with_id(host_id, client)
rc = @host.info(true)
raise rc if OpenNebula.is_error?(rc)
unless @host.has_elements?('TEMPLATE/PROVISION_ID')
OpenNebula.log_error("No PROVISION_ID for host #{host_id}")
exit 1
end
provision_id = @host['TEMPLATE/PROVISION_ID']
provision = OneProvision::Provision.new_with_id(provision_id, client)
provision.info
@provider = provision.provider
end
def self.from_base64(vm64, hostname, deploy_id = nil)
vm_xml = Base64.decode64(vm64)
new(vm_xml, hostname, deploy_id)
end
# Create route and arp proxy
def activate
attach_nic_id = @vm['TEMPLATE/NIC[ATTACH="YES"]/NIC_ID']
process do |nic|
next if attach_nic_id && attach_nic_id != nic[:nic_id]
ip("route add #{nic[:ip]}/32 dev #{nic[:bridge]}")
ip("neighbour add proxy #{nic[:gateway]} dev #{nic[:bridge]}")
end
0
end
# Remove route and arp proxy
def deactivate
attach_nic_id = @vm['TEMPLATE/NIC[ATTACH="YES"]/NIC_ID']
process do |nic|
next if attach_nic_id && attach_nic_id != nic[:nic_id]
ip("route del #{nic[:ip]}/32 dev #{nic[:bridge]} | true")
ip("neighbour del proxy #{nic[:gateway]} dev #{nic[:bridge]} | true")
next if nic[:conf][:keep_empty_bridge]
ip("link delete #{nic[:bridge]} | true")
end
0
end
# @return [Bool] True if error, False otherwise
def assign
provider = ElasticDriver.provider(@provider, @host)
return true if provider.nil?
assigned = []
attach_nic_id = @vm['TEMPLATE/NIC[ATTACH="YES"]/NIC_ID']
rc = @vm.each_nic do |nic|
next if attach_nic_id && attach_nic_id != nic[:nic_id]
# pass aws_allocation_id if present
opts = { :alloc_id => nic[:aws_allocation_id] }
break false \
unless provider.assign(nic[:ip], nic[:external], opts) == 0
assigned << [nic[:ip], nic[:external]]
end
assigned.each {|ip, ext| provider.unassign(ip, ext) } unless rc # rollback
!rc
end
# Creates provider based on host template and unassign all nic IP
def unassign
provider = ElasticDriver.provider(@provider, @host)
return if provider.nil?
attach_nic_id = @vm['TEMPLATE/NIC[ATTACH="YES"]/NIC_ID']
@vm.each_nic do |nic|
next if attach_nic_id && attach_nic_id != nic[:nic_id]
provider.unassign(nic[:ip], nic[:external])
end
end
# Factory method to create a VNM provider for the host provision
#   @param provider [Provider] Provision provider
#   @param host [OpenNebula::Host]
#   @return [AWSProvider, PacketProvider, nil]
def self.provider(provider, host)
case provider.body['provider']
when 'aws'
require 'aws_vnm'
AWSProvider.new(provider, host)
when 'packet'
require 'packet_vnm'
PacketProvider.new(provider, host)
else
nil
end
rescue StandardError => e
OpenNebula.log_error(
"Error creating provider #{provider.body['provider']}:#{e.message}"
)
nil
end
private
# Run ip command with given params on @ssh stream
# @param params [String]
# @return [String] command stdout
def ip(params)
commands = VNMMAD::VNMNetwork::Commands.new
commands.add :ip, params
commands.run_remote(@ssh)
end
end
# rubocop:enable Naming/FileName

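Instead of the SNAT/DNAT aliasing removed above, the elastic driver installs a /32 route and a proxy-ARP entry per NIC; a minimal sketch of the commands it builds for a hypothetical NIC follows (values are illustrative):

# Hypothetical NIC attributes as seen by the driver.
nic = { :ip => '172.16.100.53', :gateway => '172.16.100.52', :bridge => 'br0' }

activate = [
    "ip route add #{nic[:ip]}/32 dev #{nic[:bridge]}",
    "ip neighbour add proxy #{nic[:gateway]} dev #{nic[:bridge]}"
]

deactivate = [
    "ip route del #{nic[:ip]}/32 dev #{nic[:bridge]}",
    "ip neighbour del proxy #{nic[:gateway]} dev #{nic[:bridge]}"
]

puts activate
puts deactivate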
View File

@ -35,17 +35,18 @@ end
$LOAD_PATH << LIB_LOCATION + '/oneprovision/lib'
require 'aws-sdk-ec2'
require 'ipaddr'
# Class covering AWS functionality for AMM driver and AliasSDNAT
# Class covering AWS functionality for Elastic driver
class AWSProvider
def initialize(provider, host)
connect = provider.body['connection']
options = {
:access_key_id => connect['aws_access'],
:secret_access_key => connect['aws_secret'],
:region => connect['aws_region']
:access_key_id => connect['access_key'],
:secret_access_key => connect['secret_key'],
:region => connect['region']
}
Aws.config.merge!(options)
@ -54,52 +55,53 @@ class AWSProvider
@deploy_id = host['TEMPLATE/PROVISION/DEPLOY_ID']
end
# Assign a public_ip to an instance. It creates a secondary IP to map the
# public one
# @param public_ip [String] the IP to map
# @return [Hash] if success a Hash with the mapping, empty on error
def assign(public_ip)
# Assign a private IP to the instance, associate given elastic ip with it
# @param ip [String] private_ip for AWS
# @param external [String] public_ip, not used for AWS
# @param opts [Hash]
# opts[:alloc_id] [String] must contain public_ip alloc id
# @return 0 on success, 1 on error
def assign(ip, _external, opts = {})
instcs = @ec2.describe_instances({ :instance_ids => [@deploy_id] })
inst = instcs[0][0].instances[0]
nic_id = inst.network_interfaces[0].network_interface_id
rc = @ec2.assign_private_ip_addresses(
{ :network_interface_id => nic_id,
:secondary_private_ip_address_count => 1 }
@ec2.assign_private_ip_addresses(
{ :network_interface_id => nic_id,
:private_ip_addresses => [ip] }
)
priv_ip = rc.assigned_private_ip_addresses[0].private_ip_address
@ec2.associate_address(
{ :network_interface_id => nic_id,
:allocation_id => opts[:alloc_id],
:private_ip_address => ip }
)
@ec2.associate_address({
:instance_id => @deploy_id,
:public_ip => public_ip,
:private_ip_address => priv_ip
})
{ public_ip => priv_ip }
0
rescue StandardError => e
OpenNebula.log_error("Error assiging #{public_ip}:#{e.message}")
{}
OpenNebula.log_error("Error assiging #{ip}:#{e.message}")
1
end
# Unassign a public_ip from an instance private_ip
# @param public_ip [String] the public ip
def unassign(public_ip)
filter = [{ :name => 'public-ip', :values => [public_ip] }]
ip = @ec2.describe_addresses({ :filters => filter }).addresses[0]
# Unassign a public_ip from an instance
# @param ip [String] not used for AWS
# @param external [String] the public ip
def unassign(ip, external)
filter = [{ :name => 'public-ip', :values => [external] }]
aws_ip = @ec2.describe_addresses({ :filters => filter }).addresses[0]
if ip.nil? || ip.network_interface_id.nil? || ip.private_ip_address.nil?
if aws_ip.nil? || aws_ip.network_interface_id.nil? || aws_ip.private_ip_address.nil?
return
end
# free associated private ip, it frees associated public ip
@ec2.unassign_private_ip_addresses(
{ :network_interface_id => ip.network_interface_id,
:private_ip_addresses => [ip.private_ip_address] }
{ :network_interface_id => aws_ip.network_interface_id,
:private_ip_addresses => [aws_ip.private_ip_address] }
)
rescue StandardError => e
OpenNebula.log_error("Error unassiging #{public_ip}:#{e.message}")
OpenNebula.log_error("Error unassiging #{ip}:#{e.message}")
end
end

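A sketch of the two EC2 requests the new assign method issues per elastic NIC, with illustrative identifiers; it only prints the request parameters, a real run would pass them to Aws::EC2::Client:

require 'json'

nic_id   = 'eni-0123456789abcdef0'      # primary network interface of the instance
priv_ip  = '172.16.100.53'              # NIC IP from the elastic address range
alloc_id = 'eipalloc-0123456789abcdef0' # AWS_ALLOCATION_ID of the Elastic IP

assign_private_ip = {
    :network_interface_id => nic_id,
    :private_ip_addresses => [priv_ip]
}

associate_address = {
    :network_interface_id => nic_id,
    :allocation_id        => alloc_id,
    :private_ip_address   => priv_ip
}

puts JSON.pretty_generate(assign_private_ip)
puts JSON.pretty_generate(associate_address)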
View File

@ -39,13 +39,13 @@ $LOAD_PATH << File.dirname(__FILE__)
$LOAD_PATH << File.join(File.dirname(__FILE__), '..')
$LOAD_PATH << LIB_LOCATION + '/oneprovision/lib'
require 'AliasSDNAT'
require 'Elastic'
template64 = STDIN.read
hostname = ARGV[0]
begin
drv = AliasSDNATDriver.from_base64(template64, hostname)
drv = ElasticDriver.from_base64(template64, hostname)
drv.unassign

View File

@ -25,7 +25,7 @@ else
LIB_LOCATION ||= ONE_LOCATION + '/lib'
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
PACKET_LOCATION ||= ONE_LOCATION + '/ruby/vendors/packethost/lib'
PACKET_LOCATION ||= ONE_LOCATION + '/lib/ruby/vendors/packethost/lib'
end
if File.directory?(GEMS_LOCATION)
@ -39,37 +39,36 @@ $LOAD_PATH << PACKET_LOCATION
require 'packet'
# Class covering Packet/Equinix functionality for AliasSDNAT
# Class covering Packet/Equinix functionality for Elastic driver
class PacketProvider
def initialize(provider, host)
connect = provider.body['connection']
@client = Packet::Client.new(connect['packet_token'])
@client = Packet::Client.new(connect['token'])
@deploy_id = host['TEMPLATE/PROVISION/DEPLOY_ID']
end
def assign(public_ip)
@client.assign_cidr_device("#{public_ip}/32", @deploy_id)
{ public_ip => public_ip }
def assign(ip, _external, opts = {})
@client.assign_cidr_device("#{ip}/32", @deploy_id)
0
rescue StandardError => e
OpenNebula.log_error("Error assiging #{public_ip}:#{e.message}")
{}
OpenNebula.log_error("Error assiging #{ip}:#{e.message}")
1
end
def unassign(public_ip)
def unassign(ip, _external)
dev = @client.get_device(@deploy_id)
ip = dev.ip_addresses.select do |i|
i['address'] == public_ip &&
i['address'] == ip &&
i['cidr'] == 32 &&
i['address_family'] == 4
end
@client.delete_ip(ip[0]['id'])
rescue StandardError => e
OpenNebula.log_error("Error assiging #{public_ip}:#{e.message}")
{}
OpenNebula.log_error("Error assiging #{ip}:#{e.message}")
end
end

View File

@ -39,7 +39,7 @@ $LOAD_PATH << File.dirname(__FILE__)
$LOAD_PATH << File.join(File.dirname(__FILE__), '..')
$LOAD_PATH << LIB_LOCATION + '/oneprovision/lib'
require 'AliasSDNAT'
require 'Elastic'
template64 = STDIN.read
@ -47,11 +47,16 @@ deploy_id = ARGV[0]
hostname = ARGV[1]
begin
drv = AliasSDNATDriver.from_base64(template64, hostname, deploy_id)
drv = ElasticDriver.from_base64(template64, hostname, deploy_id)
exit 1 if drv.assign
drv.activate
begin
drv.activate
rescue StandardError => e
drv.unassign # rollback assign
raise e
end
rescue StandardError => e
OpenNebula.log_error(e.message)
OpenNebula.log_error(e.backtrace)

src/vnm_mad/remotes/elastic/pre Executable file
View File

@ -0,0 +1,58 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)
if !ONE_LOCATION
LIB_LOCATION ||= '/usr/lib/one'
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
else
LIB_LOCATION ||= ONE_LOCATION + '/lib'
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
end
if File.directory?(GEMS_LOCATION)
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
require 'rubygems'
Gem.use_paths(File.realpath(GEMS_LOCATION))
end
$LOAD_PATH << RUBY_LIB_LOCATION
$LOAD_PATH << File.dirname(__FILE__)
$LOAD_PATH << File.join(File.dirname(__FILE__), '..')
require 'vnmmad'
template64 = STDIN.read
deploy_id = ARGV[0]
begin
drv = VNMMAD::VLANDriver.from_base64(template64,
"TEMPLATE/NIC[VN_MAD='elastic']",
deploy_id)
drv.create_bridges
rescue StandardError => e
OpenNebula.log_error(e.message)
OpenNebula.log_error(e.backtrace)
exit 1
end

Some files were not shown because too many files have changed in this diff.