1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-02-28 17:57:22 +03:00

OneProvision refactor.

This commit is contained in:
Alejandro Huertas 2019-01-18 16:56:48 +01:00
parent fad4a1c454
commit 0b027090a2
29 changed files with 2711 additions and 2528 deletions

View File

@ -248,8 +248,8 @@ LIB_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/sh \ $LIB_LOCATION/sh \
$LIB_LOCATION/ruby/cli \ $LIB_LOCATION/ruby/cli \
$LIB_LOCATION/ruby/cli/one_helper \ $LIB_LOCATION/ruby/cli/one_helper \
$LIB_LOCATION/ruby/cli/one_helper/oneprovision_helpers \ $LIB_LOCATION/ruby/vcenter_driver \
$LIB_LOCATION/ruby/vcenter_driver" $LIB_LOCATION/oneprovision/lib"
VAR_DIRS="$VAR_LOCATION/remotes \ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/etc \ $VAR_LOCATION/remotes/etc \
@ -513,9 +513,10 @@ INSTALL_CLIENT_FILES=(
INSTALL_ONEPROVISION_FILES=( INSTALL_ONEPROVISION_FILES=(
ONEPROVISION_BIN_FILES:$BIN_LOCATION ONEPROVISION_BIN_FILES:$BIN_LOCATION
ONEPROVISION_ONE_LIB_FILES:$LIB_LOCATION/ruby/cli/one_helper/oneprovision_helpers ONEPROVISION_ONE_LIB_FILES:$LIB_LOCATION/ruby/cli/one_helper
ONEPROVISION_CONF_FILES:$ETC_LOCATION/cli ONEPROVISION_CONF_FILES:$ETC_LOCATION/cli
ONEPROVISION_ANSIBLE_FILES:$SHARE_LOCATION/oneprovision ONEPROVISION_ANSIBLE_FILES:$SHARE_LOCATION/oneprovision
ONEPROVISION_LIB_FILES:$LIB_LOCATION/oneprovision/lib
) )
INSTALL_SUNSTONE_RUBY_FILES=( INSTALL_SUNSTONE_RUBY_FILES=(
@ -1822,22 +1823,23 @@ CLI_CONF_FILES="src/cli/etc/onegroup.yaml \
ONEPROVISION_BIN_FILES="src/cli/oneprovision" ONEPROVISION_BIN_FILES="src/cli/oneprovision"
ONEPROVISION_ONE_LIB_FILES="src/cli/one_helper/oneprovision_helpers/ansible_helper.rb \ ONEPROVISION_ONE_LIB_FILES="src/cli/one_helper/oneprovision_helper.rb"
src/cli/one_helper/oneprovision_helpers/cluster_helper.rb \
src/cli/one_helper/oneprovision_helpers/common_helper.rb \
src/cli/one_helper/oneprovision_helpers/datastore_helper.rb \
src/cli/one_helper/oneprovision_helpers/host_helper.rb \
src/cli/one_helper/oneprovision_helpers/provision_helper.rb \
src/cli/one_helper/oneprovision_helpers/vnet_helper.rb"
ONEPROVISION_CONF_FILES="src/cli/etc/oneprovision_cluster.yaml \ ONEPROVISION_CONF_FILES="src/cli/etc/oneprovision.yaml"
src/cli/etc/oneprovision_datastore.yaml \
src/cli/etc/oneprovision_host.yaml \
src/cli/etc/oneprovision_provision.yaml \
src/cli/etc/oneprovision_vnet.yaml"
ONEPROVISION_ANSIBLE_FILES="share/oneprovision/ansible" ONEPROVISION_ANSIBLE_FILES="share/oneprovision/ansible"
ONEPROVISION_LIB_FILES="src/oneprovision/lib/ansible.rb \
src/oneprovision/lib/oneprovision.rb \
src/oneprovision/lib/cluster.rb \
src/oneprovision/lib/datastore.rb \
src/oneprovision/lib/driver.rb \
src/oneprovision/lib/host.rb \
src/oneprovision/lib/provision.rb \
src/oneprovision/lib/resource.rb \
src/oneprovision/lib/utils.rb \
src/oneprovision/lib/vnet.rb"
#----------------------------------------------------------------------------- #-----------------------------------------------------------------------------
# Sunstone files # Sunstone files
#----------------------------------------------------------------------------- #-----------------------------------------------------------------------------

View File

@ -0,0 +1,15 @@
[defaults]
retry_files_enabled = False
deprecation_warnings = False
display_skipped_hosts = False
callback_whitelist =
stdout_callback = skippy
host_key_checking = False
remote_user = <%= remote_user %>
remote_port = <%= remote_port %>
private_key_file = <%= private_key %>
roles_path = <%= roles %>
[privilege_escalation]
become = yes
become_user = root

View File

@ -61,6 +61,10 @@
:desc: Real MEM :desc: Real MEM
:size: 18 :size: 18
:PROVIDER:
:desc: Host provider
:size: 8
:STAT: :STAT:
:desc: Host status :desc: Host status
:size: 6 :size: 6

View File

@ -1,28 +0,0 @@
---
:ID:
:desc: Cluster identifier
:size: 5
:NAME:
:desc: Name of the Cluster
:size: 25
:left: true
:HOSTS:
:desc: Number of Hosts
:size: 5
:VNETS:
:desc: Number of Networks
:size: 5
:DATASTORES:
:desc: Number of Datastores
:size: 10
:default:
- :ID
- :NAME
- :HOSTS
- :VNETS
- :DATASTORES

View File

@ -1,76 +0,0 @@
---
:ID:
:desc: ONE identifier for the Datastore
:size: 4
:USER:
:desc: Username of the Datastore owner
:size: 10
:left: true
:GROUP:
:desc: Group of the Datastore
:size: 10
:left: true
:NAME:
:desc: Name of the Datastore
:size: 13
:left: true
:SIZE:
:desc: Total Datastore size
:size: 10
:AVAIL:
:desc: Free Datastore size (%)
:size: 5
:left: true
:CLUSTERS:
:desc: Cluster IDs
:size: 12
:left: true
:IMAGES:
:desc: Number of Images
:size: 6
:TYPE:
:desc: Datastore type
:size: 4
:left: true
:DS:
:desc: Datastore driver
:size: 7
:left: true
:PROVIDER:
:desc: Baremetal provider name
:size: 8
:left: true
:TM:
:desc: Transfer driver
:size: 7
:left: true
:STAT:
:desc: State of the Datastore
:size: 4
:left: true
:default:
- :ID
- :NAME
- :SIZE
- :AVAIL
- :CLUSTERS
- :IMAGES
- :TYPE
- :DS
- :PROVIDER
- :TM
- :STAT

View File

@ -1,46 +0,0 @@
---
:ID:
:desc: ONE identifier for Host
:size: 4
:NAME:
:desc: Name of the Host
:size: 15
:left: true
:CLUSTER:
:desc: Name of the Cluster
:size: 9
:left: true
:RVM:
:desc: Number of Virtual Machines running
:size: 3
:ZVM:
:desc: Number of Virtual Machine zombies
:size: 3
:PROVIDER:
:desc: Baremetal provider name
:size: 8
:left: true
:VM_MAD:
:desc: Virtual Machine driver
:size: 8
:left: true
:STAT:
:desc: Host status
:size: 6
:left: true
:default:
- :ID
- :NAME
- :CLUSTER
- :RVM
- :PROVIDER
- :VM_MAD
- :STAT

View File

@ -1,48 +0,0 @@
---
:ID:
:desc: ONE identifier for Virtual Network
:size: 4
:USER:
:desc: Username of the Virtual Network owner
:size: 15
:left: true
:GROUP:
:desc: Group of the Virtual Network
:size: 12
:left: true
:NAME:
:desc: Name of the Virtual Network
:size: 19
:left: true
:CLUSTERS:
:desc: Cluster IDs
:size: 10
:left: true
:BRIDGE:
:desc: Bridge associated to the Virtual Network
:size: 8
:left: true
:PROVIDER:
:desc: Baremetal provider name
:size: 8
:left: true
:LEASES:
:desc: Number of this Virtual Networks given leases
:size: 6
:default:
- :ID
- :USER
- :GROUP
- :NAME
- :CLUSTERS
- :BRIDGE
- :PROVIDER
- :LEASES

View File

@ -88,8 +88,8 @@ class OneDatastoreHelper < OpenNebulaHelper::OneHelper
end end
column :TYPE, "Datastore type", :left, :size=>4 do |d| column :TYPE, "Datastore type", :left, :size=>4 do |d|
type = Datastore::DATASTORE_TYPES[d["TYPE"].to_i] type = OpenNebula::Datastore::DATASTORE_TYPES[d["TYPE"].to_i]
Datastore::SHORT_DATASTORE_TYPES[type] OpenNebula::Datastore::SHORT_DATASTORE_TYPES[type]
end end
column :DS, "Datastore driver", :left, :size=>7 do |d| column :DS, "Datastore driver", :left, :size=>7 do |d|
@ -101,8 +101,8 @@ class OneDatastoreHelper < OpenNebulaHelper::OneHelper
end end
column :STAT, "State of the Datastore", :left, :size=>3 do |d| column :STAT, "State of the Datastore", :left, :size=>3 do |d|
state = Datastore::DATASTORE_STATES[d["STATE"].to_i] state = OpenNebula::Datastore::DATASTORE_STATES[d["STATE"].to_i]
Datastore::SHORT_DATASTORE_STATES[state] OpenNebula::Datastore::SHORT_DATASTORE_STATES[state]
end end
default :ID, :USER, :GROUP, :NAME, :SIZE, :AVAIL, :CLUSTERS, :IMAGES, default :ID, :USER, :GROUP, :NAME, :SIZE, :AVAIL, :CLUSTERS, :IMAGES,

View File

@ -206,6 +206,10 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
end end
end end
column :PROVIDER, "Host provider", :left, :size=>6 do |d|
d['TEMPLATE']['PM_MAD'].nil? ? '-' : d['TEMPLATE']['PM_MAD']
end
column :STAT, "Host status", :left, :size=>6 do |d| column :STAT, "Host status", :left, :size=>6 do |d|
OneHostHelper.state_to_str(d["STATE"]) OneHostHelper.state_to_str(d["STATE"])
end end

View File

@ -0,0 +1,254 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'securerandom'
# OneProvision Helper
# CLI helper implementing the `oneprovision` command on top of the
# OneProvision library. A "provision" groups a cluster together with its
# hosts, vnets and datastores; clusters carry the provision UUID in
# TEMPLATE/PROVISION/PROVISION_ID.
class OneProvisionHelper < OpenNebulaHelper::OneHelper

    # Resource name used by the generic OneHelper machinery.
    def self.rname
        'PROVISION'
    end

    # Table-layout configuration file for `oneprovision list`.
    def self.conf_file
        'oneprovision.yaml'
    end

    # Propagates parsed CLI options to the OneProvision global logger,
    # run mode (batch/interactive) and run options.
    #
    # @param options [Hash] parsed CLI options
    def parse_options(options)
        OneProvision::OneProvisionLogger.get_logger(options)
        OneProvision::Mode.get_run_mode(options)
        OneProvision::Options.get_run_options(options)
    end

    # Builds the ShowTable used to render the provision pool. Rows are the
    # hashes produced by #get_list (CLUSTERS/HOSTS/... hold {'ID' => [...]}).
    #
    # @return [CLIHelper::ShowTable]
    def format_pool
        config_file = self.class.table_conf
        table = CLIHelper::ShowTable.new(config_file, self) do
            column :ID, 'Identifier for the Provision', :size => 36 do |p|
                p['ID']
            end
            column :NAME, 'Name of the Provision', :left, :size => 25 do |p|
                p['NAME']
            end
            column :CLUSTERS, 'Number of Clusters', :size => 8 do |p|
                p['CLUSTERS']['ID'].size
            end
            column :HOSTS, 'Number of Hosts', :size => 5 do |p|
                p['HOSTS']['ID'].size
            end
            column :VNETS, 'Number of Networks', :size => 5 do |p|
                p['VNETS']['ID'].size
            end
            column :DATASTORES, 'Number of Datastores', :size => 10 do |p|
                p['DATASTORES']['ID'].size
            end
            column :STAT, 'Status of the Provision', :left, :size => 15 do |p|
                p['STATUS']
            end
            default :ID, :NAME, :CLUSTERS, :HOSTS, :VNETS, :DATASTORES, :STAT
        end
        table
    end

    #######################################################################
    # Helper provision functions
    #######################################################################

    # Deploys a new provision (fresh UUID) from a template file.
    #
    # NOTE(review): the guard prints 'OpenNebula is not running' when
    # Utils.one_running? is truthy, which reads inverted — confirm the
    # semantics of OneProvision::Utils.one_running? before changing it.
    #
    # @param config [String] path to the provision template
    def create(config)
        msg = 'OpenNebula is not running'
        OneProvision::Utils.fail(msg) if OneProvision::Utils.one_running?
        provision = OneProvision::Provision.new(SecureRandom.uuid)
        provision.create(config)
    end

    # Re-runs the configuration phase of an existing provision.
    #
    # @param provision_id [String] provision UUID
    # @param options      [Hash]   CLI options (:force re-configures)
    def configure(provision_id, options)
        provision = OneProvision::Provision.new(provision_id)
        provision.refresh
        provision.configure((options.key? :force))
    end

    # Deletes a provision and the resources associated to it.
    #
    # @param provision_id [String] provision UUID
    def delete(provision_id)
        provision = OneProvision::Provision.new(provision_id)
        provision.refresh
        provision.delete
    end

    #######################################################################
    # Helper host functions
    #######################################################################

    # Applies an operation (resume/poweroff/reboot/delete/configure) to a
    # set of provisioned hosts through the standard host helper.
    #
    # @param hosts     [Array] host IDs/names accepted by perform_actions
    # @param operation [Hash]  :operation (String) and :message for the CLI
    # @param options   [Hash]  CLI options (:hard, :force, ...)
    def hosts_operation(hosts, operation, options)
        parse_options(options)
        host_helper = OneHostHelper.new
        host_helper.set_client(options)
        host_helper.perform_actions(hosts,
                                    options,
                                    operation[:message]) do |host|
            # Re-wrap the plain host as a provision-aware host object.
            host = OneProvision::Host.new(host['ID'])
            case operation[:operation]
            when 'resume'
                host.resume
            when 'poweroff'
                host.poweroff
            when 'reboot'
                host.reboot((options.key? :hard))
            when 'delete'
                host.delete
            when 'configure'
                host.configure((options.key? :force))
            end
        end
    end

    #######################################################################
    # Utils functions
    #######################################################################

    # Collects the distinct provision UUIDs present in the cluster pool
    # (clusters created by a provision store the UUID in their template).
    #
    # @return [Array<String>] provision IDs
    def provision_ids
        clusters = OneProvision::Cluster.new.pool
        rc = clusters.info
        if OpenNebula.is_error?(rc)
            OneProvision::Utils.fail(rc.message)
        end
        clusters = clusters.reject do |x|
            x['TEMPLATE/PROVISION/PROVISION_ID'].nil?
        end
        clusters = clusters.uniq do |x|
            x['TEMPLATE/PROVISION/PROVISION_ID']
        end
        ids = []
        clusters.each {|c| ids << c['TEMPLATE/PROVISION/PROVISION_ID'] }
        ids
    end

    # Builds one row per provision for the pool table.
    #
    # @param columns        [Array<String>] resource kinds to aggregate
    #        (e.g. %w[clusters hosts vnets datastores])
    # @param provision_list [Boolean] when true the row ID is the provision
    #        UUID, otherwise the ID of the provision's first cluster
    # @return [Array<Hash>]
    def get_list(columns, provision_list)
        ret = []
        ids = provision_ids
        ids.each do |i|
            provision = OneProvision::Provision.new(i)
            provision.refresh
            element = {}
            element['ID'] = i if provision_list
            element['ID'] = provision.clusters[0]['ID'] unless provision_list
            element['NAME'] = provision.clusters[0]['NAME']
            element['STATUS'] = provision.status
            columns.each do |c|
                element[c.to_s.upcase] = { 'ID' => [] }
                # Reads the provision's @clusters/@hosts/... collections.
                provision.instance_variable_get("@#{c}").each do |v|
                    element[c.to_s.upcase]['ID'] << v['ID']
                end
            end
            ret << element
        end
        ret
    end

    # Lists every provision in table form.
    #
    # @param options [Hash] CLI options forwarded to the table renderer
    def list(options)
        columns = %w[clusters hosts vnets datastores]
        format_pool.show(get_list(columns, true), options)
    end

    # Prints detailed information of a single provision.
    #
    # @param provision_id [String] provision UUID
    def show(provision_id)
        provision = OneProvision::Provision.new(provision_id)
        provision.refresh
        OneProvision::Utils.fail('Provision not found.') unless provision.exists
        ret = {}
        ret['id'] = provision_id
        ret['name'] = provision.clusters[0]['NAME']
        ret['status'] = provision.status
        %w[clusters datastores hosts vnets].each do |r|
            ret["@#{r}_ids"] = []
            provision.instance_variable_get("@#{r}").each do |x|
                ret["@#{r}_ids"] << (x['ID'])
            end
        end
        format_resource(ret)
    end

    # Pretty-prints one provision: a header block followed by the ID
    # listings of its clusters, hosts, vnets and datastores.
    #
    # @param provision [Hash] hash assembled by #show
    def format_resource(provision)
        str_h1 = '%-80s'
        status = provision['status']
        id = provision['id']
        CLIHelper.print_header(str_h1 % "PROVISION #{id} INFORMATION")
        puts format('ID :%<s>s', :s => id)
        puts format('NAME :%<s>s', :s => provision['name'])
        puts format('STATUS :%<s>s', :s => CLIHelper.color_state(status))
        puts
        CLIHelper.print_header(format('%<s>s', :s => 'CLUSTERS'))
        provision['@clusters_ids'].each do |i|
            puts format('%<s>s', :s => i)
        end
        puts
        CLIHelper.print_header(format('%<s>s', :s => 'HOSTS'))
        provision['@hosts_ids'].each do |i|
            puts format('%<s>s', :s => i)
        end
        puts
        CLIHelper.print_header(format('%<s>s', :s => 'VNETS'))
        provision['@vnets_ids'].each do |i|
            puts format('%<s>s', :s => i)
        end
        puts
        CLIHelper.print_header(format('%<s>s', :s => 'DATASTORES'))
        provision['@datastores_ids'].each do |i|
            puts format('%<s>s', :s => i)
        end
    end
end

View File

@ -1,258 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'yaml'
require 'nokogiri'
require 'tempfile'
require 'tmpdir'
require 'json'
require 'base64'
# Default provision parameters, merged into each host's 'connection'
# section when the template does not override them.
#
# Frozen (shallow) so the constant itself cannot be reassigned/extended.
# NOTE(review): create_config fetches CONFIG_DEFAULTS[section] and calls
# merge! on it, mutating the shared inner hash in place — defaults can
# leak between hosts; it should dup before merging. The inner hashes are
# deliberately left unfrozen here to avoid turning that latent bug into a
# FrozenError.
CONFIG_DEFAULTS = {
    'connection' => {
        'remote_user' => 'root',
        'remote_port' => 22,
        'public_key' => '/var/lib/one/.ssh/ddc/id_rsa.pub',
        'private_key' => '/var/lib/one/.ssh/ddc/id_rsa'
    }
}.freeze
# Ansible params
#
# Supported Ansible version range: [min, exclusive max); checked by
# check_ansible_version. Frozen: these are constants and must not be
# mutated at runtime.
ANSIBLE_VERSION = [Gem::Version.new('2.5'), Gem::Version.new('2.7')].freeze
# Extra arguments appended to every ansible/ansible-playbook invocation.
ANSIBLE_ARGS = "--ssh-common-args='-o UserKnownHostsFile=/dev/null'".freeze
# Inventory name; a global (not a constant) because it is reassigned per
# provision in generate_ansible_configs when TEMPLATE/ANSIBLE_PLAYBOOK is set.
$ANSIBLE_INVENTORY = 'default'
# CLI helper driving host configuration through Ansible: generates the
# inventory, per-host variables and ansible.cfg into a temp directory and
# runs ansible / ansible-playbook over the provision hosts. Relies on the
# globals set up by CommonHelper#get_mode ($logger, $PING_RETRIES,
# $PING_TIMEOUT) and on the $common_helper instance.
class OneProvisionAnsibleHelper < OpenNebulaHelper::OneHelper

    # Resource name used by the generic OneHelper machinery.
    def self.rname
        "ANSIBLE"
    end

    # Aborts unless the locally installed ansible binary's version is
    # inside the supported [min, max) range (ANSIBLE_VERSION).
    def check_ansible_version()
        # `ansible --version` first line is e.g. "ansible 2.5.1".
        version = Gem::Version.new(`ansible --version`.split[1])
        if (version < ANSIBLE_VERSION[0]) || (version >= ANSIBLE_VERSION[1])
            fail("Unsupported Ansible ver. #{version}, " +
                "must be >= #{ANSIBLE_VERSION[0]} and < #{ANSIBLE_VERSION[1]}")
        end
    end

    # Retries the SSH reachability check up to $PING_RETRIES times,
    # sleeping $PING_TIMEOUT seconds between attempts.
    #
    # @param ansible_dir [String] directory with the generated configs
    # @return [Boolean] true when the hosts became reachable
    def retry_ssh(ansible_dir)
        ret = false
        retries = 0
        while !ret && retries < $PING_RETRIES do
            begin
                ret = ansible_ssh(ansible_dir)
            rescue OneProvisionLoopException
                retries += 1
                sleep($PING_TIMEOUT)
            end
        end
        ret
    end

    # Blocks until SSH is reachable; when the bounded retries are
    # exhausted it falls back to the interactive retry/failover loop.
    def try_ssh(ansible_dir)
        $logger.info("Checking working SSH connection")
        if !retry_ssh(ansible_dir)
            $common_helper.retry_loop 'SSH connection is failing' do ansible_ssh(ansible_dir) end
        end
    end

    # Extracts the failed tasks from ansible-playbook stdout into a
    # compact "- host : TASK[name] msg" listing.
    #
    # @param stdout [String] raw ansible-playbook output
    # @return [String, nil] nil when parsing raised
    def parse_ansible(stdout)
        begin
            rtn = []
            task = 'UNKNOWN'
            stdout.lines.each do |line|
                # Track the task name so fatal lines can reference it.
                task = $1 if line =~ /^TASK \[(.*)\]/i
                if line =~ /^fatal:/i
                    host = 'UNKNOWN'
                    text = ''
                    if line =~ /^fatal: \[([^\]]+)\]: .* => ({.*})$/i
                        host = $1
                        begin
                            # The trailing JSON payload carries the error msg.
                            text = JSON.parse($2)['msg'].strip.gsub("\n", ' ')
                            text = "- #{text}"
                        rescue
                        end
                    elsif line =~ /^fatal: \[([^\]]+)\]: .* =>/i
                        host = $1
                    end
                    rtn << sprintf("- %-15s : TASK[%s] %s", host, task, text)
                end
            end
            rtn.join("\n")
        rescue
            nil
        end
    end

    # Checks SSH reachability of all inventory hosts with ansible's "raw"
    # module; raises OneProvisionLoopException when no host answered or
    # the command itself failed.
    def ansible_ssh(ansible_dir)
        # Note: We want only to check the working SSH connection, but
        # Ansible "ping" module requires also Python to be installed on
        # the remote side, otherwise fails. So we use only "raw" module with
        # simple command. Python should be installed by "configure" phase later.
        #
        # Older approach with "ping" module:
        # ANSIBLE_CONFIG=#{ansible_dir}/ansible.cfg ansible #{ANSIBLE_ARGS} -m ping all -i #{ansible_dir}/inventory
        cmd = "ANSIBLE_CONFIG=#{ansible_dir}/ansible.cfg ANSIBLE_BECOME=false"
        cmd << " ansible #{ANSIBLE_ARGS}"
        cmd << " -i #{ansible_dir}/inventory"
        cmd << " -m raw all -a /bin/true"
        o, _e, s = $common_helper.run(cmd)
        if s and s.success?
            hosts = o.lines.count { |l| l =~ /success/i }
            if hosts == 0
                raise OneProvisionLoopException
            else
                return true
            end
        else
            raise OneProvisionLoopException
        end
    end

    #TODO: support different variables and connection parameters for each host
    #TODO: make it a separate module?
    # Writes the Ansible inventory, host_vars directory and ansible.cfg
    # into a freshly created temporary directory.
    #
    # NOTE(review): connection parameters and the playbook are taken from
    # hosts[0] only — see the TODOs above.
    #
    # @param hosts [Array] provision host objects
    # @return [String] path of the generated directory
    def generate_ansible_configs(hosts)
        ansible_dir = Dir.mktmpdir()
        $logger.debug("Generating Ansible configurations into #{ansible_dir}")
        # Generate 'inventory' file
        c = "[nodes]\n"
        hosts.each do |h|
            h.info
            c << "#{h['NAME']}\n"
        end
        c << "\n"
        $common_helper.write_file_log("#{ansible_dir}/inventory", c)
        # Generate "host_vars" directory
        Dir.mkdir("#{ansible_dir}/host_vars")
        hosts.each do |h|
            h.info
            # Per-host vars travel base64-encoded in the host template.
            var = h['TEMPLATE/PROVISION_CONFIGURATION_BASE64']
            var = YAML.load(Base64.decode64(var)) if var
            var ||= {}
            c = YAML.dump(var)
            $common_helper.write_file_log("#{ansible_dir}/host_vars/#{h['NAME']}.yml", c)
        end
        $ANSIBLE_INVENTORY = hosts[0]['TEMPLATE/ANSIBLE_PLAYBOOK'] if hosts[0]['TEMPLATE/ANSIBLE_PLAYBOOK']
        # Generate "ansible.cfg" file
        #TODO: what if private_key isn't filename, but content
        #TODO: store private key / packet credentials securely in the ONE
        c = <<-EOT
[defaults]
retry_files_enabled = False
deprecation_warnings = False
display_skipped_hosts = False
callback_whitelist =
stdout_callback = skippy
host_key_checking = False
remote_user = #{hosts[0]['TEMPLATE/PROVISION_CONNECTION/REMOTE_USER']}
remote_port = #{hosts[0]['TEMPLATE/PROVISION_CONNECTION/REMOTE_PORT']}
private_key_file = #{hosts[0]['TEMPLATE/PROVISION_CONNECTION/PRIVATE_KEY']}
roles_path = #{ANSIBLE_LOCATION}/roles
[privilege_escalation]
become = yes
become_user = root
EOT
        $common_helper.write_file_log("#{ansible_dir}/ansible.cfg", c)
        #TODO: site.yaml
        #logger(inventoryContent + File.open("#{$ANSIBLE_LOCATION}/site.yml").read(), true)
        ansible_dir
    end

    #TODO: expect multiple hosts
    # Configures the given hosts: offlines them in OpenNebula, runs the
    # playbook for the selected inventory, and re-enables them on success.
    # Failures are retried/handled via $common_helper.retry_loop.
    #
    # @param hosts [Array]   provision host objects
    # @param ping  [Boolean] verify SSH reachability first
    def configure(hosts, ping=true)
        check_ansible_version
        ansible_dir = generate_ansible_configs(hosts)
        try_ssh(ansible_dir) if ping
        # offline ONE host
        $logger.debug("Offlining OpenNebula hosts")
        hosts.each do |host|
            host.offline
            host.update("PROVISION_CONFIGURATION_STATUS=pending", true)
        end
        $common_helper.retry_loop 'Configuration failed' do
            $logger.info("Configuring hosts")
            # build Ansible command
            cmd = "ANSIBLE_CONFIG=#{ansible_dir}/ansible.cfg ansible-playbook #{ANSIBLE_ARGS}"
            cmd << " -i #{ansible_dir}/inventory"
            cmd << " -i #{ANSIBLE_LOCATION}/inventories/#{$ANSIBLE_INVENTORY}/"
            cmd << " #{ANSIBLE_LOCATION}/#{$ANSIBLE_INVENTORY}.yml"
            o, _e, s = $common_helper.run(cmd)
            if s and s.success?
                # enable configured ONE host back
                $logger.debug("Enabling OpenNebula hosts")
                hosts.each do |host|
                    host.update("PROVISION_CONFIGURATION_STATUS=configured", true)
                    host.enable
                end
            else
                hosts.each do |host| host.update("PROVISION_CONFIGURATION_STATUS=error", true) end
                errors = parse_ansible(o) if o
                raise OneProvisionLoopException.new(errors)
            end
        end
    end

    # Returns the host template's PROVISION_CONNECTION section as a Hash
    # with lower-cased keys (e.g. 'remote_user', 'private_key').
    def get_host_template_conn(host)
        conn = {}
        #TODO: some nice / generic way (even for configuration?)
        tmpl = host.to_hash['HOST']['TEMPLATE']['PROVISION_CONNECTION']
        tmpl ||= {}
        tmpl.each_pair do |key, value|
            conn[ key.downcase ] = value
        end
        conn
    end
end

View File

@ -1,89 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'base64'
# CLI helper for clusters managed by oneprovision. Clusters created by a
# provision carry its UUID in TEMPLATE/PROVISION/PROVISION_ID.
class OneProvisionClusterHelper < OpenNebulaHelper::OneHelper

    # Resource name used by the generic OneHelper machinery.
    def self.rname
        "CLUSTER"
    end

    # Table-layout configuration file for the cluster listing.
    def self.conf_file
        "oneprovision_cluster.yaml"
    end

    # Builds a Cluster object: bound to +id+ when one is given, otherwise
    # an empty object ready to be allocated.
    def factory(id=nil)
        return OpenNebula::Cluster.new_with_id(id, @client) if id

        OpenNebula::Cluster.new(OpenNebula::Cluster.build_xml, @client)
    end

    # Builds the ShowTable used to render the cluster pool.
    #
    # @return [CLIHelper::ShowTable]
    def format_pool(options)
        table_file = self.class.table_conf

        CLIHelper::ShowTable.new(table_file, self) do
            column :ID, "ONE identifier for the Cluster", :size=>5 do |c|
                c["ID"]
            end
            column :NAME, "Name of the Cluster", :left, :size=>25 do |c|
                c["NAME"]
            end
            column :HOSTS, "Number of Hosts", :size=>5 do |c|
                c["HOSTS"]
            end
            column :VNETS, "Number of Networks", :size=>5 do |c|
                c["NETWORKS"]
            end
            column :DATASTORES, "Number of Datastores", :size=>10 do |c|
                c["DATASTORES"]
            end
            default :ID, :NAME, :HOSTS, :VNETS, :DATASTORES
        end
    end

    # Allocates a new cluster tagged with the given provision id and fills
    # its template from the provision description.
    #
    # @param cluster      [Hash]   cluster section of the template
    # @param provision_id [String] provision UUID
    # @return [OpenNebula::Cluster] the refreshed cluster object
    def create_cluster(cluster, provision_id)
        cluster['provision']['provision_id'] = provision_id

        one_cluster = OpenNebula::Cluster.new(OpenNebula::Cluster.build_xml,
                                              @client)
        rc = one_cluster.allocate(cluster['name'])

        raise OneProvisionLoopException, rc.message if OpenNebula.is_error?(rc)

        one_cluster.update($common_helper.template_like_str(cluster), true)
        one_cluster.info
        one_cluster
    end

    # Looks up the cluster bound to a provision id, or nil when none.
    def get_cluster(provision_id)
        pool = OpenNebula::ClusterPool.new(@client)
        pool.info

        pool.find do |c|
            c['TEMPLATE/PROVISION/PROVISION_ID'] == provision_id
        end
    end
end

View File

@ -1,508 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'yaml'
require 'erb'
require 'nokogiri'
require 'open3'
require 'tempfile'
require 'highline'
require 'highline/import'
require 'tmpdir'
require 'json'
require 'logger'
require 'base64'
require 'securerandom'
ENCRYPT_VALUES = ["PACKET_TOKEN", "EC2_SECRET", "EC2_ACCESS"]
# Raised to signal that the current operation should be aborted and its
# partially-created resources cleaned up.
#
# NOTE(review): subclasses Exception (not StandardError), so a bare
# `rescue` will not catch it — callers rescue it explicitly.
OneProvisionCleanupException = Class.new(Exception)
# Raised inside retry loops to trigger the failover logic (retry, skip,
# quit or cleanup). Carries an optional human-readable detail in #text.
#
# NOTE(review): subclasses Exception (not StandardError), so a bare
# `rescue` will not catch it — callers rescue it explicitly.
class OneProvisionLoopException < Exception

    # Optional message describing why the loop iteration failed (or nil).
    attr_reader :text

    # @param text [String, nil] failure detail shown to the user
    def initialize(text = nil)
        @text = text
    end

end
class CommonHelper < OpenNebulaHelper::OneHelper
ERROR_OPEN = "ERROR MESSAGE --8<------"
ERROR_CLOSE = "ERROR MESSAGE ------>8--"
def self.rname
"ONEPROVISION"
end
def validate_configuration(config, options)
config = read_config(config)
config = config.delete_if { |k, v| v.nil? }
check_config(config)
puts config.to_yaml if options.has_key? :dump
end
def check_config(config)
fail('There is an error in your configuration file: no name given') if config['name'].nil? && config['version'] == 2
fail('There is an error in your configuration file: defaults provision is missing') if config['defaults']['provision'].nil?
if config['hosts']
config['hosts'].each_with_index do |h, i|
fail("There is an error in your configuration file: there is no im_mad in host #{i + 1}") if h['im_mad'].nil?
fail("There is an error in your configuration file: there is no vm_mad in host #{i + 1}") if h['vm_mad'].nil?
fail("There is an error in your configuration file: there is no hostname in host #{i + 1}") if h['provision']['hostname'].nil?
end
end
if config['datastores']
config['datastores'].each_with_index do |d, i|
fail("There is an error in your configuration file: there is no tm_mad in datastore #{i + 1}") if d['tm_mad'].nil?
end
end
end
def create_config(yaml, update=false)
begin
check_config(yaml)
yaml['cluster'] = {'name' => yaml['name']} if (yaml['cluster'].nil? && !update)
#TODO: schema check
if yaml['hosts']
yaml['hosts'] = yaml['hosts'].map do |host|
['connection', 'provision', 'configuration'].each do |section|
data = CONFIG_DEFAULTS[section] || {}
# merge defaults with globals and device specific params
data.merge!(yaml['defaults'][section]) unless yaml['defaults'][section].nil?
data.merge!(host[section]) unless host[section].nil?
host[section] = data
end
host
end
end
['datastores', 'networks'].each do |r|
if yaml[r]
yaml[r] = yaml[r].map do |x|
x['provision'] = yaml['defaults']['provision']
x
end
end
end
yaml['cluster']['provision'] = yaml['defaults']['provision'] if !update
rescue Exception => e
fail("Failed to read configuration: #{e.to_s}")
end
yaml
end
def read_config(name)
begin
yaml = YAML.load_file(name)
rescue Exception => e
fail("Failed to read template: #{e.to_s}")
end
if yaml['extends']
base = read_config(yaml['extends'])
yaml.delete('extends')
base['defaults'] ||= {}
yaml['defaults'] ||= {}
# replace scalars or append array from child YAML
yaml.each do |key, value|
next if key == 'defaults'
if (value.is_a? Array) && (base[key].is_a? Array)
base[key].concat(value)
else
base[key] = value
end
end
# merge each defaults section separately
['connection', 'provision', 'configuration'].each do |section|
base['defaults'][section] ||= {}
yaml['defaults'][section] ||= {}
base['defaults'][section].merge!(yaml['defaults'][section])
end
return base
else
return yaml
end
end
def get_erb_value(provision, value)
template = ERB.new value
ret = template.result(provision.get_binding)
if ret.empty?
raise "#{value} not found."
else
ret
end
end
def evaluate_erb(provision, root)
if root.is_a? Hash
root.each_pair do |key, value|
if value.is_a? Array
root[key] = value.map do |x| evaluate_erb(provision, x) end
elsif value.is_a? Hash
root[key] = evaluate_erb(provision, value)
elsif value.is_a? String
if value =~ /<%= /
root[key] = get_erb_value(provision, value)
end
end
end
else
root = root.map do |x| evaluate_erb(provision, x) end
end
root
end
def try_read_file(name)
begin
File.read(name).strip
rescue
name
end
end
def create_deployment_file(host, provision_id)
Nokogiri::XML::Builder.new { |xml|
xml.HOST do
xml.NAME "provision-#{SecureRandom.hex(24)}"
xml.TEMPLATE do
xml.IM_MAD host['im_mad']
xml.VM_MAD host['vm_mad']
xml.PM_MAD host['provision']['driver']
xml.PROVISION do
host['provision'].each { |key, value|
if key != 'driver'
xml.send(key.upcase, encrypt(key.upcase, value))
end
}
xml.send("PROVISION_ID", provision_id)
end
if host['configuration']
xml.PROVISION_CONFIGURATION_BASE64 Base64.strict_encode64(host['configuration'].to_yaml)
end
xml.PROVISION_CONFIGURATION_STATUS 'pending'
if host['connection']
xml.PROVISION_CONNECTION do
host['connection'].each { |key, value|
xml.send(key.upcase, value)
}
end
end
if host['connection']
xml.CONTEXT do
if host['connection']['public_key']
xml.SSH_PUBLIC_KEY try_read_file(host['connection']['public_key'])
end
end
end
end
end
}.doc.root
end
def get_mode(options)
$logger = Logger.new(STDERR)
$logger.formatter = proc do |severity, datetime, progname, msg|
"#{datetime.strftime("%Y-%m-%d %H:%M:%S")} #{severity.ljust(5)} : #{msg}\n"
end
if options.has_key? :debug
$logger.level = Logger::DEBUG
elsif options.has_key? :verbose
$logger.level = Logger::INFO
else
$logger.level = Logger::UNKNOWN
end
$RUN_MODE = :batch if options.has_key? :batch
$PING_TIMEOUT = options[:ping_timeout] if options.has_key? :ping_timeout
$PING_RETRIES = options[:ping_retries] if options.has_key? :ping_retries
$THREADS = options[:threads] if options.has_key? :threads
if options.has_key? :fail_cleanup
$FAIL_CHOICE = :cleanup
elsif options.has_key? :fail_retry
$FAIL_CHOICE = :retry
$MAX_RETRIES = options[:fail_retry].to_i
elsif options.has_key? :fail_skip
$FAIL_CHOICE = :skip
elsif options.has_key? :fail_quit
$FAIL_CHOICE = :quit
end
end
def retry_loop(text, cleanup=$CLEANUP, &block)
retries = 0
begin
block.call
rescue OneProvisionLoopException => e
STDERR.puts "ERROR: #{text}\n#{e.text}"
retries += 1
exit(-1) if retries > $MAX_RETRIES && $RUN_MODE == :batch
choice = $FAIL_CHOICE
if $RUN_MODE == :interactive
begin
$mutex.synchronize do
cli = HighLine.new($stdin, $stderr)
choice = cli.choose do |menu|
menu.prompt = "Choose failover method:"
menu.choices(:quit, :retry, :skip)
menu.choices(:cleanup) if cleanup
menu.default = choice
end
end
rescue EOFError
STDERR.puts choice
rescue Interrupt => e
exit(-1)
end
end
if choice == :retry
retry
elsif choice == :quit
exit(-1)
elsif choice == :skip
return nil
elsif choice == :cleanup
if cleanup
raise OneProvisionCleanupException
else
fail('Cleanup unsupported for this operation')
end
end
exit(-1)
end
end
def run(*cmd, &block)
$logger.debug("Command run: #{cmd.join(' ')}")
rtn = nil
begin
if Hash === cmd.last
opts = cmd.pop.dup
else
opts = {}
end
stdin_data = opts.delete(:stdin_data) || ''
binmode = opts.delete(:binmode)
Open3.popen3(*cmd, opts) {|i, o, e, t|
if binmode
i.binmode
o.binmode
e.binmode
end
out_reader = Thread.new {o.read}
err_reader = Thread.new {e.read}
begin
i.write stdin_data
rescue Errno::EPIPE
end
begin
i.close
rescue IOError => e
end
rtn = [out_reader.value, err_reader.value, t.value]
}
$mutex.synchronize do
if rtn
$logger.debug("Command STDOUT: #{rtn[0].strip}") unless rtn[0].empty?
$logger.debug("Command STDERR: #{rtn[1].strip}") unless rtn[1].empty?
if rtn[2].success?
$logger.debug("Command succeeded")
else
$logger.warn("Command FAILED (code=#{rtn[2].exitstatus}): #{cmd.join(' ')}")
end
else
$logger.error("Command failed on unknown error")
end
end
rescue Interrupt
fail('Command interrupted')
rescue Exception => e
$logger.error("Command exception: #{e.message}")
end
rtn
end
def pm_driver_action(pm_mad, action, args, host = nil)
$host_helper.check_host(pm_mad)
cmd = ["#{REMOTES_LOCATION}/pm/#{pm_mad}/#{action}"]
args.each do |arg|
cmd << arg
end
# action always gets host ID/name if host defined, same as for VMs:
# https://github.com/OpenNebula/one/blob/d95b883e38a2cee8ca9230b0dbef58ce3b8d6d6c/src/mad/ruby/OpenNebulaDriver.rb#L95
$mutex.synchronize do
unless host.nil?
cmd << host.id
cmd << host.name
end
end
unless File.executable? cmd[0]
$logger.error("Command not found or not executable #{cmd[0]}")
fail('Driver action script not executable')
end
o = nil
retry_loop "Driver action '#{cmd[0]}' failed" do
o, e, s = run(cmd.join(' '))
unless s && s.success?
err = get_error_message(e)
text = err.lines[0].strip if err
text = 'Unknown error' if text == '-'
raise OneProvisionLoopException.new(text)
end
end
o
end
#TODO: handle exceptions?
def write_file_log(name, content)
$mutex.synchronize do
$logger.debug("Creating #{name}:\n" + content)
end
f = File.new(name, "w")
f.write(content)
f.close
end
def fail(text, code=-1)
STDERR.puts "ERROR: #{text}"
exit(code)
end
def get_error_message(text)
msg = '-'
if text
tmp = text.scan(/^#{ERROR_OPEN}\n(.*?)#{ERROR_CLOSE}$/m)
msg = tmp[0].join(' ').strip if tmp[0]
end
msg
end
def template_like_str(attributes, indent=true)
if indent
ind_enter="\n"
ind_tab=' '
else
ind_enter=''
ind_tab=' '
end
str=attributes.collect do |key, value|
if value
str_line=""
if value.class==Array
value.each do |value2|
str_line << key.to_s.upcase << "=[" << ind_enter
if value2 && value2.class==Hash
str_line << value2.collect do |key3, value3|
str = ind_tab + key3.to_s.upcase + "="
str += "\"#{encrypt(key3.to_s.upcase, value3.to_s)}\"" if value3
str
end.compact.join(",\n")
end
str_line << "\n]\n"
end
elsif value.class==Hash
str_line << key.to_s.upcase << "=[" << ind_enter
str_line << value.collect do |key3, value3|
str = ind_tab + key3.to_s.upcase + "="
str += "\"#{encrypt(key3.to_s.upcase, value3.to_s)}\"" if value3
str
end.compact.join(",\n")
str_line << "\n]\n"
else
str_line << key.to_s.upcase << "=" << "\"#{encrypt(key.to_s.upcase, value.to_s)}\""
end
str_line
end
end.compact.join("\n")
str
end
# Encrypts +value+ with the OpenNebula master key (ONE_KEY from oned
# configuration) when +key+ is listed in ENCRYPT_VALUES; any other key
# returns the value untouched.
#
# @param key   [String] upcased attribute name
# @param value [String] attribute value
#
# @return [String] encrypted or original value
def encrypt(key, value)
    return value unless ENCRYPT_VALUES.include? key

    config = OpenNebula::System.new(@client).get_configuration
    token  = config["ONE_KEY"]

    OpenNebula.encrypt({:value => value}, token)[:value]
end
end

View File

@ -1,135 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'one_helper'
# CLI helper for datastores managed by oneprovision: provides the
# listing table and the creation of provision-bound datastores.
class OneProvisionDatastoreHelper < OpenNebulaHelper::OneHelper

    # Resource name used by the generic helper machinery.
    def self.rname
        "DATASTORE"
    end

    # CLI table configuration file for this resource.
    def self.conf_file
        "oneprovision_datastore.yaml"
    end

    # Datastore object factory: wraps an existing datastore when +id+
    # is given, an empty XML template otherwise.
    def factory(id=nil)
        if id
            OpenNebula::Datastore.new_with_id(id, @client)
        else
            xml=OpenNebula::Datastore.build_xml
            OpenNebula::Datastore.new(xml, @client)
        end
    end

    # Builds the CLI table used to list datastores.
    def format_pool(options)
        config_file = self.class.table_conf

        table = CLIHelper::ShowTable.new(config_file, self) do
            column :ID, "ONE identifier for the Datastore", :size=>4 do |d|
                d["ID"]
            end

            column :USER, "Username of the Datastore owner", :left,
                   :size=>10 do |d|
                helper.user_name(d, options)
            end

            column :GROUP, "Group of the Datastore", :left,
                   :size=>10 do |d|
                helper.group_name(d, options)
            end

            column :NAME, "Name of the Datastore", :left, :size=>13 do |d|
                d["NAME"]
            end

            column :SIZE, "Datastore total size", :size =>10 do |d|
                # non-shared datastores have no meaningful global size
                shared = d['TEMPLATE']['SHARED']
                if shared != nil && shared.upcase == 'NO'
                    "-"
                else
                    OpenNebulaHelper.unit_to_str(d['TOTAL_MB'].to_i, {}, 'M')
                end
            end

            column :AVAIL, "Datastore free size", :left, :size =>5 do |d|
                # avoid division by zero for unmonitored datastores
                if d['TOTAL_MB'].to_i == 0
                    "-"
                else
                    "#{((d['FREE_MB'].to_f/d['TOTAL_MB'].to_f) * 100).round()}%"
                end
            end

            column :CLUSTERS, "Cluster IDs", :left, :size=>12 do |d|
                OpenNebulaHelper.clusters_str(d["CLUSTERS"])
            end

            column :IMAGES, "Number of Images", :size=>6 do |d|
                if d["IMAGES"]["ID"].nil?
                    "0"
                else
                    [d["IMAGES"]["ID"]].flatten.size
                end
            end

            column :TYPE, "Datastore type", :left, :size=>4 do |d|
                type = Datastore::DATASTORE_TYPES[d["TYPE"].to_i]
                Datastore::SHORT_DATASTORE_TYPES[type]
            end

            column :DS, "Datastore driver", :left, :size=>7 do |d|
                d["DS_MAD"]
            end

            column :PROVIDER, "Baremetal provider name", :left, :size=>8 do |d|
                d["TEMPLATE/PM_MAD"]
            end

            column :TM, "Transfer driver", :left, :size=>7 do |d|
                d["TM_MAD"]
            end

            column :STAT, "State of the Datastore", :left, :size=>3 do |d|
                state = Datastore::DATASTORE_STATES[d["STATE"].to_i]
                Datastore::SHORT_DATASTORE_STATES[state]
            end

            default :ID, :NAME, :SIZE, :AVAIL, :CLUSTERS, :IMAGES, :TYPE, :DS, :PROVIDER, :TM, :STAT
        end

        table
    end

    # Allocates a new datastore bound to +provision_id+ in the given
    # cluster; the provision driver is appended as PM_MAD.
    #
    # @param datastore    [Hash]    datastore attributes from the config
    # @param cluster_id   [Integer] target cluster
    # @param provision_id [String]  owning provision UUID
    # @param pm           [String]  provision driver name
    #
    # @return [OpenNebula::Datastore] refreshed datastore object
    #
    # @raise [OneProvisionLoopException] when allocation fails
    def create_datastore(datastore, cluster_id, provision_id, pm)
        datastore['provision']['provision_id'] = provision_id

        one = OpenNebula::Client.new()
        d = OpenNebula::Datastore.new(OpenNebula::Datastore.build_xml, one)

        template = $common_helper.template_like_str(datastore)
        template += "PM_MAD=\"#{pm}\"\n"

        rc = d.allocate(template, cluster_id.to_i)

        if OpenNebula.is_error?(rc)
            raise OneProvisionLoopException.new(rc.message)
        end

        d.info
        d
    end

end

View File

@ -1,337 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'base64'
require 'one_helper/onehost_helper'
# CLI helper for bare metal hosts managed by oneprovision. Wraps the
# OpenNebula host API and drives the provision driver (PM_MAD) actions
# for the host life-cycle: deploy, poll, reboot/reset, power off,
# delete and Ansible-based configuration.
class OneProvisionHostHelper < OpenNebulaHelper::OneHelper

    # Resource name used by the generic helper machinery.
    def self.rname
        "HOST"
    end

    # CLI table configuration file for this resource.
    def self.conf_file
        "oneprovision_host.yaml"
    end

    # Maps a numeric host state to its short string representation.
    def self.state_to_str(id)
        id = id.to_i
        state_str = Host::HOST_STATES[id]
        Host::SHORT_HOST_STATES[state_str]
    end

    # Host object factory: wraps an existing host when +id+ is given,
    # an empty XML template otherwise.
    #
    # NOTE: the original file declared this method twice with an
    # identical body; the redundant second definition was removed.
    def factory(id=nil)
        if id
            OpenNebula::Host.new_with_id(id, @client)
        else
            xml = OpenNebula::Host.build_xml
            OpenNebula::Host.new(xml, @client)
        end
    end

    # Pool with all hosts.
    def factory_pool(user_flag=-2)
        OpenNebula::HostPool.new(@client)
    end

    # Builds the CLI table used to list hosts.
    def format_pool(options)
        config_file = self.class.table_conf

        table = CLIHelper::ShowTable.new(config_file, self) do
            column :ID, "ONE identifier for Host", :size=>4 do |d|
                d["ID"]
            end

            column :NAME, "Name of the Host", :left, :size=>15 do |d|
                d["NAME"]
            end

            column :CLUSTER, "Name of the Cluster", :left, :size=>9 do |d|
                OpenNebulaHelper.cluster_str(d["CLUSTER"])
            end

            column :RVM, "Number of Virtual Machines running", :size=>3 do |d|
                d["HOST_SHARE"]["RUNNING_VMS"]
            end

            column :ZVM, "Number of Virtual Machine zombies", :size=>3 do |d|
                d["TEMPLATE"]["TOTAL_ZOMBIES"] || 0
            end

            column :STAT, "Host status", :left, :size=>6 do |d|
                OneHostHelper.state_to_str(d["STATE"])
            end

            column :PROVIDER, "Provision driver", :size=>8 do |d|
                d['TEMPLATE']['PM_MAD'].nil? ? '-' : d['TEMPLATE']['PM_MAD']
            end

            column :VM_MAD, "Virtual Machine driver", :size=>8 do |d|
                d["VM_MAD"]
            end

            default :ID, :NAME, :CLUSTER, :RVM, :PROVIDER, :VM_MAD, :STAT
        end

        table
    end

    # Aborts unless the host has a provision (PM) driver defined.
    def check_host(pm_mad)
        if pm_mad.nil? || pm_mad.empty?
            fail('Not a valid bare metal host')
        end
    end

    # True when the host reports running VMs.
    def running_vms?(host)
        host["HOST_SHARE/RUNNING_VMS"].to_i > 0
    end

    # Returns the public address of the provisioned host, taken from
    # the driver monitoring data.
    #
    # @raise via #fail when neither address key is present
    def poll(host)
        poll = monitoring(host)

        if poll.has_key? 'GUEST_IP_ADDRESSES'
            name = poll['GUEST_IP_ADDRESSES'].split(',')[0][1..-1] #TODO
        elsif poll.has_key? 'AWS_PUBLIC_IP_ADDRESS'
            name = poll['AWS_PUBLIC_IP_ADDRESS'][2..-3]
        else
            fail('Failed to get provision name')
        end

        name
    end

    # Runs the driver 'poll' action and parses its KEY=value output
    # into a Hash with upcased keys. Parse errors re-enter the retry
    # loop.
    def monitoring(host)
        host.info

        pm_mad = host['TEMPLATE/PM_MAD']
        deploy_id = host['TEMPLATE/PROVISION/DEPLOY_ID']
        name = host.name
        id = host.id

        check_host(pm_mad)

        $logger.debug("Monitoring host: #{id.to_s}")

        $common_helper.retry_loop 'Monitoring metrics failed to parse' do
            pm_ret = $common_helper.pm_driver_action(pm_mad, 'poll', [deploy_id, name], host)

            begin
                poll = {}

                pm_ret.split(' ').map{|x| x.split('=', 2)}.each do |key, value|
                    poll[key.upcase] = value
                end

                poll
            rescue
                # BUGFIX: the original raised
                # `$common_helper.OneProvisionLoopException` — a method
                # call that does not exist, crashing with NoMethodError
                # instead of triggering the retry loop.
                raise OneProvisionLoopException.new('Monitoring metrics failed to parse')
            end
        end
    end

    # Allocates a new OpenNebula host from the deployment file XML and
    # leaves it offline until it is deployed.
    #
    # @param dfile    [String]       deployment XML document
    # @param cluster  [Integer]      target cluster ID
    # @param playbook [String, nil]  Ansible playbook to record on the host
    #
    # @return [OpenNebula::Host] refreshed host object
    def create_host(dfile, cluster, playbook)
        xhost = OpenNebula::XMLElement.new
        xhost.initialize_xml(dfile, 'HOST')

        $logger.debug("Creating OpenNebula host: #{xhost['NAME']}")

        one = OpenNebula::Client.new()
        host = OpenNebula::Host.new(OpenNebula::Host.build_xml, one)
        host.allocate(xhost['NAME'], xhost['TEMPLATE/IM_MAD'], xhost['TEMPLATE/VM_MAD'], cluster)
        host.update(xhost.template_str, true)
        host.update("ANSIBLE_PLAYBOOK=#{playbook}", true) if !playbook.nil?
        host.offline
        host.info

        $logger.debug("host created with ID: #{host['ID']}")

        host
    end

    # Re-deploys a powered-off host and enables it again under the
    # address reported by the driver.
    def resume_host(host)
        host.info

        pm_mad = host['TEMPLATE/PM_MAD']

        check_host(pm_mad)

        begin
            # create resume deployment file
            resumeFile = Tempfile.new("xmlResume")
            resumeFile.close()
            $common_helper.write_file_log(resumeFile.path, host.to_xml)

            $logger.info("Resuming host: #{host.id.to_s}")

            $common_helper.pm_driver_action(pm_mad, 'deploy', [resumeFile.path, host.name], host)

            $logger.debug("Enabling OpenNebula host: #{host.id.to_s}")

            name = poll(host)
            host.rename(name)
            host.enable
        ensure
            resumeFile.unlink()
        end
    end

    # Shuts the physical host down through the driver and offlines it
    # in OpenNebula.
    def poweroff_host(host)
        host.info

        pm_mad = host['TEMPLATE/PM_MAD']
        deploy_id = host['TEMPLATE/PROVISION/DEPLOY_ID']
        name = host.name

        check_host(pm_mad)

        $logger.info("Powering off host: #{host.id.to_s}")

        $common_helper.pm_driver_action(pm_mad, 'shutdown', [deploy_id, name, 'SHUTDOWN_POWEROFF'], host)

        $logger.debug("Offlining OpenNebula host: #{host.id.to_s}")

        # Fix broken pipe exception on ubuntu 14.04
        host.info
        host.offline
    end

    # Reboots (soft) or resets (hard, with :hard option) a host,
    # turning driver exceptions into a fatal CLI error.
    def reboot_host(host, options)
        begin
            reset_host(host, (options.has_key? :hard))
        rescue => e
            $common_helper.fail("#{(options.has_key? :hard) ? "Reset" : "Reboot"} failed on exception: #{e.to_s}")
        end
    end

    # Hard reset re-polls the host address (it may change); soft reboot
    # keeps it.
    def reset_host(host, hard)
        if hard
            reset_reboot(host, 'reset', 'Resetting')
            name = poll(host)
            host.rename(name)
        else
            reset_reboot(host, 'reboot', 'Rebooting')
        end
    end

    # Common helper for reset/reboot: offline, driver action, enable.
    def reset_reboot(host, action, message)
        host.info

        pm_mad = host['TEMPLATE/PM_MAD']
        deploy_id = host['TEMPLATE/PROVISION/DEPLOY_ID']
        name = host.name

        check_host(pm_mad)

        $logger.debug("Offlining OpenNebula host: #{host.id.to_s}")
        host.offline

        $logger.info("#{message} host: #{host.id.to_s}")

        $common_helper.pm_driver_action(pm_mad, action, [deploy_id, name], host)

        $logger.debug("Enabling OpenNebula host: #{host.id.to_s}")
        host.info
        host.enable
    end

    # Unprovisions the physical host through the driver and removes it
    # from OpenNebula. State 8 is OFFLINE, so the offline step is
    # skipped in that case.
    #
    # @raise [OneProvisionLoopException] when any API call fails
    def delete_host(host)
        pm_mad = host['TEMPLATE/PM_MAD']
        deploy_id = host['TEMPLATE/PROVISION/DEPLOY_ID']
        name = host.name

        check_host(pm_mad)

        # offline ONE host
        if host.state != 8
            $logger.debug("Offlining OpenNebula host: #{host.id.to_s}")

            $mutex.synchronize do
                rc = host.offline

                if OpenNebula.is_error?(rc)
                    raise OneProvisionLoopException.new(rc.message)
                end
            end
        end

        # unprovision host
        $logger.debug("Undeploying host: #{host.id}")

        $common_helper.pm_driver_action(pm_mad, 'cancel', [deploy_id, name], host)

        # delete ONE host
        $logger.debug("Deleting OpenNebula host: #{host.id}")

        $mutex.synchronize do
            # Fix ubuntu 14.04 broken pipe
            host.info

            rc = host.delete

            if OpenNebula.is_error?(rc)
                raise OneProvisionLoopException.new(rc.message)
            end
        end
    end

    # Runs the Ansible configuration over +hosts+. Skips only when
    # every host is already configured and :force is not set.
    def configure_host(hosts, options)
        # BUGFIX: the original accumulator started as the String "" and
        # was overwritten with true/false via `configured && true` —
        # works by accident because "" is truthy, but breaks the intent
        # for an empty host list. A proper boolean is used instead.
        configured = true

        hosts.each do |host|
            host.info

            pm_mad = host['TEMPLATE/PM_MAD']
            status = host['TEMPLATE/PROVISION_CONFIGURATION_STATUS']

            check_host(pm_mad)

            configured &&= (status == 'configured' && (!options.has_key? :force))
        end

        if (configured && (!options.has_key? :force))
            $common_helper.fail('Hosts are already configured')
        end

        $ansible_helper.configure(hosts)
    end

    # Replaces the current process with an SSH session to the host,
    # using the connection data stored in the provision template.
    def ssh_host(host, args)
        host.info

        ip = host["NAME"]
        private_key = host["TEMPLATE/PROVISION_CONNECTION/PRIVATE_KEY"]
        remote_user = host["TEMPLATE/PROVISION_CONNECTION/REMOTE_USER"]

        exec("ssh -i #{private_key} #{remote_user}@#{ip} '#{args[1]}'")
    end

end

View File

@ -1,660 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'base64'
# In-memory representation of a provision: the set of OpenNebula
# clusters, hosts, datastores and networks sharing one PROVISION_ID.
class Provision

    attr_reader :id

    # @param id   [String]      provision UUID
    # @param name [String, nil] optional provision name
    def initialize(id, name=nil)
        @id = id
        @name = name
        @clusters = []
        @hosts = []
        @datastores = []
        @networks = []
    end

    # True when some cluster carries this provision's PROVISION_ID.
    def exists()
        clusters = $provision_helper.clusters_factory_pool()
        clusters.info

        clusters.each do |c|
            return true if c['TEMPLATE/PROVISION/PROVISION_ID'] == @id
        end

        false
    end

    # Re-reads every resource of the provision from OpenNebula; hosts
    # are re-instantiated through the host helper factory so they carry
    # full host objects.
    def refresh()
        $provision_helper.get_provision_resources(['clusters', 'datastores', 'hosts', 'networks'], self)

        @hosts = @hosts.map { |h|
            $host_helper.factory(h.id)
        }
    end

    def append_cluster(cluster)
        @clusters << cluster
    end

    def append_datastore(datastore)
        @datastores << datastore
    end

    def append_host(host)
        @hosts << host
    end

    def append_network(network)
        @networks << network
    end

    # Readers for the resource collections.
    def clusters()
        @clusters
    end

    def hosts()
        @hosts
    end

    def datastores()
        @datastores
    end

    def networks()
        @networks
    end

    #TODO: rename delete_all -> cleanup
    # Deletes the whole provision: offlines every host (terminating its
    # VMs when +delete_all+ is true, failing otherwise if VMs run),
    # undeploys the hosts through the driver (optionally threaded), and
    # finally removes datastores, networks and clusters (and, with
    # +delete_all+, their images).
    def delete(delete_all=false)
        $common_helper.fail('Provision not found.') unless exists

        $logger.info("Deleting provision #{@id}")

        # offline and (optionally) clean all hosts
        $logger.debug("Offlining OpenNebula hosts")

        @hosts.each do |host|
            $common_helper.retry_loop 'Failed to offline host' do
                rc = host.offline

                if OpenNebula.is_error?(rc)
                    raise OneProvisionLoopException.new(rc.message)
                end

                rc = host.info

                if OpenNebula.is_error?(rc)
                    raise OneProvisionLoopException.new(rc.message)
                end
            end

            if $host_helper.running_vms?(host)
                if delete_all
                    $provision_helper.delete_vms(host)
                else
                    $common_helper.fail("Provision with running VMs can't be deleted")
                end
            end
        end

        # undeploy hosts
        $logger.info('Undeploying hosts')

        threads = []

        @hosts.each do |host|
            if $THREADS > 1
                # keep at most $THREADS undeploy threads alive
                while Thread.list.count > $THREADS
                    threads.map do |thread|
                        thread.join(5)
                    end
                end

                threads << Thread.new do
                    $host_helper.delete_host(host)
                end
            else
                $host_helper.delete_host(host)
            end
        end

        threads.map(&:join)

        # delete all other deployed objects
        $logger.info('Deleting provision objects')

        ['datastores', 'networks', 'clusters'].each do |section|
            self.send("#{section}").each do |obj|
                $common_helper.retry_loop "Failed to delete #{section.chomp('s')} #{obj['ID']}" do
                    $logger.debug("Deleting OpenNebula #{section.chomp('s')}: #{obj['ID']}")

                    if delete_all
                        case section
                        when 'datastores'
                            $provision_helper.delete_images(obj)
                        end
                    end

                    # Fix ubuntu 14.04 broken pipe
                    obj.info

                    rc = obj.delete

                    if OpenNebula.is_error?(rc)
                        raise OneProvisionLoopException.new(rc.message)
                    end
                end
            end
        end
    end

    # Exposes this object's binding for ERB template evaluation.
    def get_binding
        binding
    end

end
# CLI helper implementing the oneprovision top-level commands: list,
# show, create/update, configure and the VM/image cleanup used when a
# provision is deleted.
class OneProvisionProvisionHelper < OpenNebulaHelper::OneHelper

    # Resource name used by the generic helper machinery.
    def self.rname
        "PROVISION"
    end

    # CLI table configuration file for this resource.
    def self.conf_file
        "oneprovision_provision.yaml"
    end

    # Builds the CLI table used to list provisions.
    def format_pool(options)
        config_file = self.class.table_conf

        table = CLIHelper::ShowTable.new(config_file, self) do
            column :ID, "Identifier for the Provision", :size=>36 do |d|
                d["ID"]
            end

            column :NAME, "Name of the Provision", :left, :size=>25 do |d|
                d["NAME"]
            end

            column :CLUSTERS, "Number of Clusters", :size=>8 do |d|
                d["CLUSTERS"]
            end

            column :HOSTS, "Number of Hosts", :size=>5 do |d|
                d["HOSTS"]
            end

            column :VNETS, "Number of Networks", :size=>5 do |d|
                d["NETWORKS"]
            end

            column :DATASTORES, "Number of Datastores", :size=>10 do |d|
                d["DATASTORES"]
            end

            column :STAT, "Status of the Provision", :left, :size=>15 do |d|
                d["STATUS"]
            end

            default :ID, :NAME, :CLUSTERS, :HOSTS, :VNETS, :DATASTORES, :STAT
        end

        table
    end

    # Fetches the pools named in +resources_names+. With a +provision+
    # it stores the matching resources into the provision's instance
    # variables; without one it returns the resources of the FIRST pool
    # that belong to any provision (note the early return inside the
    # loop).
    def get_provision_resources(resources_names, provision=nil)
        resources_names.each do |r|
            resources = self.send("#{r}_factory_pool")
            rc = resources.info

            if OpenNebula.is_error?(rc)
                $common_helper.fail(rc.message)
            end

            if !provision.nil?
                provision.instance_variable_set("@#{r}", resources.select{ |x|
                    x['TEMPLATE/PROVISION/PROVISION_ID'] == provision.id
                })
            else
                return resources.select{ |x|
                    !x['TEMPLATE/PROVISION/PROVISION_ID'].nil?
                }
            end
        end
    end

    # Returns the unique PROVISION_IDs found across all clusters.
    def get_ids()
        clusters = clusters_factory_pool()
        rc = clusters.info

        if OpenNebula.is_error?(rc)
            $common_helper.fail(rc.message)
        end

        clusters = clusters.select{ |x| !x['TEMPLATE/PROVISION/PROVISION_ID'].nil? }
        clusters = clusters.uniq{ |x| x['TEMPLATE/PROVISION/PROVISION_ID'] }

        ids = []

        clusters.each do |c| ids << c['TEMPLATE/PROVISION/PROVISION_ID'] end

        ids
    end

    # Builds the Hash rows shown by the list command: one element per
    # provision with its status and per-resource counters.
    def get_list(columns, provision_list)
        ret = []
        ids = get_ids()

        ids.each do |i|
            provision = Provision.new(i)
            provision.refresh

            element = {}

            element['ID'] = provision_list ? i : provision.clusters[0]['ID']
            element['NAME'] = provision.clusters[0]['NAME']
            element['STATUS'] = get_provision_status(provision.hosts)

            columns.each do |c|
                if provision.instance_variable_get("@#{c}").nil?
                    element["#{c}".upcase] = "0"
                else
                    element["#{c}".upcase] = provision.instance_variable_get("@#{c}").size.to_s
                end
            end

            ret << element
        end

        ret
    end

    # Aggregated configuration status: 'pending' or 'error' as soon as
    # one host reports it, 'configured' otherwise.
    def get_provision_status(hosts)
        hosts.each do |h|
            h.info

            if h['TEMPLATE/PROVISION_CONFIGURATION_STATUS'] == 'pending'
                return 'pending'
            elsif h['TEMPLATE/PROVISION_CONFIGURATION_STATUS'] == 'error'
                return 'error'
            end
        end

        'configured'
    end

    # Prints the detailed view of one provision.
    def show(provision_id, options)
        provision = Provision.new(provision_id)
        provision.refresh

        $common_helper.fail('Provision not found.') if !provision.exists

        ret = {}
        ret['id'] = provision_id
        ret['name'] = provision.clusters[0]['NAME']
        ret['status'] = get_provision_status(provision.hosts)

        ['clusters', 'datastores', 'hosts', 'networks'].each do |r|
            ret["@#{r}_ids"] = []

            provision.instance_variable_get("@#{r}").each do |x|
                ret["@#{r}_ids"] << (x['ID'])
            end
        end

        format_resource(ret, options)
    end

    # Creates (or, with +provision_id+, updates) a provision from a
    # configuration file: allocates cluster/datastores/networks (v2
    # configs), allocates and deploys hosts through the driver
    # (optionally threaded), monitors them and runs the Ansible
    # configuration. Cleans everything up on OneProvisionCleanupException.
    def create(config, options, provision_id=nil)
        update = !provision_id.nil?

        $common_helper.retry_loop "Failed to #{update ? "update" : "create"} provision" do
            $ansible_helper.check_ansible_version

            # read provision file
            cfg = $common_helper.create_config($common_helper.read_config(config), update)

            if cfg['version'] == 2
                if !update
                    provision = Provision.new(SecureRandom.uuid, cfg['name'])
                else
                    provision = Provision.new(provision_id, cfg['name'])
                end

                # whole_provision tracks everything already deployed,
                # used for ERB evaluation of later resources
                whole_provision = Provision.new(provision.id, cfg['name'])
                whole_provision.refresh

                cluster = nil

                $logger.info('Creating provision objects')

                $common_helper.retry_loop "Failed to create cluster" do
                    if !cfg['cluster'].nil?
                        $logger.debug("Creating OpenNebula cluster: #{cfg['cluster']['name']}")

                        # create new cluster
                        cluster = $cluster_helper.create_cluster(cfg['cluster'], provision.id)

                        provision.append_cluster(cluster)

                        $logger.debug("cluster created with ID: #{cluster['ID']}")
                    else
                        cluster = $cluster_helper.get_cluster(provision.id)
                    end
                end

                $CLEANUP = true

                ['datastores', 'networks'].each do |r|
                    if !cfg["#{r}"].nil?
                        cfg["#{r}"].each do |x|
                            begin
                                $common_helper.retry_loop "Failed to create #{r}: #{x['name']}" do
                                    $logger.debug("Creating OpenNebula #{r}: #{x['name']}")

                                    if r == 'datastores'
                                        provision.append_datastore($datastore_helper.create_datastore(
                                            $common_helper.evaluate_erb(whole_provision, x),
                                            cluster['ID'], provision.id, cfg['defaults']['provision']['driver']))
                                    else
                                        provision.append_network($vnet_helper.create_vnet(
                                            $common_helper.evaluate_erb(whole_provision, x),
                                            cluster['ID'], provision.id, cfg['defaults']['provision']['driver']))
                                    end

                                    whole_provision.refresh

                                    $logger.debug("#{r} created with ID: #{provision.instance_variable_get("@#{r}").last['ID']}")
                                end
                            rescue OneProvisionCleanupException
                                provision.refresh

                                provision.delete

                                exit (-1)
                            end
                        end
                    end
                end

                if cfg['hosts'].nil?
                    puts "ID: #{provision.id}"
                    exit (0)
                end
            end

            begin
                host = nil

                cfg['hosts'].each do |h|
                    if cfg['version'] == 2
                        dfile = $common_helper.create_deployment_file($common_helper.evaluate_erb(whole_provision, h), provision.id)

                        host = $host_helper.create_host(dfile.to_xml, cluster['ID'].to_i, cfg['playbook'])

                        provision.append_host(host)
                    else
                        # legacy v1 configs: single host, default cluster
                        dfile = $common_helper.create_deployment_file($common_helper.evaluate_erb(whole_provision, h), nil)

                        host = $host_helper.create_host(dfile.to_xml, 0, cfg['playbook'])
                    end

                    host.offline
                end

                hosts = []

                if provision
                    total_hosts = provision.hosts.size
                    hosts = provision.hosts
                else
                    total_hosts = 1
                    hosts << host
                end

                # ask user to be patient, mandatory for now
                STDERR.puts 'WARNING: This operation can take tens of minutes. Please be patient.'

                $logger.info('Deploying')

                deploy_ids = []
                threads = []
                processed_hosts = 0

                hosts.each do |h|
                    h.info

                    # deploy host
                    pm_mad = h['TEMPLATE/PM_MAD']

                    $logger.debug("Deploying host: #{h['ID']}")

                    processed_hosts += 1

                    deployFile = Tempfile.new("xmlDeploy#{h['ID']}")
                    deployFile.close()

                    $common_helper.write_file_log(deployFile.path, h.to_xml)

                    if $THREADS > 1
                        threads << Thread.new do
                            $common_helper.pm_driver_action(pm_mad, 'deploy', [deployFile.path, 'TODO'])
                        end

                        # flush the thread batch when full or finished
                        if (processed_hosts % $THREADS == 0) || (total_hosts == processed_hosts)
                            ids = threads.map(&:join).map(&:value)

                            ids.each do |i|
                                deploy_ids << i
                            end

                            deployFile.unlink

                            threads.clear
                        end
                    else
                        deploy_ids << $common_helper.pm_driver_action(pm_mad, 'deploy', [deployFile.path, 'TODO'])
                    end
                end

                if deploy_ids.nil? || deploy_ids.empty?
                    $common_helper.fail('Deployment failed, no ID got from driver')
                end

                $logger.info("Monitoring hosts")

                hosts.each do |h|
                    # record the driver deploy ID and rename the host to
                    # its reported public address
                    h.add_element('//TEMPLATE/PROVISION', {'DEPLOY_ID' => deploy_ids.shift.strip})
                    h.update(h.template_str)

                    name = $host_helper.poll(h)

                    h.rename(name)

                    h.info
                    h
                end

                whole_provision.refresh if cfg['version'] == 2

                if options.has_key? :incremental
                    $host_helper.configure_host(provision.hosts, options)
                else
                    if whole_provision
                        $host_helper.configure_host(whole_provision.hosts, options)
                    else
                        $host_helper.configure_host(hosts, options)
                    end
                end

                puts "ID: #{provision.id}" if !update && cfg['version'] == 2
                puts "ID: #{ host['ID']}" if cfg['version'] == 1
            rescue OneProvisionCleanupException
                if cfg['version'] == 2
                    provision.delete
                else
                    $host_helper.delete_host(host)
                end

                exit(-1)
            end
        end
    end

    # Re-runs the Ansible configuration over an existing provision.
    def configure(provision_id, options)
        provision = Provision.new(provision_id)
        provision.refresh

        $common_helper.fail('Provision not found.') if !provision.exists

        $host_helper.configure_host(provision.hosts, options)
    end

    # Updates an existing provision by re-running create with its ID.
    def update(provision_id, config, options)
        $logger.info("Updating provision #{provision_id}")

        provision = Provision.new(provision_id)

        $common_helper.fail('Provision not found.') if !provision.exists

        create(config, options, provision_id)
    end

    #TODO: doesn't drop PENDING VMs, blocks delete of images
    # Deletes every VM on +host+, retrying (recursively, after a 5s
    # pause) while the host still reports running VMs.
    def delete_vms(host, wait=true)
        vm_ids = host.retrieve_elements('VMS/ID')

        if vm_ids
            vm_ids.each do |vm_id|
                $common_helper.retry_loop "Failed to delete OpenNebula VM #{vm_id}" do
                    $logger.debug("Deleting OpenNebula VM #{vm_id}")

                    vm = OpenNebula::VirtualMachine.new_with_id(vm_id, OpenNebula::Client.new())

                    rc = vm.info

                    if OpenNebula.is_error?(rc)
                        raise OneProvisionLoopException.new(rc.message)
                    end

                    rc = vm.delete

                    if OpenNebula.is_error?(rc)
                        raise OneProvisionLoopException.new(rc.message)
                    end
                end
            end

            if wait
                sleep 5

                # refresh host information
                rc = host.info

                if OpenNebula.is_error?(rc)
                    raise OneProvisionLoopException.new(rc.message)
                end

                #TODO: causes infinite loop
                if $host_helper.running_vms?(host)
                    $logger.debug("Still found running VMs on host #{host['ID']}, retrying...")

                    delete_vms(host, wait)
                end
            end
        end
    end

    # Deletes every image on +datastore+, retrying (recursively, after
    # a 5s pause) while the datastore still lists images.
    def delete_images(datastore, wait=true)
        image_ids = datastore.retrieve_elements('IMAGES/ID')

        if image_ids
            image_ids.each do |image_id|
                $common_helper.retry_loop "Failed to delete image #{image_id}" do
                    $logger.debug("Deleting OpenNebula image #{image_id}")

                    image = OpenNebula::Image.new_with_id(image_id, OpenNebula::Client.new())

                    rc = image.info

                    if OpenNebula.is_error?(rc)
                        raise OneProvisionLoopException.new(rc.message)
                    end

                    rc = image.delete

                    if OpenNebula.is_error?(rc)
                        raise OneProvisionLoopException.new(rc.message)
                    end
                end
            end

            if wait
                sleep 5

                # refresh datastore information
                rc = datastore.info

                if OpenNebula.is_error?(rc)
                    raise OneProvisionLoopException.new(rc.message)
                end

                #TODO: causes infinite loop
                if datastore.retrieve_elements('IMAGES/ID')
                    $logger.debug("Still found images in datastore #{datastore['ID']}, retrying...")

                    delete_images(datastore, wait)
                end
            end
        end
    end

    # Pool factories used by get_provision_resources via send().
    def clusters_factory_pool(user_flag=-2)
        OpenNebula::ClusterPool.new(@client)
    end

    def hosts_factory_pool(user_flag=-2)
        OpenNebula::HostPool.new(@client)
    end

    def networks_factory_pool(user_flag=-2)
        OpenNebula::VirtualNetworkPool.new(@client)
    end

    def datastores_factory_pool(user_flag=-2)
        OpenNebula::DatastorePool.new(@client)
    end

    # Prints the detailed provision view built by #show.
    def format_resource(provision, options = {})
        str="%-18s: %-20s"
        str_h1="%-80s"

        CLIHelper.print_header(str_h1 % "PROVISION #{provision['ID']} INFORMATION")
        puts str % ["ID", provision['id'].to_s]
        puts str % ["NAME", provision['name']]
        puts str % ["STATUS", CLIHelper.color_state(provision['status'])]

        puts
        CLIHelper.print_header("%-15s" % ["CLUSTERS"])
        provision['@clusters_ids'].each do |id|
            puts "%-15s" % [id]
        end

        puts
        CLIHelper.print_header("%-15s" % ["HOSTS"])
        provision['@hosts_ids'].each do |id|
            puts "%-15s" % [id]
        end

        puts
        CLIHelper.print_header("%-15s" % ["VNETS"])
        provision['@networks_ids'].each do |id|
            puts "%-15s" % [id]
        end

        puts
        CLIHelper.print_header("%-15s" % ["DATASTORES"])
        provision['@datastores_ids'].each do |id|
            puts "%-15s" % [id]
        end
    end

end

View File

@ -1,103 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'one_helper'
# CLI helper for virtual networks managed by oneprovision: provides
# the listing table and the creation of provision-bound networks.
class OneProvisionVnetHelper < OpenNebulaHelper::OneHelper

    # Resource name used by the generic helper machinery.
    def self.rname
        "VNET"
    end

    # CLI table configuration file for this resource.
    def self.conf_file
        "oneprovision_vnet.yaml"
    end

    # VirtualNetwork object factory: wraps an existing network when
    # +id+ is given, an empty XML template otherwise.
    def factory(id=nil)
        if id
            OpenNebula::VirtualNetwork.new_with_id(id, @client)
        else
            # BUGFIX: the original called OpenNebula::VirtualNetwok
            # (typo), which raised NameError whenever the template
            # branch was taken.
            xml = OpenNebula::VirtualNetwork.build_xml
            OpenNebula::VirtualNetwork.new(xml, @client)
        end
    end

    # Builds the CLI table used to list virtual networks.
    def format_pool(options)
        config_file = self.class.table_conf

        table = CLIHelper::ShowTable.new(config_file, self) do
            column :ID, "ONE identifier for Virtual Network", :size=>4 do |d|
                d["ID"]
            end

            column :USER, "Username of the Virtual Network owner", :left,
                   :size=>15 do |d|
                helper.user_name(d, options)
            end

            column :GROUP, "Group of the Virtual Network", :left,
                   :size=>12 do |d|
                helper.group_name(d, options)
            end

            column :NAME, "Name of the Virtual Network", :left,
                   :size=>19 do |d|
                d["NAME"]
            end

            column :CLUSTERS, "Cluster IDs", :left, :size=>10 do |d|
                OpenNebulaHelper.clusters_str(d["CLUSTERS"]) rescue "-"
            end

            column :BRIDGE, "Bridge associated to the Virtual Network", :left,
                   :size=>8 do |d|
                d["BRIDGE"]
            end

            column :PROVIDER, "Baremetal provider name", :left, :size=>8 do |d|
                d["TEMPLATE/PM_MAD"]
            end

            column :LEASES, "Number of this Virtual Network's given leases",
                   :size=>6 do |d|
                d["USED_LEASES"]
            end

            default :ID, :USER, :GROUP, :NAME, :CLUSTERS, :BRIDGE, :PROVIDER, :LEASES
        end

        table
    end

    # Allocates a new virtual network bound to +provision_id+ in the
    # given cluster; the provision driver is appended as PM_MAD.
    #
    # @param vnet         [Hash]    network attributes from the config
    # @param cluster_id   [Integer] target cluster
    # @param provision_id [String]  owning provision UUID
    # @param pm           [String]  provision driver name
    #
    # @return [OpenNebula::VirtualNetwork] refreshed network object
    #
    # @raise [OneProvisionLoopException] when allocation fails
    def create_vnet(vnet, cluster_id, provision_id, pm)
        vnet['provision']['provision_id'] = provision_id

        one = OpenNebula::Client.new()
        v = OpenNebula::VirtualNetwork.new(OpenNebula::VirtualNetwork.build_xml, one)

        template = $common_helper.template_like_str(vnet)
        template += "PM_MAD=\"#{pm}\"\n"

        rc = v.allocate(template, cluster_id.to_i)

        if OpenNebula.is_error?(rc)
            raise OneProvisionLoopException.new(rc.message)
        end

        v.info
        v
    end

end

View File

@ -16,63 +16,51 @@
# limitations under the License. # # limitations under the License. #
#--------------------------------------------------------------------------- # #--------------------------------------------------------------------------- #
ONE_LOCATION=ENV["ONE_LOCATION"] ONE_LOCATION = ENV['ONE_LOCATION']
if !ONE_LOCATION if !ONE_LOCATION
RUBY_LIB_LOCATION="/usr/lib/one/ruby" LIB_LOCATION = '/usr/lib/one'
REMOTES_LOCATION="/var/lib/one/remotes" RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
ANSIBLE_LOCATION="/usr/share/one/oneprovision/ansible" REMOTES_LOCATION = '/var/lib/one/remotes'
ANSIBLE_LOCATION = '/usr/share/one/oneprovision/ansible'
else else
RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" LIB_LOCATION = ONE_LOCATION + '/usr/lib/one'
REMOTES_LOCATION=ONE_LOCATION+"/var/remotes" RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
ANSIBLE_LOCATION=ONE_LOCATION+"/usr/share/oneprovision/ansible" REMOTES_LOCATION = ONE_LOCATION + '/var/remotes'
ANSIBLE_LOCATION = ONE_LOCATION + '/usr/share/oneprovision/ansible'
end end
$: << RUBY_LIB_LOCATION $LOAD_PATH << RUBY_LIB_LOCATION
$: << RUBY_LIB_LOCATION+"/cli" $LOAD_PATH << RUBY_LIB_LOCATION + '/cli'
$LOAD_PATH << LIB_LOCATION + '/oneprovision/lib'
$PING_TIMEOUT = 20 PING_TIMEOUT_DEFAULT = 20
$PING_RETRIES = 10 PING_RETRIES_DEFAULT = 10
$MAX_RETRIES = 3 MAX_RETRIES_DEFAULT = 3
$RUN_MODE = :interactive RUN_MODE_DEFAULT = :interactive
$FAIL_CHOICE = :quit FAIL_CHOICE_DEFAULT = :quit
$CLEANUP = false CLEANUP_DEFAULT = false
$THREADS = 3 THREADS_DEFAULT = 3
require 'command_parser' require 'command_parser'
require 'one_helper' require 'one_helper'
require 'one_helper/oneprovision_helpers/common_helper' require 'one_helper/oneprovision_helper'
require 'one_helper/oneprovision_helpers/host_helper' require 'one_helper/onecluster_helper'
require 'one_helper/oneprovision_helpers/ansible_helper' require 'one_helper/onehost_helper'
require 'one_helper/oneprovision_helpers/datastore_helper' require 'one_helper/onedatastore_helper'
require 'one_helper/oneprovision_helpers/provision_helper' require 'one_helper/onevnet_helper'
require 'one_helper/oneprovision_helpers/cluster_helper'
require 'one_helper/oneprovision_helpers/vnet_helper' require 'oneprovision'
require 'tempfile'
require 'base64'
CommandParser::CmdParser.new(ARGV) do CommandParser::CmdParser.new(ARGV) do
usage "`oneprovision` <command> [<file>] [<hostid>] [<args>] [<options>]" usage '`oneprovision` <command> [<file>] [<hostid>] [<args>] [<options>]'
version OpenNebulaHelper::ONE_VERSION version OpenNebulaHelper::ONE_VERSION
$common_helper = CommonHelper.new helper = OneProvisionHelper.new
$host_helper = OneProvisionHostHelper.new
$ansible_helper = OneProvisionAnsibleHelper.new
$provision_helper = OneProvisionProvisionHelper.new
$cluster_helper = OneProvisionClusterHelper.new
$datastore_helper = OneProvisionDatastoreHelper.new
$vnet_helper = OneProvisionVnetHelper.new
$mutex = Mutex.new
before_proc do before_proc do
$common_helper.set_client(options) helper.set_client(options)
$host_helper.set_client(options)
$ansible_helper.set_client(options)
$provision_helper.set_client(options)
$cluster_helper.set_client(options)
$datastore_helper.set_client(options)
$vnet_helper.set_client(options)
end end
######################################################################## ########################################################################
@ -80,125 +68,138 @@ CommandParser::CmdParser.new(ARGV) do
######################################################################## ########################################################################
VERBOSE = { VERBOSE = {
:name => "verbose", :name => 'verbose',
:short => "-d", :short => '-d',
:large => "--verbose", :large => '--verbose',
:description => "Set verbose logging mode", :description => 'Set verbose logging mode'
} }
DEBUG = { DEBUG = {
:name => "debug", :name => 'debug',
:short => "-D", :short => '-D',
:large => "--debug" , :large => '--debug',
:description => "Set debug logging mode", :description => 'Set debug logging mode',
:format => String :format => String
} }
BATCH = { BATCH = {
:name => "batch", :name => 'batch',
:short => "-b", :short => '-b',
:large => "--batch", :large => '--batch',
:description => "Run in non-interactive mode (no questions)", :description => 'Run in non-interactive mode (no questions)',
:format => String, :format => String
} }
FAIL_RETRY = { FAIL_RETRY = {
:name => "fail_retry", :name => 'fail_retry',
:large => "--fail-retry number", :large => '--fail-retry number',
:description => "Set batch failover mode to number of retries", :description => 'Set batch failover mode to number of retries',
:format => Integer :format => Integer
} }
FAIL_CLEANUP = { FAIL_CLEANUP = {
:name => "fail_cleanup", :name => 'fail_cleanup',
:large => "--fail-cleanup", :large => '--fail-cleanup',
:description => "Set batch failover mode to clean up and quit", :description => 'Set batch failover mode to clean up and quit'
} }
FAIL_SKIP = { FAIL_SKIP = {
:name => "fail_skip", :name => 'fail_skip',
:large => "--fail-skip", :large => '--fail-skip',
:description => "Set batch failover mode to skip failing part", :description => 'Set batch failover mode to skip failing part'
} }
FAIL_QUIT = { FAIL_QUIT = {
:name => "fail_quit", :name => 'fail_quit',
:large => "--fail-quit", :large => '--fail-quit',
:description => "Set batch failover mode to quit (default)", :description => 'Set batch failover mode to quit (default)'
} }
FORCE = { FORCE = {
:name => "force", :name => 'force',
:short => "-F", :short => '-F',
:large => "--force", :large => '--force',
:description => "Force configure to execute", :description => 'Force configure to execute',
:format => String :format => String
} }
HARD = { HARD = {
:name => "hard", :name => 'hard',
:short => "-H", :short => '-H',
:large => "--hard", :large => '--hard',
:description => "Reset the host", :description => 'Reset the host',
:format => String :format => String
} }
PING_TIMEOUT = { PING_TIMEOUT = {
:name => "ping_timeout", :name => 'ping_timeout',
:large => "--ping-timeout seconds", :large => '--ping-timeout seconds',
:description => "Set timeout for ping (default: #{$PING_TIMEOUT} secs)", :description => 'Set timeout for ping ' \
"(default: #{PING_TIMEOUT_DEFAULT} secs)",
:format => Integer :format => Integer
} }
PING_RETRIES = { PING_RETRIES = {
:name => "ping_retries", :name => 'ping_retries',
:large => "--ping-retries number", :large => '--ping-retries number',
:description => "Set retries for ping (default: #{$PING_RETRIES})", :description => 'Set retries for ping ' \
"(default: #{PING_RETRIES_DEFAULT})",
:format => Integer :format => Integer
} }
THREADS = { THREADS = {
:name => "threads", :name => 'threads',
:short => "-t threads", :short => '-t threads',
:large => "--threads threads", :large => '--threads threads',
:description => "Set threads for create (default: #{$THREADS})", :description => "Set threads for create (default: #{THREADS_DEFAULT})",
:format => Integer :format => Integer
} }
DELETE_ALL = { DELETE_ALL = {
:name => "delete_all", :name => 'delete_all',
:large => "--delete-all", :large => '--delete-all',
:description => "Delete all vms and images first, then delete the resources." :description => 'Delete all vms and images first, ' \
'then delete the resources.'
} }
INCREMENTAL = { INCREMENTAL = {
:name => "incremental", :name => 'incremental',
:large => "--incremental", :large => '--incremental',
:description => "Configure just new hosts, default is configure the whole provision." :description => 'Configure just new hosts, default ' \
'is configure the whole provision.'
} }
DUMP = { DUMP = {
:name => "dump", :name => 'dump',
:large => "--dump", :large => '--dump',
:description => "Dump the configuration file result." :description => 'Dump the configuration file result.'
} }
MODES = CommandParser::OPTIONS - [ CommandParser::VERBOSE ] + [ MODES = CommandParser::OPTIONS - [CommandParser::VERBOSE] +
VERBOSE, DEBUG, BATCH, [VERBOSE,
FAIL_RETRY, FAIL_CLEANUP, FAIL_SKIP, FAIL_QUIT ] DEBUG,
BATCH,
FAIL_RETRY,
FAIL_CLEANUP,
FAIL_SKIP,
FAIL_QUIT]
CREATE_OPTIONS = [ THREADS, MODES, PING_TIMEOUT, PING_RETRIES] CREATE_OPTIONS = [THREADS, MODES, PING_TIMEOUT, PING_RETRIES]
ONE_OPTIONS = CommandParser::OPTIONS +
CLIHelper::OPTIONS +
OpenNebulaHelper::OPTIONS
######################################################################## ########################################################################
# Formatters for arguments # Formatters for arguments
######################################################################## ########################################################################
set :format, :hostid, CommonHelper.to_id_desc do |arg| set :format, :hostid, OneProvisionHelper.to_id_desc do |arg|
$host_helper.to_id(arg) helper.to_id(arg)
end end
set :format, :hostid_list, CommonHelper.list_to_id_desc do |arg| set :format, :hostid_list, OneProvisionHelper.list_to_id_desc do |arg|
$host_helper.list_to_id(arg) helper.list_to_id(arg)
end end
######################################################################## ########################################################################
@ -209,10 +210,10 @@ CommandParser::CmdParser.new(ARGV) do
Provision a new cluster via bare metal provider Provision a new cluster via bare metal provider
EOT EOT
command :create, create_desc, :config, :options=>CREATE_OPTIONS do command :create, create_desc, :config, :options => CREATE_OPTIONS do
$common_helper.get_mode(options) helper.parse_options(options)
$provision_helper.create(args[0], options) helper.create(args[0])
0 0
end end
@ -223,8 +224,8 @@ CommandParser::CmdParser.new(ARGV) do
Validate configuration file Validate configuration file
EOT EOT
command :validate, validate_desc, [:config_file], :options=>DUMP do command :validate, validate_desc, [:config_file], :options => DUMP do
$common_helper.validate_configuration(args[0], options) OneProvision::Utils.validate_configuration(args[0], (options.key? :dump))
0 0
end end
@ -235,11 +236,11 @@ CommandParser::CmdParser.new(ARGV) do
List all avaliable provisions List all avaliable provisions
EOT EOT
#TODO add xml option # TODO: add xml option
command :list, provision_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS do command :list,
columns = ['clusters', 'hosts', 'networks', 'datastores'] provision_list_desc,
:options => CommandParser::OPTIONS + CLIHelper::OPTIONS do
$provision_helper.format_pool(options).show($provision_helper.get_list(columns, true), options) helper.list(options)
0 0
end end
@ -250,8 +251,11 @@ CommandParser::CmdParser.new(ARGV) do
Show provision details Show provision details
EOT EOT
command :show, provision_show_desc, [:provisionid], :options=>CommandParser::OPTIONS do command :show,
$provision_helper.show(args[0], options) provision_show_desc,
[:provisionid],
:options => CommandParser::OPTIONS do
helper.show(args[0])
0 0
end end
@ -262,43 +266,30 @@ CommandParser::CmdParser.new(ARGV) do
Run configuration in all provision hosts Run configuration in all provision hosts
EOT EOT
command :configure, provision_configure_desc, :provisionid, :options=>[MODES,FORCE] do command :configure,
$common_helper.get_mode(options) provision_configure_desc,
:provisionid,
:options => [MODES, FORCE] do
helper.parse_options(options)
$provision_helper.configure(args[0], options) helper.configure(args[0], options)
0 0
end end
### ###
=begin
provision_update_desc = <<-EOT
Update the provision, adding more resources
EOT
command :update, provision_update_desc, :provisionid, :config, :options=>[CREATE_OPTIONS, INCREMENTAL] do
$common_helper.get_mode(options)
$provision_helper.update(args[0], args[1], options)
0
end
=end
###
provision_delete_desc = <<-EOT provision_delete_desc = <<-EOT
Deletes and unprovisions all the resources Deletes and unprovisions all the resources
EOT EOT
command :delete, provision_delete_desc, :provisionid, :options=>[MODES,THREADS,DELETE_ALL] do command :delete,
$common_helper.get_mode(options) provision_delete_desc,
:provisionid,
:options => [MODES, THREADS, DELETE_ALL] do
helper.parse_options(options)
provision = Provision.new(args[0]) helper.delete(args[0])
provision.refresh
provision.delete((options.has_key? :delete_all))
0 0
end end
@ -311,10 +302,14 @@ CommandParser::CmdParser.new(ARGV) do
List all availables clusters List all availables clusters
EOT EOT
command [:cluster, :list], cluster_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS + [OpenNebulaHelper::DESCRIBE] do command [:cluster, :list],
columns = ['hosts', 'networks', 'datastores'] cluster_list_desc,
:options => ONE_OPTIONS + [OpenNebulaHelper::DESCRIBE] do
columns = %w[hosts vnets datastores]
$cluster_helper.format_pool(options).show($provision_helper.get_list(columns, false), options) cluster_helper = OneClusterHelper.new
cluster_helper.format_pool(options)
.show(helper.get_list(columns, false), options)
0 0
end end
@ -325,14 +320,22 @@ CommandParser::CmdParser.new(ARGV) do
Deletes and unprovisions the given cluster Deletes and unprovisions the given cluster
EOT EOT
command [:cluster, :delete], cluster_delete_desc, [:range,:clusterid_list], :options=>[MODES,FORCE] do command [:cluster, :delete],
$common_helper.get_mode(options) cluster_delete_desc,
[:range, :clusterid_list],
:options => [MODES, FORCE] do
cluster_helper = OneClusterHelper.new
cluster_helper.set_client(options)
cluster_helper.perform_actions(args[0], options, 'deleted') do |cluster|
msg = "Deleting cluster #{cluster['ID']}"
Oneprovision::OneProvisionLogger.info(msg)
$cluster_helper.perform_actions(args[0],options,"deleted") do |cluster|
rc = cluster.delete rc = cluster.delete
if OpenNebula.is_error?(rc) if OpenNebula.is_error?(rc)
$common_helper.fail(rc.message) OneProvision::Utils.fail(rc.message)
end end
end end
@ -343,32 +346,18 @@ CommandParser::CmdParser.new(ARGV) do
# Host Commands # Host Commands
######################################################################## ########################################################################
=begin
host_create_desc = <<-EOT.unindent
Create a single host
EOT
command [:host, :create], create_desc, :config, :options=>CREATE_OPTIONS do
$common_helper.get_mode(options)
$provision_helper.create(args[0], options)
0
end
=end
###
host_resume_desc = <<-EOT.unindent host_resume_desc = <<-EOT.unindent
Resume the host Resume the host
EOT EOT
command [:host, :resume], host_resume_desc, [:range,:hostid_list], :options=>MODES do command %i[host resume],
$common_helper.get_mode(options) host_resume_desc,
%i[range hostid_list],
$host_helper.perform_actions(args[0],options,"enabled") do |host| :options => [MODES] do
$host_helper.resume_host(host) helper.hosts_operation(args[0],
end { :operation => 'resume',
:message => 'enabled' },
options)
end end
### ###
@ -377,12 +366,14 @@ CommandParser::CmdParser.new(ARGV) do
Power off the host Power off the host
EOT EOT
command [:host, :poweroff], host_poweroff_desc, [:range,:hostid_list], :options=>MODES do command %i[host poweroff],
$common_helper.get_mode(options) host_poweroff_desc,
%i[range hostid_list],
$host_helper.perform_actions(args[0],options,"disabled") do |host| :options => [MODES] do
$host_helper.poweroff_host(host) helper.hosts_operation(args[0],
end { :operation => 'poweroff',
:message => 'disabled' },
options)
end end
### ###
@ -391,12 +382,14 @@ CommandParser::CmdParser.new(ARGV) do
Reboot the host Reboot the host
EOT EOT
command [:host, :reboot], host_reboot_desc, [:range,:hostid_list], :options=>[MODES, HARD] do command %i[host reboot],
$common_helper.get_mode(options) host_reboot_desc,
%i[range hostid_list],
$host_helper.perform_actions(args[0],options,"enabled") do |host| :options => [MODES, HARD] do
$host_helper.reboot_host(host, options) helper.hosts_operation(args[0],
end { :operation => 'reboot',
:message => 'enabled' },
options)
end end
### ###
@ -405,18 +398,14 @@ CommandParser::CmdParser.new(ARGV) do
Unprovisions and deletes the given Host Unprovisions and deletes the given Host
EOT EOT
command [:host, :delete], host_delete_desc, [:range,:hostid_list], :options=>MODES do command %i[host delete],
$common_helper.get_mode(options) host_delete_desc,
%i[range hostid_list],
$host_helper.perform_actions(args[0],options,"deleted") do |host| :options => [MODES] do
host.info helper.hosts_operation(args[0],
{ :operation => 'delete',
begin :message => 'deleted' },
$host_helper.delete_host(host) options)
rescue => e
$common_helper.fail("Delete failed on exception: #{e.to_s}")
end
end
end end
### ###
@ -425,12 +414,14 @@ CommandParser::CmdParser.new(ARGV) do
Run configuration on the host Run configuration on the host
EOT EOT
command [:host, :configure], host_configure_desc, [:range,:hostid_list], :options=>[MODES,FORCE] do command %i[host configure],
$common_helper.get_mode(options) host_configure_desc,
%i[range hostid_list],
$host_helper.perform_actions(args[0],options,"configured") do |host| :options => [MODES, FORCE] do
$host_helper.configure_host([host], options) helper.hosts_operation(args[0],
end { :operation => 'configure',
:message => 'enabled' },
options)
end end
### ###
@ -439,9 +430,15 @@ CommandParser::CmdParser.new(ARGV) do
Establish SSH conection to the host Establish SSH conection to the host
EOT EOT
command [:host, :ssh], host_ssh_desc, :hostid, [:command, nil], :options=>CommandParser::OPTIONS do command %i[host ssh],
$host_helper.perform_action(args[0],options,"") do |host| host_ssh_desc,
$host_helper.ssh_host(host, args) :hostid,
[:command, nil],
:options => CommandParser::OPTIONS do
host_helper = OneHostHelper.new
host_helper.perform_action(args[0], options, '') do |host|
host = OneProvision::Host.new(host['ID'])
host.ssh(args[1])
end end
end end
@ -451,12 +448,21 @@ CommandParser::CmdParser.new(ARGV) do
Lists bare metal Hosts in the pool Lists bare metal Hosts in the pool
EOT EOT
command [:host, :list], host_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS + [OpenNebulaHelper::DESCRIBE] do command [:host, :list],
if !options.has_key? :filter host_list_desc,
options.merge!(filter: ["PROVIDER!=-"]) :options => ONE_OPTIONS + [OpenNebulaHelper::DESCRIBE] do
if !options.key? :filter
options[:filter] = ['PROVIDER!=-']
end end
$host_helper.list_pool(options) if !options.key? :list
options[:list] = %w[ID NAME CLUSTER ALLOCATED_CPU
ALLOCATED_MEM PROVIDER STAT]
end
host_helper = OneHostHelper.new
host_helper.set_client(options)
host_helper.list_pool(options)
end end
### ###
@ -465,12 +471,21 @@ CommandParser::CmdParser.new(ARGV) do
Lists bare metal Hosts continuously Lists bare metal Hosts continuously
EOT EOT
command [:host, :top], host_top_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS do command [:host, :top],
if !options.has_key? :filter host_top_desc,
options.merge!(filter: ["PROVIDER!=-"]) :options => ONE_OPTIONS do
if !options.key? :filter
options[:filter] = ['PROVIDER!=-']
end end
$host_helper.list_pool(options, true) if !options.key? :list
options[:list] = %w[ID NAME CLUSTER ALLOCATED_CPU
ALLOCATED_MEM PROVIDER STAT]
end
host_helper = OneHostHelper.new
host_helper.set_client(options)
host_helper.list_pool(options, true)
end end
######################################################################## ########################################################################
@ -481,8 +496,12 @@ CommandParser::CmdParser.new(ARGV) do
List all availables datastores List all availables datastores
EOT EOT
command [:datastore, :list], datastore_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS + [OpenNebulaHelper::DESCRIBE] do command [:datastore, :list], datastore_list_desc,
$datastore_helper.format_pool(options).show($provision_helper.get_provision_resources(['datastores']), options) :options => ONE_OPTIONS + [OpenNebulaHelper::DESCRIBE] do
datastore_helper = OneDatastoreHelper.new
datastore = OneProvision::Datastore.new
datastore_helper.format_pool(options).show(datastore.get)
0 0
end end
@ -493,11 +512,21 @@ CommandParser::CmdParser.new(ARGV) do
Deletes and unprovisions the given datastore Deletes and unprovisions the given datastore
EOT EOT
command [:datastore, :delete], datastore_delete_desc, [:range,:datastoreid_list], :options=>[MODES,FORCE] do command [:datastore, :delete],
$common_helper.get_mode(options) datastore_delete_desc,
[:range, :datastoreid_list],
:options => [MODES, FORCE] do
helper.parse_options(options)
$datastore_helper.perform_actions(args[0],options,"deleted") do |datastore| datastore_helper = OneDatastoreHelper.new
$logger.info("Deleting datastore #{datastore['ID']}") datastore_helper.set_client(options)
datastore_helper.perform_actions(args[0],
options,
'deleted') do |datastore|
msg = "Deleting datastore #{datastore['ID']}"
OneProvision::OneProvisionLogger.info(msg)
datastore.delete datastore.delete
end end
@ -513,8 +542,13 @@ CommandParser::CmdParser.new(ARGV) do
List all availables virtual networks List all availables virtual networks
EOT EOT
command [:vnet, :list], vnet_list_desc, :options=>CommandParser::OPTIONS+CLIHelper::OPTIONS+OpenNebulaHelper::OPTIONS + [OpenNebulaHelper::DESCRIBE] do command [:vnet, :list],
$vnet_helper.format_pool(options).show($provision_helper.get_provision_resources(['networks']), options) vnet_list_desc,
:options => ONE_OPTIONS + [OpenNebulaHelper::DESCRIBE] do
vnet_helper = OneVNetHelper.new
vnet = OneProvision::Vnet.new
vnet_helper.format_pool(options).show(vnet.get, options)
0 0
end end
@ -525,11 +559,17 @@ CommandParser::CmdParser.new(ARGV) do
Deletes and unprovisions the given virtual network Deletes and unprovisions the given virtual network
EOT EOT
command [:vnet, :delete], vnet_delete_desc, [:range,:vnetid_list], :options=>[MODES,FORCE] do command [:vnet, :delete],
$common_helper.get_mode(options) vnet_delete_desc,
[:range, :vnetid_list],
:options => [MODES, FORCE] do
helper.parse_options(options)
$vnet_helper.perform_actions(args[0],options,"deleted") do |vnet| vnet_helper = OneVNetHelper.new
$logger.info("Deleting vnet #{vnet['ID']}") vnet_helper.set_client(options)
vnet_helper.perform_actions(args[0], options, 'deleted') do |vnet|
OneProvision::OneProvisionLogger.info("Deleting vnet #{vnet['ID']}")
vnet.delete vnet.delete
end end

View File

@ -0,0 +1,351 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'yaml'
require 'nokogiri'
require 'tempfile'
require 'tmpdir'
require 'json'
require 'base64'
require 'erb'
require 'ostruct'
# Default provision parameters
CONFIG_DEFAULTS = {
'connection' => {
'remote_user' => 'root',
'remote_port' => 22,
'public_key' => '/var/lib/one/.ssh/ddc/id_rsa.pub',
'private_key' => '/var/lib/one/.ssh/ddc/id_rsa'
}
}
# Ansible params
ANSIBLE_VERSION = [Gem::Version.new('2.5'), Gem::Version.new('2.7')]
ANSIBLE_ARGS = "--ssh-common-args='-o UserKnownHostsFile=/dev/null'"
ANSIBLE_INVENTORY_DEFAULT = 'default'
module OneProvision

    # Helper functions to configure provisioned hosts with Ansible
    module Ansible

        # Exposes a hash's keys as methods inside an ERB template binding
        class ERBVal < OpenStruct

            # Renders an ERB template using the given hash as variables
            #
            # @param template [String] ERB template content
            # @param hash     [Hash]   Variables exposed to the template
            #
            # @return [String] Rendered result
            def self.render_from_hash(template, hash)
                ERBVal.new(hash).render(template)
            end

            # Renders an ERB template in the context of this object
            #
            # @param template [String] ERB template content
            #
            # @return [String] Rendered result
            def render(template)
                ERB.new(template).result(binding)
            end

        end

        class << self

            # Checks ansible installed version
            #
            # Fails unless the local ansible version is inside the
            # supported range [ANSIBLE_VERSION[0], ANSIBLE_VERSION[1])
            def check_ansible_version
                version = Gem::Version.new(`ansible --version`.split[1])

                if (version < ANSIBLE_VERSION[0]) ||
                   (version >= ANSIBLE_VERSION[1])
                    Utils.fail("Unsupported Ansible ver. #{version}, " \
                         "must be >= #{ANSIBLE_VERSION[0]} " \
                         "and < #{ANSIBLE_VERSION[1]}")
                end
            end

            # Checks if hosts are configured or not
            #
            # @param hosts [Array of OpenNebula::Host] Hosts to configure
            # @param force [Boolean] True to force configuration
            #   in configured hosts
            def configure(hosts, force = nil)
                # remains true only while every host reports 'configured'
                configured = true

                hosts.each do |host|
                    host.info

                    status = host['TEMPLATE/PROVISION_CONFIGURATION_STATUS']

                    host = Host.new(host['ID'])
                    host.check

                    configured &&= (status == 'configured' && !force)
                end

                if configured && !force
                    Utils.fail('Hosts are already configured')
                end

                configure_all(hosts)
            end

            # TODO: expect multiple hosts
            # Configures the hosts
            #
            # @param hosts [Array of OpenNebula::Host] Hosts to configure
            # @param ping  [Boolean] True to check ping to hosts
            def configure_all(hosts, ping = true)
                check_ansible_version

                ansible_dir = generate_ansible_configs(hosts)

                try_ssh(ansible_dir) if ping

                # offline ONE host
                OneProvisionLogger.info('Configuring hosts')

                # build Ansible command
                cmd = "ANSIBLE_CONFIG=#{ansible_dir}/ansible.cfg "
                cmd += "ansible-playbook #{ANSIBLE_ARGS}"
                cmd << " -i #{ansible_dir}/inventory"
                cmd << " -i #{ANSIBLE_LOCATION}/inventories/#{@inventory}/"
                cmd << " #{ANSIBLE_LOCATION}/#{@inventory}.yml"

                o, _e, s = Driver.run(cmd)

                if s && s.success?
                    # enable configured ONE host back
                    OneProvisionLogger.debug('Enabling OpenNebula hosts')

                    configured = 'PROVISION_CONFIGURATION_STATUS=configured'

                    hosts.each do |host|
                        host.update(configured, true)
                        host.enable
                    end
                else
                    error = 'PROVISION_CONFIGURATION_STATUS=error'

                    hosts.each do |host|
                        host.update(error, true)
                    end

                    errors = parse_ansible(o) if o

                    raise OneProvisionLoopException, errors
                end
            end

            # Retries ssh connection
            #
            # @param ansible_dir [Dir] Directory with all
            #   the ansible information
            #
            # @return [Boolean] True if the ssh connection works
            def retry_ssh(ansible_dir)
                ret = false
                retries = 0

                while !ret && retries < Options.ping_retries
                    begin
                        ret = ansible_ssh(ansible_dir)
                    rescue OneProvisionLoopException
                        retries += 1
                        sleep(Options.ping_timeout)
                    end
                end

                ret
            end

            # Checks ssh connection
            #
            # @param ansible_dir [Dir] Directory with all
            #   the ansible information
            def try_ssh(ansible_dir)
                OneProvisionLogger.info('Checking working SSH connection')

                return if retry_ssh(ansible_dir)

                Driver.retry_loop 'SSH connection is failing' do
                    ansible_ssh(ansible_dir)
                end
            end

            # Parses the ansible output
            #
            # @param stdout [String] Ansible ouput
            #
            # @return [String] Parsed output, one line per failed task
            def parse_ansible(stdout)
                rtn  = []
                task = 'UNKNOWN'

                stdout.lines.each do |line|
                    task = Regexp.last_match(1) if line =~ /^TASK \[(.*)\]/i

                    next unless line =~ /^fatal:/i

                    host = 'UNKNOWN'
                    text = ''

                    if line =~ /^fatal: \[([^\]]+)\]: .* => ({.*})$/i
                        host = Regexp.last_match(1)

                        text = JSON.parse(Regexp.last_match(2))['msg']
                                   .strip.tr("\n", ' ')
                        text = "- #{text}"
                    elsif line =~ /^fatal: \[([^\]]+)\]: .* =>/i
                        host = Regexp.last_match(1)
                    end

                    # %15<h>s right-aligns the host name in a 15 char
                    # column ('- 15%<h>s' printed a literal "15")
                    rtn << format('- %15<h>s : TASK[%<t>s] %<tx>s',
                                  :h => host, :t => task, :tx => text)
                end

                rtn.join("\n")
            end

            # Checks ssh connection
            #
            # @param ansible_dir [Dir] Directory with
            #   all the ansible information
            #
            # @return [Boolean] True if the ssh connection works
            def ansible_ssh(ansible_dir)
                # Note: We want only to check the working SSH connection, but
                # Ansible "ping" module requires also Python to be installed on
                # the remote side, otherwise fails. So we use only "raw"
                # module with simple command. Python should be
                # installed by "configure" phase later.
                #
                # Older approach with "ping" module:
                # ANSIBLE_CONFIG=#{ansible_dir}/ansible.cfg ansible
                # #{ANSIBLE_ARGS} -m ping all -i #{ansible_dir}/inventory

                cmd = "ANSIBLE_CONFIG=#{ansible_dir}"
                cmd += '/ansible.cfg ANSIBLE_BECOME=false'
                cmd << " ansible #{ANSIBLE_ARGS}"
                cmd << " -i #{ansible_dir}/inventory"
                cmd << ' -m raw all -a /bin/true'

                o, _e, s = Driver.run(cmd)

                # a nil status (command could not even run) is a failure;
                # '!s && !s.success?' crashed with NoMethodError on nil
                raise OneProvisionLoopException if !s || !s.success?

                hosts = o.lines.count {|l| l =~ /success/i }

                raise OneProvisionLoopException if hosts.zero?

                true
            end

            # TODO: support different variables and
            #   connection parameters for each host
            # Generates ansible configurations
            #
            # @param hosts [Array of OpenNebula::Host] Hosts to configure
            #
            # @return [Dir] Directory with all the Ansible information
            def generate_ansible_configs(hosts)
                ansible_dir = Dir.mktmpdir
                msg = "Generating Ansible configurations into #{ansible_dir}"

                OneProvisionLogger.debug(msg)

                # Generate 'inventory' file
                c = "[nodes]\n"

                hosts.each do |h|
                    h.info
                    c << "#{h['NAME']}\n"
                end

                c << "\n"

                Driver.write_file_log("#{ansible_dir}/inventory", c)

                # Generate "host_vars" directory
                Dir.mkdir("#{ansible_dir}/host_vars")

                hosts.each do |h|
                    h.info

                    var = h['TEMPLATE/PROVISION_CONFIGURATION_BASE64']
                    var = YAML.safe_load(Base64.decode64(var)) if var
                    var ||= {}
                    c = YAML.dump(var)
                    fname = "#{ansible_dir}/host_vars/#{h['NAME']}.yml"
                    Driver.write_file_log(fname, c)
                end

                # playbook selection: first host's template wins
                if hosts[0]['TEMPLATE/ANSIBLE_PLAYBOOK']
                    @inventory = hosts[0]['TEMPLATE/ANSIBLE_PLAYBOOK']
                else
                    @inventory = ANSIBLE_INVENTORY_DEFAULT
                end

                # Generate "ansible.cfg" file
                # TODO: what if private_key isn't filename, but content
                # TODO: store private key / packet
                #   credentials securely in the ONE
                ruser = hosts[0]['TEMPLATE/PROVISION_CONNECTION/REMOTE_USER']
                rport = hosts[0]['TEMPLATE/PROVISION_CONNECTION/REMOTE_PORT']
                prkey = hosts[0]['TEMPLATE/PROVISION_CONNECTION/PRIVATE_KEY']
                roles = "#{ANSIBLE_LOCATION}/roles"

                vals = { :remote_user => ruser,
                         :remote_port => rport,
                         :private_key => prkey,
                         :roles => roles }

                c = File.read("#{ANSIBLE_LOCATION}/ansible.cfg.erb")
                c = ERBVal.render_from_hash(c, vals)

                Driver.write_file_log("#{ansible_dir}/ansible.cfg", c)

                # TODO: site.yaml
                # logger(inventoryContent +
                #   File.open("#{$ANSIBLE_LOCATION}/site.yml").read(), true)

                ansible_dir
            end

            # Gets host connection options
            #
            # @param host [OpenNebula::Host] Host to get connections options
            #
            # @return [Key-Value Object] Connections options
            def get_host_template_conn(host)
                conn = {}

                # TODO: some nice / generic way (even for configuration?)
                tmpl = host.to_hash['HOST']['TEMPLATE']['PROVISION_CONNECTION']
                tmpl ||= {}
                tmpl.each_pair do |key, value|
                    conn[key.downcase] = value
                end

                conn
            end

        end

    end

end

View File

@ -0,0 +1,46 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'resource'
module OneProvision

    # Provision-managed OpenNebula cluster
    class Cluster < Resource

        # Class constructor
        #
        # @param id [Integer] Id of the cluster
        def initialize(id = nil)
            super('Cluster', id)
        end

        # Creates a new CLUSTER in OpenNebula
        #
        # @param template     [String] Template of the CLUSTER
        # @param provision_id [String] ID of the provision
        def create(template, provision_id)
            # tag the cluster with the provision it belongs to
            template['provision']['provision_id'] = provision_id

            super(template['name'], Utils.template_like_str(template))
        end

    end

end

View File

@ -0,0 +1,46 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'resource'
module OneProvision
# Datastore
class Datastore < Resource
# Class constructor
def initialize
super 'Datastore'
end
# Creates a new DATASTORE in OpenNebula
#
# @param cluster_id [Integer] ID of the CLUSTER where is the DATASTORE
# @param template [String] Template of the DATASTORE
# @param pm_mad [String] Provision Manager Driver
# @param provision_id [String] ID of the provision
def create(cluster_id, template, pm_mad, provision_id)
template['provision']['provision_id'] = provision_id
template = Utils.template_like_str(template)
template += "PM_MAD=\"#{pm_mad}\"\n"
super(cluster_id, template)
end
end
end

View File

@ -0,0 +1,231 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
module OneProvision
# Driver
module Driver
class << self
# Mutex to synchronize console output
@@mutex = Mutex.new
# Retry the operation in case of failure
#
# @param text [String] Operation
# @param cleanup [Boolean] True if the operation can be cleaned up
# @param block [Ruby Code] Block of code to execute
# Retry the operation in case of failure
#
# Runs the given block; on OneProvisionLoopException asks (or decides,
# in batch mode) whether to retry, skip, quit or clean up.
#
# @param text    [String]  Operation description shown on failure
# @param cleanup [Boolean] True if the operation can be cleaned up
# @param block   [Ruby Code] Block of code to execute
def retry_loop(text, cleanup = Mode.cleanup)
    retries = 0

    begin
        yield
    rescue OneProvisionLoopException => e
        STDERR.puts "ERROR: #{text}\n#{e.text}"

        retries += 1

        # in batch mode give up once the retry budget is exhausted
        if retries > Mode.max_retries && Mode.mode == :batch
            exit(-1)
        end

        choice = Mode.fail_choice

        if Mode.mode == :interactive
            begin
                # synchronize so parallel operations don't interleave
                # their prompts on the console
                @@mutex.synchronize do
                    cli = HighLine.new($stdin, $stderr)

                    choice = cli.choose do |menu|
                        menu.prompt = 'Choose failover method:'
                        menu.choices(:quit, :retry, :skip)
                        menu.choices(:cleanup) if cleanup
                        menu.default = choice
                    end
                end
            rescue EOFError
                # stdin closed: fall back to the preselected choice
                STDERR.puts choice
            rescue Interrupt => e
                exit(-1)
            end
        end

        if choice == :retry
            retry
        elsif choice == :quit
            exit(-1)
        elsif choice == :skip
            return
        elsif choice == :cleanup
            raise OneProvisionCleanupException if cleanup

            Utils.fail('Cleanup unsupported for this operation')
        end

        # defensive default: any unrecognized choice aborts
        exit(-1)
    end
end
# Runs commands
#
# @param cmd [Array] Command and arguments to execute
# @param block [Ruby Code]
#
# @return [Array] Output, Error and Value returned
def run(*cmd, &_block)
OneProvisionLogger.debug("Command run: #{cmd.join(' ')}")
rtn = nil
begin
if Hash == cmd.last
opts = cmd.pop.dup
else
opts = {}
end
stdin_data = opts.delete(:stdin_data) || ''
binmode = opts.delete(:binmode)
Open3.popen3(*cmd, opts) do |i, o, e, t|
if binmode
i.binmode
o.binmode
e.binmode
end
out_reader = Thread.new { o.read }
err_reader = Thread.new { e.read }
begin
i.write stdin_data
rescue Errno::EPIPE => e
raise OneProvisionLoopException, e.text
end
begin
i.close
rescue IOError => e
raise OneProvisionLoopException, e.text
end
rtn = [out_reader.value, err_reader.value, t.value]
end
@@mutex.synchronize do
if rtn
if !rtn[0].empty?
OneProvisionLogger.debug('Command STDOUT: ' \
"#{rtn[0].strip}")
end
if !rtn[1].empty?
OneProvisionLogger.debug('Command STDERR: ' \
"#{rtn[1].strip}")
end
if rtn[2].success?
OneProvisionLogger.debug('Command succeeded')
else
OneProvisionLogger.warn('Command FAILED ' \
"(code=#{rtn[2].exitstatus}): " \
"#{cmd.join(' ')}")
end
else
OneProvisionLogger
.error('Command failed on unknown error')
end
end
rescue Interrupt
Utils.fail('Command interrupted')
rescue StandardError => e
OneProvisionLogger.error("Command exception: #{e.message}")
end
rtn
end
# Executes an action in the host
#
# @param pm_mad [String] Provision Manager Driver
# @param action [String] Action to executue
# @param args [Array] Arguments for the driver
# @param host [OpenNebula::Host] Host where execute the action
#
# @return [String] Output of the commmand
def pm_driver_action(pm_mad, action, args, host = nil)
cmd = ["#{REMOTES_LOCATION}/pm/#{pm_mad}/#{action}"]
args.each do |arg|
cmd << arg
end
# action always gets host ID/name
# if host defined, same as for VMs:
# https://github.com/OpenNebula/one/blob/d95b883e38a2cee8ca9230b0dbef58ce3b8d6d6c/src/mad/ruby/OpenNebulaDriver.rb#L95
@@mutex.synchronize do
unless host.nil?
cmd << host.id
cmd << host.name
end
end
unless File.executable? cmd[0]
OneProvisionLogger.error('Command not found or ' \
"not executable #{cmd[0]}")
Utils.fail('Driver action script not executable')
end
o = nil
retry_loop "Driver action '#{cmd[0]}' failed" do
o, e, s = run(cmd.join(' '))
unless s && s.success?
err = Utils.get_error_message(e)
text = err.lines[0].strip if err
text = 'Unknown error' if text == '-'
raise OneProvisionLoopException, text
end
end
o
end
# TODO: handle exceptions?
#
# Writes content to file
#
# @param name [String] Name of the file
# @param content [String] Content of the file
def write_file_log(name, content)
@@mutex.synchronize do
OneProvisionLogger.debug("Creating #{name}:\n" + content)
end
f = File.new(name, 'w')
f.write(content)
f.close
end
end
end
end

View File

@ -0,0 +1,291 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'resource'
module OneProvision

    # Host: wraps an OpenNebula bare-metal host managed by a PM driver
    class Host < Resource

        # Mutex to synchronize delete operations
        @@mutex = Mutex.new

        # Class constructor
        #
        # @param id [Integer] ID of an existing host, nil to build a new one
        def initialize(id = nil)
            super('Host', id)
        end

        # Checks if there are running VMs on the host
        #
        # @return [Boolean] True when the host reports running VMs
        def running_vms?
            @one['HOST_SHARE/RUNNING_VMS'].to_i > 0
        end

        # Establishes an SSH connection to the host
        #
        # @param command [String] Command to execute in the host
        def ssh(command)
            ip = @one['NAME']
            private_key = @one['TEMPLATE/PROVISION_CONNECTION/PRIVATE_KEY']
            remote_user = @one['TEMPLATE/PROVISION_CONNECTION/REMOTE_USER']

            exec("ssh -i #{private_key} #{remote_user}@#{ip} '#{command}'")
        end

        # Gets the public IP of the host
        #
        # @return [String] Public IP which is the NAME of the host
        def poll
            poll = monitoring

            if poll.key? 'GUEST_IP_ADDRESSES'
                name = poll['GUEST_IP_ADDRESSES'].split(',')[0][1..-1] # TODO
            elsif poll.key? 'AWS_PUBLIC_IP_ADDRESS'
                name = poll['AWS_PUBLIC_IP_ADDRESS'][2..-3]
            else
                Utils.fail('Failed to get provision name')
            end

            name
        end

        # Creates a new HOST in OpenNebula
        #
        # @param dfile    [String]  XML with all the HOST information
        # @param cluster  [Integer] ID of the CLUSTER where
        #                           the HOST will be allocated
        # @param playbook [String]  Ansible playbook for configuring the HOST
        #
        # @return [OpenNebula::Host] The ONE HOST object
        def create(dfile, cluster, playbook)
            xhost = OpenNebula::XMLElement.new
            xhost.initialize_xml(dfile, 'HOST')

            name = xhost['NAME']

            OneProvisionLogger.debug("Creating OpenNebula host: #{name}")

            one = OpenNebula::Client.new
            host = OpenNebula::Host.new(OpenNebula::Host.build_xml, one)

            im = xhost['TEMPLATE/IM_MAD']
            vm = xhost['TEMPLATE/VM_MAD']

            host.allocate(name, im, vm, cluster)
            host.update(xhost.template_str, true)

            if !playbook.nil?
                host.update("ANSIBLE_PLAYBOOK=#{playbook}", true)
            end

            host.offline
            host.info

            OneProvisionLogger.debug("host created with ID: #{host['ID']}")

            host
        end

        # Resumes the host
        def resume
            pm_mad = @one['TEMPLATE/PM_MAD']

            check

            begin
                # create resume deployment file
                resume_file = Tempfile.new('xmlResume')
                resume_file.close

                Driver.write_file_log(resume_file.path, @one.to_xml)

                OneProvisionLogger.info("Resuming host: #{@one.id}")

                params = [resume_file.path, @one.name]

                Driver.pm_driver_action(pm_mad, 'deploy', params, @one)

                OneProvisionLogger.debug("Enabling OpenNebula host: #{@one.id}")

                name = poll

                @one.rename(name)
                @one.enable
            ensure
                resume_file.unlink
            end
        end

        # Powers off the host
        def poweroff
            pm_mad = @one['TEMPLATE/PM_MAD']
            deploy_id = @one['TEMPLATE/PROVISION/DEPLOY_ID']
            name = @one.name

            check

            OneProvisionLogger.info("Powering off host: #{@one.id}")

            params = [deploy_id, name, 'SHUTDOWN_POWEROFF']

            Driver.pm_driver_action(pm_mad, 'shutdown', params, @one)

            OneProvisionLogger.debug("Offlining OpenNebula host: #{@one.id}")

            # Fix broken pipe exception on ubuntu 14.04
            @one.info

            @one.offline
        end

        # Reboots or resets the host
        #
        # @param hard [Boolean] True to reset the host, false to reboot it
        def reboot(hard)
            reset(hard)
        end

        # Deletes the host (offline in ONE, undeploy via driver, delete in ONE)
        def delete
            pm_mad = @one['TEMPLATE/PM_MAD']
            deploy_id = @one['TEMPLATE/PROVISION/DEPLOY_ID']
            name = @one.name
            id = @one.id

            check

            # offline ONE host (state 8 == already offline)
            if @one.state != 8
                OneProvisionLogger.debug("Offlining OpenNebula host: #{id}")

                @@mutex.synchronize do
                    rc = @one.offline

                    if OpenNebula.is_error?(rc)
                        raise OneProvisionLoopException, rc.message
                    end
                end
            end

            # unprovision host
            OneProvisionLogger.debug("Undeploying host: #{id}")

            Driver.pm_driver_action(pm_mad, 'cancel', [deploy_id, name], @one)

            # delete ONE host
            OneProvisionLogger.debug("Deleting OpenNebula host: #{id}")

            @@mutex.synchronize do
                # Fix ubuntu 14.04 broken pipe
                @one.info

                rc = @one.delete

                if OpenNebula.is_error?(rc)
                    raise OneProvisionLoopException, rc.message
                end
            end
        end

        # Configures the host
        #
        # @param force [Boolean] Force the configuration if the host
        #                        is already configured
        def configure(force)
            Ansible.configure([@one], force)
        end

        # Checks that the host is a bare metal host (has a PM driver)
        def check
            pm_mad = @one['TEMPLATE/PM_MAD']

            Utils.fail('Not a valid bare metal host') if pm_mad.nil?
            Utils.fail('Not a valid bare metal host') if pm_mad.empty?
        end

        # Monitors the host
        #
        # @return [Key-Value object] All the monitoring information, such as
        #                            IPS, MEMORY, CPU..
        def monitoring
            pm_mad = @one['TEMPLATE/PM_MAD']
            deploy_id = @one['TEMPLATE/PROVISION/DEPLOY_ID']
            name = @one.name
            id = @one.id

            OneProvisionLogger.debug("Monitoring host: #{id}")

            Driver.retry_loop 'Monitoring metrics failed to parse' do
                check

                params = [deploy_id, name]

                pm_ret = Driver.pm_driver_action(pm_mad, 'poll', params, @one)

                begin
                    poll = {}

                    # driver output is a flat "KEY=value key=value ..." list
                    pm_ret = pm_ret.split(' ').map do |x|
                        x.split('=', 2)
                    end

                    pm_ret.each do |key, value|
                        poll[key.upcase] = value
                    end

                    poll
                # BUG FIX: was `rescue StandarError` (typo), which would
                # raise NameError instead of retrying on parse failures
                rescue StandardError
                    raise OneProvisionLoopException
                end
            end
        end

        # Resets or reboots the host
        #
        # @param hard [Boolean] True to reset, false to reboot
        def reset(hard)
            if hard
                reset_reboot('reset', 'Resetting')

                # a hard reset may change the public IP; re-read it
                name = poll

                @one.rename(name)
            else
                reset_reboot('reboot', 'Rebooting')
            end
        end

        # Resets or reboots the host
        #
        # @param action  [String] Action to execute (reset, reboot)
        # @param message [String] Message for logging
        def reset_reboot(action, message)
            pm_mad = @one['TEMPLATE/PM_MAD']
            deploy_id = @one['TEMPLATE/PROVISION/DEPLOY_ID']
            name = @one.name

            check

            OneProvisionLogger.debug("Offlining OpenNebula host: #{@one.id}")

            @one.offline

            OneProvisionLogger.info("#{message} host: #{@one.id}")

            Driver.pm_driver_action(pm_mad, action, [deploy_id, name], @one)

            OneProvisionLogger.debug("Enabling OpenNebula host: #{@one.id}")

            @one.info
            @one.enable
        end

    end

end

View File

@ -0,0 +1,226 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'ansible'
require 'cluster'
require 'driver'
require 'host'
require 'datastore'
require 'provision'
require 'utils'
require 'vnet'
require 'logger'
require 'singleton'
module OneProvision
# Singleton OneProvision Logger
class OneProvisionLogger
include Singleton
attr_reader :logger
# Class constructor
def initialize
@logger = Logger.new(STDERR)
end
# Gets the logger
#
# @param options [Key-Value Object] CLI options
def self.get_logger(options)
format = '%Y-%m-%d %H:%M:%S'
instance.logger.formatter = proc do |severity, datetime, _p, msg|
"#{datetime.strftime(format)} #{severity.ljust(5)} : #{msg}\n"
end
if options.key? :debug
instance.logger.level = Logger::DEBUG
elsif options.key? :verbose
instance.logger.level = Logger::INFO
else
instance.logger.level = Logger::UNKNOWN
end
end
# Shows a debug message
#
# @param msg [String] Message to show
def self.debug(msg)
instance.logger.debug(msg)
end
# Shows an error message
#
# @param msg [String] Message to show
def self.error(msg)
instance.logger.error(msg)
end
# Shows an info message
#
# @param msg [String] Message to show
def self.info(msg)
instance.logger.info(msg)
end
# Shows a warning message
#
# @param msg [String] Message to show
def self.warn(msg)
instance.logger.warn(msg)
end
end
# Singleton running mode
class Mode
include Singleton
attr_reader :run_mode
# Class constructor
def initialize
@run_mode = {}
end
# Gets running mode
#
# @param options [Key-Value Object] CLI options
def self.get_run_mode(options)
if options.key? :batch
instance.run_mode[:mode] = :batch
else
instance.run_mode[:mode] = RUN_MODE_DEFAULT
end
if options.key? :fail_cleanup
instance.run_mode[:fail_choice] = :cleanup
elsif options.key? :fail_retry
instance.run_mode[:fail_choice] = :retry
instance.run_mode[:max_retries] = options[:fail_retry].to_i
elsif options.key? :fail_skip
instance.run_mode[:fail_choice] = :skip
elsif options.key? :fail_quit
instance.run_mode[:fail_choice] = :quit
else
instance.run_mode[:fail_choice] = FAIL_CHOICE_DEFAULT
instance.run_mode[:max_retries] = MAX_RETRIES_DEFAULT
end
instance.run_mode[:cleanup] = CLEANUP_DEFAULT
end
# Gets cleanup value
#
# @return [Boolean] Cleanup
def self.cleanup
instance.run_mode[:cleanup]
end
# Gets fail choice value
#
# @return [Sym] Choice value
def self.fail_choice
instance.run_mode[:fail_choice]
end
# Gets max retries value
#
# @return [Integer] Maximun retries
def self.max_retries
instance.run_mode[:max_retries]
end
# Gets mode value
#
# @return [Sym] Mode value
def self.mode
instance.run_mode[:mode]
end
# Sets cleanup value
#
# @param value [Boolean] New cleanup value
def self.new_cleanup(value)
instance.run_mode[:cleanup] = value
end
end
# Singleton options
class Options
include Singleton
attr_reader :run_options
# Class constructor
def initialize
@run_options = {}
end
# Gets options
#
# @param options [Key-Value Object] CLI Options
def self.get_run_options(options)
if options.key? :ping_timeout
instance.run_options[:ping_timeout] = options[:ping_timeout]
else
instance.run_options[:ping_timeout] = PING_TIMEOUT_DEFAULT
end
if options.key? :ping_retries
instance.run_options[:ping_retries] = options[:ping_retries]
else
instance.run_options[:ping_retries] = PING_RETRIES_DEFAULT
end
if options.key? :threads
instance.run_options[:threads] = options[:threads]
else
instance.run_options[:threads] = THREADS_DEFAULT
end
end
# Gets ping retries value
#
# @return [Integer] Ping retries value
def self.ping_retries
instance.run_options[:ping_retries]
end
# Gets ping timeout value
#
# @return [Integer] Ping timeout value
def self.ping_timeout
instance.run_options[:ping_timeout]
end
# Gets number of threads
#
# @return [Integer] Number of threads
def self.threads
instance.run_options[:threads]
end
end
end

View File

@ -0,0 +1,350 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'base64'
module OneProvision

    # Provision: a set of cluster/hosts/datastores/vnets deployed together
    # and tagged in their templates with a shared PROVISION_ID
    class Provision

        attr_reader :id, :name, :clusters, :hosts, :datastores, :vnets

        # Class constructor
        #
        # @param id   [String] ID of the provision
        # @param name [String] Name of the provision
        def initialize(id, name = nil)
            @id = id
            @name = name
            @clusters = []
            @hosts = []
            @datastores = []
            @vnets = []
        end

        # Checks if the PROVISION exists
        #
        # @return [Boolean] True if exists, false if not
        def exists
            resource = Cluster.new
            pool = resource.pool
            pool.info

            pool.each do |c|
                return true if c['TEMPLATE/PROVISION/PROVISION_ID'] == @id
            end

            false
        end

        # Retrieves all the PROVISION objects
        def refresh
            Utils.fail('Provision not found.') unless exists

            @clusters = Cluster.new.get(@id)
            @datastores = Datastore.new.get(@id)
            @hosts = Host.new.get(@id)
            @vnets = Vnet.new.get(@id)
        end

        # TODO: rename delete_all -> cleanup
        #
        # Deletes the PROVISION
        def delete
            Utils.fail('Provision not found.') unless exists

            OneProvisionLogger.info("Deleting provision #{@id}")

            # offline and (optionally) clean all hosts
            OneProvisionLogger.debug('Offlining OpenNebula hosts')

            @hosts.each do |h|
                host = Host.new(h['ID'])

                Driver.retry_loop 'Failed to offline host' do
                    rc = h.offline

                    if OpenNebula.is_error?(rc)
                        raise OneProvisionLoopException, rc.message
                    end

                    rc = h.info

                    if OpenNebula.is_error?(rc)
                        raise OneProvisionLoopException, rc.message
                    end
                end

                if host.running_vms?
                    Utils.fail("Provision with running VMs can't be deleted")
                end
            end

            # undeploy hosts
            OneProvisionLogger.info('Undeploying hosts')

            threads = []

            @hosts.each do |host|
                host = Host.new(host['ID'])

                if Options.threads > 1
                    # bound the number of concurrent delete threads
                    while Thread.list.count > Options.threads
                        threads.map do |thread|
                            thread.join(5)
                        end
                    end

                    threads << Thread.new do
                        host.delete
                    end
                else
                    host.delete
                end
            end

            threads.map(&:join)

            # delete all other deployed objects
            OneProvisionLogger.info('Deleting provision objects')

            %w[datastores vnets clusters].each do |section|
                send(section).each do |obj|
                    msg = "#{section.chomp('s')} #{obj['ID']}"

                    Driver.retry_loop "Failed to delete #{msg}" do
                        OneProvisionLogger.debug("Deleting OpenNebula #{msg}")

                        # Fix ubuntu 14.04 broken pipe
                        obj.info

                        rc = obj.delete

                        if OpenNebula.is_error?(rc)
                            raise OneProvisionLoopException, rc.message
                        end
                    end
                end
            end
        end

        # Returns the binding of the class, used to evaluate ERB
        # expressions in the context of this provision
        def _binding
            binding
        end

        # Checks the status of the PROVISION
        #
        # @return [String]
        #   - Pending: if the HOSTS are being configured
        #   - Error: if something went wrong
        #   - Configured: if HOSTS are configured
        def status
            @hosts.each do |h|
                h.info

                status = h['TEMPLATE/PROVISION_CONFIGURATION_STATUS']

                return status unless status.nil?
            end

            'configured'
        end

        # Creates a new PROVISION
        #
        # @param config [String] Path to the configuration file
        def create(config)
            Driver.retry_loop 'Failed to create provision' do
                Ansible.check_ansible_version

                # read provision file
                cfg = Utils.create_config(Utils.read_config(config))

                @name = cfg['name']

                cluster = nil
                cid = nil
                cname = cfg['cluster']['name']

                OneProvisionLogger.info('Creating provision objects')

                Driver.retry_loop 'Failed to create cluster' do
                    msg = "Creating OpenNebula cluster: #{cname}"

                    OneProvisionLogger.debug(msg)

                    # create new cluster
                    cluster = Cluster.new
                    cluster.create(cfg['cluster'], @id)
                    cluster = cluster.one
                    cid = cluster.id

                    @clusters << cluster

                    OneProvisionLogger.debug("cluster created with ID: #{cid}")
                end

                # from here on, a failure can be cleaned up by deleting
                # the partially created provision
                Mode.new_cleanup(true)

                %w[datastores networks].each do |r|
                    next if cfg[r].nil?

                    cfg[r].each do |x|
                        begin
                            driver = cfg['defaults']['provision']['driver']
                            msg = "#{r}: #{x['name']}"

                            Driver.retry_loop "Failed to create #{msg}" do
                                OneProvisionLogger
                                    .debug("Creating OpenNebula #{msg}")

                                erb = Utils.evaluate_erb(self, x)

                                if r == 'datastores'
                                    datastore = Datastore.new
                                    datastore.create(cid.to_i, erb, driver, @id)
                                    @datastores << datastore.one
                                else
                                    vnet = Vnet.new
                                    vnet.create(cid.to_i, erb, driver, @id)
                                    @vnets << vnet.one
                                end

                                r = 'vnets' if r == 'networks'

                                rid = instance_variable_get("@#{r}").last['ID']

                                OneProvisionLogger
                                    .debug("#{r} created with ID: #{rid}")
                            end
                        rescue OneProvisionCleanupException
                            refresh

                            delete

                            # BUG FIX: was `exit - 1`, which Ruby parses as
                            # `exit() - 1` and terminates with status 0
                            exit(-1)
                        end
                    end
                end

                if cfg['hosts'].nil?
                    puts "ID: #{@id}"

                    exit 0
                end

                begin
                    cfg['hosts'].each do |h|
                        erb = Utils.evaluate_erb(self, h)
                        dfile = Utils.create_deployment_file(erb, @id)

                        playbook = cfg['playbook']

                        host = Host.new
                        host = host.create(dfile.to_xml, cid.to_i, playbook)

                        @hosts << host

                        host.offline
                    end

                    # ask user to be patient, mandatory for now
                    STDERR.puts 'WARNING: This operation can ' \
                        'take tens of minutes. Please be patient.'

                    OneProvisionLogger.info('Deploying')

                    deploy_ids = []
                    threads = []
                    processed_hosts = 0

                    @hosts.each do |host|
                        processed_hosts += 1

                        host.info

                        # deploy host
                        pm_mad = host['TEMPLATE/PM_MAD']
                        id = host['ID']

                        OneProvisionLogger.debug("Deploying host: #{id}")

                        deploy_file = Tempfile.new("xmlDeploy#{id}")
                        deploy_file.close

                        Driver.write_file_log(deploy_file.path, host.to_xml)

                        if Options.threads > 1
                            threads << Thread.new do
                                Thread.current[:output] =
                                    Driver.pm_driver_action(pm_mad,
                                                            'deploy',
                                                            [deploy_file.path,
                                                             'TODO'])
                            end

                            # drain the pool when it is full or this is
                            # the last host
                            if threads.size == Options.threads ||
                               processed_hosts == @hosts.size
                                threads.map do |thread|
                                    thread.join
                                    deploy_ids << thread[:output]
                                    deploy_file.unlink
                                end

                                threads.clear
                            end
                        else
                            deploy_ids << Driver
                                          .pm_driver_action(pm_mad,
                                                            'deploy',
                                                            [deploy_file.path,
                                                             'TODO'])
                        end
                    end

                    if deploy_ids.nil? || deploy_ids.empty?
                        Utils.fail('Deployment failed, no ID got from driver')
                    end

                    OneProvisionLogger.info('Monitoring hosts')

                    @hosts.each do |h|
                        h.add_element('//TEMPLATE/PROVISION',
                                      'DEPLOY_ID' => deploy_ids.shift.strip)
                        h.update(h.template_str)

                        host = Host.new(h['ID'])

                        name = host.poll

                        h.rename(name)
                    end

                    Ansible.configure(@hosts)

                    puts "ID: #{@id}"
                rescue OneProvisionCleanupException
                    delete

                    exit(-1)
                end
            end
        end

        # Configures the PROVISION
        #
        # @param force [Boolean] Force the configuration if the PROVISION
        #                        is already configured
        def configure(force)
            Ansible.configure(@hosts, force)
        end

    end

end

View File

@ -0,0 +1,125 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
module OneProvision

    # Base class wrapping an OpenNebula object (@one) and its pool (@pool)
    class Resource

        # @one  Stores the ONE object
        # @pool Stores the ONE pool
        attr_reader :one, :pool

        # Creates ONE and Pool objects
        #
        # @param type [String] Resource type [Cluster, Datastore, Host, Vnet]
        # @param id   [Integer] Id of the resource
        def initialize(type, id = nil)
            client = OpenNebula::Client.new

            @type = type

            case type
            when 'Cluster'
                if id
                    @one = OpenNebula::Cluster.new_with_id(id, client)
                    @one.info
                else
                    @one = OpenNebula::Cluster
                           .new(OpenNebula::Cluster.build_xml, client)
                end

                @pool = OpenNebula::ClusterPool.new(client)
            when 'Datastore'
                @one = OpenNebula::Datastore
                       .new(OpenNebula::Datastore.build_xml, client)
                @pool = OpenNebula::DatastorePool.new(client)
            when 'Host'
                if id
                    @one = OpenNebula::Host.new_with_id(id, client)
                    @one.info
                else
                    @one = OpenNebula::Host
                           .new(OpenNebula::Host.build_xml, client)
                end

                @pool = OpenNebula::HostPool.new(client)
            when 'Vnet'
                @one = OpenNebula::VirtualNetwork
                       .new(OpenNebula::VirtualNetwork.build_xml, client)
                @pool = OpenNebula::VirtualNetworkPool.new(client)
            end
        end

        # Allocates a new resource in OpenNebula
        #
        # @param cluster  [Integer]  ID of the cluster to allocate the resource
        # @param template [Template] Template of the resource
        def create(cluster, template)
            if @type == 'Cluster'
                rc = @one.allocate(cluster)
            else
                rc = @one.allocate(template, cluster)
            end

            Utils.fail(rc.message) if OpenNebula.is_error?(rc)

            # clusters get their template applied after allocation
            @one.update(template, true) if @type == 'Cluster'

            @one.info
        end

        # Gets all resources or just provision resources
        #
        # @param id [Integer] ID of the provision (nil for all resources)
        #
        # @return [Array] resources of the given provision when id is set,
        #                 all provisioned resources otherwise
        def get(id = nil)
            rc = @pool.info

            Utils.fail(rc.message) if OpenNebula.is_error?(rc)

            if id
                @pool.select do |resource|
                    resource['TEMPLATE/PROVISION/PROVISION_ID'] == id
                end
            else
                @pool.reject do |resource|
                    resource['TEMPLATE/PROVISION/PROVISION_ID'].nil?
                end
            end
        end

        # Deletes the ONE object
        def delete
            @one.info
            @one.delete
        end

    end

end

View File

@ -0,0 +1,440 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'yaml'
require 'erb'
require 'nokogiri'
require 'open3'
require 'tempfile'
require 'highline'
require 'highline/import'
# Template keys whose values must be stored encrypted in OpenNebula
# (frozen: mutable constants are a RuboCop offense and a shared-state risk)
ENCRYPT_VALUES = %w[PACKET_TOKEN EC2_SECRET EC2_ACCESS].freeze

# Raised when a failed operation should trigger a cleanup of the
# partially created provision
class OneProvisionCleanupException < RuntimeError
end

# Raised inside a retry loop; carries the error text to display
class OneProvisionLoopException < RuntimeError

    attr_reader :text

    # @param text [String] Error message for the failed iteration
    def initialize(text = nil)
        @text = text
    end

end
module OneProvision

    # Utils: stateless helpers shared by the oneprovision tooling
    # (configuration parsing/merging, ERB evaluation, deployment file
    # generation, template serialization and value encryption)
    module Utils

        class << self

            # Markers wrapping driver error messages in command output
            ERROR_OPEN = 'ERROR MESSAGE --8<------'
            ERROR_CLOSE = 'ERROR MESSAGE ------>8--'

            # Checks if oned is running
            #
            # NOTE(review): this returns the result of
            # OpenNebula.is_error?, i.e. it is TRUE when the configuration
            # could NOT be retrieved (oned unreachable). The name suggests
            # the opposite meaning -- confirm against callers.
            def one_running?
                system = OpenNebula::System.new(OpenNebula::Client.new)
                config = system.get_configuration

                OpenNebula.is_error?(config)
            end

            # Validates the configuration file
            #
            # @param config [String] Path to the configuration file
            # @param dump [Boolean] True to show the result in the console
            def validate_configuration(config, dump)
                config = read_config(config)

                # drop top-level sections that resolved to nil
                config = config.delete_if {|_k, v| v.nil? }

                check_config(config)

                puts config.to_yaml if dump
            end

            # Checks configuration file for mandatory keys; any missing
            # key aborts the process via Utils.fail
            #
            # @param config [Hash] Configuration content
            def check_config(config)
                name = config['name']
                version = config['version']
                defaults = config['defaults']['provision']

                if name.nil? && version == 2
                    Utils.fail('There is an error in your configuration ' \
                               'file: no name given')
                end

                if defaults.nil?
                    Utils.fail('There is an error in your configuration' \
                               'file: defaults provision is missing')
                end

                if config['hosts']
                    config['hosts'].each_with_index do |h, i|
                        im = h['im_mad']
                        vm = h['vm_mad']
                        name = h['provision']['hostname']

                        if im.nil?
                            Utils.fail('There is an error in your ' \
                                       'configuration file: there is ' \
                                       "no im_mad in host #{i + 1}")
                        end

                        if vm.nil?
                            Utils.fail('There is an error in your ' \
                                       'configuration file: there is ' \
                                       "no vm_mad in host #{i + 1}")
                        end

                        if name.nil?
                            Utils.fail('There is an error in your ' \
                                       'configuration file: there is ' \
                                       "no hostname in host #{i + 1}")
                        end

                        next
                    end
                end

                return unless config['datastores']

                config['datastores'].each_with_index do |d, i|
                    if d['tm_mad'].nil?
                        Utils.fail('There is an error in your ' \
                                   'configuration file: there is '\
                                   "no tm_mad in datastore #{i + 1}")
                    end

                    next
                end
            end

            # Creates configuration: merges the per-host sections with the
            # global defaults and propagates the provision driver to
            # datastores, networks and the cluster
            #
            # @param yaml [Hash] Configuration content
            #
            # @return [Hash] Configuration for drivers
            def create_config(yaml)
                begin
                    check_config(yaml)

                    cluster = yaml['cluster']

                    # fall back to a cluster named after the provision
                    yaml['cluster'] = { 'name' => yaml['name'] } if cluster.nil?

                    # TODO: schema check
                    if yaml['hosts']
                        yaml['hosts'] = yaml['hosts'].map do |host|
                            sections = %w[connection provision configuration]
                            sections.each do |section|
                                data = CONFIG_DEFAULTS[section] || {}
                                defaults = yaml['defaults'][section]
                                h_sec = host[section]

                                # merge defaults with globals
                                # and device specific params
                                data.merge!(defaults) unless defaults.nil?
                                data.merge!(h_sec) unless h_sec.nil?

                                host[section] = data
                            end

                            host
                        end
                    end

                    %w[datastores networks].each do |r|
                        next unless yaml[r]

                        yaml[r] = yaml[r].map do |x|
                            x['provision'] = yaml['defaults']['provision']
                            x
                        end
                    end

                    yaml['cluster']['provision'] = yaml['defaults']['provision']
                rescue StandardError => e
                    Utils.fail("Failed to read configuration: #{e}")
                end

                yaml
            end

            # Reads configuration content; recursively merges files
            # referenced through the 'extends' key
            #
            # @param name [String] Path to the configuration file
            #
            # @return [Hash] Configuration content
            def read_config(name)
                begin
                    yaml = YAML.load_file(name)
                rescue StandardError => e
                    Utils.fail("Failed to read template: #{e}")
                end

                return yaml unless yaml['extends']

                base = read_config(yaml['extends'])

                yaml.delete('extends')
                base['defaults'] ||= {}
                yaml['defaults'] ||= {}

                # replace scalars or append array from child YAML
                yaml.each do |key, value|
                    next if key == 'defaults'

                    if (value.is_a? Array) && (base[key].is_a? Array)
                        base[key].concat(value)
                    else
                        base[key] = value
                    end
                end

                # merge each defaults section separately
                %w[connection provision configuration].each do |section|
                    base['defaults'][section] ||= {}
                    yaml['defaults'][section] ||= {}
                    defaults = yaml['defaults'][section]

                    base['defaults'][section].merge!(defaults)
                end

                base
            end

            # Gets the value of an ERB expression
            #
            # @param provision [OneProvision::Provision] Provision object
            # @param value [String] Value to evaluate
            #
            # @return [String] Evaluated value
            #
            # @raise [RuntimeError] when the expression evaluates to an
            #   empty string
            def get_erb_value(provision, value)
                template = ERB.new value
                ret = template.result(provision._binding)

                raise "#{value} not found." if ret.empty?

                ret
            end

            # Evaluates ERB values, walking nested hashes and arrays
            #
            # @param provision [OneProvision::Provision] Provision object
            # @param root [Hash] Hash with values to evaluate
            #
            # @return [Hash] Hash with evaluated values
            def evaluate_erb(provision, root)
                if root.is_a? Hash
                    root.each_pair do |key, value|
                        if value.is_a? Array
                            root[key] = value.map do |x|
                                evaluate_erb(provision, x)
                            end
                        elsif value.is_a? Hash
                            root[key] = evaluate_erb(provision, value)
                        elsif value.is_a? String
                            # only strings containing an ERB tag are evaluated
                            if value =~ /<%= /
                                root[key] = get_erb_value(provision, value)
                            end
                        end
                    end
                else
                    root = root.map {|x| evaluate_erb(provision, x) }
                end

                root
            end

            # Checks if the file can be read; returns the argument
            # unchanged when it is not a readable path
            #
            # @param name [String] Path to file to read
            def try_read_file(name)
                File.read(name).strip
            rescue StandardError
                name
            end

            # Creates the host deployment file
            #
            # @param host [Hash] Hash with host information
            # @param provision_id [String] ID of the provision
            #
            # @return [Nokogiri::XML] XML with the host information
            def create_deployment_file(host, provision_id)
                ssh_key = try_read_file(host['connection']['public_key'])
                config = Base64.strict_encode64(host['configuration'].to_yaml)

                Nokogiri::XML::Builder.new do |xml|
                    xml.HOST do
                        # random name; replaced by the public IP after deploy
                        xml.NAME "provision-#{SecureRandom.hex(24)}"
                        xml.TEMPLATE do
                            xml.IM_MAD host['im_mad']
                            xml.VM_MAD host['vm_mad']
                            xml.PM_MAD host['provision']['driver']
                            xml.PROVISION do
                                host['provision'].each do |key, value|
                                    if key != 'driver'
                                        # sensitive values are encrypted
                                        encrypt = encrypt(key.upcase, value)
                                        xml.send(key.upcase, encrypt)
                                    end
                                end
                                xml.send('PROVISION_ID', provision_id)
                            end
                            if host['configuration']
                                xml.PROVISION_CONFIGURATION_BASE64 config
                            end
                            xml.PROVISION_CONFIGURATION_STATUS 'pending'
                            if host['connection']
                                xml.PROVISION_CONNECTION do
                                    host['connection'].each do |key, value|
                                        xml.send(key.upcase, value)
                                    end
                                end
                            end
                            if host['connection']
                                xml.CONTEXT do
                                    if host['connection']['public_key']
                                        xml.SSH_PUBLIC_KEY ssh_key
                                    end
                                end
                            end
                        end
                    end
                end.doc.root
            end

            # Shows an error message and exits with fail code
            #
            # @param text [String] Error message
            # @param code [Integer] Error code
            def fail(text, code = -1)
                STDERR.puts "ERROR: #{text}"

                exit(code)
            end

            # Gets error message delimited by ERROR_OPEN/ERROR_CLOSE
            #
            # @param text [String] Text with error message inside
            #
            # @return [String] Error message, '-' when none was found
            def get_error_message(text)
                msg = '-'

                if text
                    tmp = text.scan(/^#{ERROR_OPEN}\n(.*?)#{ERROR_CLOSE}$/m)
                    msg = tmp[0].join(' ').strip if tmp[0]
                end

                msg
            end

            # Converts XML template to string, encrypting sensitive values
            #
            # @param attributes [Hash] XML attributes
            # @param indent [Boolean] True to format indentation
            #
            # @return [String] String with the template information
            def template_like_str(attributes, indent = true)
                if indent
                    ind_enter = "\n"
                    ind_tab = ' '
                else
                    ind_enter = ''
                    ind_tab = ' '
                end

                str = attributes.collect do |key, value|
                    # nil/false values are skipped entirely
                    next unless value

                    str_line = ''

                    if value.class == Array
                        # one KEY=[...] section per array element
                        value.each do |value2|
                            str_line << key.to_s.upcase << '=[' << ind_enter

                            if value2 && value2.class == Hash
                                str_line << value2.collect do |key3, value3|
                                    str = ind_tab + key3.to_s.upcase + '='

                                    if value3
                                        str += "\"#{encrypt(key3.to_s.upcase,
                                                            value3.to_s)}\""
                                    end

                                    str
                                end.compact.join(",\n")
                            end

                            str_line << "\n]\n"
                        end
                    elsif value.class == Hash
                        str_line << key.to_s.upcase << '=[' << ind_enter

                        str_line << value.collect do |key3, value3|
                            str = ind_tab + key3.to_s.upcase + '='

                            if value3
                                str += "\"#{encrypt(key3.to_s.upcase,
                                                    value3.to_s)}\""
                            end

                            str
                        end.compact.join(",\n")

                        str_line << "\n]\n"
                    else
                        # scalar: KEY="value" (adjacent string literals
                        # are concatenated by Ruby)
                        str_line << key.to_s.upcase << '=' \
                            "\"#{encrypt(key.to_s.upcase, value.to_s)}\""
                    end

                    str_line
                end.compact.join("\n")

                str
            end

            # Encrypts a value with oned's ONE_KEY when the key is listed
            # in ENCRYPT_VALUES; returns the value unchanged otherwise
            #
            # @param key [String] Key to encrypt
            # @param value [String] Value to encrypt
            #
            # @return [String] Encrypted value
            def encrypt(key, value)
                if ENCRYPT_VALUES.include? key
                    system = OpenNebula::System.new(OpenNebula::Client.new)
                    config = system.get_configuration
                    token = config['ONE_KEY']

                    OpenNebula.encrypt({ :value => value }, token)[:value]
                else
                    value
                end
            end

        end

    end

end

View File

@ -0,0 +1,46 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'resource'
module OneProvision

    # Vnet: wraps a provisioned OpenNebula virtual network
    class Vnet < Resource

        # Class constructor
        def initialize
            super 'Vnet'
        end

        # Creates a new VNET in OpenNebula
        #
        # @param cluster_id   [Integer] ID of the CLUSTER where is the VNET
        # @param template     [String]  Template of the VNET
        # @param pm_mad       [String]  Provision Manager Driver
        # @param provision_id [String]  ID of the provision
        def create(cluster_id, template, pm_mad, provision_id)
            # tag the network with the provision it belongs to
            template['provision']['provision_id'] = provision_id

            vnet_template = Utils.template_like_str(template)
            vnet_template << "PM_MAD=\"#{pm_mad}\"\n"

            super(cluster_id, vnet_template)
        end

    end

end