1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-02-02 09:47:00 +03:00

F #4302: vCenter monitoring service

co-authored by <rsmontero@opennebula.io> and <amoya@opennebula.io>
This commit is contained in:
Tino Vazquez 2020-05-25 18:47:25 +02:00
parent c0ce15a224
commit ace08b4d59
No known key found for this signature in database
GPG Key ID: 2FE9C32E94AEABBE
27 changed files with 1359 additions and 733 deletions

View File

@ -311,11 +311,6 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/monitor \
$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/status \
$VAR_LOCATION/remotes/im/vcenter.d \
$VAR_LOCATION/remotes/im/vcenter-probes.d/host/beacon \
$VAR_LOCATION/remotes/im/vcenter-probes.d/host/monitor \
$VAR_LOCATION/remotes/im/vcenter-probes.d/host/system \
$VAR_LOCATION/remotes/im/vcenter-probes.d/vm/monitor \
$VAR_LOCATION/remotes/im/vcenter-probes.d/vm/status \
$VAR_LOCATION/remotes/im/ec2.d \
$VAR_LOCATION/remotes/im/ec2-probes.d/host/beacon \
$VAR_LOCATION/remotes/im/ec2-probes.d/host/monitor \
@ -571,11 +566,6 @@ INSTALL_FILES=(
IM_PROBES_FIRECRACKER_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/monitor
IM_PROBES_FIRECRACKER_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/status
IM_PROBES_ETC_FIRECRACKER_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/firecracker-probes.d
IM_PROBES_VCENTER_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/host/beacon
IM_PROBES_VCENTER_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/host/monitor
IM_PROBES_VCENTER_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/host/system
IM_PROBES_VCENTER_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/vm/monitor
IM_PROBES_VCENTER_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/vm/status
AUTH_SSH_FILES:$VAR_LOCATION/remotes/auth/ssh
AUTH_X509_FILES:$VAR_LOCATION/remotes/auth/x509
AUTH_LDAP_FILES:$VAR_LOCATION/remotes/auth/ldap
@ -1229,8 +1219,9 @@ IM_PROBES_LIB_FILES="\
src/im_mad/remotes/lib/firecracker.rb\
src/im_mad/remotes/lib/numa_common.rb \
src/im_mad/remotes/lib/probe_db.rb \
src/im_mad/remotes/lib/vcenter.rb \
src/im_mad/remotes/lib/nsx.rb \
src/im_mad/remotes/lib/vcenter_monitor.rb \
src/im_mad/remotes/lib/vcenter_cluster.rb \
src/im_mad/remotes/lib/monitord_client.rb \
src/im_mad/remotes/lib/domain.rb \
src/im_mad/remotes/lib/process_list.rb"
@ -1357,8 +1348,7 @@ IM_PROBES_FIRECRACKER_VM_STATUS_FILES="\
IM_PROBES_ETC_FIRECRACKER_PROBES_FILES="src/im_mad/remotes/lib/probe_db.conf"
IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/monitord-client.rb \
src/im_mad/remotes/vcenter.d/monitord-client_control.sh"
IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/monitord-client_control.sh"
# EC2 monitord-client
IM_PROBES_EC2_FILES="\
@ -1448,28 +1438,6 @@ IM_PROBES_PACKET_VM_STATUS_FILES="\
IM_PROBES_VERSION="src/im_mad/remotes/VERSION"
# VCENTER PROBES
IM_PROBES_VCENTER_FILES="\
src/im_mad/remotes/vcenter.d/monitord-client_control.sh \
src/im_mad/remotes/vcenter.d/monitord-client.rb"
IM_PROBES_VCENTER_HOST_BEACON_FILES="\
src/im_mad/remotes/vcenter-probes.d/host/beacon/date.sh \
src/im_mad/remotes/vcenter-probes.d/host/beacon/monitord-client-shepherd.sh"
IM_PROBES_VCENTER_HOST_MONITOR_FILES="\
src/im_mad/remotes/vcenter-probes.d/host/monitor/monitor.rb"
IM_PROBES_VCENTER_HOST_SYSTEM_FILES="\
src/im_mad/remotes/vcenter-probes.d/host/system/nsx.rb \
src/im_mad/remotes/vcenter-probes.d/host/system/vcenter_cluster.rb"
IM_PROBES_VCENTER_VM_MONITOR_FILES="\
src/im_mad/remotes/vcenter-probes.d/vms/monitor/monitor.rb"
IM_PROBES_VCENTER_VM_STATUS_FILES="\
src/im_mad/remotes/vcenter-probes.d/vms/status/state.rb"
#-------------------------------------------------------------------------------
# Auth Manager drivers to be installed under $REMOTES_LOCATION/auth
#-------------------------------------------------------------------------------

View File

@ -213,14 +213,25 @@ void InformationManager::_undefined(unique_ptr<Message<OpenNebulaMessages>> msg)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* HOST_STATE - <state_str> <optional_description> */
void InformationManager::_host_state(unique_ptr<Message<OpenNebulaMessages>> msg)
{
NebulaLog::ddebug("InM", "HOST_STATE update from host: " +
to_string(msg->oid()) + ". Host information: " + msg->payload());
string str_state;
string err_message;
istringstream is(msg->payload());
is >> str_state >> ws;
if (is.good())
{
getline(is, err_message);
}
string str_state = msg->payload();
Host::HostState new_state;
if (Host::str_to_state(str_state, new_state) != 0)
@ -245,10 +256,10 @@ void InformationManager::_host_state(unique_ptr<Message<OpenNebulaMessages>> msg
if (host->get_state() != new_state)
{
host->set_state(new_state);
if ( new_state == Host::ERROR )
{
host->error(err_message);
LifeCycleManager* lcm = Nebula::instance().get_lcm();
for (const auto& vmid : host->get_vm_ids())
@ -256,6 +267,10 @@ void InformationManager::_host_state(unique_ptr<Message<OpenNebulaMessages>> msg
lcm->trigger(LCMAction::MONITOR_DONE, vmid);
}
}
else
{
host->set_state(new_state);
}
hpool->update(host);
}

View File

@ -15,12 +15,7 @@
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'socket'
require 'base64'
require 'resolv'
require 'ipaddr'
require 'zlib'
require 'yaml'
require 'open3'
require 'openssl'
@ -28,87 +23,7 @@ require 'openssl'
require 'rexml/document'
require_relative '../lib/probe_db'
#-------------------------------------------------------------------------------
# This class represents a monitord client. It handles udp and tcp connections
# and send update messages to monitord
#-------------------------------------------------------------------------------
class MonitorClient

    # Message types understood by monitord.
    # Defined in src/monitor/include/MonitorDriverMessages.h
    MESSAGE_TYPES = %w[MONITOR_VM MONITOR_HOST SYSTEM_HOST BEACON_HOST STATE_VM
                       START_MONITOR STOP_MONITOR].freeze

    MESSAGE_STATUS = { true =>'SUCCESS', false => 'FAILURE' }.freeze

    # For every message type generate a "<type>_udp" sender that pushes the
    # message as a datagram through the shared UDP socket.
    MESSAGE_TYPES.each do |mt|
        define_method("#{mt}_udp".downcase.to_sym) do |rc, payload|
            msg = "#{mt} #{MESSAGE_STATUS[rc]} #{@hostid} #{pack(payload)}"
            @socket_udp.send(msg, 0, @host, @port)
        end
    end

    # ... and a "<type>_tcp" sender that opens a short-lived TCP connection
    # per message.
    MESSAGE_TYPES.each do |mt|
        define_method("#{mt}_tcp".downcase.to_sym) do |rc, payload|
            msg = "#{mt} #{MESSAGE_STATUS[rc]} #{@hostid} #{pack(payload)}"

            socket_tcp = TCPSocket.new(@host, @port)
            socket_tcp.send(msg, 0)
            socket_tcp.close
        end
    end

    # Options to create a monitord client
    # :host [:String] to send the messages to
    # :port [:String] of monitord server
    # :hostid [:String] OpenNebula ID of this host
    # :pubkey [OpenSSL::PKey::RSA, nil] public key to encrypt messages;
    #   nil or an empty string disables encryption
    def initialize(server, port, id, opt = {})
        @opts = {
            :pubkey => ''
        }.merge opt

        addr = Socket.getaddrinfo(server, port)[0]

        @family = addr[0]
        @host   = addr[3]
        @port   = addr[1]

        @socket_udp = UDPSocket.new(@family)

        @pubkey = @opts[:pubkey]
        @hostid = id
    end

    private

    # Formats message payload to send over the wire: optional RSA
    # encryption, zlib compression and Base64 encoding.
    def pack(data)
        # BUGFIX: the previous guard `if @pubkey` was truthy for the default
        # empty-string pubkey and crashed with NoMethodError on `@pubkey.n`.
        # Encrypt only when we were actually handed a key object.
        if @pubkey.respond_to?(:public_encrypt)
            # RSA PKCS#1 v1.5 padding costs 11 bytes per block
            block_size = @pubkey.n.num_bytes - 11

            edata = ''
            index = 0

            loop do
                break if index >= data.length

                edata << @pubkey.public_encrypt(data[index, block_size])
                index += block_size
            end

            data = edata
        end

        zdata  = Zlib::Deflate.deflate(data, Zlib::BEST_COMPRESSION)
        data64 = Base64.strict_encode64(zdata)

        data64
    end

end
require_relative '../lib/monitord_client'
#-------------------------------------------------------------------------------
# This class wraps the execution of a probe directory and sends data to

View File

@ -0,0 +1,104 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'socket'
require 'base64'
require 'zlib'
require 'openssl'
#-------------------------------------------------------------------------------
# This class represents a monitord client. It handles udp and tcp connections
# and send update messages to monitord
#-------------------------------------------------------------------------------
class MonitorClient

    # Message types understood by monitord.
    # Defined in src/monitor/include/MonitorDriverMessages.h
    MESSAGE_TYPES = %w[MONITOR_VM MONITOR_HOST SYSTEM_HOST BEACON_HOST STATE_VM
                       START_MONITOR STOP_MONITOR].freeze

    MESSAGE_STATUS = { true =>'SUCCESS', false => 'FAILURE' }.freeze

    # For every message type generate two sender methods:
    #   <type>_udp(rc, payload) -> datagram on the connected UDP socket
    #   <type>_tcp(rc, payload) -> short-lived TCP connection per message
    MESSAGE_TYPES.each do |type|
        define_method("#{type}_udp".downcase.to_sym) do |rc, payload|
            @socket_udp.send(wire_message(type, rc, payload), 0)
        end

        define_method("#{type}_tcp".downcase.to_sym) do |rc, payload|
            stream = TCPSocket.new(@host, @port)
            stream.send(wire_message(type, rc, payload), 0)
            stream.close
        end
    end

    # Options to create a monitord client
    # :host [:String] to send the messages to
    # :port [:String] of monitord server
    # :hostid [:String] OpenNebula ID of this host
    # :pubkey [:String] public key to encrypt messages
    def initialize(server, port, id, opt = {})
        @opts = { :pubkey => '' }.merge(opt)

        # getaddrinfo tuple: [family, port, canonical name, numeric address, ...]
        remote_family, remote_port, _name, remote_addr =
            Socket.getaddrinfo(server, port)[0]

        @family = remote_family
        @host   = remote_addr
        @port   = remote_port

        @socket_udp = UDPSocket.new(@family)
        @socket_udp.connect(@host, @port)

        @pubkey = @opts[:pubkey]
        @hostid = id
    end

    private

    # Build the on-wire representation of a monitord message.
    def wire_message(type, rc, payload)
        "#{type} #{MESSAGE_STATUS[rc]} #{@hostid} #{pack(payload)}"
    end

    # Formats message payload to send over the wire: optional RSA
    # encryption, zlib compression and Base64 encoding.
    def pack(data)
        unless @pubkey.nil? || @pubkey.empty?
            # RSA PKCS#1 v1.5 padding costs 11 bytes per block
            block_size = @pubkey.n.num_bytes - 11

            encrypted = ''
            data.chars.each_slice(block_size) do |chunk|
                encrypted << @pubkey.public_encrypt(chunk.join)
            end

            data = encrypted
        end

        Base64.strict_encode64(Zlib::Deflate.deflate(data,
                                                     Zlib::BEST_COMPRESSION))
    end

end

View File

@ -1,187 +0,0 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)
if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION)
GEMS_LOCATION = '/usr/share/one/gems' unless defined?(GEMS_LOCATION)
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' \
unless defined?(RUBY_LIB_LOCATION)
GEMS_LOCATION = ONE_LOCATION + '/share/gems' \
unless defined?(GEMS_LOCATION)
end
if File.directory?(GEMS_LOCATION)
Gem.use_paths(GEMS_LOCATION)
$LOAD_PATH.reject! {|l| l =~ /(vendor|site)_ruby/ }
end
$LOAD_PATH << RUBY_LIB_LOCATION
require 'vcenter_driver'
require 'nsx_driver'
# Gather NSX cluster monitor info
# Gather NSX cluster monitor info
#
# Builds the NSX-related monitoring attributes for an OpenNebula host that
# fronts a vCenter cluster. The NSX client is only created when the host
# template carries a complete NSX configuration (see #nsx_ready?).
class NsxMonitor
# Last connectivity/validation status string, e.g. 'NSX_STATUS = OK\n'
attr_accessor :nsx_status
# @param host_id [Integer] OpenNebula host id with the NSX_* template attrs
def initialize(host_id)
@host_id = host_id
@nsx_client = nil
@nsx_status = ''
# Leave @nsx_client nil when the template is incomplete or the manager
# is unreachable; @nsx_status carries the reason.
return unless nsx_ready?
@nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
end
# Returns the concatenated NSX manager + transport-zone monitor string.
# NOTE(review): #nsx_info reads @vi_client, which is never assigned in
# this class — calling #monitor as-is would raise NoMethodError on nil.
# Confirm whether the caller injects @vi_client; looks like dead code.
def monitor
# NSX info
str_info = ''
str_info << nsx_info
str_info << tz_info
end
# Detect the NSX manager registered in vCenter (NSX-V or NSX-T extension)
# and render NSX_MANAGER/NSX_TYPE/NSX_VERSION/NSX_LABEL attributes.
# Returns '' when no NSX extension is found.
def nsx_info
nsx_info = ''
nsx_obj = {}
# In the future add more than one nsx manager
extension_list = @vi_client.vim.serviceContent
.extensionManager.extensionList
extension_list.each do |ext_list|
if ext_list.key == NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
# NSX-V publishes its endpoint in the extension client URL
url_full = ext_list.client[0].url
url_split = url_full.split('/')
# protocol = "https://"
protocol = url_split[0] + '//'
# ip_port = ip:port
ip_port = url_split[2]
nsx_obj['url'] = protocol + ip_port
nsx_obj['version'] = ext_list.version
nsx_obj['label'] = ext_list.description.label
elsif ext_list.key == NSXDriver::NSXConstants::NSXT_EXTENSION_LIST
nsx_obj['type'] = NSXDriver::NSXConstants::NSXT
nsx_obj['url'] = ext_list.server[0].url
nsx_obj['version'] = ext_list.version
nsx_obj['label'] = ext_list.description.label
else
next
end
end
unless nsx_obj.empty?
nsx_info << "NSX_MANAGER=\"#{nsx_obj['url']}\"\n"
nsx_info << "NSX_TYPE=\"#{nsx_obj['type']}\"\n"
nsx_info << "NSX_VERSION=\"#{nsx_obj['version']}\"\n"
nsx_info << "NSX_LABEL=\"#{nsx_obj['label']}\"\n"
end
nsx_info
end
# Render the NSX transport zones as a 'NSX_TRANSPORT_ZONES = [...]'
# vector. Relies on @one_item being populated by a prior #nsx_ready? call.
# @raise [RuntimeError] on an unknown NSX_TYPE
def tz_info
tz_info = 'NSX_TRANSPORT_ZONES = ['
tz_object = NSXDriver::TransportZone.new_child(@nsx_client)
# NSX request to get Transport Zones
if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
# NSX-V answers with an XML document
tzs = tz_object.tzs
tzs.each do |tz|
tz_info << tz.xpath('name').text << '="'
tz_info << tz.xpath('objectId').text << '",'
end
tz_info.chomp!(',')
elsif @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXT
# NSX-T answers with a JSON-like hash
r = tz_object.tzs
r['results'].each do |tz|
tz_info << tz['display_name'] << '="'
tz_info << tz['id'] << '",'
end
tz_info.chomp!(',')
else
raise "Unknown PortGroup type #{@one_item['TEMPLATE/NSX_TYPE']}"
end
tz_info << ']'
end
# Validate the host template NSX attributes and probe the manager with a
# cheap GET. Sets @nsx_status accordingly and returns true only when a
# connection attempt succeeded. Also caches the host XML in @one_item.
def nsx_ready?
@one_item = VCenterDriver::VIHelper
.one_item(OpenNebula::Host, @host_id.to_i)
# Check if NSX_MANAGER is into the host template
if [nil, ''].include?(@one_item['TEMPLATE/NSX_MANAGER'])
# No NSX configured at all: not an error, just nothing to report
@nsx_status = ''
return false
end
# Check if NSX_USER is into the host template
if [nil, ''].include?(@one_item['TEMPLATE/NSX_USER'])
@nsx_status = "NSX_STATUS = \"Missing NSX_USER\"\n"
return false
end
# Check if NSX_PASSWORD is into the host template
if [nil, ''].include?(@one_item['TEMPLATE/NSX_PASSWORD'])
@nsx_status = "NSX_STATUS = \"Missing NSX_PASSWORD\"\n"
return false
end
# Check if NSX_TYPE is into the host template
if [nil, ''].include?(@one_item['TEMPLATE/NSX_TYPE'])
@nsx_status = "NSX_STATUS = \"Missing NSX_TYPE\"\n"
return false
end
# Try a connection as part of NSX_STATUS
nsx_client = NSXDriver::NSXClient.new_from_id(@host_id.to_i)
if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
# URL to test a connection
url = '/api/2.0/vdn/scopes'
begin
if nsx_client.get(url)
@nsx_status = "NSX_STATUS = OK\n"
true
else
@nsx_status = "NSX_STATUS = \"Response code incorrect\"\n"
false
end
rescue StandardError => e
@nsx_status = 'NSX_STATUS = "Error connecting to ' \
"NSX_MANAGER: #{e.message}\"\n"
false
end
elsif @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXT
# URL to test a connection
url = '/api/v1/transport-zones'
begin
if nsx_client.get(url)
@nsx_status = "NSX_STATUS = OK\n"
true
else
@nsx_status = "NSX_STATUS = \"Response code incorrect\"\n"
false
end
rescue StandardError => e
@nsx_status = 'NSX_STATUS = "Error connecting to '\
"NSX_MANAGER: #{e.message}\"\n"
false
end
end
# NOTE(review): an unknown NSX_TYPE falls through here returning nil
# (falsy) with @nsx_status untouched — confirm this is intended.
end
end

View File

@ -0,0 +1,857 @@
#--------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'opennebula'
require 'vcenter_driver'
require 'logger'
# Strip the common leading indentation — taken from the first line —
# from every line of +s+. Mutates +s+ in place and returns it.
def unindent(s)
    indent_width = s[/\A\s*/].length
    s.gsub!(/^ {#{indent_width}}/, '')
end
################################################################################
# Logger
################################################################################
# Global process logger: INFO and above to STDERR with a compact
# "timestamp [SEVERITY]: message" format (progname is intentionally unused).
$logger = Logger.new(
STDERR,
level: Logger::INFO,
datetime_format: '%Y-%m-%d %H:%M:%S',
formatter: proc { |severity, datetime, progname, msg|
"#{datetime} [#{severity}]: #{msg}\n"
}
)
#-------------------------------------------------------------------------------
# A vCenter cluster, backing one OpenNebula host
# DataModel
# @vic : VCenterDriver::VIClient,
# @cluster: VCenterDriver::ClusterComputeResource,
# @host : OpenNebula::Host
# @last_system_host: Timer for last system host message
# @last_monitor_host: Timer for last monitor host message
# @last_monitor_vm: Timer for last monitor VM
#-------------------------------------------------------------------------------
class Cluster
#---------------------------------------------------------------------------
#Constants
# CLUSTER_PROPERTIES: ESX cluster properties
# RP_PROPERTIES: Resource pool properties
# VM_STATE_PROPERTIES: Properties for VM state changes
# STATE_MAP: vCenter to OpenNebula State mapping
#---------------------------------------------------------------------------
CLUSTER_PROPERTIES = [
'summary.totalCpu',
'summary.numCpuCores',
'summary.effectiveCpu',
'summary.totalMemory',
'summary.effectiveMemory',
'summary.numHosts',
'summary.numEffectiveHosts',
'summary.overallStatus',
'configuration.drsConfig.enabled',
'configuration.dasConfig.enabled'
].freeze
RP_PROPERTIES = [
'config.cpuAllocation.expandableReservation',
'config.cpuAllocation.limit',
'config.cpuAllocation.reservation',
'config.cpuAllocation.shares.level',
'config.cpuAllocation.shares.shares',
'config.memoryAllocation.expandableReservation',
'config.memoryAllocation.limit',
'config.memoryAllocation.reservation',
'config.memoryAllocation.shares.level',
'config.memoryAllocation.shares.shares'
].freeze
VM_STATE_PROPERTIES = [
'name', # VM name
'summary.runtime.powerState' # VM power state
].freeze
VM_SYNC_TIME = 300
VM_MISSING_STATE = 'POWEROFF'.freeze
STATE_MAP = {
'poweredOn' => 'RUNNING',
'suspended' => 'POWEROFF',
'poweredOff' => 'POWEROFF'
}.freeze
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def initialize(hid, onec)
@host = OpenNebula::Host.new_with_id(hid, onec)
@onec = onec
rc = @host.info(true)
if OpenNebula.is_error?(rc)
raise "Could not get hosts information - #{rc.message}"
end
@monitordc = nil
#-----------------------------------------------------------------------
# VI Client Initialization
#-----------------------------------------------------------------------
@vic = VCenterDriver::VIClient.new(connection, hid)
@cluster = VCenterDriver::ClusterComputeResource
.new_from_ref(connection[:ccr], @vic)
#-----------------------------------------------------------------------
# NSX Client Initialization
#-----------------------------------------------------------------------
nsx_manager = @host['TEMPLATE/NSX_MANAGER']
nsx_user = @host['TEMPLATE/NSX_USER']
nsx_password = @host['TEMPLATE/NSX_PASSWORD']
@nsx_type = @host['TEMPLATE/NSX_TYPE']
@nsx_status = ''
@nsx_client = nil
[nsx_manager, nsx_user, nsx_password, @nsx_type].each do |v|
next if !v.nil? && !v.empty?
@nsx_status = 'NSX_STATUS="Check NSX configuration attributes: '\
'NSX_MANAGER, NSX_USER, NSX_PASSWORD or NSX_TYPE"'
break
end
if @nsx_status.empty?
@nsx_client = NSXDriver::NSXClient.new_child(nsx_manager,
nsx_user,
nsx_password,
@nsx_type)
end
end
#---------------------------------------------------------------------------
# HOST PROBES
# - system_host: monitor "static" vCenter cluster information
# - monitor_host: monitor dynamic vCenter cluster information
#---------------------------------------------------------------------------
def system_host
monitor_str = cluster_info
monitor_str += hosts_info
monitor_str += customizations_info
monitor_str += datastore_info
monitor_str += vms_info('wild')
monitor_str += nsx_info
monitor_str += tz_info
monitor_str
end
def monitor_host
cluster_monitoring
end
def beacon_host
vi_client = VCenterDriver::VIClient.new(connection, @host.id)
Time.now.to_s
end
#---------------------------------------------------------------------------
# VM PROBES
# - state_vm: monitor vm state changes
# - monitor_vm: monitor dynamic VM information
# - vcenter_vms_state: retrieves state hash consulting vCenter
#---------------------------------------------------------------------------
def monitor_vm
vms_info('one')
end
# Given a deploy_id (vm_ref in vCenter) and a state, check if a VM
# has changed state compared with the latest known state
def need_state_sync?(vm_ref, state)
!@previous_vm_states[vm_ref] or !@previous_vm_states[vm_ref][:state].eql? state
end
# Return monitoring string containig the state of VMs
def state_vm
current_vm_states = vcenter_vms_state
# Check if we need a full sync
full_sync = false
now = Time.now.to_i
if @last_sync.nil? or ((now - @last_sync) > VM_SYNC_TIME)
full_sync = true
@last_sync = now
end
str_info = ""
str_info << "SYNC_STATE=yes\nMISSING_STATE=#{VM_MISSING_STATE}\n" if full_sync
current_vm_states.each do |_,vm|
vm_ref = vm[:deploy_id]
if full_sync or need_state_sync?(vm_ref, vm[:state])
str_info << "VM = [ ID=\"#{vm[:id]}\", "
str_info << "DEPLOY_ID=\"#{vm[:deploy_id]}\", STATE=\"#{vm[:state]}\" ]\n"
end
end
@previous_vm_states = current_vm_states
str_info
end
# Retrieve all known VM states from vCenter
def vcenter_vms_state
view = @vic.vim
.serviceContent
.viewManager
.CreateContainerView({
container: @cluster.item,
type: ['VirtualMachine'],
recursive: true
})
pc = @vic.vim.serviceContent.propertyCollector
result = pc.RetrieveProperties(:specSet => [
RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [{
:type => 'VirtualMachine',
:pathSet => VM_STATE_PROPERTIES
}]
)
])
vms_hash = {}
result.each do |r|
next unless r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
vms_hash[r.obj._ref] = r.to_hash
end
view.DestroyView
vmpool = OpenNebula::VirtualMachinePool.new(@onec)
rc = vmpool.info
return {} if OpenNebula.is_error?(rc)
vms = {}
vms_hash.each do |vm_ref, info|
one_id = -1
ids = vmpool.retrieve_xmlelements("/VM_POOL/VM[DEPLOY_ID = '#{vm_ref}']")
ids.select {|vm|
hid = vm["HISTORY_RECORDS/HISTORY/HID"]
if hid
hid.to_i == @host_id
else
false
end
}
one_id = ids[0]["ID"] if ids[0]
vms[vm_ref] = {
:id => one_id,
:name => "#{info['name']} - #{@cluster.item.name}",
:deploy_id => vm_ref,
:state => STATE_MAP[info['summary.runtime.powerState']] || 'UNKNOWN'
}
end
vms
end
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# MONITOR INTERFACE & FUNCTIONS
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
private
def connection
vhost = @host['TEMPLATE/VCENTER_HOST']
vuser = @host['TEMPLATE/VCENTER_USER']
vpass = @host['TEMPLATE/VCENTER_PASSWORD']
vccr = @host['TEMPLATE/VCENTER_CCR_REF']
vrp = @host['TEMPLATE/VCENTER_RESOURCE_POOL']
vport = @host['TEMPLATE/VCENTER_PORT']
if vhost.nil? || vuser.nil? || vpass.nil? || vccr.nil?
raise 'Missing vCenter connection parameters in host'
end
connection = {
:host => vhost,
:user => vuser,
:password => vpass,
:rp => vrp,
:ccr => vccr
}
connection[:port] = vport if vport
connection
end
def cluster_monitoring
metrics = @cluster.item.collect(*(CLUSTER_PROPERTIES[0..4]))
total_cpu = metrics[0].to_f
num_cpu_cores = metrics[1].to_f
effective_cpu = metrics[2].to_f
total_memory = metrics[3].to_i
effective_mem = metrics[4].to_i
if num_cpu_cores > 0
mhz_core = total_cpu / num_cpu_cores
eff_core = effective_cpu / mhz_core
else
eff_core = 0
end
free_cpu = (eff_core * 100).to_i
used_cpu = (total_cpu - free_cpu).to_i
total_mem = total_memory / 1024
free_mem = effective_mem * 1024
unindent(<<-EOS)
HYPERVISOR = vcenter
USEDMEMORY = "#{(total_mem - free_mem)}"
FREEMEMORY = "#{free_mem}"
USEDCPU = "#{used_cpu}"
FREECPU = "#{free_cpu}"
EOS
end
def cluster_info
metrics = @cluster.item.collect(*CLUSTER_PROPERTIES)
total_cpu = metrics[0].to_f
num_cpu_cores = metrics[1].to_f
effective_cpu = metrics[2].to_f
total_memory = metrics[3].to_i
effective_mem = metrics[4].to_i
num_hosts = metrics[5]
num_eff_hosts = metrics[6]
overall_status = metrics[7]
drs_enabled = metrics[8]
ha_enabled = metrics[9]
if num_cpu_cores > 0
mhz_core = total_cpu / num_cpu_cores
eff_core = effective_cpu / mhz_core
else
mhz_core = 0
eff_core = 0
end
total_cpu = num_cpu_cores * 100
total_mem = total_memory / 1024
str_info = unindent(<<-EOS)
HYPERVISOR = vcenter
VCENTER_NAME = "#{@cluster['name'].tr(' ', '_')}"
STATUS = "#{overall_status}"
TOTALHOST = "#{num_hosts}"
AVAILHOST = "#{num_eff_hosts}"
CPUSPEED = "#{mhz_core}"
TOTALCPU = "#{total_cpu}"
TOTALMEMORY = "#{total_mem}"
VCENTER_DRS = "#{drs_enabled}"
VCENTER_HA = "#{ha_enabled}"
EOS
str_info << resource_pool_info(mhz_core)
if str_info.include?('STATUS=red')
raise 'vCenter cluster health is red, check vCenter'
end
str_info
end
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def resource_pool_info(mhz_core)
rp_list = @cluster.get_resource_pool_list
view = @vic.vim.serviceContent.viewManager.CreateContainerView({
container: @cluster.item,
type: ['ResourcePool'],
recursive: true
})
pc = @vic.vim.serviceContent.propertyCollector
result = pc.RetrieveProperties(:specSet => [
RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [{
:type => 'ResourcePool',
:pathSet => RP_PROPERTIES
}]
)
])
rps = {}
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
rps[r.obj._ref] = hashed_properties
end
end
return '' if rps.empty?
rp_info = ''
rps.each do |ref, info|
#-------------------------------------------------------------------
# CPU
#-------------------------------------------------------------------
expandable = info['config.cpuAllocation.expandableReservation']
limit = info['config.cpuAllocation.limit']
if expandable
cpu_expandable = 'YES'
else
cpu_expandable = 'NO'
end
if limit == '-1'
cpu_limit = 'UNLIMITED'
else
cpu_limit = limit
end
cpu_reservation = info['config.cpuAllocation.reservation']
cpu_num = cpu_reservation.to_f / mhz_core
cpu_shares_level = info['config.cpuAllocation.shares.level']
cpu_shares = info['config.cpuAllocation.shares.shares']
#-------------------------------------------------------------------
# MEMORY
#-------------------------------------------------------------------
expandable = info['config.memoryAllocation.expandableReservation']
limit = info['config.memoryAllocation.limit']
if expandable
mem_expandable = 'YES'
else
mem_expandable = 'NO'
end
if limit == '-1'
mem_limit = 'UNLIMITED'
else
mem_limit = limit
end
mem_reservation = info['config.memoryAllocation.reservation'].to_f
mem_shares_level = info['config.memoryAllocation.shares.level']
mem_shares = info['config.memoryAllocation.shares.shares']
begin
rp_name = rp_list.select { |item|
item[:ref] == ref
}.first[:name]
rescue
rp_name = 'Resources'
end
rp_info << unindent(<<-EOS)
VCENTER_RESOURCE_POOL_INFO = [
NAME = "#{rp_name}",
CPU_EXPANDABLE = #{cpu_expandable},
CPU_LIMIT = #{cpu_limit},
CPU_RESERVATION = #{cpu_reservation},
CPU_RESERVATION_NUM_CORES=#{cpu_num},
CPU_SHARES = #{cpu_shares},
CPU_SHARES_LEVEL = #{cpu_shares_level},
MEM_EXPANDABLE = #{mem_expandable},
MEM_LIMIT = #{mem_limit},
MEM_RESERVATION = #{mem_reservation},
MEM_SHARES = #{mem_shares},
MEM_SHARES_LEVEL = #{mem_shares_level}
]
EOS
end
view.DestroyView
rp_info
end
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
# rubocop:disable Style/FormatStringToken
def hosts_info
host_info = ''
hosts = {}
@cluster.filter_hosts.each do |r|
hp = r.to_hash
hosts[r.obj._ref] = hp if r.obj.is_a?(RbVmomi::VIM::HostSystem)
end
hosts.each do |_ref, info|
next if info['runtime.connectionState'] != 'connected'
total_cpu = info['summary.hardware.numCpuCores'] * 100
used_cpu = (info['summary.quickStats.overallCpuUsage'].to_f \
/ info['summary.hardware.cpuMhz'].to_f) * 100
used_cpu = format('%.2f', used_cpu).to_f
free_cpu = total_cpu - used_cpu
total_memory = info['summary.hardware.memorySize']/1024
used_memory = info['summary.quickStats.overallMemoryUsage']*1024
free_memory = total_memory - used_memory
host_info << unindent(<<-EOS)
HOST = [
STATE = on,
HOSTNAME = "#{info['name']}",
MODELNAME = "#{info['summary.hardware.cpuModel']}",
CPUSPEED = "#{info['summary.hardware.cpuMhz']}",
MAX_CPU = "#{total_cpu}",
USED_CPU = "#{used_cpu}",
FREE_CPU = "#{free_cpu}",
MAX_MEM = "#{total_memory}",
USED_MEM = "#{used_memory}",
FREE_MEM = "#{free_memory}"
]
EOS
end
host_info
end
# rubocop:enable Style/FormatStringToken
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def customizations_info
customs = @cluster['_connection'].serviceContent
.customizationSpecManager
.info
c_str = ''
customs.each do |c|
c_str << unindent(<<-EOS)
CUSTOMIZATION = [
NAME = "#{c.name}",
TYPE = "#{c.type}"
]
EOS
end
c_str
end
#---------------------------------------------------------------------------
#
# TODO: Add more than one nsx managers
#---------------------------------------------------------------------------
def nsx_info
nsx_obj = {}
elist = @vic.vim.serviceContent.extensionManager.extensionList
elist.each do |ext_list|
case ext_list.key
when NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
parts = ext_list.client[0].url.split("/")
protocol = parts[0] + "//"
ip_port = parts[2]
nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
nsx_obj['url'] = protocol + ip_port
nsx_obj['version'] = ext_list.version
nsx_obj['label'] = ext_list.description.label
when NSXDriver::NSXConstants::NSXT_EXTENSION_LIST
nsx_obj['type'] = NSXDriver::NSXConstants::NSXT
nsx_obj['url'] = ext_list.server[0].url
nsx_obj['version'] = ext_list.version
nsx_obj['label'] = ext_list.description.label
else
next
end
end
return '' if nsx_obj.empty?
unindent(<<-EOS)
NSX_MANAGER ="#{nsx_obj['url']}"
NSX_TYPE ="#{nsx_obj['type']}"
NSX_VERSION ="#{nsx_obj['version']}"
NSX_LABEL ="#{nsx_obj['label']}"
EOS
end
#---------------------------------------------------------------------------
# Get a list vCenter datastores morefs
#---------------------------------------------------------------------------
def datastore_info
dc = @cluster.get_dc
ds = dc.datastore_folder
ds_info = ''
ds.fetch!.each do |ref, ds|
ds_info << "VCENTER_DS_REF=\"#{ref}\"\n"
end
ds_info
end
#---------------------------------------------------------------------------
# Return monitor string
# 'wilds': VMs in vCenter but not in OpenNebula
# 'ones' : VMs in OpenNebula
#---------------------------------------------------------------------------
# Return monitor string for the requested VM set.
#   'wilds': VMs in vCenter but not in OpenNebula
#   'ones' : VMs in OpenNebula
def vms_info(vm_type)
    monitor_data, _last_time = @cluster.monitor_vms(@host.id, vm_type)
    monitor_data
end
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
# Build the NSX_TRANSPORT_ZONES template attribute from the transport
# zones defined in the NSX Manager. When no NSX client is available the
# cached @nsx_status string is returned instead.
#
# Raises when @nsx_type is neither NSX-V nor NSX-T.
def tz_info
    return @nsx_status if @nsx_client.nil?

    tz_object = NSXDriver::TransportZone.new_child(@nsx_client)

    tz_info = 'NSX_TRANSPORT_ZONES = ['

    # NSX-V and NSX-T expose transport zones with different payloads:
    # XML (xpath lookups) for NSX-V, a JSON-like hash for NSX-T.
    case @nsx_type
    when NSXDriver::NSXConstants::NSXV
        tzs = tz_object.tzs
        tzs.each do |tz|
            tz_info << "#{tz.xpath('name').text}=\"#{tz.xpath('objectId').text}\","
        end
    when NSXDriver::NSXConstants::NSXT
        tzs = tz_object.tzs
        tzs['results'].each do |tz|
            tz_info << "#{tz['display_name']}=\"#{tz['id']}\","
        end
    else
        raise "Unknown PortGroup type #{@nsx_type}"
    end

    # Drop the trailing comma (if any) and close the vector attribute
    tz_info.chomp!(',')
    tz_info << ']'
end
end
#---------------------------------------------------------------------------
# Set of vCenter clusters, each one representing an OpenNebula host
# DataModel
#
# @clusters = {
# host_id => {
# :cluster => VCenterMonitor::Cluster,
# :error => String (last error if any)
# },
# ....
# }
#
#---------------------------------------------------------------------------
class ClusterSet
    #---------------------------------------------------------------------------
    # Constants
    # CLUSTER_PROBES: probes to be executed for each cluster. Each Cluster
    # needs to respond to these methods and a corresponding UDP message of
    # that type will be sent with the probe result
    #---------------------------------------------------------------------------
    CLUSTER_PROBES = [
        :system_host,
        :monitor_host,
        :state_vm,
        :monitor_vm,
        :beacon_host
    ].freeze
    #---------------------------------------------------------------------------
    # Initialize an empty, mutex-protected cluster set. A single OpenNebula
    # client is reused for all XML-RPC calls made by this set.
    #---------------------------------------------------------------------------
    def initialize
        @mutex = Mutex.new
        @client = OpenNebula::Client.new
        @clusters = {}
    end
    # Add a host by id, it access OpenNebula to get connection parameters.
    # On error the entry is registered with a nil cluster and, when monitord
    # connection info is available, a failed beacon is sent so the host is
    # flagged instead of silently timing out.
    #
    # @param hid  [Integer] OpenNebula host id
    # @param conf [Hash, nil] monitord connection info (:address, :port)
    def add(hid, conf)
        begin
            cluster = Cluster.new(hid, @client)
            error = ''
        rescue StandardError => e
            cluster = nil
            error = e.message
            if !conf.nil?
                mdc = MonitorClient.new(conf[:address], conf[:port], hid)
                mdc.beacon_host_udp(false, error)
            end
        end
        add_host(hid, cluster, error)
        $logger.info("Registered host #{hid} #{error}")
    end
    # Del a host from the @clusters hash
    #
    # @param hid [Integer] OpenNebula host id
    def del(hid)
        @mutex.synchronize {
            @clusters.delete(hid)
        }
        $logger.info("Unregistered host #{hid}")
    end
    # Run a block on the Cluster object of a given host, if registered.
    # This function should be called within a synchronized block
    # NOTE(review): it does not take @mutex itself; callers must synchronize.
    def on_cluster(hid, &block)
        return unless @clusters[hid] && @clusters[hid][:cluster]
        block.call(@clusters[hid][:cluster])
    end
    # One-time initialization of host pool: registers every vCenter host
    # known to OpenNebula, skipping hosts in OFFLINE state (STATE == 8)
    def bootstrap
        hpool = OpenNebula::HostPool.new(@client)
        rc = hpool.info
        if OpenNebula.is_error?(rc)
            raise "Could not get hosts information - #{rc.message}"
        end
        $logger.info("Bootstraping list of clusters")
        hpool.each do |h|
            next if h['IM_MAD'] != 'vcenter' || h['STATE'] == '8' #offline
            $logger.info("Adding host #{h.name} (#{h.id})")
            add(h.id, nil)
        end
    end
    #---------------------------------------------------------------------------
    # Run the due probes on every registered cluster and send results to
    # monitord over UDP. A probe is due when its period (conf[<probe>]) has
    # elapsed since its last run; timestamps are kept per cluster.
    #
    # TODO: Error signaling
    # [:cluster] is nil -> error creating clients, only? Send [:error]?
    #---------------------------------------------------------------------------
    def monitor(conf)
        @mutex.synchronize do
            @clusters.each do |id, c|
                next if c[:cluster].nil?
                # Lazily create the UDP client once the monitord endpoint
                # is known
                if c[:monitordc].nil?
                    next if conf[:address].nil? or conf[:port].nil?
                    c[:monitordc] = MonitorClient.new(conf[:address],
                                                      conf[:port],
                                                      id)
                end
                $logger.info("Monitoring cluster #{id}")
                CLUSTER_PROBES.each do |probe_name|
                    # Check if the last monitoring time is older than the
                    # configured monitoring frequency for the probe
                    last_mon = c["last_#{probe_name}".to_sym]
                    probe_frequency = conf[probe_name].to_i
                    next unless (Time.now.to_i - last_mon) > probe_frequency
                    $logger.info("\tRunning #{probe_name} probe")
                    begin
                        probe_result = c[:cluster].send(probe_name).strip
                        # Empty result: nothing to report, do not update
                        # the last-run timestamp either
                        next if probe_result.empty?
                        success = true
                    rescue StandardError => e
                        success = false
                        probe_result = e.message
                    end
                    $logger.debug("\tResult(#{success})\n#{probe_result}\n")
                    # e.g. :system_host -> :system_host_udp on MonitorClient
                    c[:monitordc].send("#{probe_name}_udp".to_sym,
                                       success,
                                       probe_result)
                    c["last_#{probe_name}".to_sym] = Time.now.to_i
                end
            end
        end
    end
    private
    # Internal method to access @clusters hash; last-run timestamps start
    # at 0 so every probe fires on the first monitor pass
    def add_host(id, cluster, error)
        @mutex.synchronize do
            @clusters[id] = {
                :cluster => cluster,
                :error => error,
                :monitordc => nil,
                :last_system_host => 0,
                :last_monitor_host => 0,
                :last_beacon_host => 0,
                :last_monitor_vm => 0,
                :last_state_vm => 0
            }
        end
    end
end

View File

@ -0,0 +1,170 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION ||= ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
ETC_LOCATION ||= '/etc/one/'
VAR_LOCATION ||= '/var/lib/one/'
else
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
ETC_LOCATION ||= ONE_LOCATION + '/etc/'
VAR_LOCATION ||= ONE_LOCATION + '/var/'
end
if File.directory?(GEMS_LOCATION)
Gem.use_paths(GEMS_LOCATION)
$LOAD_PATH.reject! {|l| l =~ /(vendor|site)_ruby/ }
end
$LOAD_PATH << RUBY_LIB_LOCATION
require 'yaml'
require 'rexml/document'
require_relative './vcenter_cluster'
require_relative './monitord_client'
#---------------------------------------------------------------------------
#
#
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Drives the monitoring of all registered vCenter clusters. Holds the
# active probe configuration and a timer thread that triggers the probes.
#---------------------------------------------------------------------------
class VcenterMonitorManager

    #---------------------------------------------------------------------------
    # Constants
    #   BASE_TIMER: for monitor loop, probe periods CANNOT be less than this
    #   DEFAULT_CONFIGURATION: used until monitord sends one, and as the
    #   fallback when the received configuration cannot be parsed
    #---------------------------------------------------------------------------
    BASE_TIMER = 10

    DEFAULT_CONFIGURATION = {
        :system_host  => 600,
        :monitor_host => 120,
        :state_vm     => 30,
        :monitor_vm   => 30,
        :beacon_host  => 30,
        :address      => "127.0.0.1",
        :port         => 4124
    }.freeze

    #---------------------------------------------------------------------------
    # Bootstrap the cluster set from the OpenNebula host pool and start the
    # background timer thread.
    #---------------------------------------------------------------------------
    def initialize
        @clusters = ClusterSet.new
        @clusters.bootstrap

        @mutex = Mutex.new
        @conf  = DEFAULT_CONFIGURATION.clone

        # Create timer thread to monitor vcenters
        Thread.new { timer }
    end

    #---------------------------------------------------------------------------
    # Update the active configuration from a Base64 encoded XML document
    # (PROBES_PERIOD and NETWORK sections sent by monitord). Any parse or
    # lookup error falls back to DEFAULT_CONFIGURATION.
    #
    # @param conf64 [String] Base64 encoded XML configuration
    #---------------------------------------------------------------------------
    def update_conf(conf64)
        conftxt = Base64.decode64(conf64)
        conf    = REXML::Document.new(conftxt).root

        new_conf = {
            :system_host  => conf.elements['PROBES_PERIOD/SYSTEM_HOST'].text.to_i,
            :monitor_host => conf.elements['PROBES_PERIOD/MONITOR_HOST'].text.to_i,
            # Fix: key must be :state_vm to match DEFAULT_CONFIGURATION and
            # the probe names used by ClusterSet (was :state_vms)
            :state_vm     => conf.elements['PROBES_PERIOD/STATE_VM'].text.to_i,
            :monitor_vm   => conf.elements['PROBES_PERIOD/MONITOR_VM'].text.to_i,
            :beacon_host  => conf.elements['PROBES_PERIOD/BEACON_HOST'].text.to_i,
            :address      => conf.elements['NETWORK/MONITOR_ADDRESS'].text.to_s,
            # Fix: parse as Integer for consistency with the default (4124)
            :port         => conf.elements['NETWORK/PORT'].text.to_i
        }

        @mutex.synchronize { @conf = new_conf }
    rescue StandardError
        # Wrong or missing configuration, use defaults.
        # Fix: the rescue path referenced an undefined DEFAULT_CONF constant,
        # which raised NameError instead of recovering.
        @mutex.synchronize { @conf = DEFAULT_CONFIGURATION.clone }
    end

    #---------------------------------------------------------------------------
    # ACTION from OpenNebula Information Driver:
    #   - start: monitor process for a cluster
    #   - stop:  monitor process for a cluster
    #---------------------------------------------------------------------------
    def start(hid, conf)
        update_conf(conf)

        @clusters.add(hid, @conf.clone)
    end

    def stop(hid, _)
        @clusters.del(hid)
    end

    #---------------------------------------------------------------------------
    # Periodic timer to trigger monitor updates on all registered clusters
    #---------------------------------------------------------------------------
    def timer
        loop do
            conf = @mutex.synchronize { @conf.clone }

            @clusters.monitor(conf)

            sleep BASE_TIMER
        end
    end

end
#---------------------------------------------------------------------------
# This class receives inputs reading on the fifo, sends monitor messages
# to monitord client and trigger operations on the Vcenter logic thread
# --------------------------------------------------------------------------
#---------------------------------------------------------------------------
# This class receives inputs reading on the fifo, sends monitor messages
# to monitord client and triggers operations on the Vcenter logic thread
#---------------------------------------------------------------------------
class IOThread

    IO_FIFO = "/tmp/vcenter_monitor.fifo"

    # @param vcentermm [VcenterMonitorManager] receiver of the actions
    def initialize(vcentermm)
        @vcentermm = vcentermm
    end

    # Blocking loop. The FIFO is reopened after each writer closes it and
    # every "<action> <host_id> [<conf64>]" line is dispatched to the
    # manager (e.g. :start / :stop).
    def command_loop
        loop do
            # Fix: use the block form of File.open so the FIFO descriptor
            # is closed on every iteration (previously leaked each cycle)
            File.open(IO_FIFO) do |fifo|
                fifo.each_line do |line|
                    action, hid, conf = line.split
                    @vcentermm.send(action.to_sym, hid.to_i, conf)
                end
            end
        end
    end

end
# Watchdog: exit the service if oned is not running.
# NOTE(review): this body runs once, not in a loop; presumably it was meant
# to poll oned periodically -- confirm intended behavior.
Thread.new {
    exit unless system('pgrep oned')
    sleep 5
}
# Main service objects: the manager drives the per-cluster probes and the
# IO thread feeds it with actions read from the FIFO (blocks forever).
vcentermm = VcenterMonitorManager.new
io = IOThread.new(vcentermm)
io.command_loop

View File

@ -1,19 +0,0 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
date +%s

View File

@ -1,43 +0,0 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# Arguments passed by the IM driver: $2 host id, $3 host name
HID=$2
HNAME=$3
# Consume the driver message from stdin (content unused here)
STDIN=`cat -`
# PID file written by monitord-client.rb for this host
CLIENT_PID_FILE=/tmp/one-monitord-$HID.pid
(
    # Nothing to control if the client never started
    [ -f $CLIENT_PID_FILE ] || exit 0

    running_pid=$(cat $CLIENT_PID_FILE)

    # Kill any stale monitord-client processes for this host other than
    # the one recorded in the PID file
    pids=$(ps axuwww | grep -e "/monitord-client.rb.*${HID} " | grep -v grep | \
        awk '{ print $2 }' | grep -v "^${running_pid}$")

    if [ -n "$pids" ]; then
        kill $pids
    fi

    # If oned is gone, stop the recorded client as well
    oned=`ps auxwww | grep oned | grep -v grep | wc -l`

    if [ ${oned} -eq 0 ]; then
        kill ${running_pid}
    fi
) > /dev/null

View File

@ -1,19 +0,0 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
exit 0

View File

@ -1,53 +0,0 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION ||= ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
else
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
end
if File.directory?(GEMS_LOCATION)
Gem.use_paths(GEMS_LOCATION)
$LOAD_PATH.reject! {|l| l =~ /(vendor|site)_ruby/ }
end
$LOAD_PATH << RUBY_LIB_LOCATION
require 'nsx_driver'
require_relative '../../../lib/nsx.rb'

# Probe arguments appended by the IM driver: hostname last, host id before it
# NOTE(review): `host` appears unused below; error messages use host_id.
host = ARGV[-1]
host_id = ARGV[-2]

# Vcenter and NSX connection
begin
    nm = NsxMonitor.new(host_id)
    # Report NSX manager reachability/status attributes
    puts nm.nsx_status

    # Get Transport Zone info and NSX_STATUS from NSX Manager
    if nm.nsx_ready?
        puts nm.tz_info
    end
rescue StandardError => e
    # Probes report errors on stderr; monitord marks the host accordingly
    STDERR.puts "IM poll for NSX cluster #{host_id} failed due to "\
                "\"#{e.message}\"\n#{e.backtrace}"
end

View File

@ -1,73 +0,0 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION ||= ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
VAR_LOCATION ||= '/var/lib/one/'
GEMS_LOCATION ||= '/usr/share/one/gems'
else
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
VAR_LOCATION ||= ONE_LOCATION + '/var/'
end
REMOTE_LIB_LOCATION ||= VAR_LOCATION + 'remotes/im/lib'
if File.directory?(GEMS_LOCATION)
Gem.use_paths(GEMS_LOCATION)
$LOAD_PATH.reject! {|l| l =~ /(vendor|site)_ruby/ }
end
$LOAD_PATH << RUBY_LIB_LOCATION
$LOAD_PATH << REMOTE_LIB_LOCATION
require 'vcenter.rb'

# Host id appended by the IM driver (second to last argument)
host_id = ARGV[-2]

begin
    # VCenter Monitoring object
    vcm = VcenterMonitor.new(host_id)
    # Cluster and ESX host capacity/usage attributes
    puts vcm.monitor_clusters
    puts vcm.monitor_host_systems
    # Retrieve customizations
    begin
        puts vcm.monitor_customizations
    rescue StandardError
        # Do not break monitoring on customization error
        puts 'ERROR="Customizations could not be retrieved,' \
             'please check permissions"'
    end
    # Get NSX info detected from vCenter Server
    puts vcm.nsx_info
    # VM wilds info
    puts vcm.monitor_vms('wilds')
    # Datastore Monitoring
    puts vcm.monitor_datastores
rescue StandardError => e
    STDERR.puts "IM poll for vCenter cluster #{host_id} failed due to "\
                "\"#{e.message}\"\n#{e.backtrace}"
    exit(-1)
ensure
    # NOTE(review): @vi_client is never assigned in this script, so this
    # cleanup is effectively a no-op -- confirm whether VcenterMonitor
    # should expose its client for closing.
    @vi_client.close_connection if @vi_client
end

View File

@ -1,45 +0,0 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
ONE_LOCATION ||= ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
else
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
end
if File.directory?(GEMS_LOCATION)
Gem.use_paths(GEMS_LOCATION)
$LOAD_PATH.reject! {|l| l =~ /(vendor|site)_ruby/ }
end
$LOAD_PATH << RUBY_LIB_LOCATION
require_relative '../../../lib/vcenter'

# Probe arguments appended by the IM driver: hostname last, host id before it
host = ARGV[-1]
host_id = ARGV[-2]

# NOTE(review): constructed outside the begin/rescue, so constructor errors
# are not routed through handle_driver_exception -- confirm if intended.
vcm = VcenterMonitor.new(host_id)
begin
    # Monitoring data for VMs already managed by OpenNebula ('ones')
    puts vcm.monitor_vms('ones')
rescue StandardError => e
    OpenNebula.handle_driver_exception('im probe_vm_monitor', e, host)
end

View File

@ -1,53 +0,0 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
ONE_LOCATION ||= ENV['ONE_LOCATION']
if !ONE_LOCATION
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
else
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
end
if File.directory?(GEMS_LOCATION)
Gem.use_paths(GEMS_LOCATION)
$LOAD_PATH.reject! {|l| l =~ /(vendor|site)_ruby/ }
end
$LOAD_PATH << RUBY_LIB_LOCATION
require_relative '../../../lib/vcenter'
require_relative '../../../lib/probe_db'

# Probe arguments appended by the IM driver: hostname last, host id before it
host = ARGV[-1]
host_id = ARGV[-2]

begin
    # Local VM state cache; reports state changes since the last run.
    # :missing_state is assigned to VMs not found in vCenter; :sync is the
    # full resync period in seconds (per probe_db convention -- confirm).
    vmdb = VirtualMachineDB.new('vcenter',
                                host,
                                host_id,
                                :missing_state => 'UNKNOWN',
                                :sync => 180)
    # Drop entries for VMs that no longer exist before reporting
    vmdb.purge

    puts vmdb.to_status
rescue StandardError => e
    OpenNebula.handle_driver_exception('im probe_vm_status', e, host)
end

View File

@ -1 +0,0 @@
../common.d/monitord-client.rb

View File

@ -1 +0,0 @@
../common.d/monitord-client_control.sh

View File

@ -0,0 +1,77 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
IO_FIFO_PATH="/tmp/vcenter_monitor.fifo"

if [ -z "$ONE_LOCATION" ]; then
    LOG=/var/log/one/vcenter_monitor.log
    BIN=/var/lib/one/remotes/im/lib/vcenter_monitor.rb
else
    BIN=$ONE_LOCATION/var/remotes/im/lib/vcenter_monitor.rb
    LOG=$ONE_LOCATION/var/vcenter_monitor.log
fi

#-------------------------------------------------------------------------------
# Make sure the communication FIFO exists and the vcenter_monitor service
# is running; start it otherwise
#-------------------------------------------------------------------------------
if [ ! -p "$IO_FIFO_PATH" ]; then
    rm -f "$IO_FIFO_PATH" > /dev/null 2>&1
    mkfifo "$IO_FIFO_PATH"
fi

# Fix: use pgrep -f instead of the "ps | grep | grep -v grep" pipeline
# (pgrep is already a dependency: the service itself calls "pgrep oned")
pid=$(pgrep -f vcenter_monitor.rb)

if [ -z "$pid" ]; then
    ruby "$BIN" > "$LOG" 2>&1 &

    sleep 3

    # Fix: quote $pid -- with multiple matching PIDs the unquoted test
    # "[ -z $pid ]" breaks with "too many arguments"
    pid=$(pgrep -f vcenter_monitor.rb)

    if [ -z "$pid" ]; then
        echo "Cannot start vcenter_monitor service: $(cat "$LOG")"
        exit 1
    fi
fi

#-------------------------------------------------------------------------------
# Process Arguments: [stop] <hypervisor> <host_id>, action defaults to start
#-------------------------------------------------------------------------------
ACTION="start"

if [ "$1" = "stop" ]; then
    shift
    ACTION="stop"
fi

ARGV=$*

HYPERV=$1
HID=$2

# Driver message (Base64 XML configuration) arrives on stdin
STDIN=$(cat -)

MONITOR_ACTION="$ACTION $HID $STDIN"

# TODO: check the service is ready and waiting on the FIFO before writing
echo "$MONITOR_ACTION" > "$IO_FIFO_PATH"

echo "<MONITOR_MESSAGES></MONITOR_MESSAGES>"

exit 0

View File

@ -112,7 +112,7 @@ public:
* @param oid host id
* @param tmpl monitoring template
*/
void monitor_host(int oid, bool result, const Template &tmpl);
void monitor_host(int oid, const Template &tmpl);
/**
* Sets the monitor information of the VM.
@ -132,6 +132,14 @@ public:
void start_monitor_success(int oid);
/**
*
* Set host in error because of a monitor failure
* @param oid of the host
* @param message describing the error
*/
void error_monitor(int oid, const string& message);
/**
* This function is executed periodically to update host monitor status
*/

View File

@ -231,7 +231,7 @@ void HostMonitorManager::raft_status(const string& state)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void HostMonitorManager::monitor_host(int oid, bool result, const Template &tmpl)
void HostMonitorManager::monitor_host(int oid, const Template &tmpl)
{
if (!is_leader)
{
@ -252,19 +252,6 @@ void HostMonitorManager::monitor_host(int oid, bool result, const Template &tmpl
return;
}
if (!result)
{
NebulaLog::error("HMM", "Monitor host failed id:" + to_string(oid));
if (host->state() != Host::OFFLINE && host->state() != Host::DISABLED )
{
oned_driver->host_state(oid, Host::state_to_str(Host::ERROR));
// TODO Set template error message
}
return;
}
HostMonitoringTemplate monitoring;
monitoring.oid(oid);
@ -550,3 +537,31 @@ void HostMonitorManager::stop_host_monitor(const HostRPCPool::HostBaseLock& host
host->monitor_in_progress(false);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void HostMonitorManager::error_monitor(int oid, const string& msg)
{
auto host = hpool->get(oid);
if (!host.valid())
{
NebulaLog::warn("HMM", "monitor_host: unknown host " + to_string(oid));
return;
}
host->monitor_in_progress(false);
if (host->state() == Host::OFFLINE)
{
// Host is offline, we shouldn't receive monitoring
return;
}
ostringstream oss;
oss << Host::state_to_str(Host::ERROR) << " " << msg;
oned_driver->host_state(oid, oss.str());
}

View File

@ -39,9 +39,18 @@ void MonitorDriverProtocol::_monitor_vm(message_t msg)
{
NebulaLog::ddebug("MDP", "Received MONITOR_VM msg: " + msg->payload());
char * error_msg;
if (msg->status() != "SUCCESS")
{
NebulaLog::warn("MDP", "Failed to monitor VM for host " +
to_string(msg->oid()) + ": " + msg->payload());
hm->error_monitor(msg->oid(), msg->payload());
return;
}
Template tmpl;
char * error_msg;
int rc = tmpl.parse(msg->payload(), &error_msg);
if (rc != 0)
@ -50,9 +59,12 @@ void MonitorDriverProtocol::_monitor_vm(message_t msg)
oss << "Error parsing VM monitoring template from host " << msg->oid()
<< "\nMessage: " << msg->payload()
<< "\nError: " << error_msg;
NebulaLog::error("MDP", oss.str());
free(error_msg);
hm->error_monitor(msg->oid(), "Error parsing monitor information");
return;
}
@ -126,6 +138,15 @@ void MonitorDriverProtocol::_beacon_host(message_t msg)
NebulaLog::ddebug("MDP", "Received beacon for host " +
to_string(msg->oid()) + ": " + msg->payload());
if (msg->status() != "SUCCESS")
{
NebulaLog::warn("MDP", "Error condition detected on beacon for host "
+ to_string(msg->oid()) + ": " + msg->payload());
hm->error_monitor(msg->oid(), msg->payload());
return;
}
hm->update_last_monitor(msg->oid());
}
@ -137,11 +158,19 @@ void MonitorDriverProtocol::_monitor_host(message_t msg)
NebulaLog::ddebug("MDP", "Received monitoring information for host " +
to_string(msg->oid()) + ": " + msg->payload());
std::string msg_str = msg->payload();
char * error_msg;
if (msg->status() != "SUCCESS")
{
NebulaLog::warn("MDP", "Failed to monitor host " + to_string(msg->oid())
+ ": " + msg->payload());
hm->error_monitor(msg->oid(), msg->payload());
return;
}
Template tmpl;
int rc = tmpl.parse(msg_str, &error_msg);
char* error_msg;
int rc = tmpl.parse(msg->payload(), &error_msg);
if (rc != 0)
{
@ -149,15 +178,16 @@ void MonitorDriverProtocol::_monitor_host(message_t msg)
oss << "Error parsing monitoring template for host " << msg->oid()
<< "\nMessage: " << msg->payload()
<< "\nError: " << error_msg;
NebulaLog::error("MDP", oss.str());
free(error_msg);
hm->error_monitor(msg->oid(), "Error parsing monitor information");
return;
}
bool result = msg->status() == "SUCCESS" ? true : false;
hm->monitor_host(msg->oid(), result, tmpl);
hm->monitor_host(msg->oid(), tmpl);
}
/* -------------------------------------------------------------------------- */
@ -168,6 +198,15 @@ void MonitorDriverProtocol::_system_host(message_t msg)
NebulaLog::ddebug("MDP", "Received system information for host " +
to_string(msg->oid()) + ": " + msg->payload());
if (msg->status() != "SUCCESS")
{
NebulaLog::warn("MDP", "Failed to get system information for host " +
to_string(msg->oid()) + ": " + msg->payload());
hm->error_monitor(msg->oid(), msg->payload());
return;
}
auto oned = hm->get_oned_driver();
oned->host_system_info(msg->oid(), msg->status(), msg->payload());
}
@ -184,6 +223,9 @@ void MonitorDriverProtocol::_state_vm(message_t msg)
{
NebulaLog::warn("MDP", "Failed to monitor VM state for host " +
to_string(msg->oid()) + ": " + msg->payload());
hm->error_monitor(msg->oid(), msg->payload());
return;
}
auto oned = hm->get_oned_driver();
@ -210,11 +252,10 @@ void MonitorDriverProtocol::_start_monitor(message_t msg)
if (msg->status() != "SUCCESS")
{
hm->start_monitor_failure(msg->oid());
NebulaLog::warn("MDP", "Start monitor failed for host " +
to_string(msg->oid()) + ": " + msg->payload());
hm->start_monitor_failure(msg->oid());
return;
}

View File

@ -223,7 +223,7 @@ define(function(require) {
else if (OpenNebulaVM.isSPICESupported(data.VM)) {
actions += OpenNebulaVM.buttonSpice(id);
}
var wFile = OpenNebulaVM.isWFileSupported(data.VM);
actions += wFile ? OpenNebulaVM.buttonWFile(id, wFile) : "";
@ -237,7 +237,7 @@ define(function(require) {
promises.push(promiseVmInfo(id, successCallback))
})
}
$.when.apply($, promises).then(function() {
if (that.serviceroleVMsDataTable) {
that.serviceroleVMsDataTable.updateView(null, roleVms, true);

View File

@ -60,6 +60,7 @@ const std::vector<ContextVariable> NETWORK6_CONTEXT = {
{"IP6", "IP6_GLOBAL", "IP6", false},
{"IP6_ULA", "IP6_ULA", "", false},
{"GATEWAY6", "GATEWAY6", "", true},
{"METRIC6", "METRIC6", "", true},
{"CONTEXT_FORCE_IPV4", "CONTEXT_FORCE_IPV4", "", true},
{"IP6_PREFIX_LENGTH", "PREFIX_LENGTH", "", true},
{"VROUTER_IP6", "VROUTER_IP6_GLOBAL", "VROUTER_IP6", false},

View File

@ -66,28 +66,22 @@ module NSXDriver
NSXVClient.new(nsxmgr, nsx_user, nsx_password)
else
error_msg = "Unknown object type: #{type}"
error = NSXError::UnknownObject.new(error_msg)
error = NSXError::UnknownObject.new(error_msg)
raise error
end
end
def self.new_from_id(host_id)
def self.new_from_id(hid)
client = OpenNebula::Client.new
host = OpenNebula::Host.new_with_id(host_id, client)
rc = host.info
host = OpenNebula::Host.new_with_id(hid, client)
rc = host.info(true)
if OpenNebula.is_error?(rc)
raise "Could not get host info for ID: \
#{host_id} - #{rc.message}"
raise "Could not get host info for ID: #{hid} - #{rc.message}"
end
nsxmgr = host['TEMPLATE/NSX_MANAGER']
nsx_user = host['TEMPLATE/NSX_USER']
nsx_password = NSXClient
.nsx_pass(host['TEMPLATE/NSX_PASSWORD'])
nsx_type = host['TEMPLATE/NSX_TYPE']
new_child(nsxmgr, nsx_user, nsx_password, nsx_type)
new_from_host(host)
end
# METHODS
@ -108,20 +102,6 @@ module NSXDriver
raise NSXError::IncorrectResponseCodeError, nsx_error
end
def self.nsx_pass(nsx_pass_enc)
client = OpenNebula::Client.new
system = OpenNebula::System.new(client)
config = system.get_configuration
if OpenNebula.is_error?(config)
raise "Error getting oned configuration : #{config.message}"
end
token = config['ONE_KEY']
@nsx_password = VCenterDriver::VIClient
.decrypt(nsx_pass_enc, token)
end
# Return: respose.body
def get(url, aditional_headers = []); end

View File

@ -380,7 +380,7 @@ class ClusterComputeResource
mem_shares_level = info["config.memoryAllocation.shares.level"]
mem_shares = info["config.memoryAllocation.shares.shares"]
rp_name = rp_list.select { |item| item[:ref] == ref}.first[:name] rescue ""
rp_name = @rp_list.select { |item| item[:ref] == ref}.first[:name] rescue ""
rp_name = "Resources" if rp_name.empty?
@ -502,7 +502,7 @@ class ClusterComputeResource
return host_info
end
def monitor_vms(host_id, vm_type, debug = false)
def monitor_vms(host_id, vm_type)
vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
cluster_name = self["name"]
cluster_ref = self["_ref"]
@ -511,23 +511,20 @@ class ClusterComputeResource
one_host = VCenterDriver::VIHelper.one_item(OpenNebula::Host, host_id)
if !one_host
STDERR.puts "Failed to retieve host with id #{host.id}"
STDERR.puts e.inspect
STDERR.puts e.backtrace
if VCenterDriver::CONFIG[:debug_information]
STDERR.puts "#{message} #{e.backtrace}"
end
end
host_id = one_host["ID"] if one_host
# Extract CPU info and name for each esx host in cluster
esx_hosts = {}
@item.host.each do |esx_host|
info = {}
info[:name] = esx_host.name
info[:cpu] = esx_host.summary.hardware.cpuMhz.to_f
esx_hosts[esx_host._ref] = info
esx_hosts[esx_host._ref] = {
:name => esx_host.name,
:cpu => esx_host.summary.hardware.cpuMhz.to_f
}
end
@monitored_vms = Set.new
monitored_vms = Set.new
str_info = ""
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
@ -626,12 +623,8 @@ class ClusterComputeResource
@rp_list = get_resource_pool_list if !@rp_list
vm_pool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualMachinePool)
# opts common to all vms
opts = {
pool: vm_pool,
vc_uuid: vc_uuid,
}
# We filter to retrieve only those VMs running in the host that we are monitoring
host_vms = vm_pool.retrieve_xmlelements("/VM_POOL/VM[HISTORY_RECORDS/HISTORY/HID='#{host_id}']")
vms.each do |vm_ref,info|
vm_info = ''
@ -656,42 +649,22 @@ class ClusterComputeResource
next if running_flag == "no"
# retrieve vcenter driver machine
begin
vm = VCenterDriver::VirtualMachine.new_from_ref(@vi_client, vm_ref, info["name"], opts)
id = vm.vm_id
rescue
# Wild starting with one- not existing in one.
# Show all monitored vms, included if it's configured
# in vcenterrc, skip if not.
next unless debug
id = -1
# Find the VM by its deploy_id, which in the vCenter driver is
# the vCenter managed object reference
found_vm = host_vms.select{|vm| vm["DEPLOY_ID"].eql? vm_ref }.first
id = found_vm["ID"] if found_vm
id = -1
raise
end
if !vm.nil? && vm.one_exist?
one_uuid = vm.one_item.deploy_id + vc_uuid
vmvc_uuid = vm_ref + vc_uuid
if one_uuid != vmvc_uuid
# Wild starting with one- with the same id like existing one vm
next
end
end
# skip if it is a wild and we are looking for OpenNebula VMs
next if vm_type == 'ones' and id == -1
# skip if it is not a wild and we are looking for wilds
next if vm_type == 'wilds' and id != -1
# skip if already monitored
next if monitored_vms.include? vm_ref
if vm_type == 'ones'
next if id == -1
elsif vm_type == 'wilds'
next if id != -1
else
next
end
#skip if it's already monitored
if vm.one_exist?
next if @monitored_vms.include? id
@monitored_vms << id
end
monitored_vms << vm_ref
vm = VCenterDriver::VirtualMachine.new(@vi_client, vm_ref, id)
vm.vm_info = info
vm.monitor(stats)
@ -703,7 +676,7 @@ class ClusterComputeResource
# if the machine does not exist in opennebula it means that is a wild:
unless vm.one_exist?
vm_template_64 = Base64.encode64(vm.vm_to_one(vm_name)).gsub("\n","")
vm_info << "VCENTER_TEMPLATE=\"YES\","
vm_info << 'VCENTER_TEMPLATE="YES",'
vm_info << "IMPORT_TEMPLATE=\"#{vm_template_64}\"]\n"
else
mon_s64 = Base64.strict_encode64(vm.info)

View File

@ -65,10 +65,6 @@ if OpenNebula.is_error?(rc)
raise err_msg
end
# Exit if there is no NSX
nsx_status = one_host['TEMPLATE/NSX_STATUS']
exit 0 if nsx_status.nil?
# Exit if there is no NSX_PASSWORD
nsx_password = one_host['TEMPLATE/NSX_PASSWORD']
exit 0 if nsx_password.nil?

View File

@ -66,10 +66,6 @@ if OpenNebula.is_error?(rc)
raise err_msg
end
# Exit if there is no NSX
nsx_status = one_host['TEMPLATE/NSX_STATUS']
exit 0 if nsx_status.nil?
# Exit if there is no NSX_PASSWORD
nsx_password = one_host['TEMPLATE/NSX_PASSWORD']
exit 0 if nsx_password.nil?

View File

@ -66,6 +66,10 @@ if OpenNebula.is_error?(rc)
raise err_msg
end
# Exit if there is no NSX_PASSWORD
nsx_password = one_host['TEMPLATE/NSX_PASSWORD']
exit 0 if nsx_password.nil?
vmid = template_xml.xpath('//VM/ID')
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vmid)
# OpenNebula VM