1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-01-11 05:17:41 +03:00

F #4302: vCenter monitoring: Hosts (#4553)

Initial commit
This commit is contained in:
Angel Luis Moya Gonzalez 2020-04-20 17:26:39 +02:00 committed by GitHub
parent 057e5cc4d9
commit 3ab22696e4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 1924 additions and 674 deletions

View File

@ -306,6 +306,11 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/monitor \
$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/status \
$VAR_LOCATION/remotes/im/vcenter.d \
$VAR_LOCATION/remotes/im/vcenter-probes.d/host/beacon \
$VAR_LOCATION/remotes/im/vcenter-probes.d/host/monitor \
$VAR_LOCATION/remotes/im/vcenter-probes.d/host/system \
$VAR_LOCATION/remotes/im/vcenter-probes.d/vm/monitor \
$VAR_LOCATION/remotes/im/vcenter-probes.d/vm/status \
$VAR_LOCATION/remotes/im/ec2.d \
$VAR_LOCATION/remotes/im/ec2-probes.d/host/monitor \
$VAR_LOCATION/remotes/im/ec2-probes.d/host/system \
@ -543,6 +548,11 @@ INSTALL_FILES=(
IM_PROBES_FIRECRACKER_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/monitor
IM_PROBES_FIRECRACKER_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/firecracker-probes.d/vm/status
IM_PROBES_ETC_FIRECRACKER_PROBES_FILES:$VAR_LOCATION/remotes/etc/im/firecracker-probes.d
IM_PROBES_VCENTER_HOST_BEACON_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/host/beacon
IM_PROBES_VCENTER_HOST_MONITOR_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/host/monitor
IM_PROBES_VCENTER_HOST_SYSTEM_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/host/system
IM_PROBES_VCENTER_VM_MONITOR_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/vm/monitor
IM_PROBES_VCENTER_VM_STATUS_FILES:$VAR_LOCATION/remotes/im/vcenter-probes.d/vm/status
AUTH_SSH_FILES:$VAR_LOCATION/remotes/auth/ssh
AUTH_X509_FILES:$VAR_LOCATION/remotes/auth/x509
AUTH_LDAP_FILES:$VAR_LOCATION/remotes/auth/ldap
@ -944,8 +954,7 @@ VMM_EXEC_LIB_VCENTER_FILES="src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb
src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_device/vm_device.rb \
src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_device/vm_disk.rb \
src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_device/vm_nic.rb \
src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_helper/vm_helper.rb \
src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_monitor/vm_monitor.rb"
src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine_helper/vm_helper.rb"
#-------------------------------------------------------------------------------
# VMM Lib nsx files, used by the NSX Driver to be installed in
@ -1176,7 +1185,9 @@ IM_PROBES_LIB_FILES="\
src/im_mad/remotes/lib/linux.rb \
src/im_mad/remotes/lib/firecracker.rb\
src/im_mad/remotes/lib/numa_common.rb \
src/im_mad/remotes/lib/probe_db.rb"
src/im_mad/remotes/lib/probe_db.rb \
src/im_mad/remotes/lib/vcenter.rb \
src/im_mad/remotes/lib/nsx.rb"
# KVM PROBES
IM_PROBES_KVM_FILES="\
@ -1302,7 +1313,8 @@ IM_PROBES_FIRECRACKER_VM_STATUS_FILES="\
IM_PROBES_ETC_FIRECRACKER_PROBES_FILES="src/im_mad/remotes/lib/probe_db.conf"
IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/poll"
IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/monitord-client.rb \
src/im_mad/remotes/vcenter.d/monitord-client_control.sh"
# EC2 monitord-client
IM_PROBES_EC2_FILES="\
@ -1363,6 +1375,30 @@ IM_PROBES_PACKET_FILES="src/im_mad/remotes/packet.d/poll"
IM_PROBES_VERSION="src/im_mad/remotes/VERSION"
# VCENTER PROBES
# File manifests for the vCenter monitord probes: each variable lists the
# source files that install.sh copies into the matching remotes directory.
IM_PROBES_VCENTER_FILES="\
src/im_mad/remotes/vcenter.d/monitord-client_control.sh \
src/im_mad/remotes/vcenter.d/monitord-client.rb"

IM_PROBES_VCENTER_HOST_BEACON_FILES="\
src/im_mad/remotes/vcenter-probes.d/host/beacon/date.sh \
src/im_mad/remotes/vcenter-probes.d/host/beacon/monitord-client-shepherd.sh"

IM_PROBES_VCENTER_HOST_MONITOR_FILES="\
src/im_mad/remotes/vcenter-probes.d/host/monitor/monitor.rb"

IM_PROBES_VCENTER_HOST_SYSTEM_FILES="\
src/im_mad/remotes/vcenter-probes.d/host/system/nsx.rb \
src/im_mad/remotes/vcenter-probes.d/host/system/vcenter.rb"

# NOTE(review): the two VM manifests below read from 'vcenter-probes.d/vms/...'
# while the install destinations (and the VAR_DIRS entries) use
# 'vcenter-probes.d/vm/...' — confirm the source tree really uses 'vms' here,
# otherwise installation will fail to find these files.
IM_PROBES_VCENTER_VM_MONITOR_FILES="\
src/im_mad/remotes/vcenter-probes.d/vms/monitor/monitor.rb"

IM_PROBES_VCENTER_VM_STATUS_FILES="\
src/im_mad/remotes/vcenter-probes.d/vms/status/state.rb"
#-------------------------------------------------------------------------------
# Auth Manager drivers to be installed under $REMOTES_LOCATION/auth
#-------------------------------------------------------------------------------

View File

@ -86,6 +86,14 @@ AllCops:
- src/im_mad/remotes/ec2.d/poll
- src/im_mad/remotes/one.d/poll
- src/im_mad/remotes/az.d/poll
- src/im_mad/remotes/lib/nsx.rb
- src/im_mad/remotes/lib/vcenter.rb
- src/im_mad/remotes/vcenter-probes.d/host/monitor/monitor.rb
- src/im_mad/remotes/vcenter-probes.d/host/system/nsx.rb
- src/im_mad/remotes/vcenter-probes.d/host/system/vcenter.rb
- src/im_mad/remotes/vcenter-probes.d/vms/monitor/monitor.rb
- src/im_mad/remotes/vcenter-probes.d/vms/status/state.rb
- src/im_mad/remotes/vcenter.d/monitord-client.rb
- src/vnm_mad/remotes/ovswitch/post
- src/vnm_mad/remotes/ovswitch/clean
- src/vnm_mad/remotes/ovswitch/pre

View File

@ -0,0 +1,188 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# Standard OpenNebula Ruby environment setup: resolve library and gem
# locations for both self-contained (ONE_LOCATION set) and system-wide
# installations. Guards with defined? so re-requiring does not redefine.
ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)

if !ONE_LOCATION
    RUBY_LIB_LOCATION = '/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION)
    GEMS_LOCATION = '/usr/share/one/gems' unless defined?(GEMS_LOCATION)
else
    RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' \
        unless defined?(RUBY_LIB_LOCATION)
    GEMS_LOCATION = ONE_LOCATION + '/share/gems' \
        unless defined?(GEMS_LOCATION)
end

# Prefer the gems bundled with OpenNebula when present
if File.directory?(GEMS_LOCATION)
    Gem.use_paths(GEMS_LOCATION)
end

$LOAD_PATH << RUBY_LIB_LOCATION

require 'vcenter_driver'
require 'nsx_driver'
# Gather NSX cluster monitor info
# Gather NSX cluster monitor info
#
# NOTE(review): nsx_info, tz_info and nsx_ready? all read @vi_client, but this
# class never assigns it (initialize only receives host_id). As written those
# methods would raise NoMethodError on nil @vi_client — confirm how the
# vCenter client is meant to be injected into this class.
class NsxMonitor

    # String of NSX_STATUS template attributes, filled in by nsx_ready?
    attr_accessor :nsx_status

    # @param host_id [String, Integer] OpenNebula Host ID of the vCenter
    #   cluster host; used to build the NSX client from the host template
    def initialize(host_id)
        @nsx_client = nil
        @nsx_status = ''

        # nsx_ready? also sets @one_item and @nsx_status as side effects
        return unless nsx_ready?

        @nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
    end

    # Aggregate NSX monitor information: manager attributes + transport zones
    # @return [String] template-formatted monitoring attributes
    def monitor
        # NSX info
        str_info = ''
        str_info << nsx_info
        str_info << tz_info
    end

    # Detect the NSX manager registered in vCenter (NSX-V or NSX-T extension)
    # @return [String] NSX_MANAGER/NSX_TYPE/NSX_VERSION/NSX_LABEL attributes,
    #   or '' when no NSX extension is registered in vCenter
    def nsx_info
        nsx_info = ''
        nsx_obj = {}
        # In the future add more than one nsx manager
        extension_list = @vi_client.vim.serviceContent
                         .extensionManager.extensionList
        extension_list.each do |ext_list|
            if ext_list.key == NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
                nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
                url_full = ext_list.client[0].url
                url_split = url_full.split('/')
                # protocol = "https://"
                protocol = url_split[0] + '//'
                # ip_port = ip:port
                ip_port = url_split[2]
                nsx_obj['url'] = protocol + ip_port
                nsx_obj['version'] = ext_list.version
                nsx_obj['label'] = ext_list.description.label
            elsif ext_list.key == NSXDriver::NSXConstants::NSXT_EXTENSION_LIST
                nsx_obj['type'] = NSXDriver::NSXConstants::NSXT
                nsx_obj['url'] = ext_list.server[0].url
                nsx_obj['version'] = ext_list.version
                nsx_obj['label'] = ext_list.description.label
            else
                next
            end
        end
        unless nsx_obj.empty?
            nsx_info << "NSX_MANAGER=\"#{nsx_obj['url']}\"\n"
            nsx_info << "NSX_TYPE=\"#{nsx_obj['type']}\"\n"
            nsx_info << "NSX_VERSION=\"#{nsx_obj['version']}\"\n"
            nsx_info << "NSX_LABEL=\"#{nsx_obj['label']}\"\n"
        end
        nsx_info
    end

    # List NSX transport zones as an OpenNebula vector attribute
    # @return [String] NSX_TRANSPORT_ZONES = [name="id", ...]
    # @raise [RuntimeError] when TEMPLATE/NSX_TYPE is neither NSX-V nor NSX-T
    def tz_info
        tz_info = 'NSX_TRANSPORT_ZONES = ['
        tz_object = NSXDriver::TransportZone.new_child(@nsx_client)
        # NSX request to get Transport Zones
        if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
            tzs = tz_object.tzs
            tzs.each do |tz|
                tz_info << tz.xpath('name').text << '="'
                tz_info << tz.xpath('objectId').text << '",'
            end
            # Drop the trailing comma after the last zone
            tz_info.chomp!(',')
        elsif @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXT
            r = tz_object.tzs
            r['results'].each do |tz|
                tz_info << tz['display_name'] << '="'
                tz_info << tz['id'] << '",'
            end
            tz_info.chomp!(',')
        else
            raise "Unknown PortGroup type #{@one_item['TEMPLATE/NSX_TYPE']}"
        end
        tz_info << ']'
    end

    # Validate the NSX configuration in the host template and try a test
    # request against the NSX manager. Sets @nsx_status with the outcome
    # and memoizes the OpenNebula host in @one_item.
    # @return [Boolean] true when NSX is configured and reachable
    def nsx_ready?
        @one_item = VCenterDriver::VIHelper
                    .one_item(OpenNebula::Host,
                              @vi_client.instance_variable_get(:@host_id).to_i)
        # Check if NSX_MANAGER is into the host template
        if [nil, ''].include?(@one_item['TEMPLATE/NSX_MANAGER'])
            @nsx_status = "NSX_STATUS = \"Missing NSX_MANAGER\"\n"
            return false
        end
        # Check if NSX_USER is into the host template
        if [nil, ''].include?(@one_item['TEMPLATE/NSX_USER'])
            @nsx_status = "NSX_STATUS = \"Missing NSX_USER\"\n"
            return false
        end
        # Check if NSX_PASSWORD is into the host template
        if [nil, ''].include?(@one_item['TEMPLATE/NSX_PASSWORD'])
            @nsx_status = "NSX_STATUS = \"Missing NSX_PASSWORD\"\n"
            return false
        end
        # Check if NSX_TYPE is into the host template
        if [nil, ''].include?(@one_item['TEMPLATE/NSX_TYPE'])
            @nsx_status = "NSX_STATUS = \"Missing NSX_TYPE\"\n"
            return false
        end
        # Try a connection as part of NSX_STATUS
        nsx_client = NSXDriver::NSXClient
                     .new_from_id(@vi_client.instance_variable_get(:@host_id)
                     .to_i)
        if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
            # URL to test a connection
            url = '/api/2.0/vdn/scopes'
            begin
                if nsx_client.get(url)
                    @nsx_status = "NSX_STATUS = OK\n"
                    true
                else
                    @nsx_status = "NSX_STATUS = \"Response code incorrect\"\n"
                    false
                end
            rescue StandardError => e
                @nsx_status = 'NSX_STATUS = "Error connecting to ' \
                              "NSX_MANAGER: #{e.message}\"\n"
                false
            end
        elsif @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXT
            # URL to test a connection
            url = '/api/v1/transport-zones'
            begin
                if nsx_client.get(url)
                    @nsx_status = "NSX_STATUS = OK\n"
                    true
                else
                    @nsx_status = "NSX_STATUS = \"Response code incorrect\"\n"
                    false
                end
            rescue StandardError => e
                @nsx_status = 'NSX_STATUS = "Error connecting to '\
                              "NSX_MANAGER: #{e.message}\"\n"
                false
            end
        end
        # NOTE(review): an unrecognized NSX_TYPE falls through and returns nil
        # (falsy) — confirm whether that silent failure is intended.
    end

end

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,19 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

# Beacon probe: print the current UNIX timestamp (epoch seconds).
# Installed under host/beacon — presumably used by monitord as a host
# heartbeat; confirm against the monitord consumer.
date +%s

View File

@ -0,0 +1,29 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

# Shepherd probe: kill any stray "monitord-client.rb vcenter" process whose
# PID differs from the one recorded in the PID file, so only the client
# started by the current control script survives.
(
    # No PID file means no client is supposed to be running: nothing to do
    [ -f /tmp/one-monitord-client.pid ] || exit 0

    running_pid=$(cat /tmp/one-monitord-client.pid)

    # All vcenter monitord-client processes except the recorded one
    pids=$(ps axuwww | grep -e "/monitord-client.rb vcenter" | grep -v grep | awk '{ print $2 }' | grep -v "^${running_pid}$")

    if [ -n "$pids" ]; then
        # -6 = SIGABRT — presumably chosen so the stray client dumps state;
        # confirm the intent vs a plain SIGTERM
        kill -6 $pids
    fi
) > /dev/null

View File

@ -0,0 +1,19 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

# Placeholder probe: appears intentionally empty — exit successfully without
# emitting any monitoring data so the probe directory run does not fail.
exit 0

View File

@ -0,0 +1,53 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

# System probe: report NSX status and transport zones for the OpenNebula
# host whose ID is passed as the second argument (ARGV[1]).
ONE_LOCATION = ENV['ONE_LOCATION'] if !defined?(ONE_LOCATION)

if !ONE_LOCATION
    RUBY_LIB_LOCATION = '/usr/lib/one/ruby' if !defined?(RUBY_LIB_LOCATION)
    GEMS_LOCATION = '/usr/share/one/gems' if !defined?(GEMS_LOCATION)
else
    RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' if !defined?(RUBY_LIB_LOCATION)
    GEMS_LOCATION = ONE_LOCATION + '/share/gems' if !defined?(GEMS_LOCATION)
end

if File.directory?(GEMS_LOCATION)
    Gem.use_paths(GEMS_LOCATION)
end

$LOAD_PATH << RUBY_LIB_LOCATION

require 'nsx_driver'
require_relative '../../../lib/nsx.rb'

host_id = ARGV[1]

# Vcenter and NSX connection
begin
    nm = NsxMonitor.new(host_id)
    puts nm.nsx_status
    # Get Transport Zone info and NSX_STATUS from NSX Manager
    # NOTE(review): nsx_ready? already ran inside NsxMonitor#initialize; this
    # second call repeats the NSX connection test — confirm it is intended.
    if nm.nsx_ready?
        puts nm.tz_info
    end
rescue StandardError => e
    STDERR.puts "IM poll for NSX cluster #{host_id} failed due to "\
                "\"#{e.message}\"\n#{e.backtrace}"
end

View File

@ -0,0 +1,61 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

require_relative '../../../lib/vcenter.rb'

# Host monitor probe: print cluster, host-system, customization, NSX and VM
# monitoring attributes for the vCenter cluster backing OpenNebula host
# ARGV[1]. Output is consumed by monitord through monitord-client.rb.
host_id = ARGV[1]

begin
    # Vcenter connection
    vi_client = VCenterDriver::VIClient.new_from_host(host_id)

    # Cluster Monitoring
    cm = ClusterMonitor.new(vi_client, host_id)
    puts cm.monitor_cluster
    puts cm.monitor_host_systems

    # Retrieve customizations
    begin
        puts cm.monitor_customizations
    rescue StandardError
        # Do not break monitoring on customization error
        puts 'ERROR="Customizations could not be retrieved,' \
             'please check permissions"'
    end

    # Get NSX info detected from vCenter Server
    puts cm.nsx_info

    # Print VM monitor info
    vm_monitor_info, last_perf_poll = cm.monitor_vms(host_id)
    if !vm_monitor_info.empty?
        puts "VM_POLL=YES"
        puts vm_monitor_info
    end

    # Print last VM poll for perfmanager tracking
    puts "VCENTER_LAST_PERF_POLL=" << last_perf_poll << "\n" if last_perf_poll

    # Datastore Monitoring
    # NOTE(review): dm is created but its output is never printed — confirm
    # whether DatastoreMonitor results should be reported by this probe.
    dm = DatastoreMonitor.new(vi_client, host_id)
rescue StandardError => e
    STDERR.puts "IM poll for vcenter cluster #{host_id} failed due to "\
                "\"#{e.message}\"\n#{e.backtrace}"
    exit(-1)
ensure
    # BUG FIX: the original closed @vi_client, an instance variable that is
    # never assigned in this script (always nil), so the vCenter session
    # created above leaked. Close the local vi_client instead; it is nil if
    # new_from_host raised, and the guard handles that.
    vi_client.close_connection if vi_client
end

View File

@ -0,0 +1,17 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
# -------------------------------------------------------------------------- #

# Placeholder probe: appears intentionally empty — exit successfully without
# emitting any monitoring data so the probe directory run does not fail.
exit 0

View File

@ -0,0 +1,17 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
# -------------------------------------------------------------------------- #

# Placeholder probe: appears intentionally empty — exit successfully without
# emitting any monitoring data so the probe directory run does not fail.
exit 0

View File

@ -0,0 +1,313 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
require 'socket'
require 'base64'
require 'resolv'
require 'ipaddr'
require 'zlib'
require 'yaml'
require 'open3'
require 'openssl'
require 'rexml/document'
require_relative '../lib/probe_db'
# This class represents a monitord client. It handles udp and tcp connections
# and send update messages to monitord
#
# This class represents a monitord client. It handles udp and tcp connections
# and sends update messages to monitord.
#
# For each entry in MESSAGE_TYPES two instance methods are generated at load
# time, e.g. monitor_vm_udp(rc, payload) and monitor_vm_tcp(rc, payload):
#   *_udp sends over the persistent UDP socket created in initialize;
#   *_tcp opens a fresh TCP connection per message and closes it.
class MonitorClient

    # Defined in src/monitor/include/MonitorDriverMessages.h
    MESSAGE_TYPES = %w[MONITOR_VM MONITOR_HOST SYSTEM_HOST BEACON_HOST STATE_VM
                       START_MONITOR STOP_MONITOR].freeze

    # Maps the probe success flag (rc) to the wire-protocol status token
    MESSAGE_STATUS = { true => 'SUCCESS', false => 'FAILURE' }.freeze

    MESSAGE_TYPES.each do |mt|
        define_method("#{mt}_udp".downcase.to_sym) do |rc, payload|
            msg = "#{mt} #{MESSAGE_STATUS[rc]} #{@hostid} #{pack(payload)}"
            @socket_udp.send(msg, 0)
        end
    end

    MESSAGE_TYPES.each do |mt|
        define_method("#{mt}_tcp".downcase.to_sym) do |rc, payload|
            msg = "#{mt} #{MESSAGE_STATUS[rc]} #{@hostid} #{pack(payload)}"

            socket_tcp = TCPSocket.new(@host, @port)
            socket_tcp.send(msg, 0)
            socket_tcp.close
        end
    end

    # Options to create a monitord client
    # :host [:String] to send the messages to
    # :port [:String] of monitord server
    # :hostid [:String] OpenNebula ID of this host
    # :pubkey [OpenSSL::PKey::RSA, nil] public key to encrypt messages,
    #   or nil to send them unencrypted
    def initialize(server, port, id, opt = {})
        # BUG FIX: the default used to be '' (empty string). Empty strings are
        # truthy in Ruby, so pack() would call ''.n and raise NoMethodError
        # whenever the :pubkey option was omitted. nil cleanly disables
        # encryption and preserves behavior for callers passing a real key.
        @opts = {
            :pubkey => nil
        }.merge opt

        addr = Socket.getaddrinfo(server, port)[0]

        # getaddrinfo entry layout: [family, port, hostname, ip, ...]
        @family = addr[0]
        @host = addr[3]
        @port = addr[1]

        @socket_udp = UDPSocket.new(@family)
        @socket_udp.connect(@host, @port)

        @pubkey = @opts[:pubkey]

        @hostid = id
    end

    private

    # Formats message payload to send over the wire: RSA-encrypt in key-sized
    # chunks when a pubkey is set, then deflate and Base64-encode.
    # @param data [String] raw probe output
    # @return [String] base64 wire payload
    def pack(data)
        if @pubkey
            # 11 bytes is the PKCS#1 v1.5 padding overhead of public_encrypt
            block_size = @pubkey.n.num_bytes - 11

            edata = ''
            index = 0

            loop do
                break if index >= data.length

                edata << @pubkey.public_encrypt(data[index, block_size])
                index += block_size
            end

            data = edata
        end

        zdata = Zlib::Deflate.deflate(data, Zlib::BEST_COMPRESSION)
        data64 = Base64.strict_encode64(zdata)

        data64
    end
end
# This class wraps the execution of a probe directory and sends data to
# monitord (optionally)
#
# This class wraps the execution of a probe directory and sends data to
# monitord (optionally)
#
class ProbeRunner

    # @param hyperv [String] hypervisor name; probes are looked up in
    #   "<dir of this script>/../<hyperv>-probes.d/<path>"
    # @param path   [String] probe subdirectory, e.g. 'host/monitor'
    # @param stdin  [String] data written to every probe's standard input
    def initialize(hyperv, path, stdin)
        @path = File.join(File.dirname(__FILE__), '..', "#{hyperv}-probes.d",
                          path)
        @stdin = stdin
    end

    # Run the probes once
    # @return[Array] rc, data. rc 0 for success and data is the output of
    # probes. If rc is -1 it signal failure and data is the error message of
    # the failing probe
    def run_probes
        data = ''
        dpro = Dir.new(@path)

        dpro.each do |probe|
            probe_path = File.join(@path, probe)

            # Skip directory entries ('.', '..') and non-executable files
            next unless File.file?(probe_path)
            next unless File.executable?(probe_path)

            # Forward this process' own arguments to every probe
            cmd = "#{probe_path} #{ARGV.join(' ')}"

            o_, e_, s_ = Open3.popen3(cmd) do |i, o, e, t|
                # Drain stdout/stderr in threads to avoid pipe-buffer deadlock
                out_reader = Thread.new { o.read }
                err_reader = Thread.new { e.read }

                # The probe may exit without reading stdin; ignore EPIPE
                begin
                    i.write @stdin
                rescue Errno::EPIPE
                end

                i.close

                out = out_reader.value
                err = err_reader.value
                rc = t.value

                # Defensive reap; ECHILD means the child was already collected
                begin
                    Process.waitpid(rc.pid)
                rescue Errno::ECHILD
                end

                [out, err, rc]
            end

            data += o_

            # Abort on the first failing probe, reporting its stderr
            return [-1, "Error executing #{probe}: #{e_}"] if s_.exitstatus != 0
        end

        [0, data]
    end

    # Singleton call for run_probes method
    def self.run_once(hyperv, path, stdin)
        runner = ProbeRunner.new(hyperv, path, stdin)
        runner.run_probes
    end

    # Executes the probes in the directory in a loop. The block is called after
    # each execution to optionally send the data to monitord
    def self.monitor_loop(hyperv, path, period, stdin, &block)
        # Failure retries, simple exponential backoff
        sfail = [1, 1, 1, 2, 4, 8, 8, 16, 32, 64]
        nfail = 0

        runner = ProbeRunner.new(hyperv, path, stdin)

        loop do
            sleep_time = 0

            ts = Time.now

            rc, data = runner.run_probes

            begin
                block.call(rc, data)

                # Keep a fixed cadence: sleep only the remainder of the period
                run_time = (Time.now - ts).to_i
                sleep_time = (period.to_i - run_time) if period.to_i > run_time

                nfail = 0
            rescue StandardError
                # Delivery failed: back off using sfail, capped at its last entry
                sleep_time = sfail[nfail]

                nfail += 1 if nfail < sfail.length - 1
            end

            sleep(sleep_time) if sleep_time > 0
        end
    end
end
#-------------------------------------------------------------------------------
# Configuration (from monitord)
#-------------------------------------------------------------------------------
# monitord pipes its configuration to this client as base64-encoded XML
xml_txt = Base64.decode64(STDIN.read)

begin
    config = REXML::Document.new(xml_txt).root

    host = config.elements['UDP_LISTENER/MONITOR_ADDRESS'].text.to_s
    port = config.elements['UDP_LISTENER/PORT'].text.to_s
    pubkey = config.elements['UDP_LISTENER/PUBKEY'].text.to_s
    hostid = config.elements['HOST_ID'].text.to_s

    # First word of the first argument is the hypervisor name (e.g. 'vcenter')
    hyperv = ARGV[0].split(' ')[0]

    # Probe sets: MonitorClient dispatch method => probe period + directory.
    # The symbol keys double as the client method invoked via send() below.
    probes = {
        :beacon_host_udp => {
            :period => config.elements['PROBES_PERIOD/BEACON_HOST'].text.to_s,
            :path => 'host/beacon'
        },

        :system_host_udp => {
            :period => config.elements['PROBES_PERIOD/SYSTEM_HOST'].text.to_s,
            :path => 'host/system'
        },

        :monitor_host_udp => {
            :period => config.elements['PROBES_PERIOD/MONITOR_HOST'].text.to_s,
            :path => 'host/monitor'
        },

        :state_vm_tcp => {
            :period => config.elements['PROBES_PERIOD/STATE_VM'].text.to_s,
            :path => 'vm/status'
        },

        :monitor_vm_udp => {
            :period => config.elements['PROBES_PERIOD/MONITOR_VM'].text.to_s,
            :path => 'vm/monitor'
        }
    }

    # Reconstruct the RSA public key from its single-line XML representation
    if !pubkey.empty?
        exp = /(-+BEGIN RSA PUBLIC KEY-+)([^-]*)(-+END RSA PUBLIC KEY-+)/
        m = pubkey.match(exp)

        if !m
            puts 'Public key not in PEM format'
            exit(-1)
        end

        # The key body arrives space-separated; restore the PEM line breaks
        pktxt = m[2].strip.tr(' ', "\n")

        pubkey = OpenSSL::PKey::RSA.new "-----BEGIN RSA PUBLIC KEY-----\n" \
                                        "#{pktxt}\n-----END RSA PUBLIC KEY-----"
    else
        pubkey = nil
    end
rescue StandardError => e
    puts e.inspect
    exit(-1)
end

#-------------------------------------------------------------------------------
# Run configuration probes and send information to monitord
#-------------------------------------------------------------------------------
client = MonitorClient.new(host, port, hostid, :pubkey => pubkey)

# Run the system probes once synchronously; their output is echoed back to
# monitord on stdout as the host's initial system information
rc, dt = ProbeRunner.run_once(hyperv, probes[:system_host_udp][:path], xml_txt)

puts dt
STDOUT.flush

exit(-1) if rc == -1

#-------------------------------------------------------------------------------
# Start monitor threads and shepherd
#-------------------------------------------------------------------------------
# Detach from the controlling session and swap stdio for a pipe so the
# long-running probe threads no longer write to monitord's pipe directly
Process.setsid

STDIN.close

_rd, wr = IO.pipe
STDOUT.reopen(wr)
STDERR.reopen(wr)

threads = []

# One thread per probe set; each loop delivers its output through the
# matching MonitorClient method (the probes hash key), e.g. :monitor_vm_udp
probes.each do |msg_type, conf|
    threads << Thread.new do
        ProbeRunner.monitor_loop(hyperv, conf[:path], conf[:period], xml_txt) do |rc, da|
            da.strip!
            next if da.empty?

            client.send(msg_type, rc == 0, da)
        end
    end
end

threads.each {|thr| thr.join }

View File

@ -0,0 +1,73 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

#--------------------------------------------------------------------------- #
# Process Arguments
#--------------------------------------------------------------------------- #
ACTION="start"

if [ "$1" = "stop" ]; then
    shift
    ACTION="stop"
fi

# Remaining arguments are forwarded verbatim to the ruby client
ARGV=$*

# Configuration from monitord (base64 XML) arrives on stdin; it is re-piped
# to the ruby client when it starts
STDIN=$(cat -)

# Directory that contains this file
# BUG FIX: this used $(pwd), which is the caller's working directory, not the
# script's location — CLIENT then pointed to the wrong path unless the caller
# happened to cd here first.
DIR=$(dirname "$0")

# Basename (with the _control.sh suffix stripped)
BASENAME=$(basename "$0" _control.sh)

# Monitord client (Ruby)
CLIENT="$DIR/${BASENAME}.rb"

# Monitord client PID file
CLIENT_PID_FILE=/tmp/one-monitord-client.pid

# Launch the client in the background, feeding it the saved stdin, and
# record its PID
function start_client() {
    echo "$STDIN" | /usr/bin/env ruby "$CLIENT" $ARGV &
    echo $! > "$CLIENT_PID_FILE"
}

# Stop any running client (matched by process name) and remove the PID file
function stop_client() {
    local pids=$(ps axuww | grep /monitord-client.rb | grep -v grep | awk '{print $2}')

    if [ -n "$pids" ]; then
        kill -9 $pids
        sleep 5
    fi

    rm -f "$CLIENT_PID_FILE"
}

case $ACTION in
start)
    # Restart semantics: make sure no stale client survives before starting
    stop_client
    start_client
    ;;
stop)
    stop_client
    ;;
esac

View File

@ -1,94 +0,0 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
# NOTE(review): legacy vcenter poll probe — replaced in this commit by the
# monitord-client based probes.
ONE_LOCATION = ENV['ONE_LOCATION'] if !defined?(ONE_LOCATION)

if !ONE_LOCATION
    RUBY_LIB_LOCATION = '/usr/lib/one/ruby' if !defined?(RUBY_LIB_LOCATION)
    GEMS_LOCATION = '/usr/share/one/gems' if !defined?(GEMS_LOCATION)
else
    RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' if !defined?(RUBY_LIB_LOCATION)
    GEMS_LOCATION = ONE_LOCATION + '/share/gems' if !defined?(GEMS_LOCATION)
end

if File.directory?(GEMS_LOCATION)
    Gem.use_paths(GEMS_LOCATION)
end

$LOAD_PATH << RUBY_LIB_LOCATION

require 'vcenter_driver'

host_id = ARGV[4]

# NOTE(review): check_valid is not defined in this file — presumably provided
# by vcenter_driver; it would raise NameError otherwise. TODO confirm.
check_valid host_id, "host_id"

begin
    vi_client = VCenterDriver::VIClient.new_from_host(host_id)

    # Get CCR reference
    client = OpenNebula::Client.new
    host = OpenNebula::Host.new_with_id(host_id, client)
    rc = host.info

    if OpenNebula::is_error? rc
        STDERR.puts rc.message
        exit 1
    end

    ccr_ref = host["TEMPLATE/VCENTER_CCR_REF"]

    # Get vCenter Cluster
    cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client)

    # Print monitoring info
    cluster_info = cluster.monitor
    puts cluster_info

    raise "vCenter cluster health is on red, check issues on vCenter" if cluster_info.include?("STATUS=red")

    puts cluster.monitor_host_systems

    # Print VM monitor info
    vm_monitor_info, last_perf_poll = cluster.monitor_vms(host_id)
    if !vm_monitor_info.empty?
        puts "VM_POLL=YES"
        puts vm_monitor_info
    end

    # Print last VM poll for perfmanager tracking
    puts "VCENTER_LAST_PERF_POLL=" << last_perf_poll << "\n" if last_perf_poll

    # Retrieve customizations
    begin
        puts cluster.monitor_customizations
    rescue
        # Do not break monitoring on customization error
        puts "ERROR=\"Customizations couldn't be retrieved, please check permissions\""
    end

    # Print Datastore information
    dc = cluster.get_dc
    ds_folder = dc.datastore_folder
    ds_folder.fetch!
    puts ds_folder.monitor
# NOTE(review): rescuing Exception also swallows SignalException/SystemExit;
# StandardError would be safer (documentation note only, code unchanged).
rescue Exception => e
    STDERR.puts "IM poll for vcenter cluster #{host_id} failed due to "\
                "\"#{e.message}\"\n#{e.backtrace}"
    exit(-1)
ensure
    vi_client.close_connection if vi_client
end

View File

@ -167,11 +167,11 @@ IM_MAD = [
# -t number of threads, i.e. number of hosts monitored at the same time
# -w Timeout in seconds to execute external commands (default unlimited)
#-------------------------------------------------------------------------------
# IM_MAD = [
# NAME = "vcenter",
# SUNSTONE_NAME = "VMWare vCenter",
# EXECUTABLE = "one_im_sh",
# ARGUMENTS = "-c -t 15 -r 0 vcenter" ]
IM_MAD = [
NAME = "vcenter",
SUNSTONE_NAME = "VMWare vCenter",
EXECUTABLE = "one_im_sh",
ARGUMENTS = "-c -t 15 -r 0 vcenter" ]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------

View File

@ -43,14 +43,7 @@ class DatastoreFolder
@items[item_name.to_sym] = Datastore.new(item)
end
end
end
def monitor
monitor = ''
@items.values.each do |ds|
monitor << "VCENTER_DS_REF=\"#{ds['_ref']}\"\n"
end
monitor
@items
end
########################################################################

View File

@ -97,314 +97,6 @@ class ClusterComputeResource
rp_array
end
def get_nsx
nsx_info = ''
nsx_obj = {}
# In the future add more than one nsx manager
extension_list = []
extension_list = @vi_client.vim.serviceContent.extensionManager.extensionList
extension_list.each do |ext_list|
if ext_list.key == NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
urlFull = ext_list.client[0].url
urlSplit = urlFull.split("/")
# protocol = "https://"
protocol = urlSplit[0] + "//"
# ipPort = ip:port
ipPort = urlSplit[2]
nsx_obj['url'] = protocol + ipPort
nsx_obj['version'] = ext_list.version
nsx_obj['label'] = ext_list.description.label
elsif ext_list.key == NSXDriver::NSXConstants::NSXT_EXTENSION_LIST
nsx_obj['type'] = NSXDriver::NSXConstants::NSXT
nsx_obj['url'] = ext_list.server[0].url
nsx_obj['version'] = ext_list.version
nsx_obj['label'] = ext_list.description.label
else
next
end
end
unless nsx_obj.empty?
nsx_info << "NSX_MANAGER=\"#{nsx_obj['url']}\"\n"
nsx_info << "NSX_TYPE=\"#{nsx_obj['type']}\"\n"
nsx_info << "NSX_VERSION=\"#{nsx_obj['version']}\"\n"
nsx_info << "NSX_LABEL=\"#{nsx_obj['label']}\"\n"
end
nsx_info
end
# Checks whether this host is ready to talk to its NSX Manager.
#
# Validates that NSX_MANAGER, NSX_USER, NSX_PASSWORD and NSX_TYPE are
# present in the OpenNebula host template and then performs a test
# request against the NSX API. On any failure @nsx_status is set to a
# template-formatted NSX_STATUS attribute explaining the reason.
#
# @return [Boolean] true when the NSX connection works
def nsx_ready?
    @one_item = VCenterDriver::VIHelper
                .one_item(OpenNebula::Host,
                          @vi_client.instance_variable_get(:@host_id).to_i)

    # All four NSX attributes must be present in the host template
    %w[NSX_MANAGER NSX_USER NSX_PASSWORD NSX_TYPE].each do |attr|
        next unless [nil, ''].include?(@one_item["TEMPLATE/#{attr}"])

        @nsx_status = "NSX_STATUS = \"Missing #{attr}\"\n"
        return false
    end

    # Pick the connectivity-test endpoint for the NSX flavour.
    # Previously an unrecognised NSX_TYPE silently returned nil; now it
    # reports a status and returns false.
    case @one_item['TEMPLATE/NSX_TYPE']
    when NSXDriver::NSXConstants::NSXV
        url = '/api/2.0/vdn/scopes'
    when NSXDriver::NSXConstants::NSXT
        url = '/api/v1/transport-zones'
    else
        @nsx_status = "NSX_STATUS = \"Unknown NSX_TYPE\"\n"
        return false
    end

    # Try a connection as part of NSX_STATUS
    nsx_client = NSXDriver::NSXClient
                 .new_from_id(@vi_client.instance_variable_get(:@host_id).to_i)
    begin
        return true if nsx_client.get(url)

        @nsx_status = "NSX_STATUS = \"Response code incorrect\"\n"
        false
    rescue StandardError
        @nsx_status = 'NSX_STATUS = "Error connecting to ' \
                      "NSX_MANAGER\"\n"
        false
    end
end
# Returns Transport Zone information for the host's NSX Manager as an
# OpenNebula template attribute (NSX_TRANSPORT_ZONES = [name="id",...]).
# When the NSX setup is not ready the NSX_STATUS message produced by
# nsx_ready? is returned instead.
def get_tz
@nsx_status = ''
if !nsx_ready?
tz_info = @nsx_status
else
# NOTE(review): other status values are emitted quoted
# (NSX_STATUS = "...") while OK is not — confirm this is intended.
tz_info = "NSX_STATUS = OK\n"
tz_info << 'NSX_TRANSPORT_ZONES = ['
nsx_client = NSXDriver::NSXClient
.new_from_id(@vi_client.instance_variable_get(:@host_id).to_i)
tz_object = NSXDriver::TransportZone.new_child(nsx_client)
# NSX request to get Transport Zones
if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
# NSX-V answers XML: one <name>/<objectId> pair per zone
tzs = tz_object.tzs
tzs.each do |tz|
tz_info << tz.xpath('name').text << '="'
tz_info << tz.xpath('objectId').text << '",'
end
# Drop the trailing comma left by the loop
tz_info.chomp!(',')
elsif @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXT
# NSX-T answers JSON: zones live under the 'results' key
r = tz_object.tzs
r['results'].each do |tz|
tz_info << tz['display_name'] << '="'
tz_info << tz['id'] << '",'
end
tz_info.chomp!(',')
else
raise "Unknown Port Group type #{@one_item['TEMPLATE/NSX_TYPE']}"
end
tz_info << ']'
# Early return; the trailing tz_info below only serves the
# not-ready branch
return tz_info
end
tz_info
end
# Monitors this vCenter cluster.
#
# Collects summary properties in a single vCenter round trip and
# renders them as OpenNebula host template attributes: capacity
# (CPU/memory), host counts, overall status, DRS/HA flags, NSX
# information and per-resource-pool data.
#
# @return [String] host monitoring template attributes
def monitor
# Single property collection; the variable order below must match
# the argument order of the collect() call.
total_cpu,
num_cpu_cores,
effective_cpu,
total_memory,
effective_mem,
num_hosts,
num_eff_hosts,
overall_status,
drs_enabled,
ha_enabled= @item.collect("summary.totalCpu",
"summary.numCpuCores",
"summary.effectiveCpu",
"summary.totalMemory",
"summary.effectiveMemory",
"summary.numHosts",
"summary.numEffectiveHosts",
"summary.overallStatus",
"configuration.drsConfig.enabled",
"configuration.dasConfig.enabled"
)
# CPU capacity is expressed in "percent of one core" units
mhz_core = total_cpu.to_f / num_cpu_cores.to_f
eff_core = effective_cpu.to_f / mhz_core
free_cpu = sprintf('%.2f', eff_core * 100).to_f
total_cpu = num_cpu_cores.to_f * 100
used_cpu = sprintf('%.2f', total_cpu - free_cpu).to_f
# NOTE(review): totalMemory is divided down while effectiveMemory is
# multiplied up — presumably bytes->KB and MB->KB respectively;
# confirm both end up in the same unit before trusting USEDMEMORY.
total_mem = total_memory.to_i / 1024
free_mem = effective_mem.to_i * 1024
str_info = ""
# Get cluster name for informative purposes (replace space with _ if any)
str_info << "VCENTER_NAME=" << self['name'].tr(" ", "_") << "\n"
# System
str_info << "HYPERVISOR=vcenter\n"
str_info << "TOTALHOST=" << num_hosts.to_s << "\n"
str_info << "AVAILHOST=" << num_eff_hosts.to_s << "\n"
str_info << "STATUS=" << overall_status << "\n"
# CPU
str_info << "CPUSPEED=" << mhz_core.to_s << "\n"
str_info << "TOTALCPU=" << total_cpu.to_s << "\n"
str_info << "USEDCPU=" << used_cpu.to_s << "\n"
str_info << "FREECPU=" << free_cpu.to_s << "\n"
# Memory
str_info << "TOTALMEMORY=" << total_mem.to_s << "\n"
str_info << "FREEMEMORY=" << free_mem.to_s << "\n"
str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s << "\n"
# DRS enabled
str_info << "VCENTER_DRS=" << drs_enabled.to_s << "\n"
# HA enabled
str_info << "VCENTER_HA=" << ha_enabled.to_s << "\n"
# NSX info
str_info << get_nsx
str_info << get_tz
# Last << returns str_info, which is the method's return value
str_info << monitor_resource_pools(mhz_core)
end
# Returns monitoring information for every resource pool under this
# cluster as VCENTER_RESOURCE_POOL_INFO template sections.
#
# @param mhz_core [Float] MHz per physical core, used to express the
#        CPU reservation as a number of cores
#
# @return [String] template sections ('' when there are no RPs)
def monitor_resource_pools(mhz_core)
@rp_list = get_resource_pool_list
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for RPs inside this cluster
type: ['ResourcePool'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"config.cpuAllocation.expandableReservation",
"config.cpuAllocation.limit",
"config.cpuAllocation.reservation",
"config.cpuAllocation.shares.level",
"config.cpuAllocation.shares.shares",
"config.memoryAllocation.expandableReservation",
"config.memoryAllocation.limit",
"config.memoryAllocation.reservation",
"config.memoryAllocation.shares.level",
"config.memoryAllocation.shares.shares"
]
# Fetch all monitored properties for every RP in one API call
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'ResourcePool', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
rps = {}
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
rps[r.obj._ref] = hashed_properties
end
end
return "" if rps.empty?
rp_info = ""
rps.each{|ref, info|
# CPU
cpu_expandable = info["config.cpuAllocation.expandableReservation"] ? "YES" : "NO"
# NOTE(review): limit is compared against the string "-1" — vSphere
# presumably returns it as a number; confirm UNLIMITED is ever hit.
cpu_limit = info["config.cpuAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.cpuAllocation.limit"]
cpu_reservation = info["config.cpuAllocation.reservation"]
cpu_num = cpu_reservation.to_f / mhz_core
cpu_shares_level = info["config.cpuAllocation.shares.level"]
cpu_shares = info["config.cpuAllocation.shares.shares"]
# MEMORY
mem_expandable = info["config.memoryAllocation.expandableReservation"] ? "YES" : "NO"
mem_limit = info["config.memoryAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.memoryAllocation.limit"]
mem_reservation = info["config.memoryAllocation.reservation"].to_f
mem_shares_level = info["config.memoryAllocation.shares.level"]
mem_shares = info["config.memoryAllocation.shares.shares"]
# NOTE(review): bare `rp_list` is read here while `@rp_list` was
# assigned above — presumably an attr_reader exists; confirm.
# `rescue ""` falls back when the ref is not in the list.
rp_name = rp_list.select { |item| item[:ref] == ref}.first[:name] rescue ""
# The root pool has no name in the list; vCenter calls it Resources
rp_name = "Resources" if rp_name.empty?
rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
rp_info << "NAME=\"#{rp_name}\","
rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
rp_info << "CPU_LIMIT=#{cpu_limit},"
rp_info << "CPU_RESERVATION=#{cpu_reservation},"
rp_info << "CPU_RESERVATION_NUM_CORES=#{cpu_num},"
rp_info << "CPU_SHARES=#{cpu_shares},"
rp_info << "CPU_SHARES_LEVEL=#{cpu_shares_level},"
rp_info << "MEM_EXPANDABLE=#{mem_expandable},"
rp_info << "MEM_LIMIT=#{mem_limit},"
rp_info << "MEM_RESERVATION=#{mem_reservation},"
rp_info << "MEM_SHARES=#{mem_shares},"
rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
rp_info << "]"
}
# Release the server-side container view
view.DestroyView
return rp_info
end
def hostname_to_moref(hostname)
result = filter_hosts
@ -462,258 +154,6 @@ class ClusterComputeResource
return result
end
# Builds monitoring information for every ESX host in the cluster.
#
# Hosts that are not in the "connected" state are skipped. For each
# connected host a HOST=[...] template section is emitted with its
# CPU and memory capacity and usage figures.
#
# @return [String] concatenated HOST=[...] template sections
def monitor_host_systems
    hosts = {}
    filter_hosts.each do |retrieved|
        next unless retrieved.obj.is_a?(RbVmomi::VIM::HostSystem)

        hosts[retrieved.obj._ref] = retrieved.to_hash
    end

    host_info = ''
    hosts.each_value do |info|
        next if info['runtime.connectionState'] != 'connected'

        # CPU capacity expressed in "percent of one core" units
        total_cpu = info['summary.hardware.numCpuCores'] * 100
        used_cpu = (info['summary.quickStats.overallCpuUsage'].to_f /
                    info['summary.hardware.cpuMhz'].to_f) * 100
        used_cpu = format('%.2f', used_cpu).to_f # trim precision
        free_cpu = total_cpu - used_cpu

        # Unit conversion to KB — memorySize presumably bytes,
        # overallMemoryUsage presumably MB (vSphere convention)
        total_memory = info['summary.hardware.memorySize'] / 1024
        used_memory = info['summary.quickStats.overallMemoryUsage'] * 1024
        free_memory = total_memory - used_memory

        host_info << "\nHOST=["
        host_info << 'STATE=on,'
        host_info << 'HOSTNAME="' << info['name'].to_s << '",'
        host_info << 'MODELNAME="' \
                  << info['summary.hardware.cpuModel'].to_s << '",'
        host_info << 'CPUSPEED=' << info['summary.hardware.cpuMhz'].to_s << ','
        host_info << 'MAX_CPU=' << total_cpu.to_s << ','
        host_info << 'USED_CPU=' << used_cpu.to_s << ','
        host_info << 'FREE_CPU=' << free_cpu.to_s << ','
        host_info << 'MAX_MEM=' << total_memory.to_s << ','
        host_info << 'USED_MEM=' << used_memory.to_s << ','
        host_info << 'FREE_MEM=' << free_memory.to_s
        host_info << ']'
    end

    host_info
end
# Monitors every VM (OpenNebula-managed and wild) running in this
# cluster and returns their monitoring data in OpenNebula template
# format, together with the timestamp of the last performance poll.
#
# @param host_id [Integer, String] OpenNebula host id this cluster is
#        imported as; used to read VCENTER_LAST_PERF_POLL
#
# @return [Array(String, String)] VM = [...] monitoring sections and
#         the epoch (as string) of the last perf poll
def monitor_vms(host_id)
vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
cluster_name = self["name"]
cluster_ref = self["_ref"]
# Get info of the host where the VM/template is located
one_host = VCenterDriver::VIHelper.one_item(OpenNebula::Host, host_id)
if !one_host
# NOTE(review): neither `host` nor `e` exist in this scope — these
# three lines raise NameError if reached (also "retieve" typo in
# the message); confirm and fix in the full file.
STDERR.puts "Failed to retieve host with id #{host.id}"
STDERR.puts e.inspect
STDERR.puts e.backtrace
end
host_id = one_host["ID"] if one_host
# Extract CPU info and name for each esx host in cluster
esx_hosts = {}
@item.host.each do |esx_host|
info = {}
info[:name] = esx_host.name
info[:cpu] = esx_host.summary.hardware.cpuMhz.to_f
esx_hosts[esx_host._ref] = info
end
# Track already-reported VM ids to avoid duplicate sections
@monitored_vms = Set.new
str_info = ""
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for VMs inside this cluster
type: ['VirtualMachine'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"name", #VM name
"config.template", #To filter out templates
"summary.runtime.powerState", #VM power state
"summary.quickStats.hostMemoryUsage", #Memory usage
"summary.quickStats.overallCpuUsage", #CPU used by VM
"runtime.host", #ESX host
"resourcePool", #RP
"guest.guestFullName",
"guest.net", #IP addresses as seen by guest tools,
"guest.guestState",
"guest.toolsVersion",
"guest.toolsRunningStatus",
"guest.toolsVersionStatus2", #IP addresses as seen by guest tools,
"config.extraConfig", #VM extraconfig info e.g opennebula.vm.running
"config.hardware.numCPU",
"config.hardware.memoryMB",
"config.annotation",
"datastore"
]
# Fetch all properties for every VM in a single API round trip
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'VirtualMachine', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
vms = {}
vm_objects = []
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
#Only take care of VMs, not templates
if !hashed_properties["config.template"]
vms[r.obj._ref] = hashed_properties
vm_objects << r.obj
end
end
end
pm = @vi_client.vim.serviceContent.perfManager
stats = {}
max_samples = 9
refresh_rate = 20 #Real time stats takes samples every 20 seconds
# Size the sample window to the time elapsed since the last poll,
# capped at 9 samples when more than an hour has passed
last_mon_time = one_host["TEMPLATE/VCENTER_LAST_PERF_POLL"]
if last_mon_time
interval = (Time.now.to_i - last_mon_time.to_i)
interval = 3601 if interval < 0
samples = (interval / refresh_rate)
samples = 1 if samples == 0
max_samples = interval > 3600 ? 9 : samples
end
if !vm_objects.empty?
# Best-effort: any failure retrieving perf stats leaves stats empty
stats = pm.retrieve_stats(
vm_objects,
['net.transmitted','net.bytesRx','net.bytesTx','net.received',
'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
'virtualDisk.read','virtualDisk.write'],
{max_samples: max_samples}
) rescue {}
end
if !stats.empty?
last_mon_time = Time.now.to_i.to_s
end
get_resource_pool_list if !@rp_list
vm_pool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualMachinePool)
# opts common to all vms
opts = {
pool: vm_pool,
vc_uuid: vc_uuid,
}
vms.each do |vm_ref,info|
vm_info = ""
begin
esx_host = esx_hosts[info["runtime.host"]._ref]
info[:esx_host_name] = esx_host[:name]
info[:esx_host_cpu] = esx_host[:cpu]
info[:cluster_name] = cluster_name
info[:cluster_ref] = cluster_ref
info[:vc_uuid] = vc_uuid
info[:host_id] = host_id
info[:rp_list] = @rp_list
# Check the running flag
running_flag = info["config.extraConfig"].select do |val|
val[:key] == "opennebula.vm.running"
end
if !running_flag.empty? && running_flag.first
running_flag = running_flag[0][:value]
end
# VMs explicitly flagged as not running are skipped entirely
next if running_flag == "no"
# retrieve vcenter driver machine
vm = VCenterDriver::VirtualMachine.new_from_ref(@vi_client, vm_ref, info["name"], opts)
id = vm.vm_id
#skip if it's already monitored
if vm.one_exist?
next if @monitored_vms.include? id
@monitored_vms << id
end
vm.vm_info = info
vm.monitor(stats)
vm_name = "#{info["name"]} - #{cluster_name}"
vm_info << %Q{
VM = [
ID="#{id}",
VM_NAME="#{vm_name}",
DEPLOY_ID="#{vm_ref}",
}
# if the machine does not exist in opennebula it means that is a wild:
unless vm.one_exist?
vm_template_64 = Base64.encode64(vm.vm_to_one(vm_name)).gsub("\n","")
vm_info << "VCENTER_TEMPLATE=\"YES\","
vm_info << "IMPORT_TEMPLATE=\"#{vm_template_64}\","
end
vm_info << "POLL=\"#{vm.info.gsub('"', "\\\"")}\"]"
rescue StandardError => e
# Per-VM failures are reported as an ERROR section instead of
# aborting the whole cluster poll
vm_info = error_monitoring(e, vm_ref, info)
end
str_info << vm_info
end
view.DestroyView # Destroy the view
return str_info, last_mon_time
end
# Builds an error VM template section when monitoring a VM fails.
#
# @param e      [StandardError] exception raised while monitoring
# @param vm_ref [String] vCenter ref of the VM being monitored
# @param info   [Hash] optional VM properties; only 'name' is read
#
# @return [String] a VM = [...] fragment carrying the error (inspect
#         output plus backtrace) Base64-encoded on a single line
def error_monitoring(e, vm_ref, info = {})
    error_info = ''
    vm_name = info['name']
    tmp_str = e.inspect.dup
    # A rescued exception may carry a nil backtrace (e.g. one built
    # with Exception.new); guard instead of crashing in the handler
    tmp_str << (e.backtrace || []).join("\n")
    error_info << %Q{
VM = [
    VM_NAME="#{vm_name}",
    DEPLOY_ID="#{vm_ref}",
}
    error_info << "ERROR=\"#{Base64.encode64(tmp_str).gsub("\n", '')}\"]"
end
def monitor_customizations
customizations = self['_connection'].serviceContent.customizationSpecManager.info

View File

@ -41,7 +41,6 @@ module VCenterDriver
require 'vm_device'
require 'vm_helper'
require 'vm_monitor'
class VirtualMachine < VCenterDriver::Template
@ -52,7 +51,6 @@ module VCenterDriver
# VCenterDriver::VirtualMachine::Disk
include VirtualMachineDevice
include VirtualMachineHelper
include VirtualMachineMonitor
############################################################################
# Virtual Machine main Class

View File

@ -963,6 +963,7 @@ class Template
"VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n"
str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
str << "DEPLOY_ID =\"#{self["_ref"]}\"\n"
@state = 'POWEROFF' if @state == 'd'
str << "IMPORT_STATE =\"#{@state}\"\n"

View File

@ -97,7 +97,6 @@ require 'vm_device'
require 'vm_disk'
require 'vm_nic'
require 'vm_helper'
require 'vm_monitor'
CHECK_REFS = true