Mirror of https://github.com/OpenNebula/one.git (synced 2025-03-19 06:50:07 +03:00)

Commit 25907eae22: Merge branch 'master' into f-3951

Changed paths:
- share: doc/xsd, hooks/vcenter, linters, oneprovision (ansible, hybrid+/provisions), pkgs/services/supervisor/centos8/scripts/lib
- src: cli, datastore_mad/remotes, im_mad/remotes/lib, oca/go/src/goca/schemas/vm, oneprovision/lib (provider, provision, terraform/providers/templates), sunstone, vmm_mad/remotes, vnm_mad/remotes/elastic
@@ -19,6 +19,7 @@
<xs:complexType>
<xs:all>
<xs:element name="DRIVER_MANAGED_GROUPS" type="xs:string"/>
<xs:element name="DRIVER_MANAGED_GROUP_ADMIN" type="xs:string"/>
<xs:element name="MAX_TOKEN_TIME" type="xs:integer"/>
<xs:element name="NAME" type="xs:string"/>
<xs:element name="PASSWORD_CHANGE" type="xs:string"/>
@@ -164,6 +165,7 @@
</xs:element>

<xs:element name="HOST_ENCRYPTED_ATTR" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="DOCUMENT_ENCRYPTED_ATTR" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
<xs:element name="IMAGE_RESTRICTED_ATTR" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>

<xs:element name="IM_MAD" minOccurs="0" maxOccurs="unbounded">
@@ -47,7 +47,9 @@ require 'nsx_driver'

# Exceptions
class AllocateNetworkError < StandardError; end

class CreateNetworkError < StandardError; end

class UpdateNetworkError < StandardError; end

# FUNCTIONS
@@ -370,7 +372,7 @@ begin
ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref,
vi_client)
dc = cluster.get_dc
dc = cluster.datacenter

# Step 3. Create the port groups based on each type
if pg_type == VCenterDriver::Network::NETWORK_TYPE_NSXV
@@ -45,6 +45,7 @@ require 'nsx_driver'

# Exceptions
class DeleteNetworkError < StandardError; end

class DeletePortgroupError < StandardError; end

# FUNCTIONS
@@ -74,7 +75,7 @@ end

vnet_xml = arguments_xml.xpath(VNET_XPATH).to_s

template = OpenNebula::XMLElement.new
template = OpenNebula::XMLElement.new
template.initialize_xml(vnet_xml, 'VNET')
managed = template['TEMPLATE/OPENNEBULA_MANAGED'] != 'NO'
imported = template['TEMPLATE/VCENTER_IMPORTED']
@@ -109,7 +110,7 @@ begin
ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
cluster = VCenterDriver::ClusterComputeResource
.new_from_ref(ccr_ref, vi_client)
dc = cluster.get_dc
dc = cluster.datacenter

# NSX
ls_id = template['TEMPLATE/NSX_ID']
@@ -148,7 +149,7 @@ begin
cluster['host'].each do |host|
# Step 3. Loop through hosts in clusters
esx_host = VCenterDriver::ESXHost
.new_from_ref(host._ref, vi_client)
.new_from_ref(host._ref, vi_client)

begin
esx_host.lock # Exclusive lock for ESX host operation
@@ -179,7 +180,7 @@ begin
begin
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
logical_switch = NSXDriver::VirtualWire
.new(nsx_client, ls_id, nil, nil)
.new(nsx_client, ls_id, nil, nil)
logical_switch.delete_logical_switch
rescue StandardError => e
err_msg = e.message
@@ -191,14 +192,13 @@ begin
begin
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
logical_switch = NSXDriver::OpaqueNetwork
.new(nsx_client, ls_id, nil, nil)
.new(nsx_client, ls_id, nil, nil)
logical_switch.delete_logical_switch
rescue StandardError => e
err_msg = e.message
raise DeletePortgroupError, err_msg
end
end

rescue DeleteNetworkError => e
STDERR.puts e.message
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
@@ -82,14 +82,11 @@ AllCops:
- src/im_mad/remotes/kvm-probes.d/pci.rb
- src/im_mad/remotes/kvm.d/monitord-client.rb
- src/im_mad/remotes/lxd.d/monitord-client.rb
- src/im_mad/remotes/vcenter.d/poll
- src/im_mad/remotes/packet.d/poll
- src/im_mad/remotes/ec2.d/poll
- src/im_mad/remotes/one.d/poll
- src/im_mad/remotes/az.d/poll
- src/im_mad/remotes/lib/vcenter_cluster.rb
- src/im_mad/remotes/lib/vcenter_monitor.rb
- src/im_mad/remotes/lib/vcenter_monitor_vms.rb
- src/vnm_mad/remotes/ovswitch/post
- src/vnm_mad/remotes/ovswitch/clean
- src/vnm_mad/remotes/ovswitch/pre
@@ -183,8 +180,6 @@ AllCops:
- src/cli/one
- share/scons/po2json.rb
- share/sudoers/sudo_commands.rb
- share/hooks/vcenter/delete_vcenter_net.rb
- share/hooks/vcenter/create_vcenter_net.rb
- share/hooks/ft/host_error.rb
- share/instance_types/ec2-instance-types.rb
- share/instance_types/az-instance-types.rb
@@ -274,8 +269,6 @@ AllCops:
- src/sunstone/routes/vcenter.rb
- src/sunstone/models/OpenNebula2FA/SunstoneWebAuthn.rb
- src/onegate/onegate-server.rb
- src/datastore_mad/remotes/vcenter_downloader.rb
- src/datastore_mad/remotes/vcenter_uploader.rb
- src/datastore_mad/remotes/xpath.rb
- src/datastore_mad/remotes/url.rb
- src/datastore_mad/one_datastore.rb
@@ -315,7 +308,6 @@ AllCops:
- src/cli/one_helper/oneuser_helper.rb
- src/cli/one_helper/onegroup_helper.rb
- src/cli/one_helper/onevnet_helper.rb
- src/cli/one_helper/onevcenter_helper.rb
- src/cli/one_helper/onecluster_helper.rb
- src/cli/one_helper/onevntemplate_helper.rb
- src/cli/one_helper/onevrouter_helper.rb
@@ -386,8 +378,6 @@ AllCops:
- src/oca/ruby/deprecated/OpenNebula.rb
- src/vmm_mad/dummy/one_vmm_dummy.rb
- src/vmm_mad/remotes/one/opennebula_driver.rb
- src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb
- src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb
- src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb
- src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb
@@ -23,3 +23,4 @@
frr_iface: 'eth0'
# Use /16 for the internal management network address
frr_prefix_length: 16
- sudoers-tmp
@@ -24,3 +24,4 @@
frr_iface: 'bond0_0'
# Use /25 for the internal management network address
frr_prefix_length: 25
- sudoers-tmp
@@ -0,0 +1 @@
../../../../../pkgs/sudoers/centos/opennebula
@@ -0,0 +1 @@
../../../../../pkgs/sudoers/debian/opennebula
share/oneprovision/ansible/roles/sudoers-tmp/tasks/main.yml (new normal file, 11 lines)
@@ -0,0 +1,11 @@
- name: Overwrite opennebula sudoers (debian)
  copy:
    src: debian-opennebula
    dest: /etc/sudoers.d/opennebula
  when: ansible_os_family == "Debian"

- name: Overwrite opennebula sudoers (redhat)
  copy:
    src: centos-opennebula
    dest: /etc/sudoers.d/opennebula
  when: ansible_os_family == "RedHat"
@@ -31,3 +31,4 @@ vntemplates:
vn_mad: 'vxlan'
phydev: 'eth0'
automatic_vlan_id: 'yes'
cluster_ids: "${cluster.0.id}"
@@ -30,3 +30,4 @@ vntemplates:
vn_mad: 'vxlan'
phydev: 'bond0'
automatic_vlan_id: 'yes'
cluster_ids: "${cluster.0.id}"
@@ -4,12 +4,12 @@

msg()
(
echo "[SUPERVISOR]: ${SUPERVISOR_PROCESS_NAME}: $*"
echo "$(date '+%F %T') [SUPERVISOR]: ${SUPERVISOR_PROCESS_NAME}: $*"
)

err()
(
echo "[SUPERVISOR] [!] ERROR: ${SUPERVISOR_PROCESS_NAME}: $*"
echo "$(date '+%F %T') [SUPERVISOR] [!] ERROR: ${SUPERVISOR_PROCESS_NAME}: $*"
)

is_running()
@@ -705,7 +705,7 @@ EOT
else
rc = pool.info

return rc if OpenNebula.is_error?(rc)
return -1, rc.message if OpenNebula.is_error?(rc)

_, hash = print_page(pool, options)
@@ -246,7 +246,7 @@ class OneProvisionHelper < OpenNebulaHelper::OneHelper
WAIT_READY,
WAIT_TIMEOUT,
PROVIDER,
USER_INPUTS]
USER_INPUTS] + [OpenNebulaHelper::FORMAT]

ONE_OPTIONS = CommandParser::OPTIONS +
CLIHelper::OPTIONS +
@ -1,4 +1,3 @@
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2020, OpenNebula Project, OpenNebula Systems #
|
||||
# #
|
||||
@ -17,16 +16,21 @@
|
||||
|
||||
require 'one_helper'
|
||||
|
||||
##############################################################################
|
||||
# Module OneVcenterHelper
|
||||
##############################################################################
|
||||
class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
#
|
||||
# vCenter importer will divide rvmomi resources
|
||||
# in this group, makes parsing easier.
|
||||
module VOBJECT
|
||||
DATASTORE = 1
|
||||
TEMPLATE = 2
|
||||
NETWORK = 3
|
||||
IMAGE = 4
|
||||
|
||||
DATASTORE = 1
|
||||
TEMPLATE = 2
|
||||
NETWORK = 3
|
||||
IMAGE = 4
|
||||
|
||||
end
|
||||
|
||||
#
|
||||
@ -37,45 +41,53 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
# struct: [Array] LIST FORMAT for opennebula cli
|
||||
# related methods: * cli_format
|
||||
#
|
||||
# columns: [Hash(column => Integer)] Will be used in the list command, Integer represent nbytes
|
||||
# columns: [Hash(column => Integer)] Will be used in the list command,
|
||||
# Integer represent nbytes
|
||||
# related methods: * format_list
|
||||
#
|
||||
# cli: [Array] with mandatory args, for example image listing needs a datastore
|
||||
# cli: [Array] with mandatory args, for example image
|
||||
# listing needs a datastore
|
||||
# related methods: * parse_opts
|
||||
#
|
||||
# dialogue: [Lambda] Used only for Vobject that require a previous dialogue with the user, will be triggered
|
||||
# dialogue: [Lambda] Used only for Vobject that require a previous
|
||||
# dialogue with the user, will be triggered
|
||||
# on importation process
|
||||
# related methods: * network_dialogue
|
||||
# * template_dialogue
|
||||
#
|
||||
TABLE = {
|
||||
VOBJECT::DATASTORE => {
|
||||
:struct => ["DATASTORE_LIST", "DATASTORE"],
|
||||
:columns => {:IMID => 5, :REF => 15, :NAME => 50, :CLUSTERS => 10},
|
||||
:struct => %w[DATASTORE_LIST DATASTORE],
|
||||
:columns =>
|
||||
{ :IMID => 5, :REF => 15, :NAME => 50, :CLUSTERS => 10 },
|
||||
:cli => [:host],
|
||||
:dialogue => ->(arg){}
|
||||
:dialogue => ->(arg) {}
|
||||
},
|
||||
VOBJECT::TEMPLATE => {
|
||||
:struct => ["TEMPLATE_LIST", "TEMPLATE"],
|
||||
:columns => {:IMID => 5, :REF => 10, :NAME => 50},
|
||||
:struct => %w[TEMPLATE_LIST TEMPLATE],
|
||||
:columns => { :IMID => 5, :REF => 10, :NAME => 50 },
|
||||
:cli => [:host],
|
||||
:dialogue => ->(arg){ OneVcenterHelper.template_dialogue(arg) }
|
||||
:dialogue => ->(arg) { OneVcenterHelper.template_dialogue(arg) }
|
||||
},
|
||||
VOBJECT::NETWORK => {
|
||||
:struct => ["NETWORK_LIST", "NETWORK"],
|
||||
:columns => {:IMID => 5, :REF => 15, :NAME => 30, :CLUSTERS => 20},
|
||||
:struct => %w[NETWORK_LIST NETWORK],
|
||||
:columns => {
|
||||
:IMID => 5,
|
||||
:REF => 15,
|
||||
:NAME => 30,
|
||||
:CLUSTERS => 20
|
||||
},
|
||||
:cli => [:host],
|
||||
:dialogue => ->(arg){ OneVcenterHelper.network_dialogue(arg) }
|
||||
:dialogue => ->(arg) { OneVcenterHelper.network_dialogue(arg) }
|
||||
},
|
||||
VOBJECT::IMAGE => {
|
||||
:struct => ["IMAGE_LIST", "IMAGE"],
|
||||
:columns => {:IMID => 5,:REF => 35, :PATH => 60},
|
||||
:struct => %w[IMAGE_LIST IMAGE],
|
||||
:columns => { :IMID => 5, :REF => 35, :PATH => 60 },
|
||||
:cli => [:host, :datastore],
|
||||
:dialogue => ->(arg){}
|
||||
:dialogue => ->(arg) {}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
################################################################
|
||||
# CLI ARGS
|
||||
################################################################
|
||||
@ -83,28 +95,27 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
# these methods will be used by table :cli property
|
||||
# the purpose is to inject code when -d option in this case is used
|
||||
#
|
||||
# @param arg [String] The parameter passed to the option:w
|
||||
# @param arg [String] The parameter passed to the option:w
|
||||
#
|
||||
|
||||
def datastore(arg)
|
||||
ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, arg)
|
||||
|
||||
{
|
||||
ds_ref: ds['TEMPLATE/VCENTER_DS_REF'],
|
||||
one_item: ds
|
||||
:ds_ref => ds['TEMPLATE/VCENTER_DS_REF'],
|
||||
:one_item => ds
|
||||
}
|
||||
end
|
||||
|
||||
def host(arg)
|
||||
return arg
|
||||
arg
|
||||
end
|
||||
|
||||
########################
|
||||
|
||||
|
||||
# In list command you can use this method to print a header
|
||||
#
|
||||
# @param vcenter_host [String] this text will be displayed
|
||||
# @param vcenter_host [String] this text will be displayed
|
||||
#
|
||||
def show_header(vcenter_host)
|
||||
CLIHelper.scr_bold
|
||||
@ -112,33 +123,33 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
puts "# vCenter: #{vcenter_host}".ljust(50)
|
||||
CLIHelper.scr_restore
|
||||
puts
|
||||
|
||||
end
|
||||
|
||||
# Using for parse a String into a VOBJECT
|
||||
# We will use VOBJECT instances for handle any operatiion
|
||||
#
|
||||
# @param type [String] String representing the vCenter resource
|
||||
# @param type [String] String representing the vCenter resource
|
||||
#
|
||||
def set_object(type)
|
||||
raise "you need to use -o option!" unless type
|
||||
def object_update(type)
|
||||
raise 'you need to use -o option!' unless type
|
||||
|
||||
type = type.downcase
|
||||
if (type == "datastores")
|
||||
case type
|
||||
when 'datastores'
|
||||
@vobject = VOBJECT::DATASTORE
|
||||
elsif (type == "templates")
|
||||
when 'templates'
|
||||
@vobject = VOBJECT::TEMPLATE
|
||||
elsif (type =="networks")
|
||||
when 'networks'
|
||||
@vobject = VOBJECT::NETWORK
|
||||
elsif (type == "images")
|
||||
when 'images'
|
||||
@vobject = VOBJECT::IMAGE
|
||||
else
|
||||
puts "unknown #{type} type option"
|
||||
puts " -o options:"
|
||||
puts " datastores"
|
||||
puts " templates"
|
||||
puts " networks"
|
||||
puts " images"
|
||||
puts ' -o options:'
|
||||
puts ' datastores'
|
||||
puts ' templates'
|
||||
puts ' networks'
|
||||
puts ' images'
|
||||
|
||||
exit 0
|
||||
end
|
||||
@ -146,11 +157,11 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
# Handles connection to vCenter.
|
||||
#
|
||||
# @param options [Hash] options for the connection
|
||||
# @param options [Hash] options for the connection
|
||||
#
|
||||
def connection_options(object_name, options)
|
||||
if options[:vuser].nil? || options[:vcenter].nil?
|
||||
raise "vCenter connection parameters are mandatory to import"\
|
||||
raise 'vCenter connection parameters are mandatory to import'\
|
||||
" #{object_name}:\n"\
|
||||
"\t --vcenter vCenter hostname\n"\
|
||||
"\t --vuser username to login in vcenter"
|
||||
@ -158,23 +169,29 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
password = options[:vpass] || OpenNebulaHelper::OneHelper.get_password
|
||||
{
|
||||
:user => options[:vuser],
|
||||
:password => password,
|
||||
:host => options[:vcenter],
|
||||
:port => options[:port]
|
||||
:user => options[:vuser],
|
||||
:password => password,
|
||||
:host => options[:vcenter],
|
||||
:port => options[:port]
|
||||
}
|
||||
end
|
||||
|
||||
def cli_format( hash)
|
||||
{TABLE[@vobject][:struct].first => {TABLE[@vobject][:struct].last => hash.values}}
|
||||
def cli_format(hash)
|
||||
{
|
||||
TABLE[@vobject][:struct].first =>
|
||||
{
|
||||
TABLE[@vobject][:struct].last =>
|
||||
hash.values
|
||||
}
|
||||
}
|
||||
end
|
||||
|
||||
# This method will print a list for a vcenter_resource.
|
||||
#
|
||||
def list_object(options, list)
|
||||
def list_object(_options, list)
|
||||
vcenter_host = list.keys[0]
|
||||
list = cli_format(list.values.first)
|
||||
table = format_list()
|
||||
table = format_list
|
||||
|
||||
show_header(vcenter_host)
|
||||
|
||||
@ -184,11 +201,12 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
# handles :cli section of TABLE
|
||||
# used for executing the dialogue in some VOBJECTS
|
||||
#
|
||||
# @param object_info [Hash] This is the object with all the info related to the object
|
||||
# @param object_info [Hash] This is the object
|
||||
# with all the info related to the object
|
||||
# that will be imported
|
||||
#
|
||||
def cli_dialogue(object_info)
|
||||
return TABLE[@vobject][:dialogue].(object_info)
|
||||
TABLE[@vobject][:dialogue].call(object_info)
|
||||
end
|
||||
|
||||
# This method iterates over the possible options for certain resources
|
||||
@ -197,17 +215,18 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
# @param opts [Hash] options object passed to the onecenter tool
|
||||
#
|
||||
def parse_opts(opts)
|
||||
set_object(opts[:object])
|
||||
object_update(opts[:object])
|
||||
|
||||
res = {}
|
||||
TABLE[@vobject][:cli].each do |arg|
|
||||
raise "#{arg} it's mandadory for this op" if opts[arg].nil?
|
||||
res[arg] = self.method(arg).call(opts[arg])
|
||||
|
||||
res[arg] = method(arg).call(opts[arg])
|
||||
end
|
||||
|
||||
res[:config] = parse_file(opts[:configuration]) if opts[:configuration]
|
||||
|
||||
return res
|
||||
res
|
||||
end
|
||||
|
||||
# This method will parse a yaml
|
||||
@ -218,8 +237,8 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
#
|
||||
def parse_file(path)
|
||||
begin
|
||||
config = YAML::load(File.read(path))
|
||||
rescue Exception => e
|
||||
_config = YAML.safe_load(File.read(path))
|
||||
rescue StandardError => _e
|
||||
str_error="Unable to read '#{path}'. Invalid YAML syntax:\n"
|
||||
|
||||
raise str_error
|
||||
@ -230,42 +249,44 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
# with the purpose of build a complete CLI list
|
||||
# OpenNebula way
|
||||
#
|
||||
def format_list()
|
||||
def format_list
|
||||
config = TABLE[@vobject][:columns]
|
||||
table = CLIHelper::ShowTable.new() do
|
||||
column :IMID, "identifier for ...", :size=>config[:IMID] || 4 do |d|
|
||||
CLIHelper::ShowTable.new do
|
||||
column :IMID, 'identifier for ...', :size=>config[:IMID] || 4 do |d|
|
||||
d[:import_id]
|
||||
end
|
||||
|
||||
column :REF, "ref", :left, :adjust, :size=>config[:REF] || 15 do |d|
|
||||
column :REF, 'ref', :left, :adjust, :size=>config[:REF] || 15 do |d|
|
||||
d[:ref]
|
||||
end
|
||||
|
||||
column :NAME, "Name", :left, :expand, :size=>config[:NAME] || 20 do |d|
|
||||
column :NAME, 'Name', :left, :expand,
|
||||
:size=>config[:NAME] || 20 do |d|
|
||||
d[:name] || d[:simple_name]
|
||||
end
|
||||
|
||||
column :CLUSTERS, "CLUSTERS", :left, :size=>config[:CLUSTERS] || 10 do |d|
|
||||
column :CLUSTERS, 'CLUSTERS', :left,
|
||||
:size=>config[:CLUSTERS] || 10 do |d|
|
||||
d = d[:clusters] if d[:clusters]
|
||||
d[:one_ids] || d[:cluster].to_s
|
||||
end
|
||||
|
||||
column :PATH, "PATH", :left, :expand, :size=>config[:PATH] || 10 do |d|
|
||||
column :PATH, 'PATH', :left, :expand,
|
||||
:size=>config[:PATH] || 10 do |d|
|
||||
d[:path]
|
||||
end
|
||||
|
||||
default(*config.keys)
|
||||
end
|
||||
|
||||
table
|
||||
end
|
||||
|
||||
################################################################
|
||||
# CLI DIALOGUES
|
||||
################################################################
|
||||
def self.template_dialogue(t)
|
||||
rps_list = -> {
|
||||
return "" if t[:rp_list].empty?
|
||||
rps_list = lambda {
|
||||
return '' if t[:rp_list].empty?
|
||||
|
||||
puts
|
||||
t[:rp_list].each do |rp|
|
||||
puts " #{rp[:name]}"
|
||||
@ -277,91 +298,96 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
# default opts
|
||||
opts = {
|
||||
linked_clone: '0',
|
||||
copy: '0',
|
||||
name: '',
|
||||
folder: '',
|
||||
resourcepool: [],
|
||||
type: ''
|
||||
:linked_clone => '0',
|
||||
:copy => '0',
|
||||
:name => '',
|
||||
:folder => '',
|
||||
:resourcepool => [],
|
||||
:type => ''
|
||||
}
|
||||
|
||||
STDOUT.print "\n- Template: \e[92m#{t[:template_name]}\e[39m\n\n"\
|
||||
|
||||
# LINKED CLONE OPTION
|
||||
STDOUT.print "\n For faster deployment operations"\
|
||||
" and lower disk usage, OpenNebula"\
|
||||
" can create new VMs as linked clones."\
|
||||
"\n Would you like to use Linked Clones with VMs based on this template (y/[n])? "
|
||||
' and lower disk usage, OpenNebula'\
|
||||
' can create new VMs as linked clones.'\
|
||||
"\n Would you like to use Linked Clones"\
|
||||
' with VMs based on this template (y/[n])? '
|
||||
|
||||
if STDIN.gets.strip.downcase == 'y'
|
||||
opts[:linked_clone] = '1'
|
||||
|
||||
|
||||
# CREATE COPY OPTION
|
||||
STDOUT.print "\n Linked clones requires that delta"\
|
||||
" disks must be created for each disk in the template."\
|
||||
" This operation may change the template contents."\
|
||||
" \n Do you want OpenNebula to create a copy of the template,"\
|
||||
" so the original template remains untouched ([y]/n)? "
|
||||
' disks must be created for '\
|
||||
'each disk in the template.'\
|
||||
' This operation may change the template contents.'\
|
||||
" \n Do you want OpenNebula to "\
|
||||
'create a copy of the template,'\
|
||||
' so the original template remains untouched ([y]/n)? '
|
||||
|
||||
if STDIN.gets.strip.downcase != 'n'
|
||||
opts[:copy] = '1'
|
||||
|
||||
# NAME OPTION
|
||||
STDOUT.print "\n The new template will be named"\
|
||||
" adding a one- prefix to the name"\
|
||||
' adding a one- prefix to the name'\
|
||||
" of the original template. \n"\
|
||||
" If you prefer a different name"\
|
||||
" please specify or press Enter"\
|
||||
" to use defaults: "
|
||||
' If you prefer a different name'\
|
||||
' please specify or press Enter'\
|
||||
' to use defaults: '
|
||||
|
||||
template_name = STDIN.gets.strip.downcase
|
||||
opts[:name] = template_name
|
||||
|
||||
STDOUT.print "\n WARNING!!! The cloning operation can take some time"\
|
||||
STDOUT.print "\n WARNING!!! The cloning "\
|
||||
'operation can take some time'\
|
||||
" depending on the size of disks.\n"
|
||||
end
|
||||
end
|
||||
|
||||
STDOUT.print "\n\n Do you want to specify a folder where"\
|
||||
" the deployed VMs based on this template will appear"\
|
||||
" in vSphere's VM and Templates section?"\
|
||||
"\n If no path is set, VMs will be placed in the same"\
|
||||
" location where the template lives."\
|
||||
"\n Please specify a path using slashes to separate folders"\
|
||||
" e.g /Management/VMs or press Enter to use defaults: "\
|
||||
sdtout_print = "\n\n Do you want to specify a folder where"\
|
||||
' the deployed VMs based on this template will appear'\
|
||||
" in vSphere's VM and Templates section?"\
|
||||
"\n If no path is set, VMs will be placed in the same"\
|
||||
' location where the template lives.'\
|
||||
"\n Please specify a path using slashes to separate folders"\
|
||||
' e.g /Management/VMs or press Enter to use defaults: '\
|
||||
|
||||
STDOUT.print sdtout_print
|
||||
|
||||
vcenter_vm_folder = STDIN.gets.strip
|
||||
opts[:folder] = vcenter_vm_folder
|
||||
|
||||
STDOUT.print "\n\n This template is currently set to "\
|
||||
"launch VMs in the default resource pool."\
|
||||
'launch VMs in the default resource pool.'\
|
||||
"\n Press y to keep this behaviour, n to select"\
|
||||
" a new resource pool or d to delegate the choice"\
|
||||
" to the user ([y]/n/d)? "
|
||||
' a new resource pool or d to delegate the choice'\
|
||||
' to the user ([y]/n/d)? '
|
||||
|
||||
answer = STDIN.gets.strip.downcase
|
||||
answer = STDIN.gets.strip.downcase
|
||||
|
||||
case answer.downcase
|
||||
when 'd' || 'delegate'
|
||||
opts[:type]='list'
|
||||
puts "separate with commas ',' the list that you want to deleate:"
|
||||
|
||||
opts[:resourcepool] = rps_list.call.gsub(/\s+/, "").split(",")
|
||||
opts[:resourcepool] = rps_list.call.gsub(/\s+/, '').split(',')
|
||||
|
||||
when 'n' || 'no'
|
||||
opts[:type]='fixed'
|
||||
puts "choose the proper name"
|
||||
puts 'choose the proper name'
|
||||
opts[:resourcepool] = rps_list.call
|
||||
else
|
||||
opts[:type]='default'
|
||||
end
|
||||
|
||||
return opts
|
||||
opts
|
||||
end
|
||||
|
||||
def self.network_dialogue(n)
|
||||
ask = ->(question, default = ""){
|
||||
ask = lambda {|question, default = ''|
|
||||
STDOUT.print question
|
||||
answer = STDIN.gets.strip
|
||||
|
||||
@ -372,59 +398,63 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
STDOUT.print "\n- Network: \e[92m#{n[:name]}\e[39m\n\n"\
|
||||
|
||||
opts = { size: "255", type: "ether" }
|
||||
opts = { :size => '255', :type => 'ether' }
|
||||
|
||||
question = " How many VMs are you planning"\
|
||||
" to fit into this network [255]? "
|
||||
opts[:size] = ask.call(question, "255")
|
||||
question = ' How many VMs are you planning'\
|
||||
' to fit into this network [255]? '
|
||||
opts[:size] = ask.call(question, '255')
|
||||
|
||||
question = " What type of Virtual Network"\
|
||||
" do you want to create (IPv[4],IPv[6], [E]thernet)? "
|
||||
type_answer = ask.call(question, "ether")
|
||||
question = ' What type of Virtual Network'\
|
||||
' do you want to create (IPv[4],IPv[6], [E]thernet)? '
|
||||
type_answer = ask.call(question, 'ether')
|
||||
|
||||
supported_types = ["4","6","ether", "e", "ip4", "ip6" ]
|
||||
supported_types = %w[4 6 ether e ip4 ip6]
|
||||
if !supported_types.include?(type_answer)
|
||||
type_answer = 'e'
|
||||
STDOUT.puts " Type [#{type_answer}] not supported,"\
|
||||
" defaulting to Ethernet."
|
||||
type_answer = 'e'
|
||||
STDOUT.puts " Type [#{type_answer}] not supported,"\
|
||||
' defaulting to Ethernet.'
|
||||
end
|
||||
question_ip = " Please input the first IP in the range: "
|
||||
question_mac = " Please input the first MAC in the range [Enter for default]: "
|
||||
question_ip =
|
||||
' Please input the first IP in the range: '
|
||||
question_mac =
|
||||
' Please input the first MAC in the range [Enter for default]: '
|
||||
|
||||
case type_answer.downcase
|
||||
when "4", "ip4"
|
||||
opts[:ip] = ask.call(question_ip)
|
||||
opts[:mac] = ask.call(question_mac)
|
||||
opts[:type] = "ip"
|
||||
when "6", "ip6"
|
||||
opts[:mac] = ask.call(question_mac)
|
||||
when '4', 'ip4'
|
||||
opts[:ip] = ask.call(question_ip)
|
||||
opts[:mac] = ask.call(question_mac)
|
||||
opts[:type] = 'ip'
|
||||
when '6', 'ip6'
|
||||
opts[:mac] = ask.call(question_mac)
|
||||
|
||||
question = " Do you want to use SLAAC "\
|
||||
"Stateless Address Autoconfiguration? ([y]/n): "
|
||||
question = ' Do you want to use SLAAC '\
|
||||
'Stateless Address Autoconfiguration? ([y]/n): '
|
||||
slaac_answer = ask.call(question, 'y').downcase
|
||||
|
||||
if slaac_answer == 'n'
|
||||
question = " Please input the IPv6 address (cannot be empty): "
|
||||
opts[:ip6] = ask.call(question)
|
||||
if slaac_answer == 'n'
|
||||
question =
|
||||
' Please input the IPv6 address (cannot be empty): '
|
||||
opts[:ip6] = ask.call(question)
|
||||
|
||||
question = " Please input the Prefix length (cannot be empty): "
|
||||
opts[:prefix_length] = ask.call(question)
|
||||
question =
|
||||
' Please input the Prefix length (cannot be empty): '
|
||||
opts[:prefix_length] = ask.call(question)
|
||||
opts[:type] = 'ip6_static'
|
||||
else
|
||||
question = " Please input the GLOBAL PREFIX "\
|
||||
"[Enter for default]: "
|
||||
opts[:global_prefix] = ask.call(question)
|
||||
else
|
||||
question = ' Please input the GLOBAL PREFIX '\
|
||||
'[Enter for default]: '
|
||||
opts[:global_prefix] = ask.call(question)
|
||||
|
||||
question= " Please input the ULA PREFIX "\
|
||||
"[Enter for default]: "
|
||||
opts[:ula_prefix] = ask.call(question)
|
||||
opts[:type] = 'ip6'
|
||||
end
|
||||
when "e", "ether"
|
||||
opts[:mac] = ask.call(question_mac)
|
||||
end
|
||||
question= ' Please input the ULA PREFIX '\
|
||||
'[Enter for default]: '
|
||||
opts[:ula_prefix] = ask.call(question)
|
||||
opts[:type] = 'ip6'
|
||||
end
|
||||
when 'e', 'ether'
|
||||
opts[:mac] = ask.call(question_mac)
|
||||
end
|
||||
|
||||
return opts
|
||||
opts
|
||||
end
|
||||
|
||||
def clear_tags(vmid)
|
||||
@ -469,7 +499,7 @@ class OneVcenterHelper < OpenNebulaHelper::OneHelper
|
||||
end
|
||||
end
|
||||
|
||||
return vm, keys_to_remove
|
||||
[vm, keys_to_remove]
|
||||
end
|
||||
|
||||
def remove_keys(vm, keys_to_remove)
|
||||
|
@@ -189,7 +189,8 @@ CommandParser::CmdParser.new(ARGV) do
provision_configure_desc,
:provisionid,
:options => [OneProvisionHelper::MODES,
OneProvisionHelper::FORCE] do
OneProvisionHelper::FORCE] +
[OpenNebulaHelper::FORMAT] do
helper.parse_options(options)

rc = helper.configure(args[0], options.key?(:force))
@@ -214,7 +215,8 @@ CommandParser::CmdParser.new(ARGV) do
:options => [OneProvisionHelper::MODES,
OneProvisionHelper::THREADS,
OneProvisionHelper::CLEANUP,
OneProvisionHelper::CLEANUP_TIMEOUT] do
OneProvisionHelper::CLEANUP_TIMEOUT] +
[OpenNebulaHelper::FORMAT] do
helper.parse_options(options)

if options[:cleanup_timeout].nil?
@@ -19,13 +19,13 @@
ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)

if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION)
GEMS_LOCATION = '/usr/share/one/gems' unless defined?(GEMS_LOCATION)
VAR_LOCATION = '/var/lib/one' unless defined?(VAR_LOCATION)
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
VAR_LOCATION ||= '/var/lib/one'
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' unless defined?(RUBY_LIB_LOCATION)
GEMS_LOCATION = ONE_LOCATION + '/share/gems' unless defined?(GEMS_LOCATION)
VAR_LOCATION = ONE_LOCATION + '/var' unless defined?(VAR_LOCATION)
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
VAR_LOCATION ||= ONE_LOCATION + '/var'
end

if File.directory?(GEMS_LOCATION)
@@ -46,7 +46,7 @@ require 'addressable'
vcenter_url = Addressable::URI.parse(ARGV[0])

params = CGI.parse(vcenter_url.query)
ds_id = params["param_dsid"][0]
ds_id = params['param_dsid'][0]

begin
vi_client = VCenterDriver::VIClient.new_from_datastore(ds_id)
@@ -57,9 +57,13 @@ begin
ds = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)

VCenterDriver::FileHelper.dump_vmdk_tar_gz(vcenter_url, ds)
rescue Exception => e
STDERR.puts "Cannot download image #{vcenter_url.path} from datastore #{ds_id} "\
"Reason: \"#{e.message}\"\n#{e.backtrace}"
rescue StandardError => e
STDERR.puts "Cannot download image #{vcenter_url.path}"\
" from datastore #{ds_id} "\
"Reason: \"#{e.message}\"}"
if VCenterDriver::CONFIG[:debug_information]
STDERR.puts "#{e.backtrace}"
end
exit(-1)
ensure
vi_client.close_connection if vi_client
@@ -19,11 +19,15 @@
ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION)

if !ONE_LOCATION
RUBY_LIB_LOCATION = '/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION)
GEMS_LOCATION = '/usr/share/one/gems' unless defined?(GEMS_LOCATION)
RUBY_LIB_LOCATION =
'/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION)
GEMS_LOCATION =
'/usr/share/one/gems' unless defined?(GEMS_LOCATION)
else
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' unless defined?(RUBY_LIB_LOCATION)
GEMS_LOCATION = ONE_LOCATION + '/share/gems' unless defined?(GEMS_LOCATION)
RUBY_LIB_LOCATION =
ONE_LOCATION + '/lib/ruby' unless defined?(RUBY_LIB_LOCATION)
GEMS_LOCATION =
ONE_LOCATION + '/share/gems' unless defined?(GEMS_LOCATION)
end

if File.directory?(GEMS_LOCATION)
@@ -47,7 +51,7 @@ begin
ds = VCenterDriver::Datastore.new_from_ref(target_ds_ref, vi_client)

# Setting "." as the source will read from the stdin
source_path = "." if source_path.nil?
source_path = '.' if source_path.nil?

ds.create_directory(File.dirname(target_path))

@@ -56,12 +60,13 @@ begin
end

puts target_path

rescue Exception => e
rescue StandardError => e
STDERR.puts "Cannot upload image to datastore #{ds_id} "\
"Reason: \"#{e.message}\"\n#{e.backtrace}"
exit -1
"Reason: \"#{e.message}\""
if VCenterDriver::CONFIG[:debug_information]
STDERR.puts "#{e.backtrace}"
end
exit(-1)
ensure
vi_client.close_connection if vi_client
end
@ -29,13 +29,13 @@ end
|
||||
# Logger
|
||||
################################################################################
|
||||
$logger = Logger.new(
|
||||
STDERR,
|
||||
level: Logger::INFO,
|
||||
datetime_format: '%Y-%m-%d %H:%M:%S',
|
||||
formatter: proc { |severity, datetime, progname, msg|
|
||||
"#{datetime} [#{severity}]: #{msg}\n"
|
||||
}
|
||||
)
|
||||
STDERR,
|
||||
:level => Logger::INFO,
|
||||
:datetime_format => '%Y-%m-%d %H:%M:%S',
|
||||
:formatter => proc {|severity, datetime, _progname, msg|
|
||||
"#{datetime} [#{severity}]: #{msg}\n"
|
||||
}
|
||||
)
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Set of vcenter clusters each one representing a opennebula host
|
||||
@ -48,8 +48,9 @@ $logger = Logger.new(
|
||||
# @last_monitor_vm: Timer for last monitor VM
|
||||
#-------------------------------------------------------------------------------
|
||||
class Cluster
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
#Constants
|
||||
# Constants
|
||||
# CLUSTER_PROPERTIES: ESX cluster properties
|
||||
# RP_PROPERTIES: Resource pool properties
|
||||
# VM_STATE_PROPERTIES: Properties for VM state changes
|
||||
@ -172,21 +173,21 @@ class Cluster
|
||||
def state_vm
|
||||
current_vm_states = vcenter_vms_state
|
||||
|
||||
# Check if we need a full sync
|
||||
# Check if we need a full sync
|
||||
full_sync = false
|
||||
now = Time.now.to_i
|
||||
if @last_sync.nil? or ((now - @last_sync) > VM_SYNC_TIME)
|
||||
if @last_sync.nil? || ((now - @last_sync) > VM_SYNC_TIME)
|
||||
full_sync = true
|
||||
@last_sync = now
|
||||
end
|
||||
|
||||
str_info = ""
|
||||
str_info = ''
|
||||
str_info << "SYNC_STATE=yes\nMISSING_STATE=#{VM_MISSING_STATE}\n" if full_sync
|
||||
|
||||
current_vm_states.each do |_,vm|
|
||||
current_vm_states.each do |_, vm|
|
||||
vm_ref = vm[:deploy_id]
|
||||
|
||||
if full_sync or need_state_sync?(vm_ref, vm[:state])
|
||||
if full_sync || need_state_sync?(vm_ref, vm[:state])
|
||||
str_info << "VM = [ ID=\"#{vm[:id]}\", "
|
||||
str_info << "DEPLOY_ID=\"#{vm[:deploy_id]}\", STATE=\"#{vm[:state]}\" ]\n"
|
||||
end
|
||||
@ -202,40 +203,46 @@ class Cluster
|
||||
view = @vic.vim
|
||||
.serviceContent
|
||||
.viewManager
|
||||
.CreateContainerView({
|
||||
container: @cluster.item,
|
||||
type: ['VirtualMachine'],
|
||||
recursive: true
|
||||
})
|
||||
.CreateContainerView(
|
||||
{
|
||||
:container => @cluster.item,
|
||||
:type => ['VirtualMachine'],
|
||||
:recursive => true
|
||||
}
|
||||
)
|
||||
|
||||
pc = @vic.vim.serviceContent.propertyCollector
|
||||
|
||||
result = pc.RetrieveProperties(:specSet => [
|
||||
RbVmomi::VIM.PropertyFilterSpec(
|
||||
:objectSet => [
|
||||
:obj => view,
|
||||
:skip => true,
|
||||
:selectSet => [
|
||||
RbVmomi::VIM.TraversalSpec(
|
||||
:name => 'traverseEntities',
|
||||
:type => 'ContainerView',
|
||||
:path => 'view',
|
||||
:skip => false
|
||||
)
|
||||
result = pc.RetrieveProperties(
|
||||
:specSet => [
|
||||
RbVmomi::VIM.PropertyFilterSpec(
|
||||
:objectSet => [
|
||||
:obj => view,
|
||||
:skip => true,
|
||||
:selectSet => [
|
||||
RbVmomi::VIM.TraversalSpec(
|
||||
:name => 'traverseEntities',
|
||||
:type => 'ContainerView',
|
||||
:path => 'view',
|
||||
:skip => false
|
||||
)
|
||||
]
|
||||
],
|
||||
:propSet => [
|
||||
{
|
||||
:type => 'VirtualMachine',
|
||||
:pathSet => VM_STATE_PROPERTIES
|
||||
}
|
||||
]
|
||||
],
|
||||
|
||||
:propSet => [{
|
||||
:type => 'VirtualMachine',
|
||||
:pathSet => VM_STATE_PROPERTIES
|
||||
}]
|
||||
)
|
||||
])
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
vms_hash = {}
|
||||
|
||||
result.each do |r|
|
||||
next unless r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
|
||||
|
||||
vms_hash[r.obj._ref] = r.to_hash
|
||||
end
|
||||
|
||||
@ -251,17 +258,17 @@ class Cluster
|
||||
one_id = -1
|
||||
ids = vmpool.retrieve_xmlelements("/VM_POOL/VM[DEPLOY_ID = '#{vm_ref}']")
|
||||
|
||||
ids.select {|vm|
|
||||
hid = vm["HISTORY_RECORDS/HISTORY/HID"]
|
||||
ids.select do |vm|
|
||||
hid = vm['HISTORY_RECORDS/HISTORY/HID']
|
||||
|
||||
if hid
|
||||
hid.to_i == @host_id
|
||||
else
|
||||
false
|
||||
end
|
||||
}
|
||||
end
|
||||
|
||||
one_id = ids[0]["ID"] if ids[0]
|
||||
one_id = ids[0]['ID'] if ids[0]
|
||||
next if one_id.to_i == -1
|
||||
|
||||
vms[vm_ref] = {
|
||||
@ -313,7 +320,7 @@ class Cluster
|
||||
|
||||
resource_usage_summary = @cluster.item.GetResourceUsage()
|
||||
|
||||
real_total_cpu = resource_usage_summary.cpuCapacityMHz.to_f
|
||||
real_total_cpu = resource_usage_summary.cpuCapacityMHz.to_f
|
||||
real_used_cpu = resource_usage_summary.cpuUsedMHz.to_f
|
||||
total_memory = resource_usage_summary.memCapacityMB.to_i
|
||||
used_mem = resource_usage_summary.memUsedMB.to_i
|
||||
@ -328,11 +335,11 @@ class Cluster
|
||||
|
||||
free_cpu = total_cpu - used_cpu
|
||||
|
||||
free_mem = total_memory - used_mem
|
||||
free_mem = total_memory - used_mem
|
||||
|
||||
unindent(<<-EOS)
|
||||
HYPERVISOR = vcenter
|
||||
USEDMEMORY = "#{(used_mem * 1024)}"
|
||||
USEDMEMORY = "#{used_mem * 1024}"
|
||||
FREEMEMORY = "#{free_mem}"
|
||||
USEDCPU = "#{used_cpu.to_i}"
|
||||
FREECPU = "#{free_cpu.to_i}"
|
||||
@ -388,33 +395,38 @@ class Cluster
|
||||
def resource_pool_info(mhz_core)
|
||||
rp_list = @cluster.get_resource_pool_list
|
||||
|
||||
view = @vic.vim.serviceContent.viewManager.CreateContainerView({
|
||||
container: @cluster.item,
|
||||
type: ['ResourcePool'],
|
||||
recursive: true
|
||||
})
|
||||
view =
|
||||
@vic.vim.serviceContent.viewManager.CreateContainerView(
|
||||
{
|
||||
:container => @cluster.item,
|
||||
:type => ['ResourcePool'],
|
||||
:recursive => true
|
||||
}
|
||||
)
|
||||
|
||||
pc = @vic.vim.serviceContent.propertyCollector
|
||||
result = pc.RetrieveProperties(:specSet => [
|
||||
RbVmomi::VIM.PropertyFilterSpec(
|
||||
:objectSet => [
|
||||
:obj => view,
|
||||
:skip => true,
|
||||
:selectSet => [
|
||||
RbVmomi::VIM.TraversalSpec(
|
||||
:name => 'traverseEntities',
|
||||
:type => 'ContainerView',
|
||||
:path => 'view',
|
||||
:skip => false
|
||||
)
|
||||
]
|
||||
],
|
||||
:propSet => [{
|
||||
:type => 'ResourcePool',
|
||||
:pathSet => RP_PROPERTIES
|
||||
}]
|
||||
)
|
||||
])
|
||||
result = pc.RetrieveProperties(
|
||||
:specSet => [
|
||||
RbVmomi::VIM.PropertyFilterSpec(
|
||||
:objectSet => [
|
||||
:obj => view,
|
||||
:skip => true,
|
||||
:selectSet => [
|
||||
RbVmomi::VIM.TraversalSpec(
|
||||
:name => 'traverseEntities',
|
||||
:type => 'ContainerView',
|
||||
:path => 'view',
|
||||
:skip => false
|
||||
)
|
||||
]
|
||||
],
|
||||
:propSet => [{
|
||||
:type => 'ResourcePool',
|
||||
:pathSet => RP_PROPERTIES
|
||||
}]
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
rps = {}
|
||||
|
||||
@ -476,10 +488,10 @@ class Cluster
|
||||
mem_shares = info['config.memoryAllocation.shares.shares']
|
||||
|
||||
begin
|
||||
rp_name = rp_list.select { |item|
|
||||
rp_name = rp_list.select do |item|
|
||||
item[:ref] == ref
|
||||
}.first[:name]
|
||||
rescue
|
||||
end.first[:name]
|
||||
rescue StandardError
|
||||
rp_name = 'Resources'
|
||||
end
|
||||
|
||||
@ -585,9 +597,9 @@ class Cluster
|
||||
elist.each do |ext_list|
|
||||
case ext_list.key
|
||||
when NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
|
||||
parts = ext_list.client[0].url.split("/")
|
||||
parts = ext_list.client[0].url.split('/')
|
||||
|
||||
protocol = parts[0] + "//"
|
||||
protocol = parts[0] + '//'
|
||||
ip_port = parts[2]
|
||||
|
||||
@nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
|
||||
@ -619,12 +631,12 @@ class Cluster
|
||||
# Get a list vCenter datastores morefs
|
||||
#---------------------------------------------------------------------------
|
||||
def datastore_info
|
||||
dc = @cluster.get_dc
|
||||
dc = @cluster.datacenter
|
||||
ds = dc.datastore_folder
|
||||
|
||||
ds_info = ''
|
||||
|
||||
ds.fetch!.each do |ref, ds|
|
||||
ds.fetch!.each do |ref, _ds|
|
||||
ds_info << "VCENTER_DS_REF=\"#{ref}\"\n"
|
||||
end
|
||||
|
||||
@ -661,9 +673,9 @@ class Cluster
|
||||
|
||||
if create_nsx_client
|
||||
@nsx_client = NSXDriver::NSXClient.new_child(nsx_manager,
|
||||
nsx_user,
|
||||
nsx_password,
|
||||
nsx_type)
|
||||
nsx_user,
|
||||
nsx_password,
|
||||
nsx_type)
|
||||
end
|
||||
|
||||
return '' if @nsx_client.nil?
|
||||
@ -689,7 +701,7 @@ class Cluster
|
||||
end
|
||||
|
||||
else
|
||||
raise "Unknown PortGroup type #{nsx_type}"
|
||||
raise "Unknown PortGroup type #{nsx_type}"
|
||||
end
|
||||
|
||||
nsx_info.chomp!(',')
|
||||
@ -713,6 +725,7 @@ end
|
||||
#
|
||||
#---------------------------------------------------------------------------
|
||||
class ClusterSet
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
# Constants
|
||||
# CLUSTER_PROBES: to be executed. Each Cluster needs to respond to this
|
||||
@ -758,9 +771,9 @@ class ClusterSet
|
||||
|
||||
# Del a host from the @cluster hash
|
||||
def del(hid)
|
||||
@mutex.synchronize {
|
||||
@mutex.synchronize do
|
||||
@clusters.delete(hid)
|
||||
}
|
||||
end
|
||||
|
||||
$logger.info("Unregistered host #{hid}")
|
||||
end
|
||||
@ -778,7 +791,7 @@ class ClusterSet
|
||||
rc = hpool.info
|
||||
|
||||
if OpenNebula.is_error?(rc)
|
||||
# Wait 5 seconds and retry
|
||||
# Wait 5 seconds and retry
|
||||
sleep 5
|
||||
rc = hpool.info
|
||||
if OpenNebula.is_error?(rc)
|
||||
@ -786,10 +799,10 @@ class ClusterSet
|
||||
end
|
||||
end
|
||||
|
||||
$logger.info("Bootstraping list of clusters")
|
||||
$logger.info('Bootstraping list of clusters')
|
||||
|
||||
hpool.each do |h|
|
||||
next if h['IM_MAD'] != 'vcenter' || h['STATE'] == '8' #offline
|
||||
next if h['IM_MAD'] != 'vcenter' || h['STATE'] == '8' # offline
|
||||
|
||||
$logger.info("Adding host #{h.name} (#{h.id})")
|
||||
|
||||
@ -810,7 +823,7 @@ class ClusterSet
|
||||
next if c[:cluster].nil?
|
||||
|
||||
if c[:monitordc].nil?
|
||||
next if conf[:address].nil? or conf[:port].nil?
|
||||
next if conf[:address].nil? || conf[:port].nil?
|
||||
|
||||
c[:monitordc] = MonitorClient.new(conf[:address],
|
||||
conf[:port],
|
||||
@ -827,7 +840,8 @@ class ClusterSet
|
||||
probe_frequency = conf[probe_name].to_i
|
||||
next unless (Time.now.to_i - last_mon) > probe_frequency
|
||||
|
||||
# Refresh the vCenter connection in the least frequent probe
|
||||
# Refresh the vCenter connection
|
||||
# in the least frequent probe
|
||||
if probe_name.eql?(:system_host)
|
||||
c[:cluster].connect_vcenter
|
||||
end
|
||||
@ -872,4 +886,5 @@ class ClusterSet
|
||||
}
|
||||
end
|
||||
end
|
||||
|
||||
end
|
||||
|
@@ -19,17 +19,17 @@
ONE_LOCATION ||= ENV['ONE_LOCATION']

if !ONE_LOCATION
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby'
GEMS_LOCATION ||= '/usr/share/one/gems'
else
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby'
GEMS_LOCATION ||= ONE_LOCATION + '/share/gems'
end

if File.directory?(GEMS_LOCATION)
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
require 'rubygems'
Gem.use_paths(File.realpath(GEMS_LOCATION))
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
require 'rubygems'
Gem.use_paths(File.realpath(GEMS_LOCATION))
end

$LOAD_PATH << RUBY_LIB_LOCATION
@@ -44,22 +44,22 @@ vm_type = ARGV[1]
ccr = ARGV[2]

begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vi_client = VCenterDriver::VIClient.new_from_host(host_id)

cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr, vi_client)
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr, vi_client)

str_info , _ltime = cluster.monitor_vms(host_id, vm_type)
str_info, _ltime = cluster.monitor_vms(host_id, vm_type)

puts str_info
puts str_info
rescue StandardError => e
message = "Monitoring of VMs on vCenter cluster #{host_id} " \
" failed due to \"#{e.message}\"."
OpenNebula.log_error(message)
if VCenterDriver::CONFIG[:debug_information]
STDERR.puts "#{message} #{e.backtrace}"
end
message = "Monitoring of VMs on vCenter cluster #{host_id} " \
" failed due to \"#{e.message}\"."
OpenNebula.log_error(message)
if VCenterDriver::CONFIG[:debug_information]
STDERR.puts "#{message} #{e.backtrace}"
end

exit(-1)
exit(-1)
ensure
vi_client.close_connection if vi_client
vi_client.close_connection if vi_client
end
@@ -444,7 +444,7 @@ func (s LCMState) String() string {
return "DISK_RESIZE_UNDEPLOYED"
case HotplugNicPoweroff:
return "HOTPLUG_NIC_POWEROFF"
case HotplugrResize:
case HotplugResize:
return "HOTPLUG_RESIZE"
case HotplugSaveasUndeployed:
return "HOTPLUG_SAVEAS_UNDEPLOYED"
@@ -22,7 +22,7 @@ module OneProvision
DOCUMENT_TYPE = 102

# These attributes can not be changed when updating the provider
IMMUTABLE_ATTRS = %w[provider]
IMMUTABLE_ATTRS = %w[provider name]

# Allocates a new document
#
@@ -85,9 +85,7 @@ module OneProvision
return rc if OpenNebula.is_error?(rc)

pool.each do |p|
next unless p.body['provider'] == body['provider']

next if p.body['state'] == Provision::STATE['DONE']
next unless p.body['provider'] == self['NAME']

return OpenNebula::Error.new(
'Provider can not be deleted, it is used by ' \
@@ -22,6 +22,7 @@ require 'provision/provision_pool'
require 'provision/resources'
require 'provision/utils'

require 'base64'
require 'logger'
require 'singleton'

@@ -46,7 +47,18 @@ module OneProvision
format = '%Y-%m-%d %H:%M:%S'

instance.logger.formatter = proc do |severity, datetime, _p, msg|
"#{datetime.strftime(format)} #{severity.ljust(5)} : #{msg}\n"
if options[:json]
"{ \"timestamp\": \"#{datetime}\", " \
" \"severity\": \"#{severity}\", " \
" \"message\": \"#{Base64.strict_encode64(msg)}\"}\n"
elsif options[:xml]
"<TIMESTAMP>#{datetime}</TIMESTAMP>" \
"<SEVERITY>#{severity}</SEVERITY>" \
"<MESSAGE>#{Base64.strict_encode64(msg)}</MESSAGE>\n"
else
"#{datetime.strftime(format)} #{severity.ljust(5)} " \
": #{msg}\n"
end
end

if options.key? :debug
@@ -112,19 +112,22 @@ module OneProvision

# Get cluster information
def cluster
return nil unless infrastructure_objects
return unless infrastructure_objects

infrastructure_objects['clusters'][0]
end

# Returns provision hosts
def hosts
return nil unless infrastructure_objects
return unless infrastructure_objects

infrastructure_objects['hosts']
end

# Returns provision datastores
def datastores
return nil unless infrastructure_objects
return unless infrastructure_objects

infrastructure_objects['datastores']
end

@@ -247,8 +250,13 @@ module OneProvision
return OpenNebula::Error.new('No provider found')
end

@provider = provider
cfg['inputs'] = cfg['inputs'] | provider.inputs
@provider = provider

if cfg['inputs'].nil?
cfg['inputs'] = provider.inputs
else
cfg['inputs'] << provider.inputs unless provider.inputs.nil?
end

cfg.validate(false)

@@ -534,7 +542,7 @@ module OneProvision

OneProvisionLogger.debug(msg)

datastores = cfg['cluster'].delete("datastores")
datastores = cfg['cluster'].delete('datastores')

obj = Cluster.new(nil, cfg['cluster'])

@@ -542,7 +550,7 @@ module OneProvision

id = obj.create

datastores.each { |i| obj.adddatastore(i) } if datastores
datastores.each {|i| obj.adddatastore(i) } if datastores

infrastructure_objects['clusters'] = []
infrastructure_objects['clusters'] << { 'id' => id,
@@ -566,8 +574,7 @@ module OneProvision
cfg[r].each do |x|
Driver.retry_loop('Failed to create some resources',
self) do

x['provision'] = {'id' => @id }
x['provision'] = { 'id' => @id }
obj = Resource.object(r, nil, x)

next if obj.nil?
@@ -79,9 +79,9 @@ module OneProvision
end

# Remove non-provision elements added to the cluster
@one.datastore_ids.each { |i| @one.deldatastore(i) }
@one.vnet_ids.each { |i| @one.delvnet(i) }
@one.host_ids.each { |i| @one.delhost(i) }
@one.datastore_ids.each {|i| @one.deldatastore(i) }
@one.vnet_ids.each {|i| @one.delvnet(i) }
@one.host_ids.each {|i| @one.delhost(i) }

Utils.exception(@one.delete)
@@ -1,9 +1,9 @@
resource "aws_ebs_volume" "device_<%= obj['ID'] %>" {
availability_zone = "<%= provision['AWS_REGION'] %>"
size = "<%= obj['TOTAL_MB'] %>"

tags = {
Name = "<%= obj['NAME'] %>"
}
}
#resource "aws_ebs_volume" "device_<%= obj['ID'] %>" {
# availability_zone = "<%= provision['AWS_REGION'] %>"
# size = "<%= obj['TOTAL_MB'] %>"
#
# tags = {
# Name = "<%= obj['NAME'] %>"
# }
#}
@@ -1,9 +1,9 @@
resource "packet_volume" "device_<%= obj['ID'] %>" {
description = "<%= obj['ID'] %>_volume"
facility = "<%= provision['FACILITY'] %>"
project_id = "<%= provision['PACKET_PROJECT'] %>"
plan = "<%= provision['PLAN'] %>"
size = "<%= obj['TOTAL_MB'] %>"
billing_cycle = "hourly"
}
#resource "packet_volume" "device_<%= obj['ID'] %>" {
# description = "<%= obj['ID'] %>_volume"
# facility = "<%= provision['FACILITY'] %>"
# project_id = "<%= provision['PACKET_PROJECT'] %>"
# plan = "<%= provision['PLAN'] %>"
# size = "<%= obj['TOTAL_MB'] %>"
# billing_cycle = "hourly"
#}
@@ -161,7 +161,7 @@ class OpenNebulaVMRC

vm = VCenterDriver::VirtualMachine.new(vi_client, vm_ref, vm_id)

parameters = vm.get_html_console_parameters
parameters = vm.html_console_parameters

data = {
:host => parameters[:host],
@@ -612,7 +612,8 @@ define(function(require) {
var callback = params.success;
var callback_error = params.error;
var id = params.data.id;
var typeConnection = params.data.extra_param;
var typeConnection = params.data.extra_param.type;
var vm_name = params.data.extra_param.vm_name;
var resource = RESOURCE;

var request = OpenNebulaHelper.request(resource, null, params.data);
@@ -621,6 +622,7 @@ define(function(require) {
type: "POST",
dataType: "json",
success: function(response) {
response.vm_name = vm_name;
return callback ? callback(request, response) : null;
},
error: function(response) {
@@ -347,7 +347,9 @@ define(function(require) {
type: "custom",
call: function() {
$.each(Sunstone.getDataTable(TAB_ID).elements(), function(index, elem) {
Sunstone.runAction("VM.startguac_action", elem, 'vnc');
var vm_name = OpenNebulaVM.getName(elem);
var extra_param = {type: 'vnc', 'vm_name': vm_name }
Sunstone.runAction("VM.startguac_action", elem, extra_param);
});
},
error: function(req, resp) {
@@ -358,7 +360,9 @@ define(function(require) {
type: "custom",
call: function() {
$.each(Sunstone.getDataTable(TAB_ID).elements(), function(index, elem) {
Sunstone.runAction("VM.startguac_action", elem, 'rdp');
var vm_name = OpenNebulaVM.getName(elem);
var extra_param = {type: 'rdp', 'vm_name': vm_name }
Sunstone.runAction("VM.startguac_action", elem, extra_param);
});
},
error: function(req, resp) {
@@ -369,7 +373,9 @@ define(function(require) {
type: "custom",
call: function() {
$.each(Sunstone.getDataTable(TAB_ID).elements(), function(index, elem) {
Sunstone.runAction("VM.startguac_action", elem, 'ssh');
var vm_name = OpenNebulaVM.getName(elem);
var extra_param = {type: 'ssh', 'vm_name': vm_name }
Sunstone.runAction("VM.startguac_action", elem, extra_param);
});
},
error: function(req, resp) {
@@ -75,6 +75,7 @@ define(function(require) {

var tunnel = new Guacamole.WebSocketTunnel(wsprotocol + '//' + host + ':' + port + '/fireedge/guacamole')
var guac = this._client = new Guacamole.Client(tunnel);
var vm_name = response.vm_name || "";

// Client display
this._display = $("#guacamole-display");
@@ -94,19 +95,19 @@ define(function(require) {
guac.onstatechange = function(state) {
switch (state) {
case 0:
setStatus("Client IDLE");
setStatus("Client IDLE to: " + vm_name);
setLoading(true);
break;
case 1:
setStatus("Client CONNECTING");
setStatus("Client CONNECTING to: " + vm_name);
setLoading(true);
break;
case 2:
setStatus("Client WAITING");
setStatus("Client WAITING to: " + vm_name);
setLoading(true);
break;
case 3:
setStatus("Client CONNECTED");
setStatus("Client CONNECTED to: " + vm_name);
setLoading(false);
setTimeout(function() {
rescale(that);
@@ -114,15 +115,15 @@ define(function(require) {
}, 100);
break;
case 4:
setStatus("Client DISCONNECTING");
setStatus("Client DISCONNECTING to: " + vm_name);
setLoading(true);
break;
case 5:
setStatus("Client DISCONNECTED");
setStatus("Client DISCONNECTED to: " + vm_name);
setLoading(false);
break;
default:
setStatus("Client ERROR");
setStatus("Client ERROR to: " + vm_name);
setLoading(false);
break;
}
@ -30,12 +30,11 @@ is_readonly() {
|
||||
local DOMAIN=$1
|
||||
local DISK=$2
|
||||
|
||||
READ_ONLY=$(awk 'gsub(/[\0]/, x)' \
|
||||
<( virsh --connect $LIBVIRT_URI dumpxml $DOMAIN | \
|
||||
READ_ONLY=$(virsh --connect $LIBVIRT_URI dumpxml $DOMAIN | \
|
||||
$XPATH --stdin --subtree \
|
||||
"//domain/devices/disk[source/@file='$DISK']/readonly"))
|
||||
"//domain/devices/disk[source/@file='$DISK']/readonly")
|
||||
|
||||
[ "$READ_ONLY" = '<readonly/>' ]
|
||||
[[ "$READ_ONLY" =~ '<readonly/>' ]]
|
||||
}
|
||||
|
||||
get_size_and_format_of_disk_img() {
|
||||
|
@@ -890,7 +890,7 @@ module VCenterDriver
text
end

def get_dc # rubocop:disable Naming/AccessorMethodName
def datacenter # rubocop:disable Naming/AccessorMethodName
item = @item

until item.instance_of? RbVmomi::VIM::Datacenter

(File diff suppressed because it is too large)
(File diff suppressed because it is too large)
@@ -63,15 +63,19 @@ module VCenterDriver

template << template_disks

opts = {
:vi_client => @vi_client,
:vc_uuid => vc_uuid,
:npool => npool,
:hpool => hpool,
:vcenter => vc_name,
:template_moref => vm_ref,
:vm_object => vc_vm
}

# Create images or get nics information for template
error, template_nics, ar_ids = vc_vm
.import_vcenter_nics(@vi_client,
vc_uuid,
npool,
hpool,
vc_name,
vm_ref,
vc_vm)
error, template_nics, ar_ids =
vc_vm.import_vcenter_nics(opts)
opts = { :uuid => vc_uuid, :npool => npool, :error => error }
Raction.delete_ars(ar_ids, opts) unless error.empty?

@@ -79,7 +83,7 @@ module VCenterDriver
template << "VCENTER_ESX_HOST = #{vc_vm['runtime.host.name']}\n"

# Get DS_ID for the deployment, the wild VM needs a System DS
dc_ref = vc_vm.get_dc.item._ref
dc_ref = vc_vm.datacenter.item._ref
ds_ref = template.match(/^VCENTER_DS_REF *= *"(.*)" *$/)[1]

ds_one = dpool.select do |e|
@@ -44,12 +44,12 @@ class ElasticDriver < VNMMAD::VNMDriver

raise rc if OpenNebula.is_error?(rc)

unless @host.has_elements?('TEMPLATE/PROVISION_ID')
OpenNebula.log_error("No PROVISION_ID for host #{host_id}")
unless @host.has_elements?('TEMPLATE/PROVISION/ID')
OpenNebula.log_error("No ID in PROVISION for host #{host_id}")
exit 1
end

provision_id = @host['TEMPLATE/PROVISION_ID']
provision_id = @host['TEMPLATE/PROVISION/ID']
provision = OneProvision::Provision.new_with_id(provision_id, client)
provision.info

@@ -161,4 +161,4 @@ class ElasticDriver < VNMMAD::VNMDriver
commands.run_remote(@ssh)
end
end
# rubocop:enable Naming/FileName
# rubocop:enable Naming/FileName