
F #4913: Add linked clone import to CLI. Several enhancements in monitor and import performance (ContainerView and PropertyCollector)

mcabrerizo 2017-04-10 13:36:00 +02:00
parent dea2a64148
commit e6a57b0f85
7 changed files with 1450 additions and 774 deletions
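The recurring change in this commit replaces per-object RPC chatter with a single server-side ContainerView traversed by one PropertyCollector call. A minimal standalone sketch of that pattern follows, assuming rbvmomi and hypothetical connection details (in the driver itself the connection comes from VCenterDriver::VIClient):

    require 'rbvmomi'

    # Hypothetical credentials, for illustration only
    vim = RbVmomi::VIM.connect(host: 'vcenter.example.com',
                               user: 'administrator@vsphere.local',
                               password: 'secret', insecure: true)

    # One server-side view over every VirtualMachine under the root folder
    view = vim.serviceContent.viewManager.CreateContainerView({
        container: vim.rootFolder,
        type:      ['VirtualMachine'],
        recursive: true
    })

    # One PropertyCollector round trip fetches the listed properties for
    # every object in the view, instead of one RPC per object and property
    filterSpec = RbVmomi::VIM.PropertyFilterSpec(
        :objectSet => [
            :obj => view,
            :skip => true,
            :selectSet => [
                RbVmomi::VIM.TraversalSpec(
                    :name => 'traverseEntities',
                    :type => 'ContainerView',
                    :path => 'view',
                    :skip => false
                )
            ]
        ],
        :propSet => [
            { :type => 'VirtualMachine', :pathSet => ['name', 'config.template'] }
        ]
    )

    pc = vim.serviceContent.propertyCollector
    pc.RetrieveProperties(:specSet => [filterSpec]).each do |r|
        props = r.to_hash
        puts "#{r.obj._ref} #{props['name']} template=#{props['config.template']}"
    end

    view.DestroyView # views hold server-side state, release them when done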


@@ -88,7 +88,7 @@ get '/vcenter' do
error 404, error.to_json
end
rs = dc_folder.get_unimported_hosts(hpool)
rs = dc_folder.get_unimported_hosts(hpool,vcenter_client.vim.host)
[200, rs.to_json]
rescue Exception => e
logger.error("[vCenter] " + e.message)
@@ -110,7 +110,7 @@ get '/vcenter/templates' do
error 404, error.to_json
end
templates = dc_folder.get_unimported_templates(vcenter_client, tpool)
templates = dc_folder.get_unimported_templates(vcenter_client, tpool, vcenter_client.vim.host)
if templates.nil?
msg = "No datacenter found"
@@ -131,18 +131,86 @@ get '/vcenter/template/:vcenter_ref' do
begin
t = {}
t[:one] = ""
template_copy_ref = nil
template = nil
append = true
lc_error = nil
template = VCenterDriver::VirtualMachine.new_from_ref(params[:vcenter_ref], vcenter_client)
ref = params[:vcenter_ref]
if !ref || ref.empty?
msg = "No template ref specified"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
template = VCenterDriver::Template.new_from_ref(ref, vcenter_client)
vc_uuid = vcenter_client.vim.serviceContent.about.instanceUuid
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool)
ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool)
# POST params
if @request_body && !@request_body.empty?
body_hash = JSON.parse(@request_body)
use_linked_clones = body_hash['use_linked_clones'] || false
create_copy = body_hash['create_copy'] || false
template_name = body_hash['template_name'] || ""
if !use_linked_clones && (create_copy || !template_name.empty?)
msg = "Should not set create template copy or template copy name if not using linked clones"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 403, error.to_json
end
if use_linked_clones && !create_copy && !template_name.empty?
msg = "Should not set template copy name if create template copy hasn't been selected"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 403, error.to_json
end
if create_copy
lc_error, template_copy_ref = template.create_template_copy(template_name)
if template_copy_ref
template = VCenterDriver::Template.new_from_ref(template_copy_ref, vcenter_client)
one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, vcenter_client, vcenter_client.vim.host)
if one_template
lc_error, use_lc = template.create_delta_disks
if !lc_error
one_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n"
t = one_template
append = false # t[:one] replaces the current template
end
else
lc_error = "Could not obtain the info from the template's copy"
template.delete_template if template_copy_ref
end
end
else
lc_error, use_lc = template.create_delta_disks
if !lc_error
append = true
t[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc
end
end
end
# Create images or get disks information for template
error, template_disks = template.import_vcenter_disks(vc_uuid, dpool, ipool)
if !error.empty?
append = false
template.delete_template if template_copy_ref
msg = error
logger.error("[vCenter] " + msg)
error = Error.new(msg)
@@ -155,6 +223,8 @@ get '/vcenter/template/:vcenter_ref' do
error, template_nics = template.import_vcenter_nics(vc_uuid, npool)
if !error.empty?
append = false
template.delete_template if template_copy_ref
msg = error
logger.error("[vCenter] " + msg)
error = Error.new(msg)
@@ -163,8 +233,12 @@ get '/vcenter/template/:vcenter_ref' do
t[:one] << template_nics
t[:lc_error] = lc_error
t[:append] = append
[200, t.to_json]
rescue Exception => e
template.delete_template if template_copy_ref
logger.error("[vCenter] " + e.message)
error = Error.new(e.message)
error 403, error.to_json
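The linked clone options reach this route through the request body. A sketch of what a client might send, with illustrative values (the field names are the ones parsed by the handler above):

    require 'json'

    # Body consumed by the handler via JSON.parse(@request_body)
    body = {
        'use_linked_clones' => true,  # enable linked clones for this template
        'create_copy'       => true,  # clone the template before creating delta disks
        'template_name'     => 'one-corp_template' # optional name for the copy
    }.to_json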
@@ -184,7 +258,7 @@ get '/vcenter/networks' do
error 404, error.to_json
end
networks = dc_folder.get_unimported_networks(npool)
networks = dc_folder.get_unimported_networks(npool,vcenter_client.vim.host)
if networks.nil?
msg = "No datacenter found"
@@ -231,16 +305,27 @@ get '/vcenter/datastores' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
if hpool.respond_to?(:message)
msg = "Could not get OpenNebula DatastorePool: #{hpool.message}"
if dpool.respond_to?(:message)
msg = "Could not get OpenNebula DatastorePool: #{dpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
datastores = dc_folder.get_unimported_datastores(hpool)
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
msg = "Could not get OpenNebula HostPool: #{hpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
datastores = dc_folder.get_unimported_datastores(dpool, vcenter_client.vim.host, hpool)
if datastores.nil?
msg = "No datacenter found"
logger.error("[vCenter] " + msg)


@@ -1,5 +1,5 @@
module VCenterDriver
require 'set'
class DatacenterFolder
attr_accessor :items
@@ -38,57 +38,11 @@ class DatacenterFolder
@vi_client.vim.serviceContent.about.instanceUuid
end
def get_vcenter_instance_name
@vi_client.vim.serviceContent.setting.setting.select{|set| set.key == 'VirtualCenter.InstanceName'}.first.value rescue nil
end
def get_vcenter_api_version
@vi_client.vim.serviceContent.about.apiVersion
end
def get_clusters
clusters = {}
vcenter_uuid = get_vcenter_instance_uuid
pool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if pool.respond_to?(:message)
raise "Could not get OpenNebula Pool: #{pool.message}"
end
fetch! if @items.empty? #Get datacenters
# Add datacenter to hash and store in an array all clusters
@items.values.each do |dc|
dc_name = dc.item.name
clusters[dc_name] = []
host_folder = dc.host_folder
host_folder.fetch_clusters!
host_folder.items.values.each do |ccr|
cluster = {}
cluster[:ref] = ccr['_ref']
cluster[:name] = ccr['name']
attribute = "TEMPLATE/VCENTER_CCR_REF"
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
attribute,
ccr['_ref'],
vcenter_uuid,
pool)
next if one_host.nil? #Cluster hasn't been imported
cluster[:host_id] = one_host['ID']
clusters[dc_name] << cluster
end
end
clusters
end
def get_unimported_hosts(hpool)
def get_unimported_hosts(hpool, vcenter_instance_name)
host_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
@@ -116,7 +70,8 @@ class DatacenterFolder
rpools = cluster.get_resource_pool_list.select {|rp| !rp[:name].empty?}
host_info = {}
host_info[:cluster_name] = ccr['name'].tr(" ", "_")
cluster_name = "[#{vcenter_instance_name} - #{dc_name}] #{ccr['name'].tr(" ", "_")}"
host_info[:cluster_name] = cluster_name
host_info[:cluster_ref] = ccr['_ref']
host_info[:vcenter_uuid] = vcenter_uuid
host_info[:vcenter_version] = vcenter_version
@@ -129,19 +84,41 @@ class DatacenterFolder
return host_objects
end
def get_unimported_datastores(hpool)
def get_unimported_datastores(dpool, vcenter_instance_name, hpool)
ds_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
vcenter_instance_name = get_vcenter_instance_name
fetch! if @items.empty? #Get datacenters
one_clusters = get_clusters
one_clusters = {}
@items.values.each do |dc|
dc_name = dc.item.name
one_clusters[dc_name] = []
host_folder = dc.host_folder
host_folder.fetch_clusters!
host_folder.items.values.each do |ccr|
cluster = {}
cluster[:ref] = ccr['_ref']
cluster[:name] = ccr['name']
attribute = "TEMPLATE/VCENTER_CCR_REF"
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
attribute,
ccr['_ref'],
vcenter_uuid,
hpool)
if !!one_host
cluster[:host_id] = one_host['ID']
one_clusters[dc_name] << cluster
end
end
next if one_clusters[dc_name].empty? #No clusters imported, continue
ds_objects[dc_name] = []
@@ -155,20 +132,21 @@ class DatacenterFolder
clusters_in_ds = {}
hosts_in_ds.each do |host|
if !clusters_in_ds[host.key.parent._ref.to_s]
clusters_in_ds[host.key.parent._ref.to_s] = host.key.parent.name
cluster_ref = host.key.parent._ref
if !clusters_in_ds[cluster_ref]
clusters_in_ds[cluster_ref] = host.key.parent.name
end
end
clusters_in_ds.each do |ccr_ref, ccr_name|
already_image_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "IMAGE_DS", hpool)
already_image_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "IMAGE_DS", dpool)
if !already_image_ds
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "IMAGE_DS", vcenter_uuid, vcenter_instance_name, dc_name)
ds_objects[dc_name] << object if !object.nil?
end
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", hpool)
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool)
if !already_system_ds
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "SYSTEM_DS", vcenter_uuid, vcenter_instance_name, dc_name)
@@ -184,14 +162,15 @@ class DatacenterFolder
ds_in_spod.each do |sp_ds|
hosts_in_ds = sp_ds.host
hosts_in_ds.each do |host|
if !clusters_in_spod[host.key.parent._ref.to_s]
clusters_in_spod[host.key.parent._ref.to_s] = host.key.parent.name
cluster_ref = host.key.parent._ref
if !clusters_in_spod[cluster_ref]
clusters_in_spod[cluster_ref] = host.key.parent.name
end
end
end
clusters_in_spod.each do |ccr_ref, ccr_name|
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", hpool)
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool)
if !already_system_ds
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "SYSTEM_DS", vcenter_uuid, vcenter_instance_name, dc_name)
@@ -201,7 +180,6 @@ class DatacenterFolder
end
end
end
ds_objects
end
@@ -209,77 +187,120 @@ class DatacenterFolder
template_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
vcenter_instance_name = vi_client.vim.host
fetch! if @items.empty? #Get datacenters
@items.values.each do |dc|
rp_cache = {}
dc_name = dc.item.name
template_objects[dc_name] = []
#Get templates defined in a datacenter
vm_folder = dc.vm_folder
vm_folder.fetch_templates!
vm_folder.items.values.each do |template|
view = vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: dc.item.vmFolder,
type: ['VirtualMachine'],
recursive: true
})
pc = vi_client.vim.serviceContent.propertyCollector
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'VirtualMachine', :pathSet => ['config.template'] }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
vms = {}
result.each do |r|
vms[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
end
templates = []
vms.each do |ref,value|
if value["config.template"]
templates << VCenterDriver::Template.new_from_ref(ref, vi_client)
end
end
view.DestroyView # Destroy the view
templates.each do |template|
one_template = VCenterDriver::VIHelper.find_by_ref(OpenNebula::TemplatePool,
"TEMPLATE/VCENTER_TEMPLATE_REF",
template['_ref'],
vcenter_uuid,
tpool)
next if one_template #If the template has been already imported
template_ccr = template['runtime.host.parent']
one_template = VCenterDriver::Template.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name, dc_name, rp_cache)
# Check if template has nics or disks to be imported later
has_nics_and_disks = false
template_objects[dc_name] << one_template if !!one_template
end
end
template["config.hardware.device"].each do |device|
if VCenterDriver::Storage.is_disk_or_iso?(device)
has_nics_and_disks = true
break
end
if VCenterDriver::Network.is_nic?(device)
has_nics_and_disks = true
break
end
end
#Get resource pools
rp_cache = {}
if !rp_cache[template_ccr.name.to_s]
tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr._ref, vi_client)
rp_list = tmp_cluster.get_resource_pool_list
rp = ""
if !rp_list.empty?
rp_name_list = []
rp_list.each do |rp_hash|
rp_name_list << rp_hash[:name]
end
rp = "O|list|Which resource pool you want this VM to run in? "
rp << "|#{rp_name_list.join(",")}" #List of RP
rp << "|#{rp_name_list.first}" #Default RP
end
rp_cache[template_ccr.name.to_s] = rp
end
rp = rp_cache[template_ccr.name.to_s]
object = template.to_one_template(template,
has_nics_and_disks,
rp,
rp_list,
vcenter_uuid)
template_objects[dc_name] << object if !object.nil?
end #template loop
end #datacenter loop
return template_objects
template_objects
end
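With all VM properties prefetched, template discovery reduces to filtering on the single config.template flag. A condensed sketch of that step, assuming the vms hash built as above:

    templates = vms.select { |ref, props| props["config.template"] }
                   .keys
                   .map { |ref| VCenterDriver::Template.new_from_ref(ref, vi_client) }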
def get_unimported_networks(npool)
def get_unimported_networks(npool,vcenter_instance_name)
network_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
pc = @vi_client.vim.serviceContent.propertyCollector
#Get all port groups and distributed port groups in vcenter instance
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @vi_client.vim.rootFolder,
type: ['Network','DistributedVirtualPortgroup'],
recursive: true
})
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'Network', :pathSet => ['name'] },
{ :type => 'DistributedVirtualPortgroup', :pathSet => ['name'] }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
networks = {}
result.each do |r|
networks[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::DistributedVirtualPortgroup) || r.obj.is_a?(RbVmomi::VIM::Network)
networks[r.obj._ref][:network_type] = r.obj.is_a?(RbVmomi::VIM::DistributedVirtualPortgroup) ? "Distributed Port Group" : "Port Group"
end
view.DestroyView # Destroy the view
fetch! if @items.empty? #Get datacenters
@items.values.each do |dc|
@@ -287,32 +308,66 @@ class DatacenterFolder
dc_name = dc.item.name
network_objects[dc_name] = []
#Get networks defined in a datacenter
network_folder = dc.network_folder
network_folder.fetch!
network_folder.items.values.each do |network|
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: dc.item,
type: ['ClusterComputeResource'],
recursive: true
})
next if network.instance_of? VCenterDriver::DistributedVirtualSwitch
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'ClusterComputeResource', :pathSet => ['name','network'] }
]
)
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
network['_ref'],
vcenter_uuid,
npool)
next if one_network #If the network has been already imported
result = pc.RetrieveProperties(:specSet => [filterSpec])
network_name = network['name']
network_ref = network['_ref']
clusters = {}
result.each do |r|
clusters[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::ClusterComputeResource)
end
view.DestroyView # Destroy the view
clusters.each do |ref, info|
network_obj = info['network']
network_obj.each do |n|
network_ref = n._ref
network_name = networks[network_ref]['name']
network_type = networks[network_ref][:network_type]
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
network_ref,
vcenter_uuid,
npool)
next if one_network #If the network has been already imported
network.clusters.each do |ccr_ref, ccr_name|
one_vnet = VCenterDriver::Network.to_one_template(network_name,
network_ref,
network.network_type,
ccr_ref,
ccr_name,
vcenter_uuid)
network_ref,
network_type,
ref,
info['name'],
vcenter_uuid,
vcenter_instance_name,
dc_name)
network_objects[dc_name] << one_vnet
end #network clusters loop
end
end # network loop
end #datacenters loop


@@ -160,19 +160,21 @@ class Storage
return nil if one_cluster.nil?
name, capacity, freeSpace = @item.collect("name","summary.capacity","summary.freeSpace")
ds_name = ""
if type == "IMAGE_DS"
ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{self['name']} - #{ccr_name} (IMG)"
ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{name} - #{ccr_name.tr(" ", "_")} (IMG)"
else
ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{self['name']} - #{ccr_name} (SYS)"
ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{name} - #{ccr_name.tr(" ", "_")} (SYS)"
ds_name << " [StorDRS]" if self.class == VCenterDriver::StoragePod
end
one_tmp = {
:name => ds_name,
:total_mb => ((self['summary.capacity'].to_i / 1024) / 1024),
:free_mb => ((self['summary.freeSpace'].to_i / 1024) / 1024),
:total_mb => ((capacity.to_i / 1024) / 1024),
:free_mb => ((freeSpace.to_i / 1024) / 1024),
:cluster => ccr_name,
:one => to_one(ds_name, vcenter_uuid, ccr_ref, one_cluster[:host_id])
}
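Datastore import names now embed the vCenter instance and datacenter, and cluster names have spaces replaced with underscores. An illustrative rendering, assuming instance "vcenter.example", datacenter "DC1", datastore "datastore1" and cluster "Cluster 1":

    # IMAGE_DS:  "[vcenter.example - DC1] datastore1 - Cluster_1 (IMG)"
    # SYSTEM_DS: "[vcenter.example - DC1] datastore1 - Cluster_1 (SYS)"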


@@ -27,12 +27,14 @@ end # class HostFolder
class ClusterComputeResource
attr_accessor :item
attr_accessor :rp_list
include Memoize
def initialize(item, vi_client=nil)
@item = item
@vi_client = vi_client
@rp_list
end
def fetch_resource_pools(rp, rp_array = [])
@@ -53,51 +55,54 @@ class ClusterComputeResource
@resource_pools
end
def get_resource_pool_list(rp = nil, parent_prefix = "", rp_array = [])
def get_resource_pool_list(rp = @item.resourcePool, parent_prefix = "", rp_array = [])
current_rp = ""
if rp.nil?
rp = @item.resourcePool
else
if !parent_prefix.empty?
current_rp << parent_prefix
current_rp << "/"
end
current_rp << rp.name
if !parent_prefix.empty?
current_rp << parent_prefix
current_rp << "/"
end
if rp.resourcePool.size == 0
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info
else
rp.resourcePool.each do |child_rp|
get_resource_pool_list(child_rp, current_rp, rp_array)
end
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info if !current_rp.empty?
resource_pool, name = rp.collect("resourcePool","name")
current_rp << name if name != "Resources"
resource_pool.each do |child_rp|
get_resource_pool_list(child_rp, current_rp, rp_array)
end
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info if !current_rp.empty?
rp_array
end
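The rewritten traversal flattens the resource pool tree into name/ref hashes with slash-separated paths, leaving out the implicit root "Resources" pool. A usage sketch, assuming ccr_ref and vi_client are available as in the importer code:

    cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client)
    cluster.get_resource_pool_list.each do |rp|
        # e.g. {:name => "Production/Web", :ref => "resgroup-102"} (illustrative)
        puts "#{rp[:name]} (#{rp[:ref]})"
    end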
def monitor
#Load the host systems
summary = @item.summary
total_cpu,
num_cpu_cores,
effective_cpu,
total_memory,
effective_mem,
num_hosts,
num_eff_hosts = @item.collect("summary.totalCpu",
"summary.numCpuCores",
"summary.effectiveCpu",
"summary.totalMemory",
"summary.effectiveMemory",
"summary.numHosts",
"summary.numEffectiveHosts"
)
mhz_core = summary.totalCpu.to_f / summary.numCpuCores.to_f
eff_core = summary.effectiveCpu.to_f / mhz_core
mhz_core = total_cpu.to_f / num_cpu_cores.to_f
eff_core = effective_cpu.to_f / mhz_core
free_cpu = sprintf('%.2f', eff_core * 100).to_f
total_cpu = summary.numCpuCores.to_f * 100
total_cpu = num_cpu_cores.to_f * 100
used_cpu = sprintf('%.2f', total_cpu - free_cpu).to_f
total_mem = summary.totalMemory.to_i / 1024
free_mem = summary.effectiveMemory.to_i * 1024
total_mem = total_memory.to_i / 1024
free_mem = effective_mem.to_i * 1024
str_info = ""
@@ -106,8 +111,8 @@ class ClusterComputeResource
# System
str_info << "HYPERVISOR=vcenter\n"
str_info << "TOTALHOST=" << summary.numHosts.to_s << "\n"
str_info << "AVAILHOST=" << summary.numEffectiveHosts.to_s << "\n"
str_info << "TOTALHOST=" << num_hosts.to_s << "\n"
str_info << "AVAILHOST=" << num_eff_hosts.to_s << "\n"
# CPU
str_info << "CPUSPEED=" << mhz_core.to_s << "\n"
@@ -118,37 +123,90 @@ class ClusterComputeResource
# Memory
str_info << "TOTALMEMORY=" << total_mem.to_s << "\n"
str_info << "FREEMEMORY=" << free_mem.to_s << "\n"
str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s
str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s << "\n"
str_info << "VCENTER_LAST_PERF_POLL=" << Time.now.to_i.to_s << "\n"
str_info << monitor_resource_pools(@item.resourcePool, "", mhz_core)
str_info << monitor_resource_pools(mhz_core)
end
def monitor_resource_pools(parent_rp, parent_prefix, mhz_core)
return "" if parent_rp.resourcePool.size == 0
def monitor_resource_pools(mhz_core)
@rp_list = get_resource_pool_list
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for RPs inside this cluster
type: ['ResourcePool'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"config.cpuAllocation.expandableReservation",
"config.cpuAllocation.limit",
"config.cpuAllocation.reservation",
"config.cpuAllocation.shares.level",
"config.cpuAllocation.shares.shares",
"config.memoryAllocation.expandableReservation",
"config.memoryAllocation.limit",
"config.memoryAllocation.reservation",
"config.memoryAllocation.shares.level",
"config.memoryAllocation.shares.shares"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'ResourcePool', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
rps = {}
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
rps[r.obj._ref] = hashed_properties
end
end
return "" if rps.empty?
rp_info = ""
parent_rp.resourcePool.each{|rp|
rpcpu = rp.config.cpuAllocation
rpmem = rp.config.memoryAllocation
rps.each{|ref, info|
# CPU
cpu_expandable = rpcpu.expandableReservation ? "YES" : "NO"
cpu_limit = rpcpu.limit == "-1" ? "UNLIMITED" : rpcpu.limit
cpu_reservation = rpcpu.reservation
cpu_num = rpcpu.reservation.to_f / mhz_core
cpu_shares_level = rpcpu.shares.level
cpu_shares = rpcpu.shares.shares
cpu_expandable = info["config.cpuAllocation.expandableReservation"] ? "YES" : "NO"
cpu_limit = info["config.cpuAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.cpuAllocation.limit"]
cpu_reservation = info["config.cpuAllocation.reservation"]
cpu_num = cpu_reservation.to_f / mhz_core
cpu_shares_level = info["config.cpuAllocation.shares.level"]
cpu_shares = info["config.cpuAllocation.shares.shares"]
# MEMORY
mem_expandable = rpmem.expandableReservation ? "YES" : "NO"
mem_limit = rpmem.limit == "-1" ? "UNLIMITED" : rpmem.limit
mem_reservation = rpmem.reservation.to_f
mem_shares_level = rpmem.shares.level
mem_shares = rpmem.shares.shares
mem_expandable = info["config.memoryAllocation.expandableReservation"] ? "YES" : "NO"
mem_limit = info["config.memoryAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.memoryAllocation.limit"]
mem_reservation = info["config.memoryAllocation.reservation"].to_f
mem_shares_level = info["config.memoryAllocation.shares.level"]
mem_shares = info["config.memoryAllocation.shares.shares"]
rp_name = (parent_prefix.empty? ? "" : parent_prefix + "/")
rp_name += rp.name
rp_name = rp_list.select { |item| item[:ref] == ref}.first[:name] rescue ""
rp_name = "Resources" if rp_name.empty?
rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
rp_info << "NAME=\"#{rp_name}\","
@@ -164,39 +222,80 @@ class ClusterComputeResource
rp_info << "MEM_SHARES=#{mem_shares},"
rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
rp_info << "]"
if rp.resourcePool.size != 0
rp_info << monitor_resource_pools(rp, rp_name, mhz_core)
end
}
view.DestroyView
return rp_info
end
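Each pool found by the collector becomes one VCENTER_RESOURCE_POOL_INFO attribute in the monitoring string. A sample rendered entry, values illustrative and the CPU/memory keys between NAME and MEM_SHARES elided as in the hunk above:

    VCENTER_RESOURCE_POOL_INFO = [
      NAME="Production/Web",
      ...
      MEM_SHARES=163840,
      MEM_SHARES_LEVEL=normal
    ]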
def monitor_host_systems
host_info = ""
@item.host.each do |h|
next if h.runtime.connectionState != "connected"
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for Hosts inside this cluster
type: ['HostSystem'],
recursive: true
})
summary = h.summary
hw = summary.hardware
stats = summary.quickStats
pc = @vi_client.vim.serviceContent.propertyCollector
total_cpu = hw.numCpuCores * 100
used_cpu = (stats.overallCpuUsage.to_f / hw.cpuMhz.to_f) * 100
monitored_properties = [
"name",
"runtime.connectionState",
"summary.hardware.numCpuCores",
"summary.hardware.memorySize",
"summary.hardware.cpuModel",
"summary.hardware.cpuMhz",
"summary.quickStats.overallCpuUsage",
"summary.quickStats.overallMemoryUsage"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'HostSystem', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
hosts = {}
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::HostSystem)
hosts[r.obj._ref] = hashed_properties
end
end
hosts.each do |ref, info|
next if info["runtime.connectionState"] != "connected"
total_cpu = info["summary.hardware.numCpuCores"] * 100
used_cpu = (info["summary.quickStats.overallCpuUsage"].to_f / info["summary.hardware.cpuMhz"].to_f) * 100
used_cpu = sprintf('%.2f', used_cpu).to_f # Trim precision
free_cpu = total_cpu - used_cpu
total_memory = hw.memorySize/1024
used_memory = stats.overallMemoryUsage*1024
total_memory = info["summary.hardware.memorySize"]/1024
used_memory = info["summary.quickStats.overallMemoryUsage"]*1024
free_memory = total_memory - used_memory
host_info << "\nHOST=["
host_info << "STATE=on,"
host_info << "HOSTNAME=\"" << h.name.to_s << "\","
host_info << "MODELNAME=\"" << hw.cpuModel.to_s << "\","
host_info << "CPUSPEED=" << hw.cpuMhz.to_s << ","
host_info << "HOSTNAME=\"" << info["name"].to_s << "\","
host_info << "MODELNAME=\"" << info["summary.hardware.cpuModel"].to_s << "\","
host_info << "CPUSPEED=" << info["summary.hardware.cpuMhz"].to_s << ","
host_info << "MAX_CPU=" << total_cpu.to_s << ","
host_info << "USED_CPU=" << used_cpu.to_s << ","
host_info << "FREE_CPU=" << free_cpu.to_s << ","
@@ -206,88 +305,195 @@ class ClusterComputeResource
host_info << "]"
end
view.DestroyView # Destroy the view
return host_info
end
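The per-host monitoring record is assembled from the collected properties; a sample entry with illustrative values (memory keys elided, as in the hunk above):

    HOST=[
      STATE=on,
      HOSTNAME="esx1.example.com",
      MODELNAME="Intel(R) Xeon(R) CPU",
      CPUSPEED=2600,
      MAX_CPU=3200,
      USED_CPU=410.25,
      FREE_CPU=2789.75,
      ...
    ]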
def monitor_vms
str_info = ""
vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
cluster_name = self["name"]
cluster_ref = self["_ref"]
# Get info of the host where the VM/template is located
host_id = nil
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
cluster_ref,
vc_uuid)
host_id = one_host["ID"] if one_host
# Extract CPU info for each esx host in cluster
esx_host_cpu = {}
@item.host.each do |esx_host|
esx_host_cpu[esx_host._ref] = esx_host.summary.hardware.cpuMhz.to_f
end
@monitored_vms = Set.new
resource_pools.each do |rp|
str_info << monitor_vms_in_rp(rp)
end
return str_info
end
def monitor_vms_in_rp(rp)
str_info = ""
host_pool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool)
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for VMs inside this cluster
type: ['VirtualMachine'],
recursive: true
})
ccr_host = {}
host_pool.each do |host|
ccr = host['TEMPLATE/VCENTER_CCR_REF']
ccr_host[ccr] = host['ID'] if ccr
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"name", #VM name
"config.template", #To filter out templates
"summary.runtime.powerState", #VM power state
"summary.quickStats.hostMemoryUsage", #Memory usage
"summary.quickStats.overallCpuUsage", #CPU used by VM
"runtime.host", #ESX host
"resourcePool", #RP
"guest.guestFullName",
"guest.net", #IP addresses as seen by guest tools,
"guest.guestState",
"guest.toolsVersion",
"guest.toolsRunningStatus",
"guest.toolsVersionStatus2", #IP addresses as seen by guest tools,
"config.extraConfig", #VM extraconfig info e.g opennebula.vm.running
"config.hardware.numCPU",
"config.hardware.memoryMB",
"config.annotation"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'VirtualMachine', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
vms = {}
vm_objects = []
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
#Only take care of VMs, not templates
if !hashed_properties["config.template"]
vms[r.obj._ref] = hashed_properties
vm_objects << r.obj
end
end
end
rp.vm.each do |v|
pm = @vi_client.vim.serviceContent.perfManager
stats = []
max_samples = 9
refresh_rate = 20 #Real time stats are sampled every 20 seconds
last_mon_time = one_host["TEMPLATE/VCENTER_LAST_PERF_POLL"]
if last_mon_time
interval = (Time.now.to_i - last_mon_time.to_i)
interval = 3601 if interval < 0
samples = (interval / refresh_rate)
samples = 1 if samples == 0
max_samples = interval > 3600 ? 9 : samples
end
stats = pm.retrieve_stats(
vm_objects,
['net.transmitted','net.bytesRx','net.bytesTx','net.received',
'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
'virtualDisk.read','virtualDisk.write'],
{max_samples: max_samples}
)
get_resource_pool_list if !@rp_list
vms.each do |vm_ref,info|
begin
vm = VirtualMachine.new(v, @vi_client)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, @vi_client)
info[:cluster_name] = cluster_name
info[:cluster_ref] = cluster_ref
info[:vc_uuid] = vc_uuid
info[:host_id] = host_id
info[:rp_list] = @rp_list
vm.vm_info = info
number = -1
# Check the running flag
running_flag = vm["config.extraConfig"].select do |val|
running_flag = info["config.extraConfig"].select do |val|
val[:key] == "opennebula.vm.running"
end
if running_flag.size > 0 and running_flag[0]
if !running_flag.empty? && running_flag.first
running_flag = running_flag[0][:value]
end
next if running_flag == "no"
# Extract vmid if possible
matches = vm["name"].match(/^one-(\d*)(-(.*))?$/)
matches = info["name"].match(/^one-(\d*)(-(.*))?$/)
number = matches[1] if matches
# Extract vmid from ref and vcenter instance uuid if possible
vm_id = vm.get_vm_id
number = vm_id if vm_id
if number == -1
one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
"DEPLOY_ID",
vm_ref,
vc_uuid)
number = one_vm["ID"] if one_vm
end
if number != -1
next if @monitored_vms.include? number
@monitored_vms << number
vm.one_item if vm.get_vm_id
end
vm.monitor
vm.monitor(esx_host_cpu,stats)
next if !vm["config"]
vm_name = "#{info["name"]} - #{cluster_name}"
str_info << %Q{
VM = [
ID="#{number}",
VM_NAME="#{vm["name"]} - #{vm["runtime.host.parent.name"]}",
DEPLOY_ID="#{vm["_ref"]}",
VM_NAME="#{vm_name}",
DEPLOY_ID="#{vm_ref}",
}
if number == -1
vm_template_64 = Base64.encode64(vm.to_one).gsub("\n","")
vm_template_64 = Base64.encode64(vm.vm_to_one(vm_name)).gsub("\n","")
str_info << "IMPORT_TEMPLATE=\"#{vm_template_64}\","
end
str_info << "POLL=\"#{vm.info.gsub('"', "\\\"")}\"]"
rescue Exception => e
STDERR.puts e.inspect
STDERR.puts e.backtrace
end
end
return str_info.gsub(/^\s+/,"")
end
view.DestroyView # Destroy the view
return str_info
end
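Performance counters are likewise fetched for the whole VM batch in one PerformanceManager call instead of per VM. A minimal sketch of the same rbvmomi helper, assuming the vm_objects array built above:

    pm = @vi_client.vim.serviceContent.perfManager
    # Realtime counters are sampled every 20 seconds; max_samples bounds
    # how many samples per counter come back for each VM in the batch
    stats = pm.retrieve_stats(vm_objects,
                              ['net.bytesRx', 'net.bytesTx'],
                              {max_samples: 9})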
def monitor_customizations
customizations = self['_connection'].serviceContent.customizationSpecManager.info


@@ -16,11 +16,7 @@ def self.import_clusters(con_ops, options)
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
# Get vcenter instance uuid, as morefs are only unique within each vcenter
vc_uuid = vi_client.vim.serviceContent.about.instanceUuid
# Get vcenter API version
vc_version = vi_client.vim.serviceContent.about.apiVersion
vcenter_instance_name = vi_client.vim.host
# OpenNebula's ClusterPool
cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false)
@@ -29,8 +25,6 @@ def self.import_clusters(con_ops, options)
raise "Could not get OpenNebula ClusterPool: #{cpool.message}"
end
cpool.info
cluster_list = {}
cpool.each do |c|
cluster_list[c["ID"]] = c["NAME"]
@@ -43,7 +37,7 @@ def self.import_clusters(con_ops, options)
raise "Could not get OpenNebula HostPool: #{hpool.message}"
end
rs = dc_folder.get_unimported_hosts(hpool)
rs = dc_folder.get_unimported_hosts(hpool,vcenter_instance_name)
STDOUT.print "done!\n\n"
@@ -175,6 +169,9 @@ def self.import_templates(con_ops, options)
end
tmps.each{ |t|
template = nil
template_copy_ref = nil
template_xml = nil
if !use_defaults
STDOUT.print "\n * VM Template found:\n"\
@@ -186,9 +183,106 @@ def self.import_templates(con_ops, options)
next if STDIN.gets.strip.downcase != 'y'
end
# Linked Clones
if !use_defaults
template = VCenterDriver::Template.new_from_ref(t[:vcenter_ref], vi_client)
STDOUT.print "\n For faster deployment operations"\
" and lower disk usage, OpenNebula"\
" can create new VMs as linked clones."\
"\n Would you like to use Linked Clones with VMs based on this template (y/[n])? "
if STDIN.gets.strip.downcase == 'y'
STDOUT.print "\n Linked clones requires that delta"\
" disks must be created for each disk in the template."\
" This operation may change the template contents."\
" \n Do you want OpenNebula to create a copy of the template,"\
" so the original template remains untouched ([y]/n)? "
template = t[:template]
if STDIN.gets.strip.downcase != 'n'
STDOUT.print "\n The new template will be named"\
" adding a one- prefix to the name"\
" of the original template. \n"\
" If you prefer a different name"\
" please specify or press Enter"\
" to use defaults: "
template_name = STDIN.gets.strip.downcase
STDOUT.print "\n WARNING!!! The cloning operation can take some time"\
" depending on the size of disks. Please wait...\n"
error, template_copy_ref = template.create_template_copy(template_name)
if template_copy_ref
template = VCenterDriver::Template.new_from_ref(template_copy_ref, vi_client)
one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, vi_client, options[:vcenter], dc)
if one_template
#Now create delta disks
STDOUT.print "\n Delta disks are being created, please be patient..."
lc_error, use_lc = template.create_delta_disks
if lc_error
STDOUT.print "\n ERROR. Something was wrong with the create delta disks on the template operation: #{lc_error}.\n"\
"\n Linked Clones will not be used with this template.\n"
else
one_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n"
t = one_template
end
else
STDOUT.print "\n ERROR. Something was wrong obtaining the info from the template's copy.\n"\
"\n Linked Clones will not be used with this template.\n"
template.delete_template if template_copy_ref
end
else
STDOUT.print "\n ERROR. #{error}\n"
end
else
# Create linked clones on top of the existing template
# Create a VirtualMachine object from the template_copy_ref
STDOUT.print "\n Delta disks are being created, please be patient..."
lc_error, use_lc = template.create_delta_disks
if lc_error
STDOUT.print "\n ERROR. Something was wrong with the create delta disks on the template operation: #{lc_error}.\n"\
"\n Linked Clones will not be used with this template.\n"
end
t[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc
end
end
end
vcenter_vm_folder = ""
if !use_defaults
STDOUT.print "\n\n Do you want to specify a folder where"\
" the deployed VMs based on this template will appear"\
" in vSphere's VM and Templates section?"\
"\n If no path is set, VMs will be placed in the same"\
" location where the template lives."\
"\n Please specify a path using slashes to separate folders"\
" e.g /Management/VMs or press Enter to use defaults: "\
vcenter_vm_folder = STDIN.gets.strip
t[:one] << "VCENTER_VM_FOLDER=\"#{vcenter_vm_folder}\"\n" if !vcenter_vm_folder.empty?
end
## Add existing disks to template (OPENNEBULA_MANAGED)
template = t[:template]
STDOUT.print "\n The existing disks and networks in the template"\
" are being imported, please be patient..."
template = t[:template] if !template
error, template_disks = template.import_vcenter_disks(vc_uuid,
dpool,
@@ -198,15 +292,17 @@ def self.import_templates(con_ops, options)
t[:one] << template_disks
else
STDOUT.puts error
template.delete_template if template_copy_ref
next
end
error, template_nics = template.import_vcenter_nics(vc_uuid,
npool)
npool)
if error.empty?
t[:one] << template_nics
else
STDOUT.puts error
template.delete_template if template_copy_ref
next
end
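Condensed, the linked clone sequence the CLI walks through above is: optionally copy the template, create the delta disks, and roll the copy back on failure. A non-interactive sketch using only methods shown in this diff:

    lc_error, template_copy_ref = template.create_template_copy(template_name)
    if template_copy_ref
        template = VCenterDriver::Template.new_from_ref(template_copy_ref, vi_client)
        lc_error, use_lc = template.create_delta_disks
        # roll back the copy if delta disk creation failed
        template.delete_template if lc_error
    end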
@@ -217,7 +313,7 @@ def self.import_templates(con_ops, options)
if !use_defaults
if rp_split.size > 3
STDOUT.print "\n This template is currently set to "\
STDOUT.print "\n\n This template is currently set to "\
"launch VMs in the default resource pool."\
"\n Press y to keep this behaviour, n to select"\
" a new resource pool or d to delegate the choice"\
@@ -238,15 +334,15 @@ def self.import_templates(con_ops, options)
"\"#{list_of_rp}\""
input_str+= "\n Press y to agree, or input a comma"\
" separated list of resource pools to edit "\
"[y/comma separated list] "
"([y]/comma separated list) "
STDOUT.print input_str
answer = STDIN.gets.strip
if answer.downcase == 'y'
rp_input += rp_split[3] + "|"
else
if !answer.empty? && answer.downcase != 'y'
rp_input += answer + "|"
else
rp_input += rp_split[3] + "|"
end
# Default
@@ -254,49 +350,37 @@ def self.import_templates(con_ops, options)
"to the end user is set to"\
" \"#{default_rp}\"."
input_str+= "\n Press y to agree, or input a new "\
"resource pool [y/resource pool name] "
"resource pool ([y]/resource pool name) "
STDOUT.print input_str
answer = STDIN.gets.strip
if answer.downcase == 'y'
rp_input += rp_split[4]
else
if !answer.empty? && answer.downcase != 'y'
rp_input += answer
else
rp_input += rp_split[4]
end
when 'n'
list_of_rp = rp_split[-2]
input_str = " The list of available resource pools is:\n"
STDOUT.print input_str
dashes = ""
100.times do
dashes << "-"
end
list_str = "\n [Index] Resource pool :"\
"\n #{dashes}\n"
STDOUT.print list_str
STDOUT.print " The list of available resource pools is:\n\n"
index = 1
t[:rp_list].each do |rp|
list_str = " [#{index}] #{rp[:name]}\n"
t[:rp_list].each do |r|
list_str = " - #{r[:name]}\n"
index += 1
STDOUT.print list_str
end
input_str = "\n Please input the new default"\
" resource pool index in the list (e.g 1): "
" resource pool name: "
STDOUT.print input_str
answer = STDIN.gets.strip
t[:one] << "VCENTER_RESOURCE_POOL=\"#{t[:rp_list][answer.to_i - 1][:name]}\"\n"
t[:one] << "VCENTER_RESOURCE_POOL=\"#{answer}\"\n"
end
end
end
@@ -314,6 +398,7 @@ def self.import_templates(con_ops, options)
if ::OpenNebula.is_error?(rc)
STDOUT.puts " Error creating template: #{rc.message}\n"
template.delete_template if template_copy_ref
else
STDOUT.puts " OpenNebula template #{one_t.id} created!\n"
end
@@ -343,20 +428,6 @@ def self.import_networks(con_ops, options)
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
# OpenNebula's ClusterPool
cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false)
if cpool.respond_to?(:message)
raise "Could not get OpenNebula ClusterPool: #{cpool.message}"
end
cpool.info
cluster_list = {}
cpool.each do |c|
cluster_list[c["ID"]] = c["NAME"]
end
# OpenNebula's VirtualNetworkPool
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
@@ -364,7 +435,7 @@ def self.import_networks(con_ops, options)
raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}"
end
rs = dc_folder.get_unimported_networks(npool)
rs = dc_folder.get_unimported_networks(npool,options[:vcenter])
STDOUT.print "done!\n"
@@ -393,22 +464,6 @@ def self.import_networks(con_ops, options)
STDOUT.print print_str
next if STDIN.gets.strip.downcase != 'y'
if cluster_list.size > 1
STDOUT.print "\n In which OpenNebula cluster do you want the network to be included?\n "
cluster_list_str = "\n"
cluster_list.each do |key, value|
cluster_list_str << " - ID: " << key << " - NAME: " << value << "\n"
end
STDOUT.print "\n #{cluster_list_str}"
STDOUT.print "\n Specify the ID of the cluster or Enter to use the default cluster: "
answer = STDIN.gets.strip
one_cluster_id = answer if !answer.empty?
end
end
size="255"
@@ -499,11 +554,7 @@ def self.import_networks(con_ops, options)
one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)
if one_cluster_id
rc = one_vn.allocate(n[:one],one_cluster_id.to_i)
else
rc = one_vn.allocate(n[:one])
end
rc = one_vn.allocate(n[:one])
if ::OpenNebula.is_error?(rc)
STDOUT.puts "\n Error creating virtual network: " +
@@ -538,27 +589,20 @@ def self.import_datastore(con_ops, options)
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
# OpenNebula's ClusterPool
cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false)
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
if cpool.respond_to?(:message)
raise "Could not get OpenNebula ClusterPool: #{cpool.message}"
if dpool.respond_to?(:message)
raise "Could not get OpenNebula DatastorePool: #{dpool.message}"
end
cpool.info
cluster_list = {}
cpool.each do |c|
cluster_list[c["ID"]] = c["NAME"]
end
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
# Get OpenNebula's host pool
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
raise "Could not get OpenNebula DatastorePool: #{hpool.message}"
raise "Could not get OpenNebula HostPool: #{hpool.message}"
end
rs = dc_folder.get_unimported_datastores(hpool)
rs = dc_folder.get_unimported_datastores(dpool, options[:vcenter], hpool)
STDOUT.print "done!\n"
@@ -585,21 +629,6 @@ def self.import_datastore(con_ops, options)
" Import this as Datastore [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
if cluster_list.size > 1
STDOUT.print "\n In which OpenNebula cluster do you want the datastore to be included?\n "
cluster_list_str = "\n"
cluster_list.each do |key, value|
cluster_list_str << " - ID: " << key << " - NAME: " << value << "\n"
end
STDOUT.print "\n #{cluster_list_str}"
STDOUT.print "\n Specify the ID of the cluster or Enter to use the default cluster: "
answer = STDIN.gets.strip
one_cluster_id = answer if !answer.empty?
end
end
one_d = VCenterDriver::VIHelper.new_one_item(OpenNebula::Datastore)


@@ -69,21 +69,24 @@ class Network
def self.to_one_template(network_name, network_ref, network_type,
ccr_ref, ccr_name, vcenter_uuid)
ccr_ref, ccr_name, vcenter_uuid,
vcenter_instance_name, dc_name)
one_tmp = {}
one_tmp[:name] = "#{network_name} - #{ccr_name}"
network_import_name = "[#{vcenter_instance_name} - #{dc_name}] #{network_name} - #{ccr_name.tr(" ", "_")}"
one_tmp[:name] = network_import_name
one_tmp[:bridge] = network_name
one_tmp[:type] = network_type
one_tmp[:cluster] = ccr_name
one_tmp[:vcenter_ccr_ref] = ccr_ref
one_tmp[:one] = to_one(network_name, network_ref, network_type,
ccr_ref, ccr_name, vcenter_uuid)
one_tmp[:one] = to_one(network_import_name, network_name, network_ref, network_type,
ccr_ref, vcenter_uuid)
return one_tmp
end
def self.to_one(network_name, network_ref, network_type,
ccr_ref, ccr_name, vcenter_uuid)
template = "NAME=\"#{network_name} - #{ccr_name}\"\n"\
def self.to_one(network_import_name, network_name, network_ref, network_type,
ccr_ref, vcenter_uuid)
template = "NAME=\"#{network_import_name}\"\n"\
"BRIDGE=\"#{network_name}\"\n"\
"VN_MAD=\"dummy\"\n"\
"VCENTER_PORTGROUP_TYPE=\"#{network_type}\"\n"\

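The generated virtual network template now carries the instance-qualified import name. A sample rendering with illustrative names, remaining attributes elided as in the hunk above:

    NAME="[vcenter.example - DC1] VM Network - Cluster_1"
    BRIDGE="VM Network"
    VN_MAD="dummy"
    VCENTER_PORTGROUP_TYPE="Port Group"
    ...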
File diff suppressed because it is too large