
Minor refactor vcenter driver

Tino Vazquez 2018-08-28 16:49:04 -07:00
parent d9572fbdd5
commit d6f3249c53
5 changed files with 1132 additions and 1131 deletions


@@ -239,7 +239,7 @@ module OpenNebula
require 'vcenter_driver'
host_id = self["ID"]
vm_ref = wild["DEPLOY_ID"]
return VCenterDriver::Importer.import_wild(host_id, vm_ref, vm, template)
return VCenterDriver::VmImporter.import_wild(host_id, vm_ref, vm, template)
else
rc = vm.allocate(template)


@@ -17,144 +17,6 @@
module VCenterDriver
class Importer
def self.import_wild(host_id, vm_ref, one_vm, template)
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vc_uuid = vi_client.vim.serviceContent.about.instanceUuid
vc_name = vi_client.vim.host
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool)
if dpool.respond_to?(:message)
raise "Could not get OpenNebula DatastorePool: #{dpool.message}"
end
ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
if ipool.respond_to?(:message)
raise "Could not get OpenNebula ImagePool: #{ipool.message}"
end
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool)
if npool.respond_to?(:message)
raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}"
end
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool)
if hpool.respond_to?(:message)
raise "Could not get OpenNebula HostPool: #{hpool.message}"
end
vcenter_vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm_ref)
vm_name = vcenter_vm["name"]
type = {:object => "VM", :id => vm_name}
error, template_disks = vcenter_vm.import_vcenter_disks(vc_uuid, dpool, ipool, type)
return OpenNebula::Error.new(error) if !error.empty?
template << template_disks
# Create images or get NIC information for the template
error, template_nics, ar_ids = vcenter_vm
.import_vcenter_nics(vc_uuid,
npool,
hpool,
vc_name,
vm_ref)
if !error.empty?
if !ar_ids.nil?
ar_ids.each do |key, value|
network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,"TEMPLATE/VCENTER_NET_REF", key, vc_uuid, npool)
value.each do |ar|
network.rm_ar(ar)
end
end
end
return OpenNebula::Error.new(error) if !error.empty?
end
template << template_nics
template << "VCENTER_ESX_HOST = #{vcenter_vm["runtime.host.name"].to_s}\n"
# Get DS_ID for the deployment; the wild VM needs a System DS
dc_ref = vcenter_vm.get_dc.item._ref
ds_ref = template.match(/^VCENTER_DS_REF *= *"(.*)" *$/)[1]
ds_one = dpool.select do |e|
e["TEMPLATE/TYPE"] == "SYSTEM_DS" &&
e["TEMPLATE/VCENTER_DS_REF"] == ds_ref &&
e["TEMPLATE/VCENTER_DC_REF"] == dc_ref &&
e["TEMPLATE/VCENTER_INSTANCE_ID"] == vc_uuid
end.first
if !ds_one
if !ar_ids.nil?
ar_ids.each do |key, value|
network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,"TEMPLATE/VCENTER_NET_REF", key, vc_uuid, npool)
value.each do |ar|
network.rm_ar(ar)
end
end
end
return OpenNebula::Error.new("DS with ref #{ds_ref} is not imported in OpenNebula, aborting Wild VM import.")
end
rc = one_vm.allocate(template)
if OpenNebula.is_error?(rc)
if !ar_ids.nil?
ar_ids.each do |key, value|
network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,"TEMPLATE/VCENTER_NET_REF", key, vc_uuid, npool)
value.each do |ar|
network.rm_ar(ar)
end
end
end
return rc
end
rc = one_vm.deploy(host_id, false, ds_one.id)
if OpenNebula.is_error?(rc)
if !ar_ids.nil?
ar_ids.each do |key, value|
network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,"TEMPLATE/VCENTER_NET_REF", key, vc_uuid, npool)
value.each do |ar|
network.rm_ar(ar)
end
end
end
return rc
end
# Set reference to template disks and nics in VM template
vcenter_vm.one_item = one_vm
vcenter_vm.reference_unmanaged_devices(vm_ref)
# Set vnc configuration F#5074
vnc_port = one_vm["TEMPLATE/GRAPHICS/PORT"]
elapsed_seconds = 0
# Let's update the info to gather VNC port
until vnc_port || elapsed_seconds > 30
sleep(1)
one_vm.info
vnc_port = one_vm["TEMPLATE/GRAPHICS/PORT"]
elapsed_seconds += 1
end
if vnc_port
vcenter_vm.one_item = one_vm
extraconfig = []
extraconfig += vcenter_vm.extraconfig_vnc
spec_hash = { :extraConfig => extraconfig }
spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
vcenter_vm.item.ReconfigVM_Task(:spec => spec).wait_for_completion
end
return one_vm.id
rescue Exception => e
vi_client.close_connection if vi_client
return OpenNebula::Error.new("#{e.message}/#{e.backtrace}")
end
end
def self.import_clusters(con_ops, options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."

File diff suppressed because it is too large


@@ -0,0 +1,993 @@
class Template
attr_accessor :item
include Memoize
def initialize(item=nil, vi_client=nil)
@item = item
check_item(@item, nil) if (@item)
@vi_client = vi_client
@locking = true
end
# Locking function. Similar to flock
def lock
if @locking
@locking_file = File.open("/tmp/vcenter-importer-lock","w")
@locking_file.flock(File::LOCK_EX)
end
end
# Unlock driver execution mutex
def unlock
if @locking
@locking_file.close
if File.exist?("/tmp/vcenter-importer-lock")
File.delete("/tmp/vcenter-importer-lock")
end
end
end
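# True when this object wraps an existing vCenter VM (VCenterDriver::VirtualMachine), i.e. a wild VM, rather than a template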
def wild?
self.class == VCenterDriver::VirtualMachine
end
def online?
raise "vcenter item not found!" unless @item
!@item["guest.net"].empty?
end
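# Walk up the inventory tree (through resource pools and folders) until the parent Datacenter is found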
def get_dc
item = @item
trace = []
while item && !item.instance_of?(RbVmomi::VIM::Datacenter)
rp = item.resourcePool rescue nil
if rp && rp.instance_of?(RbVmomi::VIM::VirtualApp)
trace << "rp:" + item.to_s
item = rp.parent rescue nil
else
trace << item.to_s
item = item.parent rescue nil
end
end
if item.nil?
trace = "[" + trace.join(", ") + "]"
raise "Could not find the parent Datacenter. Trace: #{trace}"
end
Datacenter.new(item)
end
def delete_template
@item.Destroy_Task.wait_for_completion
end
def get_vcenter_instance_uuid
@vi_client.vim.serviceContent.about.instanceUuid rescue nil
end
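# Create a copy of this template/VM named template_name ("one-<name>" when empty);
# an existing copy with the same name is destroyed and the clone retried.
# Returns [error, template_ref]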
def create_template_copy(template_name)
error = nil
template_ref = nil
template_name = "one-#{self['name']}" if template_name.empty?
relocate_spec_params = {}
relocate_spec_params[:pool] = get_rp
relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)
clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({
:location => relocate_spec,
:powerOn => false,
:template => false
})
template = nil
begin
template = @item.CloneVM_Task(:folder => @item.parent,
:name => template_name,
:spec => clone_spec).wait_for_completion
template_ref = template._ref
rescue Exception => e
if !e.message.start_with?('DuplicateName')
error = "Could not create the template clone. Reason: #{e.message}"
return error, nil
end
dc = get_dc
vm_folder = dc.vm_folder
vm_folder.fetch!
vm = vm_folder.items
.select{|k,v| v.item.name == template_name}
.values.first.item rescue nil
if vm
begin
vm.Destroy_Task.wait_for_completion
template = @item.CloneVM_Task(:folder => @item.parent,
:name => template_name,
:spec => clone_spec).wait_for_completion
template_ref = template._ref
rescue => e
error = "Could not delete the existing template; please remove it manually from vCenter. Reason: #{e.message}"
end
else
error = "Could not create the template clone. Reason: #{e.message}"
end
end
return error, template_ref
end
# Linked Clone over existing template
def create_delta_disks
begin
disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk)
disk_without_snapshots = disks.select { |x| x.backing.parent.nil? }
rescue
error = "Cannot extract existing disks on template."
use_linked_clones = false
return error, use_linked_clones
end
if !disk_without_snapshots.empty?
begin
if self['config.template']
@item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host'])
end
rescue Exception => e
@item.MarkAsTemplate()
error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}"
use_linked_clones = false
return error, use_linked_clones
end
begin
spec = {}
spec[:deviceChange] = []
disk_without_snapshots.each do |disk|
remove_disk_spec = { :operation => :remove, :device => disk }
spec[:deviceChange] << remove_disk_spec
add_disk_spec = { :operation => :add,
:fileOperation => :create,
:device => disk.dup.tap { |x|
x.backing = x.backing.dup
x.backing.fileName = "[#{disk.backing.datastore.name}]"
x.backing.parent = disk.backing
}
}
spec[:deviceChange] << add_disk_spec
end
@item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty?
rescue Exception => e
error = "Cannot create the delta disks on top of the template. Reason: #{e.message}."
use_linked_clones = false
return error, use_linked_clones
end
begin
@item.MarkAsTemplate()
rescue
error = "Cannot mark the VirtualMachine as a template. Not using linked clones."
use_linked_clones = false
return error, use_linked_clones
end
error = nil
use_linked_clones = true
return error, use_linked_clones
else
# Template already has delta disks
error = nil
use_linked_clones = true
return error, use_linked_clones
end
end
########################################################################
# Import vcenter disks
# @param type [Hash] contains the type of the object (:object) and its identifier (:id)
# @return error, template_disks
########################################################################
def import_vcenter_disks(vc_uuid, dpool, ipool, type)
disk_info = ""
error = ""
begin
lock #Lock import operation, to avoid concurrent creation of images
##ccr_ref = self["runtime.host.parent._ref"]
dc = get_dc
dc_ref = dc.item._ref
#Get disks and info required
vc_disks = get_vcenter_disks
# Track allocated images
allocated_images = []
vc_disks.each do |disk|
ds_ref = nil
begin
ds_ref = disk[:datastore]._ref
rescue
raise "The ISO #{disk[:path_wo_ds].name} cannot be found because the datastore was removed or deleted"
end
datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(ds_ref,
dc_ref,
vc_uuid,
dpool)
if datastore_found.nil?
error = "\n ERROR: datastore #{disk[:datastore].name}: has to be imported first as an image datastore!\n"
#Rollback delete disk images
allocated_images.each do |i|
i.delete
end
break
end
opts = {:persistent => wild? ? "YES":"NO"}
image_import = VCenterDriver::Datastore.get_image_import_template(disk, ipool, type, datastore_found["ID"], opts)
#Image is already in the datastore
if image_import[:one]
# This is the disk info
disk_tmp = ""
disk_tmp << "DISK=[\n"
disk_tmp << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n"
disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
disk_tmp << "]\n"
disk_info << disk_tmp
elsif !image_import[:template].empty?
# Then the image is created as it's not in the datastore
one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
allocated_images << one_i
rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i, false)
if OpenNebula.is_error?(rc)
error = " Error creating disk from template: #{rc.message}\n"
break
end
# Monitor image, we need READY state
one_i.info
start_time = Time.now
while one_i.state_str != "READY" and Time.now - start_time < 300
sleep 1
one_i.info
end
#Add info for One template
one_i.info
disk_info << "DISK=[\n"
disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n"
disk_info << "OPENNEBULA_MANAGED=\"NO\"\n"
disk_info << "]\n"
end
end
rescue Exception => e
error = "\n There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}\n#{e.backtrace}"
ensure
unlock
if !error.empty? && allocated_images
#Rollback delete disk images
allocated_images.each do |i|
i.delete
end
end
end
return error, disk_info, allocated_images
end
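# Build an AR (address range) template snippet for a NIC, choosing
# IP4_6_STATIC, IP6_STATIC, IP4 or ETHER depending on which addresses are known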
def create_ar(nic, with_id = false)
ar_tmp = ""
if nic[:mac] && nic[:ipv4] && nic[:ipv6]
ar_tmp << "AR=[\n"
ar_tmp << "AR_ID=0,\n" if with_id
ar_tmp << "TYPE=\"IP4_6_STATIC\",\n"
ar_tmp << "IP=\"#{nic[:ipv4]}\",\n"
ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n"
ar_tmp << "PREFIX_LENGTH=\"64\",\n"
ar_tmp << "SIZE=\"1\"\n"
ar_tmp << "]\n"
elsif nic[:mac] && nic[:ipv6]
ar_tmp << "AR=[\n"
ar_tmp << "AR_ID=0,\n" if with_id
ar_tmp << "TYPE=\"IP6_STATIC\",\n"
ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n"
ar_tmp << "PREFIX_LENGTH=\"64\",\n"
ar_tmp << "SIZE=\"1\"\n"
ar_tmp << "]\n"
elsif nic[:mac] && nic[:ipv4]
ar_tmp << "AR=[\n"
ar_tmp << "AR_ID=0,\n" if with_id
ar_tmp << "TYPE=\"IP4\",\n"
ar_tmp << "IP=\"#{nic[:ipv4]}\",\n"
ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
ar_tmp << "SIZE=\"1\"\n"
ar_tmp << "]\n"
else
ar_tmp << "AR=[\n"
ar_tmp << "AR_ID=0,\n" if with_id
ar_tmp << "TYPE=\"ETHER\",\n"
ar_tmp << "MAC=\"#{nic[:mac]}\",\n"
ar_tmp << "SIZE=\"1\"\n"
ar_tmp << "]\n"
end
ar_tmp
end
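# Record in ar_ids the AR added to the NIC's network (keyed by the network ref)
# so it can be rolled back later; returns the AR_ID used (0 when start_ids is set)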
def save_ar_ids(network_found, nic, ar_ids, start_ids = false)
if start_ids
value = []
arsNew = network_found.to_hash["VNET"]["AR_POOL"]["AR"]
arsNew = [arsNew] if arsNew.class.to_s.eql? "Hash"
last_id = 0
if ar_ids.has_key?(nic[:net_ref])
ref = nic[:net_ref]
value = ar_ids[ref.to_s]
value.insert(value.length, last_id.to_s)
ar_ids.store(nic[:net_ref], value)
else
value.insert(value.length , last_id.to_s)
ar_ids.store(nic[:net_ref], value)
end
else
value = []
arsNew = network_found.to_hash["VNET"]["AR_POOL"]["AR"]
arsNew = [arsNew] if arsNew.class.to_s.eql? "Hash"
last_id = arsNew.last["AR_ID"]
if ar_ids.has_key?(nic[:net_ref])
ref = nic[:net_ref]
value = ar_ids[ref.to_s]
value.insert(value.length, last_id)
ar_ids.store(nic[:net_ref], value)
else
value.insert(value.length , last_id)
ar_ids.store(nic[:net_ref], value)
end
end
last_id
end
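########################################################################
# Import vcenter nics
# Creates OpenNebula virtual networks for port groups not imported yet
# and address ranges for wild VM nics
# @return error, nic_info, ar_ids, allocated_networks
########################################################################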
def import_vcenter_nics(vc_uuid, npool, hpool, vcenter_instance_name,
template_ref, vm_id=nil, dc_name=nil)
nic_info = ""
error = ""
ar_ids = {}
begin
lock #Lock import operation, to avoid concurrent creation of networks
if !dc_name
dc = get_dc
dc_name = dc.item.name
dc_ref = dc.item._ref
end
ccr_ref = self["runtime.host.parent._ref"]
ccr_name = self["runtime.host.parent.name"]
#Get nics and info required
vc_nics = get_vcenter_nics
# Track allocated networks for rollback
allocated_networks = []
# Track port groups duplicated in this VM
duplicated_networks = []
vc_nics.each do |nic|
# Check if the network already exists
network_found = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
nic[:net_ref],
vc_uuid,
npool)
#Network is already in OpenNebula
if network_found
nic_tmp = "NIC=[\n"
nic_tmp << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
if wild?
ar_tmp = create_ar(nic)
network_found.add_ar(ar_tmp)
network_found.info
last_id = save_ar_ids(network_found, nic, ar_ids)
# This is the existing nic info
nic_tmp << "AR_ID=\"#{last_id}\",\n"
nic_tmp << "MAC=\"#{nic[:mac]}\",\n" if nic[:mac]
nic_tmp << "VCENTER_ADDITIONALS_IP4=\"#{nic[:ipv4_additionals]}\",\n" if nic[:ipv4_additionals]
nic_tmp << "VCENTER_IP6=\"#{nic[:ipv6]}\",\n" if nic[:ipv6]
nic_tmp << "IP6_GLOBAL=\"#{nic[:ipv6_global]}\",\n" if nic[:ipv6_global]
nic_tmp << "IP6_ULA=\"#{nic[:ipv6_ula]}\",\n" if nic[:ipv6_ula]
nic_tmp << "VCENTER_ADDITIONALS_IP6=\"#{nic[:ipv6_additionals]}\",\n" if nic[:ipv6_additionals]
end
nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
nic_tmp << "]\n"
nic_info << nic_tmp
# network not found:
else
config = {}
config[:refs] = nic[:refs]
# Then the network has to be created as it's not in OpenNebula
one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)
# Get the OpenNebula host IDs associated with the cluster references
config[:one_ids] = nic[:refs].map do |ref|
VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
ref,
vc_uuid,
hpool)["CLUSTER_ID"] rescue -1
end
if wild?
unmanaged = "wild"
else
unmanaged = "template"
end
import_opts = {
:network_name=> nic[:net_name],
:network_ref=> nic[:net_ref],
:network_type=> nic[:pg_type],
:ccr_ref=> ccr_ref,
:ccr_name=> ccr_name,
:vcenter_uuid=> vc_uuid,
:vcenter_instance_name=> vcenter_instance_name,
:dc_name=> dc_name,
:unmanaged=> unmanaged,
:template_ref=> template_ref,
:dc_ref=> dc_ref,
:template_id=> vm_id
}
# Prepare the Virtual Network template
one_vnet = VCenterDriver::Network.to_one_template(import_opts)
# An ETHER AR is always created so that templates have addresses available when they are instantiated
ar_tmp = ""
ar_tmp << "AR=[\n"
ar_tmp << "TYPE=\"ETHER\",\n"
ar_tmp << "SIZE=255\n"
ar_tmp << "]\n"
if wild?
ar_tmp << create_ar(nic, true)
end
one_vnet[:one] << ar_tmp
config[:one_object] = one_vnet[:one]
cluster_id = VCenterDriver::VIHelper.get_cluster_id(config[:one_ids])
one_vn = VCenterDriver::Network.create_one_network(config)
allocated_networks << one_vn
VCenterDriver::VIHelper.clean_ref_hash()
one_vn.info
nic_tmp = "NIC=[\n"
nic_tmp << "NETWORK_ID=\"#{one_vn.id}\",\n"
if wild?
last_id = save_ar_ids(one_vn, nic, ar_ids)
nic_tmp << "AR_ID=\"#{last_id}\",\n"
nic_tmp << "MAC=\"#{nic[:mac]}\",\n" if nic[:mac]
nic_tmp << "VCENTER_ADDITIONALS_IP4=\"#{nic[:ipv4_additionals]}\",\n" if nic[:ipv4_additionals]
nic_tmp << "VCENTER_IP6=\"#{nic[:ipv6]}\",\n" if nic[:ipv6]
nic_tmp << "IP6_GLOBAL=\"#{nic[:ipv6_global]}\",\n" if nic[:ipv6_global]
nic_tmp << "IP6_ULA=\"#{nic[:ipv6_ula]}\",\n" if nic[:ipv6_ula]
nic_tmp << "VCENTER_ADDITIONALS_IP6=\"#{nic[:ipv6_additionals]}\",\n" if nic[:ipv6_additionals]
end
nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
nic_tmp << "]\n"
nic_info << nic_tmp
# Refresh npool
npool.info_all
end
end
rescue Exception => e
error = "\n There was an error trying to create a virtual network to repesent a vCenter network for a VM or VM Template. Reason: #{e.message}"
ensure
unlock
#Rollback, delete virtual networks
if !error.empty? && allocated_networks
allocated_networks.each do |n|
n.delete
end
end
end
return error, nic_info, ar_ids, allocated_networks
end
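# Return the device key of the disk attached to controller_key at unit_number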
def get_vcenter_disk_key(unit_number, controller_key)
key = nil
@item["config.hardware.device"].each do |device|
disk = {}
if is_disk_or_iso?(device)
disk[:device] = device
if device.controllerKey == controller_key &&
device.unitNumber == unit_number
key = device.key
break
end
end
end
return key
end
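# Return an array of hashes describing each disk and CDROM/ISO of the VM:
# datastore, path, type (OS/CDROM), key and device prefix (hd/sd)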
def get_vcenter_disks
disks = []
ide_controlled = []
sata_controlled = []
scsi_controlled = []
@item["config.hardware.device"].each do |device|
disk = {}
if device.is_a? RbVmomi::VIM::VirtualIDEController
ide_controlled.concat(device.device)
end
if device.is_a? RbVmomi::VIM::VirtualSATAController
sata_controlled.concat(device.device)
end
if device.is_a? RbVmomi::VIM::VirtualSCSIController
scsi_controlled.concat(device.device)
end
if is_disk_or_iso?(device)
disk[:device] = device
disk[:datastore] = device.backing.datastore
disk[:path] = device.backing.fileName
disk[:path_wo_ds]= disk[:path].sub(/^\[(.*?)\] /, "")
disk[:type] = is_disk?(device) ? "OS" : "CDROM"
disk[:key] = device.key
disk[:prefix] = "hd" if ide_controlled.include?(device.key)
disk[:prefix] = "sd" if scsi_controlled.include?(device.key)
disk[:prefix] = "sd" if sata_controlled.include?(device.key)
disks << disk
end
end
return disks
end
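# Build a hash with the network a NIC device is plugged into:
# host cluster refs, network name, network ref and port group type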
def retrieve_from_device(device)
res = {}
# Let's find out if it is a standard or distributed network
# If distributed, it needs to be instantiated from the ref
if device.backing.is_a? RbVmomi::VIM::VirtualEthernetCardDistributedVirtualPortBackingInfo
if device.backing.port.portKey.match(/^[a-z]+-\d+$/)
ref = device.backing.port.portKey
elsif device.backing.port.portgroupKey.match(/^[a-z]+-\d+$/)
ref = device.backing.port.portgroupKey
else
raise "Cannot get hold of Network for device #{device}"
end
network = RbVmomi::VIM::Network.new(@vi_client.vim, ref)
else
network = device.backing.network
end
res[:refs] = network.host.map do |h|
h.parent._ref if h.parent
end
res[:net_name] = network.name
res[:net_ref] = network._ref
res[:pg_type] = VCenterDriver::Network.get_network_type(device)
res[:network] = network
res
end
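# Return an array of NIC hashes for the VM; for online wild VMs the
# IP addresses reported in guest.net are filled in as well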
def get_vcenter_nics
parse_live = ->(inets_raw) {
h = nil
begin
h = inets_raw.to_h
rescue NoMethodError
h = {}
inets_raw.each do |nic_dev|
h[nic_dev[0]] = nic_dev[1]
end
end
return h
}
nics = []
inets_raw = nil
inets = {}
num_device = 0
@item["config.hardware.device"].each do |device|
next unless is_nic?(device)
nic = retrieve_from_device(device)
nic[:mac] = device.macAddress rescue nil
if wild?
if online?
inets_raw = @item["guest.net"].map.with_index { |x,i| [x.macAddress, x] } unless inets_raw
inets = parse_live.call(inets_raw) if inets.empty?
ipAddresses = inets[nic[:mac]].ipConfig.ipAddress
if !ipAddresses.nil? && !ipAddresses.empty?
nic[:ipv4], nic[:ipv4_additionals] = nil
nic[:ipv6], nic[:ipv6_ula], nic[:ipv6_global], nic[:ipv6_additionals] = nil
fill_nic(ipAddresses, nic)
end
end
end
nics << nic
end
return nics
end
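# Classify each guest IP address: IPv4 goes to :ipv4 (extras to :ipv4_additionals);
# IPv6 goes to :ipv6_global (2000::/3), :ipv6 (fe80 link-local) or :ipv6_ula (fc00::/7),
# with extras in :ipv6_additionals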
def fill_nic(ipAddresses, nic)
for i in 0...ipAddresses.length
ip = ipAddresses[i].ipAddress
if ip =~ Resolv::IPv4::Regex
if nic[:ipv4]
if nic[:ipv4_additionals]
nic[:ipv4_additionals] += ',' + ip
else
nic[:ipv4_additionals] = ip
end
else
nic[:ipv4] = ip
end
elsif ipAddresses[i].ipAddress =~ Resolv::IPv6::Regex
if get_ipv6_prefix(ip, 3) == "2000"
if nic[:ipv6_global]
if nic[:ipv6_additionals]
nic[:ipv6_additionals] += ',' + ip
else
nic[:ipv6_additionals] = ip
end
else
nic[:ipv6_global] = ip
end
elsif get_ipv6_prefix(ip, 10) == "fe80"
nic[:ipv6] = ip
elsif get_ipv6_prefix(ip, 7) == "fc00"
if nic[:ipv6_ula]
if nic[:ipv6_additionals]
nic[:ipv6_additionals] += ',' + ip
else
nic[:ipv6_additionals] = ip
end
else
nic[:ipv6_ula] = ip
end
end
end
end
end
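# Return the first prefix_length bits of an IPv6 address as a hexadecimal string
# (used above to tell global, link-local and ULA addresses apart)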
def get_ipv6_prefix(ipv6, prefix_length)
ipSlice = ipv6.split(":").map{ |elem| elem.hex }.map{ |elem|
int, dec = elem.divmod(1)
bin = "#{int.to_s(2)}"
while dec > 0
int, dec = (dec * 2).divmod(1)
bin << int.to_s
end
elem = bin
}.map{ |elem| elem.rjust(16, '0') }
ipChain = ipSlice.join
prefix = ipChain[0, prefix_length]
cont = 0
limit = prefix.length
index = 0
slices = []
while cont < limit
slices[index] = prefix.slice(cont, 4)
slices[index] = slices[index].ljust(4, '0')
index +=1
cont+=4
end
finalPrefix = slices.map{|elem| "%0x" % elem.to_i(2) }.join.ljust(4, '0')
return finalPrefix
end
# Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom
def is_disk_or_cdrom?(device)
is_disk = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
is_cdrom = !(device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom)).nil?
is_disk || is_cdrom
end
# Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
def is_disk_or_iso?(device)
is_disk = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
is_iso = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
is_disk || is_iso
end
# Checks if a RbVmomi::VIM::VirtualDevice is a disk
def is_disk?(device)
!(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
end
# Checks if a RbVmomi::VIM::VirtualDevice is a network interface
def is_nic?(device)
!device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
end
# @return RbVmomi::VIM::ResourcePool, first resource pool in cluster
def get_rp
self['runtime.host.parent.resourcePool']
end
def get_esx_name
self['runtime.host.name']
end
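# Build the OpenNebula VM template string used to import this wild VM:
# capacity, context, vCenter references, imported state, VNC graphics, description and logo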
def vm_to_one(vm_name)
str = "NAME = \"#{vm_name}\"\n"\
"CPU = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
"vCPU = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\
"MEMORY = \"#{@vm_info["config.hardware.memoryMB"]}\"\n"\
"HYPERVISOR = \"vcenter\"\n"\
"CONTEXT = [\n"\
" NETWORK = \"YES\",\n"\
" SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
"]\n"\
"VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\
"VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n"
str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
@state = 'POWEROFF' if @state == 'd'
str << "IMPORT_STATE =\"#{@state}\"\n"
# Get DS information
str << "VCENTER_DS_REF = \"#{@vm_info["datastore"].last._ref}\"\n"
vnc_port = nil
keymap = VCenterDriver::VIHelper.get_default("VM/TEMPLATE/GRAPHICS/KEYMAP")
@vm_info["config.extraConfig"].select do |xtra|
if xtra[:key].downcase=="remotedisplay.vnc.port"
vnc_port = xtra[:value]
end
if xtra[:key].downcase=="remotedisplay.vnc.keymap"
keymap = xtra[:value]
end
end
if !@vm_info["config.extraConfig"].empty?
str << "GRAPHICS = [\n"\
" TYPE =\"vnc\",\n"
str << " PORT =\"#{vnc_port}\",\n" if vnc_port
str << " KEYMAP =\"#{keymap}\",\n" if keymap
str << " LISTEN =\"0.0.0.0\"\n"
str << "]\n"
end
if !@vm_info["config.annotation"] || @vm_info["config.annotation"].empty?
str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
" from Cluster #{@vm_info["cluster_name"]}\"\n"
else
notes = @vm_info["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"")
str << "DESCRIPTION = \"#{notes}\"\n"
end
case @vm_info["guest.guestFullName"]
when /CentOS/i
str << "LOGO=images/logos/centos.png\n"
when /Debian/i
str << "LOGO=images/logos/debian.png\n"
when /Red Hat/i
str << "LOGO=images/logos/redhat.png\n"
when /Ubuntu/i
str << "LOGO=images/logos/ubuntu.png\n"
when /Windows XP/i
str << "LOGO=images/logos/windowsxp.png\n"
when /Windows/i
str << "LOGO=images/logos/windows8.png\n"
when /Linux/i
str << "LOGO=images/logos/linux.png\n"
end
return str
end
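# Build the OpenNebula VM template string for a vCenter template:
# capacity, context, vCenter references, VNC graphics, description and logo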
def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id)
num_cpu, memory, annotation, guest_fullname = template.item.collect("config.hardware.numCPU","config.hardware.memoryMB","config.annotation","guest.guestFullName")
str = "NAME = \"#{import_name}\"\n"\
"CPU = \"#{num_cpu}\"\n"\
"vCPU = \"#{num_cpu}\"\n"\
"MEMORY = \"#{memory}\"\n"\
"HYPERVISOR = \"vcenter\"\n"\
"CONTEXT = [\n"\
" NETWORK = \"YES\",\n"\
" SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\
"]\n"\
"VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"
str << "VCENTER_TEMPLATE_REF =\"#{template["_ref"]}\"\n"
str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"
str << "GRAPHICS = [\n"\
" TYPE =\"vnc\",\n"
str << " LISTEN =\"0.0.0.0\"\n"
str << "]\n"
if annotation.nil? || annotation.empty?
str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
" from Cluster #{ccr_name}\"\n"
else
notes = annotation.gsub("\\", "\\\\").gsub("\"", "\\\"")
str << "DESCRIPTION = \"#{notes}\"\n"
end
case guest_fullname
when /CentOS/i
str << "LOGO=images/logos/centos.png\n"
when /Debian/i
str << "LOGO=images/logos/debian.png\n"
when /Red Hat/i
str << "LOGO=images/logos/redhat.png\n"
when /Ubuntu/i
str << "LOGO=images/logos/ubuntu.png\n"
when /Windows XP/i
str << "LOGO=images/logos/windowsxp.png\n"
when /Windows/i
str << "LOGO=images/logos/windows8.png\n"
when /Linux/i
str << "LOGO=images/logos/linux.png\n"
end
return str
end
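# Build the hash the importers use to list a vCenter template: names, refs,
# datacenter, resource pool list, inventory location and the OpenNebula template text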
def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={})
begin
template_ref = template['_ref']
template_name = template["name"]
template_ccr = template['runtime.host.parent']
template_ccr_ref = template_ccr._ref
template_ccr_name = template_ccr.name
# Set vcenter instance name
vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name
# Get datacenter info
if !dc_name
dc = get_dc
dc_name = dc.item.name
end
#Get resource pools and generate a list
if !rp_cache[template_ccr_name]
tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client)
rp_list = tmp_cluster.get_resource_pool_list
rp = ""
if !rp_list.empty?
rp_name_list = []
rp_list.each do |rp_hash|
rp_name_list << rp_hash[:name]
end
rp = "O|list|Which resource pool you want this VM to run in? "
rp << "|#{rp_name_list.join(",")}" #List of RP
rp << "|#{rp_name_list.first}" #Default RP
end
rp_cache[template_ccr_name] = {}
rp_cache[template_ccr_name][:rp] = rp
rp_cache[template_ccr_name][:rp_list] = rp_list
end
rp = rp_cache[template_ccr_name][:rp]
rp_list = rp_cache[template_ccr_name][:rp_list]
# Determine the location path for the template
vcenter_template = VCenterDriver::VirtualMachine.new_without_id(vi_client, template_ref)
item = vcenter_template.item
folders = []
while !item.instance_of? RbVmomi::VIM::Datacenter
item = item.parent
if !item.instance_of? RbVmomi::VIM::Datacenter
folders << item.name if item.name != "vm"
end
raise "Could not find the templates parent location" if item.nil?
end
location = folders.reverse.join("/")
location = "/" if location.empty?
# Generate a crypto hash for the template name and take the first 12 chars
import_name = VCenterDriver::VIHelper.one_name(OpenNebula::TemplatePool, template_name, template_ref+vcenter_uuid)
template_name = template_name.tr("\u007F", "")
template_ccr_name = template_ccr_name.tr("\u007F", "")
# Prepare the Hash that will be used by importers to display
# the object being imported
one_tmp = {}
one_tmp[:name] = import_name
one_tmp[:ref] = template_ref
one_tmp[:dc_name] = dc_name
one_tmp[:template_name] = template_name
one_tmp[:sunstone_template_name]= "#{template_name} [ Cluster: #{template_ccr_name} - Template location: #{location} ]"
one_tmp[:template_location] = location
one_tmp[:vcenter_ccr_ref] = template_ccr_ref
one_tmp[:vcenter_ref] = template_ref
one_tmp[:vcenter_instance_uuid] = vcenter_uuid
one_tmp[:cluster_name] = template_ccr_name
one_tmp[:rp] = rp
one_tmp[:rp_list] = rp_list
one_tmp[:template] = template
one_tmp[:import_disks_and_nics] = true # By default we import disks and nics
# Get the host ID of the OpenNebula host which represents the vCenter Cluster
host_id = nil
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
template_ccr_ref,
vcenter_uuid)
host_id = one_host["ID"]
cluster_id = one_host["CLUSTER_ID"]
raise "Could not find the host's ID associated to template being imported" if !host_id
# Get the OpenNebula's template hash
one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id)
return one_tmp
rescue Exception => e
return nil
end
end
# TODO check with uuid
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client)
end
end


@@ -56,6 +56,7 @@ require 'datacenter'
require 'host'
require 'datastore'
require 'virtual_machine'
require 'vm_template'
require 'network'
require 'file_helper'
require 'importer'