diff --git a/share/hooks/vcenter/create_vcenter_net.rb b/share/hooks/vcenter/create_vcenter_net.rb index 4e0b42d555..54405acdbd 100755 --- a/share/hooks/vcenter/create_vcenter_net.rb +++ b/share/hooks/vcenter/create_vcenter_net.rb @@ -123,7 +123,7 @@ def create_dpg(one_vnet, dc, cluster, vi_client) if dvs pnics_available = nil if pnics && !pnics.empty? - pnics_available = esx_host.get_available_pnics + pnics_available = esx_host.available_pnics end esx_host.assign_proxy_switch(dvs, sw_name, @@ -164,7 +164,7 @@ def create_pg(one_vnet, esx_host) esx_host.lock # Exclusive lock for ESX host operation pnics_available = nil - pnics_available = esx_host.get_available_pnics if pnics + pnics_available = esx_host.available_pnics if pnics # Get port group if it exists pg = esx_host.pg_exists(pg_name) diff --git a/share/linters/.rubocop.yml b/share/linters/.rubocop.yml index d89d098512..23e19ffbfc 100644 --- a/share/linters/.rubocop.yml +++ b/share/linters/.rubocop.yml @@ -88,6 +88,7 @@ AllCops: - src/im_mad/remotes/one.d/poll - src/im_mad/remotes/az.d/poll - src/im_mad/remotes/lib/vcenter_cluster.rb + - src/im_mad/remotes/lib/vcenter_monitor.rb - src/vnm_mad/remotes/ovswitch/post - src/vnm_mad/remotes/ovswitch/clean - src/vnm_mad/remotes/ovswitch/pre @@ -386,17 +387,8 @@ AllCops: - src/oca/ruby/deprecated/OpenNebula.rb - src/vmm_mad/dummy/one_vmm_dummy.rb - src/vmm_mad/remotes/one/opennebula_driver.rb - - src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb - - src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb - - src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb - - src/vmm_mad/remotes/lib/vcenter_driver/network.rb - src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb - src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb - - src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb - - src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb - - src/vmm_mad/remotes/lib/vcenter_driver/host.rb - - src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb - - src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb - src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb - src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb - src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb diff --git a/src/datastore_mad/remotes/vcenter/cp b/src/datastore_mad/remotes/vcenter/cp index 9f0892a49a..0807a5f54f 100755 --- a/src/datastore_mad/remotes/vcenter/cp +++ b/src/datastore_mad/remotes/vcenter/cp @@ -86,7 +86,7 @@ target_path = "#{ds_image_dir}/#{id}" # If image is in a remote http location it has to be downloaded # or if is a zipped file it has to be unzipped in a temp folder -if VCenterDriver::FileHelper.is_remote_or_needs_unpack?(img_path) +if VCenterDriver::FileHelper.remote_or_needs_unpack?(img_path) temp_folder = File.join(VAR_LOCATION, "vcenter/#{target_path}") temp_file = File.join(temp_folder, File.basename(img_path)) # if the original file doesnt have the vmdk extension, add it @@ -140,25 +140,25 @@ files_to_upload.each_with_index do |f, index| path = "#{target_path}/#{filename}" # remove gz or bz2 if part of filename - if path.end_with?('gz') && VCenterDriver::FileHelper.is_vmdk?(f) + if path.end_with?('gz') && VCenterDriver::FileHelper.vmdk?(f) path.gsub!(/gz$/, '') end - if path.end_with?('bz2') && VCenterDriver::FileHelper.is_vmdk?(f) + if path.end_with?('bz2') && VCenterDriver::FileHelper.vmdk?(f) path.gsub!(/bz2$/, '') end end # Change path if vmdk is part of filename but it's not the extension # rubocop:disable Style/DoubleNegation - if !!/[^.]+vmdk$/.match(path) && VCenterDriver::FileHelper.is_vmdk?(f) + if 
!!/[^.]+vmdk$/.match(path) && VCenterDriver::FileHelper.vmdk?(f) path.gsub!(/vmdk$/, '') extension = '.vmdk' end # rubocop:enable Style/DoubleNegation # Add iso extension if file is an ISO file - if VCenterDriver::FileHelper.is_iso?(f) + if VCenterDriver::FileHelper.iso?(f) path = "#{File.dirname(path)}/#{File.basename(path, '.*')}" extension = '.iso' end diff --git a/src/tm_mad/vcenter/cpds b/src/tm_mad/vcenter/cpds index 6943380844..faee34168a 100755 --- a/src/tm_mad/vcenter/cpds +++ b/src/tm_mad/vcenter/cpds @@ -76,7 +76,7 @@ begin vm = VCenterDriver::VirtualMachine.new(vi_client, vm_ref, vmid) - if vm.has_snapshots? + if vm.snapshots? raise "'disk-saveas' not supported in VMs with system snapshots." end diff --git a/src/tm_mad/vcenter/delete b/src/tm_mad/vcenter/delete index 7b6aaec8e0..8d16762562 100755 --- a/src/tm_mad/vcenter/delete +++ b/src/tm_mad/vcenter/delete @@ -97,10 +97,10 @@ begin vm.destroy_disk(disk) else @error_message = "Error unregistering vm #{vmid} (#{vm_ref})." - vm.poweroff_hard if vm.is_powered_on? + vm.poweroff_hard if vm.powered_on? if vm.instantiated_as_persistent? - vm.remove_all_snapshots if vm.has_snapshots? + vm.remove_all_snapshots if vm.snapshots? vm.convert_to_template else vm.destroy diff --git a/src/tm_mad/vcenter/mvds b/src/tm_mad/vcenter/mvds index d01d756961..31c1def92b 100755 --- a/src/tm_mad/vcenter/mvds +++ b/src/tm_mad/vcenter/mvds @@ -71,7 +71,7 @@ begin vm = VCenterDriver::VirtualMachine.new(vi_client, vm_ref, vmid) vmperst = vm.instantiated_as_persistent? - vm.remove_all_snapshots if vm.has_snapshots? + vm.remove_all_snapshots if vm.snapshots? disk = vm.disk(disk_id) # Don't detach persistent disks if the VM has snapshots diff --git a/src/tm_mad/vcenter/resize b/src/tm_mad/vcenter/resize index 56d30fd320..ddea742d1b 100755 --- a/src/tm_mad/vcenter/resize +++ b/src/tm_mad/vcenter/resize @@ -63,7 +63,7 @@ begin vm = VCenterDriver::VirtualMachine.new(vi_client, vm_ref, vmid) # Cannot resize if VM has snapshots - if vm.has_snapshots? + if vm.snapshots? raise "'disk-resize' operation not supported for VMs with snapshots." 
end diff --git a/src/vmm_mad/remotes/lib/nsx_driver/distributed_firewall.rb b/src/vmm_mad/remotes/lib/nsx_driver/distributed_firewall.rb index 2416286948..a345fc512e 100644 --- a/src/vmm_mad/remotes/lib/nsx_driver/distributed_firewall.rb +++ b/src/vmm_mad/remotes/lib/nsx_driver/distributed_firewall.rb @@ -126,7 +126,7 @@ module NSXDriver # Virtual Machine devices vm_devices = vm.item.config.hardware.device vm_devices.each do |device| - next unless vm.is_nic?(device) + next unless VCenterDriver::Network.nic?(device) next if device.macAddress != network_mac diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb b/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb index 3fd33bd1e9..4287e1173f 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb @@ -17,940 +17,1175 @@ require 'set' require 'digest' +############################################################################## +# Module VCenterDriver +############################################################################## module VCenterDriver -class DatacenterFolder + ########################################################################## + # Class DatacenterFolder + ########################################################################## + class DatacenterFolder - attr_accessor :items + attr_accessor :items - def initialize(vi_client) - @vi_client = vi_client - @items = {} - end - - ######################################################################## - # Builds a hash with Datacenter-Ref / Datacenter to be used as a cache - # @return [Hash] in the form - # { dc_ref [Symbol] => Datacenter object } - ######################################################################## - def fetch! - VIClient.get_entities(@vi_client.vim.root, 'Datacenter').each do |item| - item_name = item._ref - @items[item_name.to_sym] = Datacenter.new(item) - end - end - - ######################################################################## - # Returns a Datacenter. Uses the cache if available. - # @param ref [Symbol] the vcenter ref - # @return Datacenter - ######################################################################## - def get(ref) - if !@items[ref.to_sym] - rbvmomi_dc = RbVmomi::VIM::Datacenter.new(@vi_client.vim, ref) - @items[ref.to_sym] = Datacenter.new(rbvmomi_dc) + def initialize(vi_client) + @vi_client = vi_client + @items = {} end - @items[ref.to_sym] - end - - def get_vcenter_instance_uuid - @vi_client.vim.serviceContent.about.instanceUuid - end - - def get_vcenter_api_version - @vi_client.vim.serviceContent.about.apiVersion - end - - def get_unimported_hosts(hpool, vcenter_instance_name) - host_objects = {} - - vcenter_uuid = get_vcenter_instance_uuid - vcenter_version = get_vcenter_api_version - - fetch! if @items.empty? # Get datacenters - - # Loop through datacenters - @items.values.each do |dc| - dc_name = dc.item.name - host_objects[dc_name] = [] - - # Get clusters inside a datacenter - host_folder = dc.host_folder - host_folder.fetch_clusters! 
- host_folder.items.values.each do |ccr| - - # Check if the cluster is a host in OpenNebula's pool - one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, - "TEMPLATE/VCENTER_CCR_REF", - ccr['_ref'], - vcenter_uuid, - hpool) - next if one_host - - # Get a ClusterComputeResource object - cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr['_ref'], @vi_client) - - # Obtain a list of resource pools found in the cluster - rpools = cluster.get_resource_pool_list.select {|rp| !rp[:name].empty?} - - # Determine a host location (folder and subfolders) - item = cluster.item - folders = [] - while !item.instance_of? RbVmomi::VIM::Datacenter - item = item.parent - if !item.instance_of? RbVmomi::VIM::Datacenter - folders << item.name if item.name != "host" - end - raise "Could not find the host's location" if item.nil? - end - location = folders.reverse.join("/") - location = "/" if location.empty? - - # Setting host import name and replace spaces and weird characters - cluster_name = "#{ccr['name']}".tr(" ", "_") - cluster_name = VCenterDriver::VIHelper.one_name(OpenNebula::HostPool, cluster_name, ccr['_ref']+vcenter_uuid, hpool) - - - # Prepare hash for import tool - host_info = {} - host_info[:simple_name] = ccr['name'] - host_info[:cluster_name] = cluster_name - host_info[:cluster_ref] = ccr['_ref'] - host_info[:cluster_location] = location - host_info[:vcenter_uuid] = vcenter_uuid - host_info[:vcenter_version] = vcenter_version - host_info[:rp_list] = rpools - - # Add the hash to current datacenter - host_objects[dc_name] << host_info + ######################################################################## + # Builds a hash with Datacenter-Ref / Datacenter to be used as a cache + # @return [Hash] in the form + # { dc_ref [Symbol] => Datacenter object } + ######################################################################## + def fetch! + VIClient + .get_entities( + @vi_client.vim.root, + 'Datacenter' + ).each do |item| + item_name = item._ref + @items[item_name.to_sym] = Datacenter.new(item) end end - return host_objects - end + ######################################################################## + # Returns a Datacenter. Uses the cache if available. + # @param ref [Symbol] the vcenter ref + # @return Datacenter + ######################################################################## + def get(ref) + if !@items[ref.to_sym] + rbvmomi_dc = RbVmomi::VIM::Datacenter.new(@vi_client.vim, ref) + @items[ref.to_sym] = Datacenter.new(rbvmomi_dc) + end - def get_unimported_datastores(dpool, vcenter_instance_name, hpool) + @items[ref.to_sym] + end - import_id = 0 - ds_objects = {} - vcenter_uuid = get_vcenter_instance_uuid + def vcenter_instance_uuid + @vi_client.vim.serviceContent.about.instanceUuid + end - #Get datacenters - fetch! if @items.empty? + def vcenter_api_version + @vi_client.vim.serviceContent.about.apiVersion + end - @items.values.each do |dc| - clusters_in_ds = {} - dc_name = dc.item.name - dc_ref = dc.item._ref + def get_unimported_hosts(hpool, _vcenter_instance_name) + host_objects = {} - datastore_folder = dc.datastore_folder - datastore_folder.fetch! + vcenter_uuid = vcenter_instance_uuid + vcenter_version = vcenter_api_version - datastore_folder.items.values.each do |ds| + fetch! if @items.empty? 
# Get datacenters - name, capacity, freeSpace = ds.item.collect("name", "summary.capacity", "summary.freeSpace") + # Loop through datacenters + @items.values.each do |dc| + dc_name = dc.item.name + host_objects[dc_name] = [] - ds_name = "#{name}" - ds_total_mb = ((capacity.to_i / 1024) / 1024) - ds_free_mb = ((freeSpace.to_i / 1024) / 1024) - ds_ref = ds['_ref'] + # Get clusters inside a datacenter + host_folder = dc.host_folder + host_folder.fetch_clusters! + host_folder.items.values.each do |ccr| + # Check if the cluster is a host in OpenNebula's pool + one_host = + VCenterDriver::VIHelper + .find_by_ref( + OpenNebula::HostPool, + 'TEMPLATE/VCENTER_CCR_REF', + ccr['_ref'], + vcenter_uuid, + hpool + ) + next if one_host - ds_objects[ds_ref] = {} - ds_objects[ds_ref][:ref] = ds_ref - ds_objects[ds_ref][:import_id] = import_id - ds_objects[ds_ref][:datacenter] = dc_name - ds_objects[ds_ref][:simple_name] = "#{ds_name}" - ds_objects[ds_ref][:total_mb] = ds_total_mb - ds_objects[ds_ref][:free_mb] = ds_free_mb - ds_objects[ds_ref][:ds] = [] - ds_objects[ds_ref][:cluster] = [] + # Get a ClusterComputeResource object + cluster = + VCenterDriver::ClusterComputeResource + .new_from_ref( + ccr['_ref'], + @vi_client + ) - if ds.instance_of? VCenterDriver::Datastore - hosts = ds["host"] - hosts.each do |host| - cluster_ref = host.key.parent._ref - if !clusters_in_ds.key?(cluster_ref) - clusters_in_ds[cluster_ref] = nil + # Obtain a list of resource pools found in the cluster + rpools = + cluster + .get_resource_pool_list + .reject {|rp| rp[:name].empty? } - # Try to locate cluster ref in host's pool - one_cluster = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, - "TEMPLATE/VCENTER_CCR_REF", - cluster_ref, - vcenter_uuid, - hpool) - if one_cluster - ds_objects[ds_ref][:cluster] << one_cluster["CLUSTER_ID"].to_i - clusters_in_ds[cluster_ref] = one_cluster["CLUSTER_ID"].to_i - end - else - ds_objects[ds_ref][:cluster] << clusters_in_ds[cluster_ref] if clusters_in_ds[cluster_ref] && !ds_objects[ds_ref][:cluster].include?(clusters_in_ds[cluster_ref]) + # Determine a host location (folder and subfolders) + item = cluster.item + folders = [] + until item.instance_of? RbVmomi::VIM::Datacenter + item = item.parent + if !item.instance_of? RbVmomi::VIM::Datacenter + folders << item.name if item.name != 'host' end + raise "Could not find the host's location" if item.nil? end + location = folders.reverse.join('/') + location = '/' if location.empty? - already_image_ds = VCenterDriver::Storage.exists_one_by_ref_dc_and_type?(ds_ref, dc_ref, vcenter_uuid, "IMAGE_DS", dpool) + # Setting host import name and + # replace spaces and weird characters + cluster_name = (ccr['name']).to_s.tr(' ', '_') + cluster_name = + VCenterDriver::VIHelper + .one_name( + OpenNebula::HostPool, + cluster_name, + ccr['_ref']+vcenter_uuid, + hpool + ) - key = ds_ref+vcenter_uuid - if !already_image_ds - ds_objects[ds_ref][:name] = VCenterDriver::VIHelper.one_name(OpenNebula::DatastorePool, "#{ds_name}(IMG)", key) - object = ds.to_one_template(ds_objects[ds_ref], vcenter_uuid, dc_name, dc_ref, "IMAGE_DS") - ds_objects[ds_ref][:ds] << object if !object.nil? 
- end + # Prepare hash for import tool + host_info = {} + host_info[:simple_name] = ccr['name'] + host_info[:cluster_name] = cluster_name + host_info[:cluster_ref] = ccr['_ref'] + host_info[:cluster_location] = location + host_info[:vcenter_uuid] = vcenter_uuid + host_info[:vcenter_version] = vcenter_version + host_info[:rp_list] = rpools - already_system_ds = VCenterDriver::Storage.exists_one_by_ref_dc_and_type?(ds_ref, dc_ref, vcenter_uuid, "SYSTEM_DS", dpool) + # Add the hash to current datacenter + host_objects[dc_name] << host_info + end + end - if !already_system_ds - ds_objects[ds_ref][:name] = VCenterDriver::VIHelper.one_name(OpenNebula::DatastorePool, "#{ds_name}(SYS)", key) - object = ds.to_one_template(ds_objects[ds_ref], vcenter_uuid, dc_name, dc_ref, "SYSTEM_DS") - ds_objects[ds_ref][:ds] << object if !object.nil? - end + host_objects + end - ds_objects[ds_ref][:name] = "#{ds_name}" - elsif ds.instance_of? VCenterDriver::StoragePod - ds['children'].each do |sp_ds| - hosts = sp_ds.host + def get_unimported_datastores(dpool, vcenter_instance_name, hpool) + import_id = 0 + ds_objects = {} + vcenter_uuid = vcenter_instance_uuid + + # Get datacenters + fetch! if @items.empty? + + @items.values.each do |dc| + clusters_in_ds = {} + dc_name = dc.item.name + dc_ref = dc.item._ref + + datastore_folder = dc.datastore_folder + datastore_folder.fetch! + + datastore_folder.items.values.each do |ds| + name, capacity, free_space = + ds + .item + .collect( + 'name', + 'summary.capacity', + 'summary.freeSpace' + ) + + ds_name = name.to_s + ds_total_mb = ((capacity.to_i / 1024) / 1024) + ds_free_mb = ((free_space.to_i / 1024) / 1024) + ds_ref = ds['_ref'] + + ds_objects[ds_ref] = {} + ds_objects[ds_ref][:ref] = ds_ref + ds_objects[ds_ref][:import_id] = import_id + ds_objects[ds_ref][:datacenter] = dc_name + ds_objects[ds_ref][:simple_name] = ds_name.to_s + ds_objects[ds_ref][:total_mb] = ds_total_mb + ds_objects[ds_ref][:free_mb] = ds_free_mb + ds_objects[ds_ref][:ds] = [] + ds_objects[ds_ref][:cluster] = [] + + if ds.instance_of? 
VCenterDriver::Datastore + hosts = ds['host'] hosts.each do |host| cluster_ref = host.key.parent._ref - if !clusters_in_ds.include?(cluster_ref) + if !clusters_in_ds.key?(cluster_ref) clusters_in_ds[cluster_ref] = nil - # Try to locate cluster ref in cluster's pool - one_cluster = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, - "TEMPLATE/VCENTER_CCR_REF", - cluster_ref, - vcenter_uuid, - hpool) + + # Try to locate cluster ref in host's pool + one_cluster = + VCenterDriver::VIHelper + .find_by_ref( + OpenNebula::HostPool, + 'TEMPLATE/VCENTER_CCR_REF', + cluster_ref, + vcenter_uuid, + hpool + ) if one_cluster - ds_objects[ds_ref][:cluster] << one_cluster["CLUSTER_ID"].to_i - clusters_in_ds[cluster_ref] = one_cluster["CLUSTER_ID"].to_i + ds_objects[ds_ref][:cluster] << + one_cluster['CLUSTER_ID'].to_i + clusters_in_ds[cluster_ref] = + one_cluster['CLUSTER_ID'].to_i end else - ds_objects[ds_ref][:cluster] << clusters_in_ds[cluster_ref] if clusters_in_ds[cluster_ref] && !ds_objects[ds_ref][:cluster].include?(clusters_in_ds[cluster_ref]) + if clusters_in_ds[cluster_ref] && + !ds_objects[ds_ref][:cluster] + .include?( + clusters_in_ds[cluster_ref] + ) + ds_objects[ds_ref][:cluster] << + clusters_in_ds[cluster_ref] + end end end + + already_image_ds = VCenterDriver::Storage + .exists_one_by_ref_dc_and_type?( + ds_ref, + dc_ref, + vcenter_uuid, + 'IMAGE_DS', + dpool + ) + + key = ds_ref+vcenter_uuid + if !already_image_ds + ds_objects[ds_ref][:name] = + VCenterDriver::VIHelper + .one_name( + OpenNebula::DatastorePool, + "#{ds_name}(IMG)", + key + ) + object = + ds + .to_one_template( + ds_objects[ds_ref], + vcenter_uuid, + dc_name, + dc_ref, + 'IMAGE_DS' + ) + ds_objects[ds_ref][:ds] << object unless object.nil? + end + + already_system_ds = + VCenterDriver::Storage + .exists_one_by_ref_dc_and_type?( + ds_ref, + dc_ref, + vcenter_uuid, + 'SYSTEM_DS', + dpool + ) + + if !already_system_ds + ds_objects[ds_ref][:name] = + VCenterDriver::VIHelper + .one_name( + OpenNebula::DatastorePool, + "#{ds_name}(SYS)", + key + ) + object = ds + .to_one_template( + ds_objects[ds_ref], + vcenter_uuid, + dc_name, + dc_ref, + 'SYSTEM_DS' + ) + ds_objects[ds_ref][:ds] << object unless object.nil? + end + + ds_objects[ds_ref][:name] = ds_name.to_s + elsif ds.instance_of? VCenterDriver::StoragePod + ds['children'].each do |sp_ds| + hosts = sp_ds.host + hosts.each do |host| + cluster_ref = host.key.parent._ref + if !clusters_in_ds.include?(cluster_ref) + clusters_in_ds[cluster_ref] = nil + # Try to locate cluster + # ref in cluster's pool + one_cluster = + VCenterDriver::VIHelper + .find_by_ref( + OpenNebula::HostPool, + 'TEMPLATE/VCENTER_CCR_REF', + cluster_ref, + vcenter_uuid, + hpool + ) + if one_cluster + ds_objects[ds_ref][:cluster] << + one_cluster['CLUSTER_ID'].to_i + clusters_in_ds[cluster_ref] = + one_cluster['CLUSTER_ID'].to_i + end + else + if clusters_in_ds[cluster_ref] && + !ds_objects[ds_ref][:cluster] + .include?( + clusters_in_ds[cluster_ref] + ) + ds_objects[ds_ref][:cluster] << + clusters_in_ds[cluster_ref] + end + end + end + end + + already_system_ds = VCenterDriver::Storage + .exists_one_by_ref_dc_and_type?( + ds_ref, + dc_ref, + vcenter_uuid, + 'SYSTEM_DS', + dpool + ) + + if !already_system_ds + ds_objects[ds_ref][:name] = "#{ds_name} \ + [#{vcenter_instance_name} - #{dc_name}] (StorDRS)" + object = ds.to_one_template( + ds_objects[ds_ref], + vcenter_uuid, + dc_name, + dc_ref, + 'SYSTEM_DS' + ) + ds_objects[ds_ref][:ds] << object unless object.nil? 
+ end end - already_system_ds = VCenterDriver::Storage.exists_one_by_ref_dc_and_type?(ds_ref, dc_ref, vcenter_uuid, "SYSTEM_DS", dpool) - - if !already_system_ds - ds_objects[ds_ref][:name] = "#{ds_name} [#{vcenter_instance_name} - #{dc_name}] (StorDRS)" - object = ds.to_one_template(ds_objects[ds_ref], vcenter_uuid, dc_name, dc_ref, "SYSTEM_DS") - ds_objects[ds_ref][:ds] << object if !object.nil? + if ds_objects[ds_ref][:ds].empty? + ds_objects.delete(ds_ref) + else + import_id += 1 end end - - if ds_objects[ds_ref][:ds].empty? - ds_objects.delete(ds_ref) - else - import_id += 1 - end - end + + { vcenter_instance_name => ds_objects } end - { vcenter_instance_name => ds_objects } - end + def get_unimported_templates(vi_client, tpool) + template_objects = {} + import_id = 0 + vcenter_uuid = vcenter_instance_uuid - def get_unimported_templates(vi_client, tpool) - template_objects = {} - import_id = 0 - vcenter_uuid = get_vcenter_instance_uuid + vcenter_instance_name = vi_client.vim.host - vcenter_instance_name = vi_client.vim.host + fetch! if @items.empty? # Get datacenters - fetch! if @items.empty? #Get datacenters + @items.values.each do |dc| + rp_cache = {} + dc_name = dc.item.name - @items.values.each do |dc| - rp_cache = {} - dc_name = dc.item.name + view = vi_client + .vim + .serviceContent + .viewManager + .CreateContainerView({ + :container => dc.item.vmFolder, + :type => ['VirtualMachine'], + :recursive => true + }) - view = vi_client.vim.serviceContent.viewManager.CreateContainerView({ - container: dc.item.vmFolder, - type: ['VirtualMachine'], - recursive: true - }) + pc = vi_client.vim.serviceContent.propertyCollector - pc = vi_client.vim.serviceContent.propertyCollector - - filterSpec = RbVmomi::VIM.PropertyFilterSpec( - :objectSet => [ - :obj => view, - :skip => true, - :selectSet => [ - RbVmomi::VIM.TraversalSpec( - :name => 'traverseEntities', - :type => 'ContainerView', - :path => 'view', - :skip => false + filter_spec = RbVmomi::VIM.PropertyFilterSpec( + :objectSet => [ + { :obj => view, + :skip => true, + :selectSet => [ + RbVmomi::VIM.TraversalSpec( + :name => 'traverseEntities', + :type => 'ContainerView', + :path => 'view', + :skip => false + ) + ] } + ], + :propSet => [ + { + :type => 'VirtualMachine', + :pathSet => ['config.template'] + } + ] ) - ] - ], - :propSet => [ - { :type => 'VirtualMachine', :pathSet => ['config.template'] } - ] - ) - result = pc.RetrieveProperties(:specSet => [filterSpec]) + result = pc.RetrieveProperties(:specSet => [filter_spec]) - vms = {} + vms = {} result.each do |r| - vms[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::VirtualMachine) - end - templates = [] - vms.each do |ref,value| - if value["config.template"] - templates << VCenterDriver::Template.new_from_ref(ref, vi_client) + if r.obj.is_a?(RbVmomi::VIM::VirtualMachine) + vms[r.obj._ref] = r.to_hash + end end - end + templates = [] + vms.each do |ref, value| + next unless value['config.template'] - view.DestroyView # Destroy the view + templates << VCenterDriver::Template + .new_from_ref( + ref, + vi_client + ) + end - templates.each do |template| + view.DestroyView # Destroy the view - tref = template['_ref'] - next if template_objects[tref] + templates.each do |template| + tref = template['_ref'] + next if template_objects[tref] - one_template = VCenterDriver::VIHelper.find_by_ref(OpenNebula::TemplatePool, - "TEMPLATE/VCENTER_TEMPLATE_REF", - template['_ref'], - vcenter_uuid, - tpool) + one_template = VCenterDriver::VIHelper + .find_by_ref( + OpenNebula::TemplatePool, + 
'TEMPLATE/VCENTER_TEMPLATE_REF',
+                                       template['_ref'],
+                                       vcenter_uuid,
+                                       tpool
+                                   )
 
-            next if one_template #If the template has been already imported
+                    # If the template has already been imported
+                    next if one_template
 
-            one_template = VCenterDriver::Template.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name, dc_name, rp_cache)
+                    one_template = VCenterDriver::Template
+                                   .get_xml_template(
+                                       template,
+                                       vcenter_uuid,
+                                       vi_client,
+                                       dc_name,
+                                       rp_cache
+                                   )
+
+                    next if one_template.nil?
 
-            if !!one_template
                 one_template[:import_id] = import_id
                 one_template[:vcenter] = vcenter_instance_name
                 import_id += 1
                 template_objects[tref] = one_template
             end
         end
+
+            { vcenter_instance_name => template_objects }
         end
 
-        { vcenter_instance_name => template_objects }
-    end
+        def cluster_networks(one_host)
+            ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
+            cluster = VCenterDriver::ClusterComputeResource
+                      .new_from_ref(ccr_ref, @vi_client)
+            # cluster = cluster_mob(one_host)
+            raise "Cluster with ref: #{ccr_ref} not found" if cluster.nil?
 
-    def cluster_networks(one_host)
-        ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
-        cluster = VCenterDriver::ClusterComputeResource
-                  .new_from_ref(ccr_ref, @vi_client)
-        # cluster = cluster_mob(one_host)
-        raise "Cluster with ref: #{ccr_ref} not found" if cluster.nil?
-
-        networks = cluster.item.network
-        return networks
-    end
-
-    # Return ONE cluster ID
-    def one_cluster_id(one_host)
-        if !one_host || !one_host['CLUSTER_ID']
-            cluster_id = -1
-        else
-            cluster_id = one_host['CLUSTER_ID']
+            cluster.item.network
         end
 
-        cluster_id.to_i
-    end
-
-    # Get vSwitch of Standard PortGroup
-    # If there is differents vSwitches returns the first.
-    def vSwitch(vc_pg)
-        vswitch = []
-        vc_hosts = vc_pg.host
-        vc_hosts.each do |vc_host|
-            host_pgs = vc_host.configManager.networkSystem.networkInfo.portgroup
-            host_pgs.each do |pg|
-                if vc_pg.name == pg.spec.name
-                    vswitch << pg.spec.vswitchName
-                end
+        # Return ONE cluster ID
+        def one_cluster_id(one_host)
+            if !one_host || !one_host['CLUSTER_ID']
+                cluster_id = -1
+            else
+                cluster_id = one_host['CLUSTER_ID']
             end
+
+            cluster_id.to_i
         end
-        vswitch.uniq!
-        vswitch << 'Invalid configuration' if vswitch.length > 1
-        vswitch.join(" / ")
-    end
 
-    # Determine if a network must be excluded from the list
-    def exclude_network?(vc_network, one_host, args)
-        # Exclude some networks if filter = true
-        if args[:filter]
-            if ( one_host && one_host['TEMPLATE/NSX_PASSWORD'].nil?)
-                # Only NSX-V and NSX-T can be excluded
-                network_type = VCenterDriver::Network.get_network_type(vc_network)
-                if network_type == VCenterDriver::Network::NETWORK_TYPE_NSXT ||\
-                   network_type == VCenterDriver::Network::NETWORK_TYPE_NSXV
+        # Determine if a network must be excluded from the list
+        def exclude_network?(vc_network, one_host, args)
+            # Exclude some networks if filter = true
+            if args[:filter]
+                if one_host && one_host['TEMPLATE/NSX_PASSWORD'].nil?
+                    network_types = [
+                        VCenterDriver::Network::NETWORK_TYPE_NSXT,
+                        VCenterDriver::Network::NETWORK_TYPE_NSXV
+                    ]
+                    # Only NSX-V and NSX-T can be excluded
+                    network_type = VCenterDriver::Network
+                                   .get_network_type(vc_network)
+
+                    return true if network_types.include? network_type
+                end
+                # Exclude networks without hosts
+                if vc_network['host'].empty?
                     return true
                 end
-            end
-            # Exclude networks without hosts
-            if vc_network['host'].empty?
-                return true
-            end
-            # Exclude DVS uplinks
-            unless vc_network['tag'].empty? 
-                if vc_network['tag'][0][:key] == 'SYSTEM/DVS.UPLINKPG'
+
+                # Exclude DVS uplinks
+                unless vc_network['tag'].empty?
+                    if vc_network['tag'][0][:key] == 'SYSTEM/DVS.UPLINKPG'
+                        return true
+                    end
+                end
+                # Exclude portgroup used for VXLAN communication in NSX
+                if vc_network['name'].match(/^vxw-vmknicPg-dvs-(.*)/)
                     return true
                 end
+
+                return false
             end
-            # Exclude portgroup used for VXLAN communication in NSX
-            if vc_network['name'].match(/^vxw-vmknicPg-dvs-(.*)/)
-                return true
-            end
-            return false
+
+            false
         end
-        return false
-    end
 
-    # Proccess each network
-    def process_network(vc_network,
-                        vcenter_instance_name,
-                        vcenter_uuid,
-                        hpool,
-                        one_host,
-                        args)
+        # Process each network
+        def process_network(params)
+            vc_network = params[:vc_network]
+            vcenter_instance_name = params[:vcenter_instance_name]
+            vcenter_uuid = params[:vcenter_uuid]
+            _hpool = params[:_hpool]
+            one_host = params[:one_host]
+            args = params[:args]
 
-        # Initialize network hash
-        network = {}
-        # Add name to network hash
-        network[vc_network._ref] = {'name' => vc_network.name}
-        # By default no network is excluded
-        network[vc_network._ref][:excluded] = false
+            # Initialize network hash
+            network = {}
+            # Add name to network hash
+            network[vc_network._ref] = { 'name' => vc_network.name }
+            # By default no network is excluded
+            network[vc_network._ref][:excluded] = false
 
-        # Initialize opts hash used to inject data into one template
-        opts = {}
+            # Initialize opts hash used to inject data into one template
+            opts = {}
 
-        # Add network type to network hash
-        network_type = VCenterDriver::Network.get_network_type(vc_network)
-        network[vc_network._ref][:network_type] = network_type
+            # Add network type to network hash
+            network_type = VCenterDriver::Network.get_network_type(vc_network)
+            network[vc_network._ref][:network_type] = network_type
 
-        # Determine if the network must be excluded
-        network[vc_network._ref][:excluded] = exclude_network?(vc_network,
-                                                               one_host,
-                                                               args)
-        return nil if network[vc_network._ref][:excluded] == true
+            # Determine if the network must be excluded
+            network[vc_network._ref][:excluded] = exclude_network?(vc_network,
+                                                                   one_host,
+                                                                   args)
+            return if network[vc_network._ref][:excluded] == true
 
-        case network[vc_network._ref][:network_type]
-        # Distributed PortGroups
-        when VCenterDriver::Network::NETWORK_TYPE_DPG
-            network[vc_network._ref][:sw_name] = \
-                vc_network.config.distributedVirtualSwitch.name
-            # For DistributedVirtualPortgroups there is networks and uplinks
-            network[vc_network._ref][:uplink] = \
-                vc_network.config.uplink
-            #network[vc_network._ref][:uplink] = false
-        # NSX-V PortGroups
-        when VCenterDriver::Network::NETWORK_TYPE_NSXV
-            network[vc_network._ref][:sw_name] = \
-                vc_network.config.distributedVirtualSwitch.name
-            # For NSX-V ( is the same as DistributedVirtualPortgroups )
-            # there is networks and uplinks
-            network[vc_network._ref][:uplink] = \
-                vc_network.config.uplink
+            case network[vc_network._ref][:network_type]
+            # Distributed PortGroups
+            when VCenterDriver::Network::NETWORK_TYPE_DPG
+                network[vc_network._ref][:sw_name] = \
+                    vc_network.config.distributedVirtualSwitch.name
+                # For DistributedVirtualPortgroups there are networks and uplinks
+                network[vc_network._ref][:uplink] = \
+                    vc_network.config.uplink
+                # network[vc_network._ref][:uplink] = false
+            # NSX-V PortGroups
+            when VCenterDriver::Network::NETWORK_TYPE_NSXV
+                network[vc_network._ref][:sw_name] = \
+                    vc_network.config.distributedVirtualSwitch.name
+                # For NSX-V ( is the same as DistributedVirtualPortgroups )
+                # there are networks and uplinks
+                network[vc_network._ref][:uplink] = \
+                    vc_network.config.uplink
                 network[vc_network._ref][:uplink] = false
-        # Standard PortGroups
-        when VCenterDriver::Network::NETWORK_TYPE_PG
-            # There is no uplinks for standard portgroups, so all Standard
-            # PortGroups are networks and no uplinks
-            network[vc_network._ref][:uplink] = false
-            network[vc_network._ref][:sw_name] = vSwitch(vc_network)
-        # NSX-T PortGroups
-        when VCenterDriver::Network::NETWORK_TYPE_NSXT
-            network[vc_network._ref][:sw_name] = \
-                vc_network.summary.opaqueNetworkType
-            # There is no uplinks for NSX-T networks, so all NSX-T networks
-            # are networks and no uplinks
-            network[vc_network._ref][:uplink] = false
-        else
-            raise 'Unknown network type: ' \
-                  "#{network[vc_network._ref][:network_type]}"
+            # Standard PortGroups
+            when VCenterDriver::Network::NETWORK_TYPE_PG
+                # There is no uplinks for standard portgroups, so all Standard
+                # PortGroups are networks and no uplinks
+                network[vc_network._ref][:uplink] = false
+                network[vc_network._ref][:sw_name] =
+                    VCenterDriver::Network
+                    .virtual_switch(
+                        vc_network
+                    )
+            # NSX-T PortGroups
+            when VCenterDriver::Network::NETWORK_TYPE_NSXT
+                network[vc_network._ref][:sw_name] = \
+                    vc_network.summary.opaqueNetworkType
+                # There is no uplinks for NSX-T networks, so all NSX-T networks
+                # are networks and no uplinks
+                network[vc_network._ref][:uplink] = false
+            else
+                raise 'Unknown network type: ' \
+                      "#{network[vc_network._ref][:network_type]}"
+            end
+
+            # Multicluster nets support
+            network[vc_network._ref][:clusters] = {}
+            network[vc_network._ref][:clusters][:refs] = []
+            network[vc_network._ref][:clusters][:one_ids] = []
+            network[vc_network._ref][:clusters][:names] = []
+
+            # Get hosts related to this network and add them if not
+            # excluded
+            vc_hosts = vc_network.host
+            vc_hosts.each do |vc_host|
+                # Get vCenter Cluster
+                vc_cluster = vc_host.parent
+                # Get one host from each vCenter cluster
+                one_host = VCenterDriver::VIHelper
+                           .find_by_ref(OpenNebula::HostPool,
+                                        'TEMPLATE/VCENTER_CCR_REF',
+                                        vc_cluster._ref,
+                                        vcenter_uuid)
+                # Check if network is excluded from each host
+                next if exclude_network?(vc_network, one_host, args)
+
+                # Insert vCenter cluster ref
+                network[vc_network._ref][:clusters][:refs] << vc_cluster._ref
+                # Insert OpenNebula cluster id
+                cluster_id = one_cluster_id(one_host)
+                network[vc_network._ref][:clusters][:one_ids] << cluster_id
+                # Insert vCenter cluster name
+                network[vc_network._ref][:clusters][:names] << vc_cluster.name
+                opts[:dc_name] = vc_cluster.name
+            end
+
+            # Remove duplicate entries
+            network[vc_network._ref][:clusters][:refs].uniq!
+            network[vc_network._ref][:clusters][:one_ids].uniq!
+            network[vc_network._ref][:clusters][:names].uniq! 
+ + # Mark network as processed + network[vc_network._ref][:processed] = true + + # General net_info related to datacenter + opts[:vcenter_uuid] = vcenter_uuid + opts[:vcenter_instance_name] = vcenter_instance_name + opts[:network_name] = network[vc_network._ref]['name'] + opts[:network_ref] = network.keys.first + opts[:network_type] = network[vc_network._ref][:network_type] + opts[:sw_name] = network[vc_network._ref][:sw_name] + + network[vc_network._ref] = \ + network[vc_network._ref].merge(VCenterDriver::Network + .to_one_template(opts)) + network end - # Multicluster nets support - network[vc_network._ref][:clusters] = {} - network[vc_network._ref][:clusters][:refs] = [] - network[vc_network._ref][:clusters][:one_ids] = [] - network[vc_network._ref][:clusters][:names] = [] + def get_unimported_networks(npool, vcenter_instance_name, hpool, args) + vcenter_uuid = vcenter_instance_uuid + networks = {} - # Get hosts related to this network and add them if is not - # excluded - vc_hosts = vc_network.host - vc_hosts.each do |vc_host| - # Get vCenter Cluster - vc_cluster = vc_host.parent - # Get one host from each vCenter cluster - one_host = VCenterDriver::VIHelper - .find_by_ref(OpenNebula::HostPool, - "TEMPLATE/VCENTER_CCR_REF", - vc_cluster._ref, - vcenter_uuid) - # Check if network is excluded from each host - next if exclude_network?(vc_network,one_host,args) - # Insert vCenter cluster ref - network[vc_network._ref][:clusters][:refs] << vc_cluster._ref - # Insert OpenNebula cluster id - cluster_id = one_cluster_id(one_host) - network[vc_network._ref][:clusters][:one_ids] << cluster_id - # Insert vCenter cluster name - network[vc_network._ref][:clusters][:names] << vc_cluster.name - opts[:dc_name] = vc_cluster.name + # Selected host in OpenNebula + one_client = OpenNebula::Client.new + one_host = OpenNebula::Host.new_with_id(args[:host], one_client) + rc = one_host.info + raise rc.message if OpenNebula.is_error? rc + + # Get all networks in vcenter cluster (one_host) + vc_cluster_networks = cluster_networks(one_host) + + # Iterate over vcenter networks + vc_cluster_networks.each do |vc_cluster_network| + exist = VCenterDriver::VIHelper + .find_by_ref(OpenNebula::VirtualNetworkPool, + 'TEMPLATE/VCENTER_NET_REF', + vc_cluster_network._ref, + vcenter_uuid, + npool) + + next if exist + + params = {} + + params[:vc_network]= vc_cluster_network + params[:vcenter_instance_name]= vcenter_instance_name + params[:vcenter_uuid]= vcenter_uuid + params[:_hpool]= hpool + params[:one_host]= one_host + params[:args] = args + + network = process_network(params) + + networks.merge!(network) unless network.nil? + end + # Added import id + imid = -1 + networks.map {|_k, v| v[:import_id] = imid += 1 } + { vcenter_instance_name => networks } end - # Remove duplicate entries - network[vc_network._ref][:clusters][:refs].uniq! - network[vc_network._ref][:clusters][:one_ids].uniq! - network[vc_network._ref][:clusters][:names].uniq! 
-
-        # Mark network as processed
-        network[vc_network._ref][:processed] = true
-
-        # General net_info related to datacenter
-        opts[:vcenter_uuid] = vcenter_uuid
-        opts[:vcenter_instance_name] = vcenter_instance_name
-        opts[:network_name] = network[vc_network._ref]['name']
-        opts[:network_ref] = network.keys.first
-        opts[:network_type] = network[vc_network._ref][:network_type]
-        opts[:sw_name] = network[vc_network._ref][:sw_name]
-
-        network[vc_network._ref] = \
-            network[vc_network._ref].merge(VCenterDriver::Network
-                                           .to_one_template(opts))
-        network
-    end
+    end
+    # class DatacenterFolder
 
-    def get_unimported_networks(npool, vcenter_instance_name, hpool, args)
-        vcenter_uuid = get_vcenter_instance_uuid
-        networks = {}
+    ##########################################################################
+    # Class Datacenter
+    ##########################################################################
+    class Datacenter
 
-        # Selected host in OpenNebula
-        one_client = OpenNebula::Client.new()
-        one_host = OpenNebula::Host.new_with_id(args[:host], one_client)
-        rc = one_host.info
-        raise rc.message if OpenNebula.is_error? rc
+        attr_accessor :item
 
-        # Get all networks in vcenter cluster (one_host)
-        vc_cluster_networks = cluster_networks(one_host)
+        DPG_CREATE_TIMEOUT = 240
 
-        # Iterate over vcenter networks
-        vc_cluster_networks.each do |vc_cluster_network|
-            exist = VCenterDriver::VIHelper
-                    .find_by_ref(OpenNebula::VirtualNetworkPool,
-                                 'TEMPLATE/VCENTER_NET_REF',
-                                 vc_cluster_network._ref,
-                                 vcenter_uuid,
-                                 npool)
+        def initialize(item, vi_client = nil)
+            check_item(item, RbVmomi::VIM::Datacenter)
 
-            next if exist
-
-            network = process_network(vc_cluster_network,
-                                      vcenter_instance_name,
-                                      vcenter_uuid,
-                                      hpool,
-                                      one_host,
-                                      args)
-
-            networks.merge!(network) unless network.nil?
+            @vi_client = vi_client
+            @item = item
+            @net_rollback = []
+            @locking = true
         end
-        # Added import id
-        imid = -1
-        networks.map {|_k, v| v[:import_id] = imid += 1 }
-        { vcenter_instance_name => networks }
-    end
 
-end # class DatatacenterFolder
+        def datastore_folder
+            DatastoreFolder.new(@item.datastoreFolder)
+        end
 
-class Datacenter
+        def host_folder
+            HostFolder.new(@item.hostFolder)
+        end
 
-    attr_accessor :item
+        def vm_folder
+            VirtualMachineFolder.new(@item.vmFolder)
+        end
 
-    DPG_CREATE_TIMEOUT = 240
+        def network_folder
+            NetworkFolder.new(@item.networkFolder)
+        end
 
-    def initialize(item, vi_client=nil)
-
-        check_item(item, RbVmomi::VIM::Datacenter)
-
-        @vi_client = vi_client
-        @item = item
-        @net_rollback = []
-        @locking = true
-    end
-
-    def datastore_folder
-        DatastoreFolder.new(@item.datastoreFolder)
-    end
-
-    def host_folder
-        HostFolder.new(@item.hostFolder)
-    end
-
-    def vm_folder
-        VirtualMachineFolder.new(@item.vmFolder)
-    end
-
-    def network_folder
-        NetworkFolder.new(@item.networkFolder)
-    end
-
-    # Locking function. Similar to flock
-    def lock
-        hostlockname = @item['name'].downcase.tr(" ", "_")
-        if @locking
-            @locking_file = File.open("/tmp/vcenter-dc-#{hostlockname}-lock","w")
-            @locking_file.flock(File::LOCK_EX)
+        # Locking function. 
Similar to flock + def lock + hostlockname = @item['name'].downcase.tr(' ', '_') + return unless @locking + + @locking_file = + File + .open("/tmp/vcenter-dc-#{hostlockname}-lock", 'w') + @locking_file.flock(File::LOCK_EX) + end + + # Unlock driver execution mutex + def unlock + return unless @locking + @locking_file.close end - end - ######################################################################## - # Check if distributed virtual switch exists in host - ######################################################################## - def dvs_exists(switch_name, net_folder) - - return net_folder.items.values.select{ |dvs| - dvs.instance_of?(VCenterDriver::DistributedVirtualSwitch) && - dvs['name'] == switch_name - }.first rescue nil - end - - ######################################################################## - # Is the distributed switch for the distributed pg different? - ######################################################################## - def pg_changes_sw?(dpg, switch_name) - return dpg['config.distributedVirtualSwitch.name'] != switch_name - end - - ######################################################################## - # Create a distributed vcenter switch in a datacenter - ######################################################################## - def create_dvs(switch_name, pnics, mtu=1500) - # Prepare spec for DVS creation - spec = RbVmomi::VIM::DVSCreateSpec.new - spec.configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new - spec.configSpec.name = switch_name - - # Specify number of uplinks port for dpg - if pnics - pnics = pnics.split(",") - if !pnics.empty? - spec.configSpec.uplinkPortPolicy = RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new - spec.configSpec.uplinkPortPolicy.uplinkPortName = [] - (0..pnics.size-1).each { |index| - spec.configSpec.uplinkPortPolicy.uplinkPortName[index]="dvUplink#{index+1}" - } - end + ######################################################################## + # Check if distributed virtual switch exists in host + ######################################################################## + def dvs_exists(switch_name, net_folder) + net_folder.items.values.select do |dvs| + dvs.instance_of?(VCenterDriver::DistributedVirtualSwitch) && + dvs['name'] == switch_name + end.first rescue nil end - #Set maximum MTU - spec.configSpec.maxMtu = mtu - - # The DVS must be created in the networkFolder of the datacenter - begin - dvs_creation_task = @item.networkFolder.CreateDVS_Task(:spec => spec) - dvs_creation_task.wait_for_completion - - # If task finished successfuly we rename the uplink portgroup - dvs = nil - if dvs_creation_task.info.state == 'success' - dvs = dvs_creation_task.info.result - dvs.config.uplinkPortgroup[0].Rename_Task(:newName => "#{switch_name}-uplink-pg").wait_for_completion - else - raise "The Distributed vSwitch #{switch_name} could not be created. " - end - rescue Exception => e - raise e + ######################################################################## + # Is the distributed switch for the distributed pg different? 
+        ########################################################################
+        def pg_changes_sw?(dpg, switch_name)
+            dpg['config.distributedVirtualSwitch.name'] != switch_name
        end

-    ########################################################################
-    # Create a distributed vcenter switch in a datacenter
-    ########################################################################
-    def create_dvs(switch_name, pnics, mtu=1500)
-        # Prepare spec for DVS creation
-        spec = RbVmomi::VIM::DVSCreateSpec.new
-        spec.configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
-        spec.configSpec.name = switch_name
-
-        # Specify number of uplinks port for dpg
-        if pnics
-            pnics = pnics.split(",")
-            if !pnics.empty?
-                spec.configSpec.uplinkPortPolicy = RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
-                spec.configSpec.uplinkPortPolicy.uplinkPortName = []
-                (0..pnics.size-1).each { |index|
-                    spec.configSpec.uplinkPortPolicy.uplinkPortName[index]="dvUplink#{index+1}"
-                }
-            end
+        ########################################################################
+        # Create a distributed vcenter switch in a datacenter
+        ########################################################################
+        def create_dvs(switch_name, pnics, mtu = 1500)
+            # Prepare spec for DVS creation
+            spec = RbVmomi::VIM::DVSCreateSpec.new
+            spec.configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
+            spec.configSpec.name = switch_name
+
+            # Specify number of uplinks port for dpg
+            if pnics
+                pnics = pnics.split(',')
+                if !pnics.empty?
+                    spec.configSpec.uplinkPortPolicy =
+                        RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
+                    spec.configSpec.uplinkPortPolicy.uplinkPortName = []
+                    (0..pnics.size-1).each do |index|
+                        spec
+                            .configSpec
+                            .uplinkPortPolicy
+                            .uplinkPortName[index]="dvUplink#{index+1}"
+                    end
+                end
+            end

-        #Set maximum MTU
-        spec.configSpec.maxMtu = mtu
-
-        # The DVS must be created in the networkFolder of the datacenter
-        begin
-            dvs_creation_task = @item.networkFolder.CreateDVS_Task(:spec => spec)
-            dvs_creation_task.wait_for_completion
-
-            # If task finished successfuly we rename the uplink portgroup
-            dvs = nil
-            if dvs_creation_task.info.state == 'success'
-                dvs = dvs_creation_task.info.result
-                dvs.config.uplinkPortgroup[0].Rename_Task(:newName => "#{switch_name}-uplink-pg").wait_for_completion
-            else
-                raise "The Distributed vSwitch #{switch_name} could not be created. "
-            end
-        rescue Exception => e
-            raise e
+            # Set maximum MTU
+            spec.configSpec.maxMtu = mtu
+
+            # The DVS must be created in the networkFolder of the datacenter
+            begin
+                dvs_creation_task = @item
+                                    .networkFolder
+                                    .CreateDVS_Task(
+                                        :spec => spec
+                                    )
+                dvs_creation_task.wait_for_completion
+
+                # If task finished successfully we rename the uplink portgroup
+                dvs = nil
+                if dvs_creation_task.info.state == 'success'
+                    dvs = dvs_creation_task.info.result
+                    dvs
+                        .config
+                        .uplinkPortgroup[0]
+                        .Rename_Task(
+                            :newName => "#{switch_name}-uplink-pg"
+                        ).wait_for_completion
+                else
+                    raise "The Distributed vSwitch #{switch_name} \
+                    could not be created. 
" + end + rescue StandardError => e + raise e + end + + @net_rollback << { + :action => :delete_dvs, + :dvs => dvs, + :name => switch_name + } + + VCenterDriver::DistributedVirtualSwitch.new(dvs, @vi_client) + end + + ######################################################################## + # Update a distributed vcenter switch + ######################################################################## + def update_dvs(dvs, pnics, mtu) + # Prepare spec for DVS creation + spec = RbVmomi::VIM::VMwareDVSConfigSpec.new + changed = false + + orig_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new + orig_spec.maxMtu = dvs['config.maxMtu'] + orig_spec.uplinkPortPolicy = + RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new + orig_spec.uplinkPortPolicy.uplinkPortName = [] + (0..dvs['config.uplinkPortgroup'].length-1).each do |index| + orig_spec + .uplinkPortPolicy + .uplinkPortName[index]="dvUplink#{index+1}" + end + + # Add more uplinks to default uplink + # port group according to number of pnics + if pnics + pnics = pnics.split(',') + if !pnics.empty? && dvs['config.uplinkPortgroup'] + .length != pnics.size + spec.uplinkPortPolicy = + RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new + spec.uplinkPortPolicy.uplinkPortName = [] + (dvs['config.uplinkPortgroup'] + .length..num_pnics-1) + .each do |index| + spec + .uplinkPortPolicy + .uplinkPortName[index] = + "dvUplink#{index+1}" + end + changed = true + end + end + + # Set maximum MTU + if mtu != dvs['config.maxMtu'] + spec.maxMtu = mtu changed = true end - end - #Set maximum MTU - if mtu != dvs['config.maxMtu'] - spec.maxMtu = mtu - changed = true - end + # The DVS must be created in the networkFolder of the datacenter + return unless changed - # The DVS must be created in the networkFolder of the datacenter - if changed spec.configVersion = dvs['config.configVersion'] begin - dvs.item.ReconfigureDvs_Task(:spec => spec).wait_for_completion - rescue Exception => e - raise "The Distributed switch #{dvs['name']} could not be updated. "\ + dvs + .item + .ReconfigureDvs_Task( + :spec => spec + ).wait_for_completion + rescue StandardError => e + raise "The Distributed switch #{dvs['name']} could \ + not be updated. "\ + "Reason: #{e.message}" + end + + @net_rollback << { + :action => :update_dvs, + :dvs => dvs.item, + :name => dvs['name'], + :spec => orig_spec + } + end + + ######################################################################## + # Remove a distributed vcenter switch in a datacenter + ######################################################################## + def remove_dvs(dvs) + begin + dvs.item.Destroy_Task.wait_for_completion + rescue StandardError + # Ignore destroy task exception + end + end + + ######################################################################## + # Check if distributed port group exists in datacenter + ######################################################################## + def dpg_exists(pg_name, net_folder) + net_folder.items.values.select do |dpg| + dpg.instance_of?(VCenterDriver::DistributedPortGroup) && + dpg['name'] == pg_name + end.first rescue nil + end + + ######################################################################## + # Check if Opaque Network exists in datacenter + ######################################################################## + def nsx_network(nsx_id, pg_type) + timeout = 180 + case pg_type + when VCenterDriver::Network::NETWORK_TYPE_NSXT + while timeout > 0 + net_folder = network_folder + net_folder.fetch! 
+                    net_folder.items.values.each do |net|
+                        if net.instance_of?(VCenterDriver::OpaqueNetwork) &&
+                           net.item.summary.opaqueNetworkId == nsx_id
+                            return net.item._ref
+                        end
+                    end
+                    sleep(1)
+                    timeout -= 1
+                end
+            # Not used right now, but maybe necessary in the future.
+            when VCenterDriver::Network::NETWORK_TYPE_NSXV
+                while timeout > 0
+                    net_folder = network_folder
+                    net_folder.fetch!
+                    net_folder.items.values.each do |net|
+                        if net.instance_of?(
+                            VCenterDriver::DistributedPortGroup
+                        ) &&
+                           net.item.key == nsx_id
+                            return net.item._ref
+                        end
+                    end
+                    sleep(1)
+                    timeout -= 1
+                end
+            else
+                raise "Unknown network Port Group type: #{pg_type}"
+            end
+        end
+
+        ########################################################################
+        # Create a distributed vcenter port group
+        ########################################################################
+        def create_dpg(dvs, pg_name, vlan_id, num_ports)
+            spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
+
+            # OpenNebula use DVS static port binding with autoexpand
+            if num_ports
+                spec.autoExpand = true
+                spec.numPorts = num_ports
+            end
+
+            # Distributed port group name
+            spec.name = pg_name
+
+            # Set VLAN information
+            spec.defaultPortConfig =
+                RbVmomi::VIM::VMwareDVSPortSetting.new
+            spec.defaultPortConfig.vlan =
+                RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
+            spec.defaultPortConfig.vlan.vlanId =
+                vlan_id
+            spec.defaultPortConfig.vlan.inherited =
+                false
+
+            # earlyBinding. A free DistributedVirtualPort will be selected and
+            # assigned to a VirtualMachine when
+            # the virtual machine is reconfigured
+            # to connect to the portgroup.
+            spec.type = 'earlyBinding'
+
+            begin
+                dvs
+                    .item
+                    .AddDVPortgroup_Task(
+                        :spec => [spec]
+                    ).wait_for_completion
+            rescue StandardError => e
+                raise "The Distributed port group #{pg_name} \
+                could not be created. "\
                "Reason: #{e.message}"
            end

-        @net_rollback << {:action => :update_dvs, :dvs => dvs.item, :name => dvs['name'], :spec => orig_spec}
-        end
-    end
-
-    ########################################################################
-    # Remove a distributed vcenter switch in a datacenter
-    ########################################################################
-    def remove_dvs(dvs)
-        begin
-            dvs.item.Destroy_Task.wait_for_completion
-        rescue
-            #Ignore destroy task exception
-        end
-    end
-
-    ########################################################################
-    # Check if distributed port group exists in datacenter
-    ########################################################################
-    def dpg_exists(pg_name, net_folder)
-        return net_folder.items.values.select{ |dpg|
-            dpg.instance_of?(VCenterDriver::DistributedPortGroup) &&
-            dpg['name'] == pg_name
-        }.first rescue nil
-    end
-
-    ########################################################################
-    # Check if Opaque Network exists in datacenter
-    ########################################################################
-    def nsx_network(nsx_id, pgType)
-        timeout = 180
-        if pgType == VCenterDriver::Network::NETWORK_TYPE_NSXT
-            while timeout > 0
-                netFolder = self.network_folder
-                netFolder.fetch!
-                netFolder.items.values.each{ |net|
-                    if net.instance_of?(VCenterDriver::OpaqueNetwork) &&
-                       net.item.summary.opaqueNetworkId == nsx_id
-                        return net.item._ref
-                    end
-                }
-                sleep(1)
-                timeout -= 1
-            end
-            # Not used right now, but maybe neccesary in the future.
-        elsif pgType == VCenterDriver::Network::NETWORK_TYPE_NSXV
-            while timeout > 0
-                netFolder = self.network_folder
-                netFolder.fetch! 
- netFolder.items.values.each{ |net| - if net.instance_of?(VCenterDriver::DistributedPortGroup) && - net.item.key == nsx_id - return net.item._ref - end - } - sleep(1) - timeout -= 1 - end - else - raise "Unknown network Port Group type: #{pgType}" - end - end - - ######################################################################## - # Create a distributed vcenter port group - ######################################################################## - def create_dpg(dvs, pg_name, vlan_id, num_ports) - spec = RbVmomi::VIM::DVPortgroupConfigSpec.new - - # OpenNebula use DVS static port binding with autoexpand - if num_ports - spec.autoExpand = true - spec.numPorts = num_ports - end - - # Distributed port group name - spec.name = pg_name - - # Set VLAN information - spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new - spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new - spec.defaultPortConfig.vlan.vlanId = vlan_id - spec.defaultPortConfig.vlan.inherited = false - - # earlyBinding. A free DistributedVirtualPort will be selected and - # assigned to a VirtualMachine when the virtual machine is reconfigured - # to connect to the portgroup. - spec.type = "earlyBinding" - - begin - dvs.item.AddDVPortgroup_Task(spec: [spec]).wait_for_completion - rescue Exception => e - raise "The Distributed port group #{pg_name} could not be created. "\ - "Reason: #{e.message}" - end - - # wait until the network is ready and we have a reference - portgroups = dvs['portgroup'].select{ |dpg| - dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) && - dpg['name'] == pg_name - } - - (0..DPG_CREATE_TIMEOUT).each do - break if !portgroups.empty? - portgroups = dvs['portgroup'].select{ |dpg| + # wait until the network is ready and we have a reference + portgroups = dvs['portgroup'].select do |dpg| dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) && - dpg['name'] == pg_name + dpg['name'] == pg_name + end + + (0..DPG_CREATE_TIMEOUT).each do + break unless portgroups.empty? + + portgroups = dvs['portgroup'].select do |dpg| + dpg + .instance_of?( + RbVmomi::VIM::DistributedVirtualPortgroup + ) && dpg['name'] == pg_name + end + sleep 1 + end + + if portgroups.empty? + raise 'Cannot get VCENTER_NET_REF \ + for new distributed port group' + end + + @net_rollback << { + :action => :delete_dpg, + :dpg => portgroups.first, + :name => pg_name } - sleep 1 + + portgroups.first._ref end - raise "Cannot get VCENTER_NET_REF for new distributed port group" if portgroups.empty? 
+ ######################################################################## + # Update a distributed vcenter port group + ######################################################################## + def update_dpg(dpg, vlan_id, num_ports) + spec = RbVmomi::VIM::DVPortgroupConfigSpec.new - @net_rollback << {:action => :delete_dpg, :dpg => portgroups.first, :name => pg_name} + changed = false - return portgroups.first._ref - end + orig_spec = + RbVmomi::VIM::DVPortgroupConfigSpec.new + orig_spec.numPorts = + dpg['config.numPorts'] + orig_spec.defaultPortConfig = + RbVmomi::VIM::VMwareDVSPortSetting.new + orig_spec.defaultPortConfig.vlan = + RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new + orig_spec.defaultPortConfig.vlan.vlanId = + dpg['config.defaultPortConfig.vlan.vlanId'] + orig_spec.defaultPortConfig.vlan.inherited = + false - ######################################################################## - # Update a distributed vcenter port group - ######################################################################## - def update_dpg(dpg, vlan_id, num_ports) - spec = RbVmomi::VIM::DVPortgroupConfigSpec.new + if num_ports && num_ports != orig_spec.numPorts + spec.numPorts = num_ports + changed = true + end - changed = false + # earlyBinding. A free DistributedVirtualPort + # will be selected and + # assigned to a VirtualMachine when + # the virtual machine is reconfigured + # to connect to the portgroup. + spec.type = 'earlyBinding' - orig_spec = RbVmomi::VIM::DVPortgroupConfigSpec.new - orig_spec.numPorts = dpg['config.numPorts'] - orig_spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new - orig_spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new - orig_spec.defaultPortConfig.vlan.vlanId = dpg['config.defaultPortConfig.vlan.vlanId'] - orig_spec.defaultPortConfig.vlan.inherited = false + if vlan_id != orig_spec.defaultPortConfig.vlan.vlanId + spec.defaultPortConfig = + RbVmomi::VIM::VMwareDVSPortSetting.new + spec.defaultPortConfig.vlan = + RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new + spec.defaultPortConfig.vlan.vlanId = vlan_id + spec.defaultPortConfig.vlan.inherited = false + changed = true + end - if num_ports && num_ports != orig_spec.numPorts - spec.numPorts = num_ports - changed = true - end - - # earlyBinding. A free DistributedVirtualPort will be selected and - # assigned to a VirtualMachine when the virtual machine is reconfigured - # to connect to the portgroup. - spec.type = "earlyBinding" - - if vlan_id != orig_spec.defaultPortConfig.vlan.vlanId - spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new - spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new - spec.defaultPortConfig.vlan.vlanId = vlan_id - spec.defaultPortConfig.vlan.inherited = false - changed = true - end - - if changed + return unless changed spec.configVersion = dpg['config.configVersion'] begin - dpg.item.ReconfigureDVPortgroup_Task(:spec => spec).wait_for_completion - rescue Exception => e - raise "The Distributed port group #{dpg['name']} could not be created. "\ - "Reason: #{e.message}" + dpg + .item + .ReconfigureDVPortgroup_Task( + :spec => spec + ).wait_for_completion + rescue StandardError => e + raise "The Distributed port group #{dpg['name']} \ + could not be created. 
"\ + "Reason: #{e.message}" end - @net_rollback << {:action => :update_dpg, :dpg => dpg.item, :name => dpg['name'], :spec => orig_spec} + @net_rollback << { + :action => :update_dpg, + :dpg => dpg.item, + :name => dpg['name'], + :spec => orig_spec + } end - end - - ######################################################################## - # Remove distributed port group from datacenter - ######################################################################## - def remove_dpg(dpg) - begin - dpg.item.Destroy_Task.wait_for_completion - rescue RbVmomi::VIM::ResourceInUse => e - STDERR.puts "The distributed portgroup #{dpg["name"]} is in use so it cannot be deleted" - return nil - rescue Exception => e - raise "The Distributed portgroup #{dpg["name"]} could not be deleted. Reason: #{e.message} " + ######################################################################## + # Remove distributed port group from datacenter + ######################################################################## + def remove_dpg(dpg) + begin + dpg.item.Destroy_Task.wait_for_completion + rescue RbVmomi::VIM::ResourceInUse + STDERR.puts "The distributed portgroup \ + #{dpg['name']} is in use so it cannot be deleted" + nil + rescue StandardError => e + raise "The Distributed portgroup #{dpg['name']} \ + could not be deleted. Reason: #{e.message} " + end end - end - ######################################################################## - # Perform vcenter network rollback operations - ######################################################################## - def network_rollback - @net_rollback.reverse_each do |nr| - - case nr[:action] + ######################################################################## + # Perform vcenter network rollback operations + ######################################################################## + def network_rollback + @net_rollback.reverse_each do |nr| + case nr[:action] when :update_dpg begin - nr[:dpg].ReconfigureDVPortgroup_Task(:spec => nr[:spec]).wait_for_completion - rescue Exception => e - raise "A rollback operation for distributed port group #{nr[:name]} could not be performed. Reason: #{e.message}" - end + nr[:dpg] + .ReconfigureDVPortgroup_Task( + :spec => nr[:spec] + ).wait_for_completion + rescue StandardError => e + raise "A rollback operation for distributed \ + port group #{nr[:name]} could not \ + be performed. Reason: #{e.message}" + end when :update_dvs begin - nr[:dvs].ReconfigureDvs_Task(:spec => nr[:spec]).wait_for_completion - rescue Exception => e - raise "A rollback operation for distributed standard switch #{nr[:name]} could not be performed. Reason: #{e.message}" - end + nr[:dvs] + .ReconfigureDvs_Task( + :spec => nr[:spec] + ).wait_for_completion + rescue StandardError => e + raise "A rollback operation for distributed\ + standard switch #{nr[:name]} could \ + not be performed. Reason: #{e.message}" + end when :delete_dvs begin - nr[:dvs].Destroy_Task.wait_for_completion + nr[:dvs].Destroy_Task.wait_for_completion rescue RbVmomi::VIM::ResourceInUse - return #Ignore if switch in use + next # Ignore if switch in use rescue RbVmomi::VIM::NotFound - return #Ignore if switch not found - rescue Exception => e - raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}" - end + next # Ignore if switch not found + rescue StandardError => e + raise "A rollback operation \ + for standard switch #{nr[:name]} \ + could not be performed. 
Reason: #{e.message}" + end when :delete_dpg begin nr[:dpg].Destroy_Task.wait_for_completion rescue RbVmomi::VIM::ResourceInUse - return #Ignore if pg in use + next # Ignore if pg in use rescue RbVmomi::VIM::NotFound - return #Ignore if pg not found - rescue Exception => e - raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}" + next # Ignore if pg not found + rescue StandardError => e + raise "A rollback operation for \ + standard port group #{nr[:name]} could \ + not be performed. Reason: #{e.message}" end + end end end + + ######################################################################## + # PowerOn VMs + ######################################################################## + def power_on_vm(vm) + @item.PowerOnMultiVM_Task({ :vm => [vm] }).wait_for_completion + end + + def self.new_from_ref(ref, vi_client) + new(RbVmomi::VIM::Datacenter.new(vi_client.vim, ref), vi_client) + end + end - ######################################################################## - # PowerOn VMs - ######################################################################## - def power_on_vm(vm) - @item.PowerOnMultiVM_Task({:vm => [vm]}).wait_for_completion - end - - def self.new_from_ref(ref, vi_client) - self.new(RbVmomi::VIM::Datacenter.new(vi_client.vim, ref), vi_client) - end end - -end # module VCenterDriver +# module VCenterDriver diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb b/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb index f53a4e1407..ca48d4eb2a 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb @@ -14,658 +14,787 @@ # limitations under the License. # #--------------------------------------------------------------------------- # +############################################################################## +# Module VCenterDriver +############################################################################## module VCenterDriver -require 'digest' -require 'uri' -class DatastoreFolder - attr_accessor :item, :items - def initialize(item) - @item = item - @items = {} - end + require 'digest' + require 'uri' - ######################################################################## - # Builds a hash with Datastore-Ref / Datastore to be used as a cache - # @return [Hash] in the form - # { ds_ref [Symbol] => Datastore object } - ######################################################################## - def fetch! - VIClient.get_entities(@item, 'Datastore').each do |item| - item_name = item._ref - @items[item_name.to_sym] = Datastore.new(item) + ########################################################################## + # Class DatastoreFolder + ########################################################################## + class DatastoreFolder + + attr_accessor :item, :items + + def initialize(item) + @item = item + @items = {} end - VIClient.get_entities(@item, 'StoragePod').each do |sp| - @items[sp._ref.to_sym] = StoragePod.new(sp) - VIClient.get_entities(sp, 'Datastore').each do |item| + ###################################################################### + # Builds a hash with Datastore-Ref / Datastore to be used as a cache + # @return [Hash] in the form + # { ds_ref [Symbol] => Datastore object } + ###################################################################### + def fetch! 
+ VIClient.get_entities(@item, 'Datastore').each do |item| item_name = item._ref @items[item_name.to_sym] = Datastore.new(item) end - end - @items - end - ######################################################################## - # Returns a Datastore or StoragePod. Uses the cache if available. - # @param ref [Symbol] the vcenter ref - # @return Datastore - ######################################################################## - def get(ref) - if !@items[ref.to_sym] - if ref.start_with?("group-") - rbvmomi_spod = RbVmomi::VIM::StoragePod.new(@item._connection, ref) rescue nil - @items[ref.to_sym] = StoragePod.new(rbvmomi_spod) + VIClient.get_entities(@item, 'StoragePod').each do |sp| + @items[sp._ref.to_sym] = StoragePod.new(sp) + VIClient.get_entities(sp, 'Datastore').each do |item| + item_name = item._ref + @items[item_name.to_sym] = Datastore.new(item) + end + end + @items + end + + ###################################################################### + # Returns a Datastore or StoragePod. Uses the cache if available. + # @param ref [Symbol] the vcenter ref + # @return Datastore + ###################################################################### + def get(ref) + if !@items[ref.to_sym] + if ref.start_with?('group-') + rbvmomi_spod = RbVmomi::VIM::StoragePod + .new( + @item._connection, + ref + ) rescue nil + @items[ref.to_sym] = StoragePod.new(rbvmomi_spod) + else + rbvmomi_ds = RbVmomi::VIM::Datastore + .new( + @item._connection, + ref + ) rescue nil + @items[ref.to_sym] = Datastore.new(rbvmomi_ds) + end + end + @items[ref.to_sym] + end + + end + # class DatastoreFolder + + ########################################################################## + # Class Storage + ########################################################################## + class Storage + + attr_accessor :item + + include Memoize + + CURLBIN = 'curl' + + def self.new_from_ref(ref, vi_client) + if ref.start_with?('group-') + VCenterDriver::StoragePod.new_from_ref(ref, vi_client) else - rbvmomi_ds = RbVmomi::VIM::Datastore.new(@item._connection, ref) rescue nil - @items[ref.to_sym] = Datastore.new(rbvmomi_ds) - end - end - @items[ref.to_sym] - end -end # class DatastoreFolder - -class Storage - attr_accessor :item - - include Memoize - - CURLBIN = "curl" - - def self.new_from_ref(ref, vi_client) - if ref.start_with?('group-') - return VCenterDriver::StoragePod.new_from_ref(ref, vi_client) - else - return VCenterDriver::Datastore.new_from_ref(ref, vi_client) - end - end - - def self.get_image_import_template(disk, ipool, type, ds_id, opts = {}, images=[]) - - VCenterDriver::VIHelper.check_opts(opts, [:persistent]) - - ds_name = disk[:datastore].name - image_path = disk[:path_wo_ds] - image_type = disk[:type] - image_prefix = disk[:prefix] - - image_name = nil - - one_image = {} - one_image[:template] = "" - - # Get image name - file_name = File.basename(image_path).gsub(/\.vmdk$/,"") - - #Check if the image has already been imported - image = VIHelper.find_image_by("SOURCE", OpenNebula::ImagePool, image_path, ds_id, ipool) - - if image.nil? 
- key = "#{file_name}#{ds_name}#{image_path}" - byte = 0 - image_name = VCenterDriver::VIHelper.one_name(OpenNebula::ImagePool, file_name, key, ipool, byte) - while images.include?(image_name) - byte += 2 - image_name = VCenterDriver::VIHelper.one_name(OpenNebula::ImagePool, file_name, key, ipool, byte) - end - - #Set template - one_image[:template] << "NAME=\"#{image_name}\"\n" - one_image[:template] << "PATH=\"vcenter://#{image_path}\"\n" - one_image[:template] << "TYPE=\"#{image_type}\"\n" - one_image[:template] << "PERSISTENT=\"#{opts[:persistent]}\"\n" - one_image[:template] << "VCENTER_IMPORTED=\"YES\"\n" unless CONFIG[:delete_images] - one_image[:template] << "DEV_PREFIX=\"#{image_prefix}\"\n" - else - # Return the image XML if it already exists - one_image[:one] = image - end - - return one_image, image_name - end - - def self.get_one_image_ds_by_ref_and_dc(ref, dc_ref, vcenter_uuid, pool = nil) - if pool.nil? - pool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false) - if pool.respond_to?(:message) - raise "Could not get OpenNebula DatastorePool: #{pool.message}" + VCenterDriver::Datastore.new_from_ref(ref, vi_client) end end - element = pool.select do |e| - e["TEMPLATE/TYPE"] == "IMAGE_DS" && - e["TEMPLATE/VCENTER_DS_REF"] == ref && - e["TEMPLATE/VCENTER_DC_REF"] == dc_ref && - e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid - end.first rescue nil - - return element - end - - # Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file - def self.is_disk_or_iso?(device) - is_disk = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil? - is_iso = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo - is_disk || is_iso - end - - - def monitor - summary = self['summary'] - - total_mb = (summary.capacity.to_i / 1024) / 1024 - free_mb = (summary.freeSpace.to_i / 1024) / 1024 - used_mb = total_mb - free_mb - - "USED_MB=#{used_mb}\nFREE_MB=#{free_mb} \nTOTAL_MB=#{total_mb}" - end - - def self.exists_one_by_ref_dc_and_type?(ref, dc_ref, vcenter_uuid, type, pool = nil) - if pool.nil? - pool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false) - if pool.respond_to?(:message) - raise "Could not get OpenNebula DatastorePool: #{pool.message}" - end - end - elements = pool.select do |e| - e["TEMPLATE/TYPE"] == type && - e["TEMPLATE/VCENTER_DS_REF"] == ref && - e["TEMPLATE/VCENTER_DC_REF"] == dc_ref && - e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid - end - - return elements.size == 1 - end - - def to_one(ds_hash, vcenter_uuid, dc_name, dc_ref) - one = "" - one << "DRIVER=\"vcenter\"\n" - one << "NAME=\"#{ds_hash[:name]}\"\n" - one << "TM_MAD=vcenter\n" - one << "VCENTER_INSTANCE_ID=\"#{vcenter_uuid}\"\n" - one << "VCENTER_DC_REF=\"#{dc_ref}\"\n" - one << "VCENTER_DC_NAME=\"#{dc_name}\"\n" - one << "VCENTER_DS_NAME=\"#{ds_hash[:simple_name]}\"\n" - one << "VCENTER_DS_REF=\"#{self['_ref']}\"\n" - return one - end - - def to_one_template(ds_hash, vcenter_uuid, dc_name, dc_ref, type) - one_tmp = { - :one => to_one(ds_hash, vcenter_uuid, dc_name, dc_ref) - } - - if type == "SYSTEM_DS" - one_tmp[:one] << "TYPE=SYSTEM_DS\n" - else - one_tmp[:one] << "DS_MAD=vcenter\n" - one_tmp[:one] << "TYPE=IMAGE_DS\n" - end - - return one_tmp - end - - def create_virtual_disk(img_name, size, adapter_type, disk_type) - leading_dirs = img_name.split('/')[0..-2] - if !leading_dirs.empty? 
- create_directory(leading_dirs.join('/')) - end - - ds_name = self['name'] - - vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec( - :adapterType => adapter_type, - :capacityKb => size.to_i*1024, - :diskType => disk_type - ) - - get_vdm.CreateVirtualDisk_Task( - :datacenter => get_dc.item, - :name => "[#{ds_name}] #{img_name}.vmdk", - :spec => vmdk_spec - ).wait_for_completion - - "#{img_name}.vmdk" - end - - def create_directory(directory) - ds_name = self['name'] - - return if self.class == VCenterDriver::StoragePod - - directory_name = "[#{ds_name}] #{directory}" - - create_directory_params = { - :name => directory_name, - :datacenter => get_dc.item, - :createParentDirectories => true - } - - begin - get_fm.MakeDirectory(create_directory_params) - rescue RbVmomi::VIM::FileAlreadyExists => e - # Do nothing if directory already exists - end - end - - def get_fm - self['_connection.serviceContent.fileManager'] - end - - def get_vdm - self['_connection.serviceContent.virtualDiskManager'] - end - - def get_dc - item = @item - - while !item.instance_of? RbVmomi::VIM::Datacenter - item = item.parent - if item.nil? - raise "Could not find the parent Datacenter" - end - end - - Datacenter.new(item) - end - - - -end # class Storage - -class StoragePod < Storage - - def initialize(item, vi_client=nil) - - check_item(item, RbVmomi::VIM::StoragePod) - - @item = item - end - - # This is never cached - def self.new_from_ref(ref, vi_client) - self.new(RbVmomi::VIM::StoragePod.new(vi_client.vim, ref), vi_client) - end -end # class StoragePod - -class Datastore < Storage - - attr_accessor :one_item - - def initialize(item, vi_client=nil) - check_item(item, RbVmomi::VIM::Datastore) - @vi_client = vi_client - @item = item - @one_item = {} - end - - def delete_virtual_disk(img_name) - ds_name = self['name'] - - begin - get_vdm.DeleteVirtualDisk_Task( - :name => "[#{ds_name}] #{img_name}", - :datacenter => get_dc.item - ).wait_for_completion - rescue Exception => e - # Ignore if file not found - if !e.message.start_with?('ManagedObjectNotFound') && - !e.message.start_with?('FileNotFound') - raise e - end - end - end - - def delete_file(img_name) - - ds_name = self['name'] - - begin - get_fm.DeleteDatastoreFile_Task( - :name => "[#{ds_name}] #{img_name}", - :datacenter => get_dc.item - ).wait_for_completion - rescue StandardError => e - # Ignore if file not found - if !e.message.start_with?('ManagedObjectNotFound') && - !e.message.start_with?('FileNotFound') - raise e - end - end - end - - # Copy a VirtualDisk - def copy_virtual_disk(src_path, target_ds, target_path, new_size=nil) - source_ds_name = self['name'] - target_ds_name = target_ds['name'] - - leading_dirs = target_path.split('/')[0..-2] - if !leading_dirs.empty? 
- if source_ds_name == target_ds_name - create_directory(leading_dirs.join('/')) - else - target_ds.create_directory(leading_dirs.join('/')) - end - end - - copy_params = { - :sourceName => "[#{source_ds_name}] #{src_path}", - :sourceDatacenter => get_dc.item - } - - if File.extname(src_path) == '.vmdk' - copy_params[:destName] = "[#{target_ds_name}] #{target_path}" - get_vdm.CopyVirtualDisk_Task(copy_params).wait_for_completion - - if new_size - resize_spec = { - :name => "[#{target_ds_name}] #{target_path}", - :datacenter => target_ds.get_dc.item, - :newCapacityKb => new_size, - :eagerZero => false - } - - get_vdm.ExtendVirtualDisk_Task(resize_spec).wait_for_completion - end - else - copy_params[:destinationName] = "[#{target_ds_name}] #{target_path}" - get_fm.CopyDatastoreFile_Task(copy_params).wait_for_completion - end - - target_path - end - - def move_virtual_disk(disk, dest_path, dest_dsid, vi_client = nil) - vi_client = @vi_client unless vi_client - - target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, dest_dsid, false) - target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF'] - target_ds_vc = VCenterDriver::Datastore.new_from_ref(target_ds_ref, vi_client) - dest_name = target_ds_vc['name'] - - target_ds_vc.create_directory(File.dirname(dest_path)) - - dpath_ds = "[#{dest_name}] #{dest_path}" - orig_path = "[#{self['name']}] #{disk.path}" - - move_params = { - sourceName: orig_path, - sourceDatacenter: get_dc.item, - destName: dpath_ds, - force: true - } - - get_vdm.MoveVirtualDisk_Task(move_params).wait_for_completion - end - - def rm_directory(directory) - ds_name = self['name'] - - rm_directory_params = { - :name => "[#{ds_name}] #{directory}", - :datacenter => get_dc.item - } - - get_fm.DeleteDatastoreFile_Task(rm_directory_params).wait_for_completion - end - - def dir_empty?(path) - ds_name = self['name'] - - spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new - - search_params = { - 'datastorePath' => "[#{ds_name}] #{path}", - 'searchSpec' => spec - } - - begin - search_task = self['browser'].SearchDatastoreSubFolders_Task(search_params) - search_task.wait_for_completion - return !!search_task.info.result && - search_task.info.result.length == 1 && - search_task.info.result.first.file.length == 0 - rescue - return false - end - end - - - def upload_file(source_path, target_path) - @item.upload(target_path, source_path) - end - - def download_file(source_path, target_path) - @item.download(source_path, target_path) - end - - # Get file size for image handling - def stat(img_str) - ds_name = self['name'] - img_path = File.dirname img_str - img_name = File.basename img_str - - # Create Search Spec - search_params = get_search_params(ds_name, img_path, img_name) - - # Perform search task and return results - begin - search_task = self['browser']. - SearchDatastoreSubFolders_Task(search_params) - - search_task.wait_for_completion - - size = 0 - - # Try to get vmdk capacity as seen by VM - size = search_task.info.result[0].file[0].capacityKb / 1024 rescue nil - - # Try to get file size - size = search_task.info.result[0].file[0].fileSize / 1024 / 1024 rescue nil if !size - - raise "Could not get file size or capacity" if size.nil? - - size - rescue - raise "Could not find file." 
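The size resolution in #stat above is easy to misread: it prefers the vmdk capacity as seen by the VM (capacityKb) and only falls back to the on-disk file size (bytes) when that is unavailable. A minimal sketch of the fallback chain, using OpenStruct as a stand-in for the vSphere search-result file objects (the helper name is hypothetical):

require 'ostruct'

# Returns a size in MB: capacityKb first, fileSize as a fallback.
def size_mb(file)
    size = (file.capacityKb / 1024 rescue nil)       # vmdk capacity, KB -> MB
    size ||= (file.fileSize / 1024 / 1024 rescue nil) # on-disk size, B -> MB
    raise 'Could not get file size or capacity' if size.nil?

    size
end

size_mb(OpenStruct.new(:capacityKb => 2_097_152))      # => 2048 (a 2 GB vmdk)
size_mb(OpenStruct.new(:fileSize => 10 * 1024 * 1024)) # => 10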
- end - end - - def get_search_params(ds_name, img_path=nil, img_name=nil) - spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new - - vmdisk_query = RbVmomi::VIM::VmDiskFileQuery.new - vmdisk_query.details = RbVmomi::VIM::VmDiskFileQueryFlags(:diskType => true, - :capacityKb => true, - :hardwareVersion => true, - :controllerType => true) - - spec.query = [vmdisk_query, - RbVmomi::VIM::IsoImageFileQuery.new] - spec.details = RbVmomi::VIM::FileQueryFlags(:fileOwner => true, - :fileSize => true, - :fileType => true, - :modification => true) - - - spec.matchPattern = img_name.nil? ? [] : [img_name] - - datastore_path = "[#{ds_name}]" - datastore_path << " #{img_path}" if !img_path.nil? - - search_params = {'datastorePath' => datastore_path, - 'searchSpec' => spec} - - return search_params - end - - def get_dc_path - dc = get_dc - p = dc.item.parent - path = [dc.item.name] - while p.instance_of? RbVmomi::VIM::Folder - path.unshift(p.name) - p = p.parent - end - path.delete_at(0) # The first folder is the root "Datacenters" - path.join('/') - end - - def generate_file_url(path) - protocol = self["_connection.http.use_ssl?"] ? 'https://' : 'http://' - hostname = self["_connection.http.address"] - port = self["_connection.http.port"] - dcpath = get_dc_path - - # This creates the vcenter file URL for uploading or downloading files - # e.g: - url = "#{protocol}#{hostname}:#{port}/folder/#{path}?dcPath=#{dcpath}&dsName=#{self['name']}" - return URI.escape(url) - end - - def download_to_stdout(remote_path) - url = generate_file_url(remote_path) - pid = spawn(CURLBIN, - "-k", '--noproxy', '*', '-f', - "-b", self["_connection.cookie"], - url) - - Process.waitpid(pid, 0) - fail "download failed" unless $?.success? - end - - def is_descriptor?(remote_path) - url = generate_file_url(remote_path) - - rout, wout = IO.pipe - pid = spawn(CURLBIN, - "-I", "-k", '--noproxy', '*', '-f', - "-b", self["_connection.cookie"], - url, - :out => wout, - :err => '/dev/null') - - Process.waitpid(pid, 0) - fail "read image header failed" unless $?.success? - - wout.close - size = rout.readlines.select{|l| - l.start_with?("Content-Length") - }[0].sub("Content-Length: ","") - rout.close - size.chomp.to_i < 4096 # If <4k, then is a descriptor - end - - def get_text_file remote_path - url = generate_file_url(remote_path) - - rout, wout = IO.pipe - pid = spawn CURLBIN, "-k", '--noproxy', '*', '-f', - "-b", self["_connection.cookie"], - url, - :out => wout, - :err => '/dev/null' - - Process.waitpid(pid, 0) - fail "get text file failed" unless $?.success? 
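The curl helpers above (download_to_stdout, the descriptor check, get_text_file) all share the same spawn/IO.pipe/waitpid shape. A self-contained sketch of that pattern, with echo standing in for CURLBIN so the snippet runs anywhere; the header value is made up:

# Capture a child process's stdout through a pipe and check its exit status.
rout, wout = IO.pipe
pid = spawn('echo', 'Content-Length: 512', :out => wout, :err => '/dev/null')

Process.waitpid(pid, 0)
raise 'command failed' unless $?.success?

wout.close
header = rout.readlines.first
rout.close

# Same heuristic as the descriptor check: under 4 KB means a descriptor file.
puts header.sub('Content-Length: ', '').to_i < 4096 # => true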
- - wout.close - output = rout.readlines - rout.close - return output - end - - def get_images - img_templates = [] - images = {} - imid = -1 - ds_id = nil - ds_name = self['name'] - - # We need OpenNebula Images and Datastores pools - ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool, false) - if ipool.respond_to?(:message) - raise "Could not get OpenNebula ImagePool: #{pool.message}" - end - - dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false) - if dpool.respond_to?(:message) - raise "Could not get OpenNebula DatastorePool: #{pool.message}" - end - - ds_id = @one_item["ID"] - - begin - # Create Search Spec - search_params = get_search_params(ds_name) - - # Perform search task and return results - search_task = self['browser'].SearchDatastoreSubFolders_Task(search_params) - search_task.wait_for_completion - - # Loop through search results - search_task.info.result.each do |result| - - # Remove [datastore] from file path - folderpath = "" - size = result.folderPath.size - if result.folderPath[-1] != "]" - result.folderPath[size] = '/' if result.folderPath[-1] != '/' - folderpath = result.folderPath.sub(/^\[#{ds_name}\] /, "") + def self.get_image_import_template(params) + disk = params[:disk] + ipool = params[:ipool] + _type = params[:_type] + ds_id = params[:ds_id] + opts = params[:opts] + images = params[:images] + + VCenterDriver::VIHelper.check_opts(opts, [:persistent]) + + ds_name = disk[:datastore].name + image_path = disk[:path_wo_ds] + image_type = disk[:type] + image_prefix = disk[:prefix] + + image_name = nil + + one_image = {} + one_image[:template] = '' + + # Get image name + file_name = File.basename(image_path).gsub(/\.vmdk$/, '') + + # Check if the image has already been imported + image = VIHelper + .find_image_by( + 'SOURCE', + OpenNebula::ImagePool, + image_path, + ds_id, + ipool + ) + + if image.nil? + key = "#{file_name}#{ds_name}#{image_path}" + byte = 0 + image_name = VCenterDriver::VIHelper + .one_name( + OpenNebula::ImagePool, + file_name, + key, + ipool, + byte + ) + while images.include?(image_name) + byte += 2 + image_name = VCenterDriver::VIHelper + .one_name( + OpenNebula::ImagePool, + file_name, + key, + ipool, + byte + ) end - # Loop through images in result.file - result.file.each do |image| + # Set template + one_image[:template] << "NAME=\"#{image_name}\"\n" + one_image[:template] << "PATH=\"vcenter://#{image_path}\"\n" + one_image[:template] << "TYPE=\"#{image_type}\"\n" + one_image[:template] << "PERSISTENT=\"#{opts[:persistent]}\"\n" + unless CONFIG[:delete_images] + one_image[:template] << "VCENTER_IMPORTED=\"YES\"\n" + end + one_image[:template] << "DEV_PREFIX=\"#{image_prefix}\"\n" + else + # Return the image XML if it already exists + one_image[:one] = image + end - image_path = "" + [one_image, image_name] + end - # Skip not relevant files - next if !["FloppyImageFileInfo", - "IsoImageFileInfo", - "VmDiskFileInfo"].include? image.class.to_s + def self.get_one_image_ds_by_ref_and_dc( + ref, + dc_ref, + vcenter_uuid, + pool = nil + ) + if pool.nil? 
+ pool = VCenterDriver::VIHelper + .one_pool( + OpenNebula::DatastorePool, + false + ) + if pool.respond_to?(:message) + raise "Could not get \ + OpenNebula DatastorePool: #{pool.message}" + end + end - # Get image path and name - image_path << folderpath << image.path - image_name = File.basename(image.path).reverse.sub("kdmv.","").reverse + pool.select do |e| + e['TEMPLATE/TYPE'] == 'IMAGE_DS' && + e['TEMPLATE/VCENTER_DS_REF'] == ref && + e['TEMPLATE/VCENTER_DC_REF'] == dc_ref && + e['TEMPLATE/VCENTER_INSTANCE_ID'] == vcenter_uuid + end.first rescue nil + end - # Get image's type - image_type = image.class.to_s == "VmDiskFileInfo" ? "OS" : "CDROM" + # Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file + def self.disk_or_iso?(device) + is_disk = !device.class.ancestors.index( + RbVmomi::VIM::VirtualDisk + ).nil? + is_iso = device + .backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo + is_disk || is_iso + end - # Get image's size - image_size = image.capacityKb / 1024 rescue nil - image_size = image.fileSize / 1024 / 1024 rescue nil if !image_size + def monitor + summary = self['summary'] - # Assign image prefix if known or assign default prefix - controller = image.controllerType rescue nil - if controller - disk_prefix = controller == "VirtualIDEController" ? "hd" : "sd" - else - # Get default value for disks that are not attached to any controller - disk_prefix = VCenterDriver::VIHelper.get_default("IMAGE/TEMPLATE/DEV_PREFIX") + total_mb = (summary.capacity.to_i / 1024) / 1024 + free_mb = (summary.freeSpace.to_i / 1024) / 1024 + used_mb = total_mb - free_mb + + "USED_MB=#{used_mb}\nFREE_MB=#{free_mb} \nTOTAL_MB=#{total_mb}" + end + + def self.exists_one_by_ref_dc_and_type?( + ref, + dc_ref, + vcenter_uuid, + type, + pool = nil + ) + if pool.nil? + pool = VCenterDriver::VIHelper.one_pool( + OpenNebula::DatastorePool, + false + ) + if pool.respond_to?(:message) + raise "Could not get OpenNebula \ + DatastorePool: #{pool.message}" + end + end + elements = pool.select do |e| + e['TEMPLATE/TYPE'] == type && + e['TEMPLATE/VCENTER_DS_REF'] == ref && + e['TEMPLATE/VCENTER_DC_REF'] == dc_ref && + e['TEMPLATE/VCENTER_INSTANCE_ID'] == vcenter_uuid + end + + elements.size == 1 + end + + def to_one(ds_hash, vcenter_uuid, dc_name, dc_ref) + one = '' + one << "DRIVER=\"vcenter\"\n" + one << "NAME=\"#{ds_hash[:name]}\"\n" + one << "TM_MAD=vcenter\n" + one << "VCENTER_INSTANCE_ID=\"#{vcenter_uuid}\"\n" + one << "VCENTER_DC_REF=\"#{dc_ref}\"\n" + one << "VCENTER_DC_NAME=\"#{dc_name}\"\n" + one << "VCENTER_DS_NAME=\"#{ds_hash[:simple_name]}\"\n" + one << "VCENTER_DS_REF=\"#{self['_ref']}\"\n" + one + end + + def to_one_template(ds_hash, vcenter_uuid, dc_name, dc_ref, type) + one_tmp = { + :one => to_one(ds_hash, vcenter_uuid, dc_name, dc_ref) + } + + if type == 'SYSTEM_DS' + one_tmp[:one] << "TYPE=SYSTEM_DS\n" + else + one_tmp[:one] << "DS_MAD=vcenter\n" + one_tmp[:one] << "TYPE=IMAGE_DS\n" + end + + one_tmp + end + + def create_virtual_disk(img_name, size, adapter_type, disk_type) + leading_dirs = img_name.split('/')[0..-2] + if !leading_dirs.empty? 
+ create_directory(leading_dirs.join('/')) + end + + ds_name = self['name'] + + vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec( + :adapterType => adapter_type, + :capacityKb => size.to_i*1024, + :diskType => disk_type + ) + + obtain_vdm.CreateVirtualDisk_Task( + :datacenter => obtain_dc.item, + :name => "[#{ds_name}] #{img_name}.vmdk", + :spec => vmdk_spec + ).wait_for_completion + + "#{img_name}.vmdk" + end + + def create_directory(directory) + ds_name = self['name'] + + return if self.class == VCenterDriver::StoragePod + + directory_name = "[#{ds_name}] #{directory}" + + create_directory_params = { + :name => directory_name, + :datacenter => obtain_dc.item, + :createParentDirectories => true + } + + obtain_fm.MakeDirectory(create_directory_params) rescue nil + end + + def obtain_fm + self['_connection.serviceContent.fileManager'] + end + + def obtain_vdm + self['_connection.serviceContent.virtualDiskManager'] + end + + def obtain_dc + item = @item + + until item.instance_of? RbVmomi::VIM::Datacenter + item = item.parent + if item.nil? + raise 'Could not find the parent Datacenter' + end + end + + Datacenter.new(item) + end + + end + # class Storage + + ######################################################################## + # Class StoragePod + ######################################################################## + class StoragePod < Storage + + def initialize(item, _vi_client = nil) + check_item(item, RbVmomi::VIM::StoragePod) + + @item = item + end + + # This is never cached + def self.new_from_ref(ref, vi_client) + new(RbVmomi::VIM::StoragePod.new(vi_client.vim, ref), vi_client) + end + + end + # class StoragePod + + ########################################################################## + # Class Datastore + ########################################################################## + class Datastore < Storage + + attr_accessor :one_item + + def initialize(item, vi_client = nil) + check_item(item, RbVmomi::VIM::Datastore) + @vi_client = vi_client + @item = item + @one_item = {} + end + + def delete_virtual_disk(img_name) + ds_name = self['name'] + + begin + obtain_vdm.DeleteVirtualDisk_Task( + :name => "[#{ds_name}] #{img_name}", + :datacenter => obtain_dc.item + ).wait_for_completion + rescue StandardError => e + # Ignore if file not found + if !e.message.start_with?('ManagedObjectNotFound') && + !e.message.start_with?('FileNotFound') + raise e + end + end + end + + def delete_file(img_name) + ds_name = self['name'] + + begin + obtain_fm.DeleteDatastoreFile_Task( + :name => "[#{ds_name}] #{img_name}", + :datacenter => obtain_dc.item + ).wait_for_completion + rescue StandardError => e + # Ignore if file not found + if !e.message.start_with?('ManagedObjectNotFound') && + !e.message.start_with?('FileNotFound') + raise e + end + end + end + + # Copy a VirtualDisk + def copy_virtual_disk(src_path, target_ds, target_path, new_size = nil) + source_ds_name = self['name'] + target_ds_name = target_ds['name'] + + leading_dirs = target_path.split('/')[0..-2] + if !leading_dirs.empty? 
+ if source_ds_name == target_ds_name + create_directory(leading_dirs.join('/')) + else + target_ds.create_directory(leading_dirs.join('/')) + end + end + + copy_params = { + :sourceName => "[#{source_ds_name}] #{src_path}", + :sourceDatacenter => obtain_dc.item + } + + if File.extname(src_path) == '.vmdk' + copy_params[:destName] = "[#{target_ds_name}] #{target_path}" + obtain_vdm.CopyVirtualDisk_Task(copy_params).wait_for_completion + + if new_size + resize_spec = { + :name => "[#{target_ds_name}] #{target_path}", + :datacenter => target_ds.obtain_dc.item, + :newCapacityKb => new_size, + :eagerZero => false + } + + obtain_vdm.ExtendVirtualDisk_Task( + resize_spec + ).wait_for_completion + end + else + copy_params[:destinationName] = + "[#{target_ds_name}] #{target_path}" + obtain_fm.CopyDatastoreFile_Task( + copy_params + ).wait_for_completion + end + + target_path + end + + def move_virtual_disk(disk, dest_path, dest_dsid, vi_client = nil) + vi_client ||= @vi_client + + target_ds = VCenterDriver::VIHelper.one_item( + OpenNebula::Datastore, + dest_dsid, + false + ) + target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF'] + target_ds_vc = VCenterDriver::Datastore + .new_from_ref( + target_ds_ref, + vi_client + ) + dest_name = target_ds_vc['name'] + + target_ds_vc.create_directory(File.dirname(dest_path)) + + dpath_ds = "[#{dest_name}] #{dest_path}" + orig_path = "[#{self['name']}] #{disk.path}" + + move_params = { + :sourceName => orig_path, + :sourceDatacenter => obtain_dc.item, + :destName => dpath_ds, + :force => true + } + + obtain_vdm.MoveVirtualDisk_Task(move_params).wait_for_completion + end + + def rm_directory(directory) + ds_name = self['name'] + + rm_directory_params = { + :name => "[#{ds_name}] #{directory}", + :datacenter => obtain_dc.item + } + + obtain_fm.DeleteDatastoreFile_Task( + rm_directory_params + ).wait_for_completion + end + + def dir_empty?(path) + ds_name = self['name'] + + spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new + + search_params = { + 'datastorePath' => "[#{ds_name}] #{path}", + 'searchSpec' => spec + } + + begin + search_task = self['browser'] + .SearchDatastoreSubFolders_Task(search_params) + search_task.wait_for_completion + !search_task.info.result.nil? && + search_task.info.result.length == 1 && + search_task.info.result.first.file.empty? + rescue StandardError + false + end + end + + def upload_file(source_path, target_path) + @item.upload(target_path, source_path) + end + + def download_file(source_path, target_path) + @item.download(source_path, target_path) + end + + # Get file size for image handling + def stat(img_str) + ds_name = self['name'] + img_path = File.dirname img_str + img_name = File.basename img_str + + # Create Search Spec + search_params = get_search_params(ds_name, img_path, img_name) + + # Perform search task and return results + begin + search_task = self['browser'] + .SearchDatastoreSubFolders_Task(search_params) + + search_task.wait_for_completion + + # Try to get vmdk capacity as seen by VM + size = search_task + .info.result[0].file[0].capacityKb / 1024 rescue nil + + # Try to get file size + size ||= search_task + .info + .result[0].file[0].fileSize / 1024 / 1024 rescue nil + + raise 'Could not get file size or capacity' if size.nil? + + size + rescue StandardError + raise 'Could not find file.' 
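For orientation, a hedged usage sketch of the Datastore helpers above. The ref and paths are hypothetical and a live vi_client with vCenter credentials is required, so it is shown inert:

#   ds = VCenterDriver::Datastore.new_from_ref('datastore-123', vi_client)
#   ds.stat('one/42/disk.0.vmdk')                      # => size in MB
#   ds.create_directory('one/42')                      # no-op if it exists
#   ds.rm_directory('one/42') if ds.dir_empty?('one/42')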
+ end + end + + def get_search_params(ds_name, img_path = nil, img_name = nil) + spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new + + vmdisk_query = RbVmomi::VIM::VmDiskFileQuery.new + vmdisk_query.details = RbVmomi::VIM::VmDiskFileQueryFlags( + :diskType => true, + :capacityKb => true, + :hardwareVersion => true, + :controllerType => true + ) + + spec.query = [vmdisk_query, + RbVmomi::VIM::IsoImageFileQuery.new] + spec.details = RbVmomi::VIM::FileQueryFlags( + :fileOwner => true, + :fileSize => true, + :fileType => true, + :modification => true + ) + + if img_name.nil? + spec.matchPattern = [] + else + spec.matchPattern = [img_name] + end + + datastore_path = "[#{ds_name}]" + datastore_path << " #{img_path}" unless img_path.nil? + + { 'datastorePath' => datastore_path, + 'searchSpec' => spec } + end + + def dc_path + dc = obtain_dc + p = dc.item.parent + path = [dc.item.name] + while p.instance_of? RbVmomi::VIM::Folder + path.unshift(p.name) + p = p.parent + end + path.delete_at(0) # The first folder is the root "Datacenters" + path.join('/') + end + + def generate_file_url(path) + if self['_connection.http.use_ssl?'] + protocol = 'https://' + else + protocol = 'http://' + end + hostname = self['_connection.http.address'] + port = self['_connection.http.port'] + dcpath = dc_path + url_path = "folder/#{path}?dcPath=#{dcpath}&dsName=#{self['name']}" + + # This creates the vcenter file URL + # for uploading or downloading files + # e.g: + url = "#{protocol}#{hostname}:#{port}/#{url_path}" + URI.escape(url) # rubocop:disable Lint/UriEscapeUnescape + end + + def download_to_stdout(remote_path) + url = generate_file_url(remote_path) + pid = spawn(CURLBIN, + '-k', '--noproxy', '*', '-f', + '-b', self['_connection.cookie'], + url) + + Process.waitpid(pid, 0) + raise 'download failed' unless $?.success? # rubocop:disable Style/SpecialGlobalVars + end + + def descriptor?(remote_path) + url = generate_file_url(remote_path) + + rout, wout = IO.pipe + pid = spawn(CURLBIN, + '-I', '-k', '--noproxy', '*', '-f', + '-b', self['_connection.cookie'], + url, + :out => wout, + :err => '/dev/null') + + Process.waitpid(pid, 0) + raise 'read image header failed' unless $?.success? # rubocop:disable Style/SpecialGlobalVars + + wout.close + size = rout.readlines.select do |l| + l.start_with?('Content-Length') + end[0].sub('Content-Length: ', '') + rout.close + size.chomp.to_i < 4096 # If <4k, then is a descriptor + end + + def get_text_file(remote_path) + url = generate_file_url(remote_path) + + rout, wout = IO.pipe + pid = spawn CURLBIN, '-k', '--noproxy', '*', '-f', + '-b', self['_connection.cookie'], + url, + :out => wout, + :err => '/dev/null' + + Process.waitpid(pid, 0) + raise 'get text file failed' unless $?.success? 
# rubocop:disable Style/SpecialGlobalVars + + wout.close + output = rout.readlines + rout.close + output + end + + def all_images + images = {} + imid = -1 + ds_id = nil + ds_name = self['name'] + + # We need OpenNebula Images and Datastores pools + ipool = VCenterDriver::VIHelper + .one_pool(OpenNebula::ImagePool, false) + if ipool.respond_to?(:message) + raise "Could not get OpenNebula ImagePool: #{pool.message}" + end + + dpool = VCenterDriver::VIHelper + .one_pool(OpenNebula::DatastorePool, false) + if dpool.respond_to?(:message) + raise "Could not get OpenNebula DatastorePool: #{pool.message}" + end + + ds_id = @one_item['ID'] + + begin + # Create Search Spec + search_params = get_search_params(ds_name) + + # Perform search task and return results + search_task = self['browser'] + .SearchDatastoreSubFolders_Task(search_params) + search_task.wait_for_completion + + # Loop through search results + search_task.info.result.each do |result| + # Remove [datastore] from file path + folderpath = '' + size = result.folderPath.size + if result.folderPath[-1] != ']' + if result.folderPath[-1] != '/' + result.folderPath[size] = '/' + end + folderpath = result + .folderPath.sub(/^\[#{ds_name}\] /, '') end - # Generate a crypto hash - # this hash is used to avoid name collisions - key = "#{image_name}#{ds_name}#{image_path}" - import_name = VCenterDriver::VIHelper.one_name(OpenNebula::ImagePool, image_name, key, ipool) + # Loop through images in result.file + result.file.each do |image| + image_path = '' - # Set template - one_image = "NAME=\"#{import_name}\"\n" - one_image << "PATH=\"vcenter://#{image_path}\"\n" - one_image << "PERSISTENT=\"NO\"\n" - one_image << "TYPE=\"#{image_type}\"\n" - one_image << "VCENTER_IMPORTED=\"YES\"\n" unless CONFIG[:delete_images] - one_image << "DEV_PREFIX=\"#{disk_prefix}\"\n" + # Skip not relevant files + next unless %w[FloppyImageFileInfo + IsoImageFileInfo + VmDiskFileInfo].include? 
image.class.to_s - # Check image hasn't already been imported - image_found = VCenterDriver::VIHelper.find_image_by("SOURCE", OpenNebula::ImagePool, - image_path, - ds_id, - ipool) + # Get image path and name + image_path << folderpath << image.path + image_name = File.basename(image.path) + .reverse.sub('kdmv.', '').reverse + + # Get image's type + if image.class.to_s == 'VmDiskFileInfo' + image_type = 'OS' + else + image_type = 'CDROM' + end + + # Get image's size + image_size = image.capacityKb / 1024 rescue nil + image_size ||= image.fileSize / 1024 / 1024 rescue nil + + # Assign image prefix if known or assign default prefix + controller = image.controllerType rescue nil + if controller + if controller == 'VirtualIDEController' + disk_prefix = 'hd' + else + disk_prefix = 'sd' + end + else + # Get default value for disks that + # are not attached to any controller + disk_prefix = VCenterDriver::VIHelper + .get_default( + 'IMAGE/TEMPLATE/DEV_PREFIX' + ) + end + + # Generate a crypto hash + # this hash is used to avoid name collisions + key = "#{image_name}#{ds_name}#{image_path}" + import_name = VCenterDriver::VIHelper + .one_name( + OpenNebula::ImagePool, + image_name, + key, + ipool + ) + + # Set template + one_image = "NAME=\"#{import_name}\"\n" + one_image << "PATH=\"vcenter://#{image_path}\"\n" + one_image << "PERSISTENT=\"NO\"\n" + one_image << "TYPE=\"#{image_type}\"\n" + unless CONFIG[:delete_images] + one_image << "VCENTER_IMPORTED=\"YES\"\n" + end + one_image << "DEV_PREFIX=\"#{disk_prefix}\"\n" + + # Check image hasn't already been imported + image_found = VCenterDriver::VIHelper + .find_image_by( + 'SOURCE', + OpenNebula::ImagePool, + image_path, + ds_id, + ipool + ) + + next if image_found - if !image_found # Add template to image array - images[import_name] = { + images[import_name] = { :import_id => imid+=1, :name => import_name, :ref => import_name, @@ -677,177 +806,212 @@ class Datastore < Storage } end end + rescue StandardError => e + raise "Could not find images. \ + Reason: #{e.message}/#{e.backtrace}" end + vname = @vi_client.vc_name || '' - rescue StandardError => e - raise "Could not find images. Reason: #{e.message}/#{e.backtrace}" + { vname => images } end - vname = @vi_client.vc_name || "" - { vname => images } - end + # This is never cached + def self.new_from_ref(ref, vi_client) + new(RbVmomi::VIM::Datastore.new(vi_client.vim, ref), vi_client) + end - # This is never cached - def self.new_from_ref(ref, vi_client) - self.new(RbVmomi::VIM::Datastore.new(vi_client.vim, ref), vi_client) - end + # detach disk from vCenter vm if possible, destroy the disk on FS + def self.detach_and_destroy(disk, vm, disk_id, prev_ds_ref, vi_client) + # it's not a CDROM (CLONE=NO) + is_cd = !(disk['CLONE'].nil? || disk['CLONE'] == 'YES') - # detach disk from vCenter vm if possible, destroy the disk on FS - def self.detach_and_destroy(disk, vm, disk_id, prev_ds_ref, vi_client) - # it's not a CDROM (CLONE=NO) - is_cd = !(disk["CLONE"].nil? || disk["CLONE"] == "YES") + begin + # Detach disk if possible (VM is reconfigured) + # and gather vCenter info + # Needed for poweroff machines too + ds_ref, img_path = vm.detach_disk(disk) - begin - # Detach disk if possible (VM is reconfigured) and gather vCenter info - # Needed for poweroff machines too - ds_ref, img_path = vm.detach_disk(disk) + return if is_cd - return if is_cd + # Disk could't be detached, use OpenNebula info + if !(ds_ref && img_path && !img_path.empty?) 
+ img_path = vm.disk_real_path(disk, disk_id) + ds_ref = prev_ds_ref + end - # Disk could't be detached, use OpenNebula info - if !(ds_ref && img_path && !img_path.empty?) - img_path = vm.disk_real_path(disk, disk_id) - ds_ref = prev_ds_ref - end + # If disk was already detached we have no way to remove it + ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client) - # If disk was already detached we have no way to remove it - ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client) + search_params = ds.get_search_params(ds['name'], + File.dirname(img_path), + File.basename(img_path)) + # Perform search task and return results + search_task = ds['browser'] + .SearchDatastoreSubFolders_Task(search_params) + search_task.wait_for_completion - search_params = ds.get_search_params(ds['name'], - File.dirname(img_path), - File.basename(img_path)) - - # Perform search task and return results - search_task = ds['browser'].SearchDatastoreSubFolders_Task(search_params) - search_task.wait_for_completion - - ds.delete_virtual_disk(img_path) - img_dir = File.dirname(img_path) - ds.rm_directory(img_dir) if ds.dir_empty?(img_dir) - rescue StandardError => e - if !e.message.start_with?('FileNotFound') - raise e.message # Ignore FileNotFound + ds.delete_virtual_disk(img_path) + img_dir = File.dirname(img_path) + ds.rm_directory(img_dir) if ds.dir_empty?(img_dir) + rescue StandardError => e + if !e.message.start_with?('FileNotFound') + raise e.message # Ignore FileNotFound + end end end + end -end # class Datastore + # class Datastore -class DsImporter < VCenterDriver::VcImporter + ########################################################################## + # Class DsImporter + ########################################################################## + class DsImporter < VCenterDriver::VcImporter - def initialize(one_client, vi_client) - super(one_client, vi_client) - @one_class = OpenNebula::Datastore - end - - def get_list(args = {}) - dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client) - - # one pool creation - dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false) - if dpool.respond_to?(:message) - raise "Could not get OpenNebula DatastorePool: #{dpool.message}" + def initialize(one_client, vi_client) + super(one_client, vi_client) + @one_class = OpenNebula::Datastore end - # OpenNebula's HostPool - hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false) - if hpool.respond_to?(:message) - raise "Could not get OpenNebula HostPool: #{hpool.message}" + def get_list(_args = {}) + dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client) + + # one pool creation + dpool = VCenterDriver::VIHelper + .one_pool( + OpenNebula::DatastorePool, + false + ) + if dpool.respond_to?(:message) + raise "Could not get OpenNebula DatastorePool: #{dpool.message}" + end + + # OpenNebula's HostPool + hpool = VCenterDriver::VIHelper + .one_pool( + OpenNebula::HostPool, + false + ) + if hpool.respond_to?(:message) + raise "Could not get OpenNebula HostPool: #{hpool.message}" + end + + rs = dc_folder + .get_unimported_datastores( + dpool, + @vi_client.vc_name, + hpool + ) + @list = rs end - rs = dc_folder.get_unimported_datastores(dpool, @vi_client.vc_name, hpool) - @list = rs - end + def add_cluster(cid, eid) + one_cluster = @info[:clusters][cid] + raise 'no cluster defined' unless one_cluster - def add_cluster(cid, eid) - one_cluster = @info[:clusters][cid] - raise "no cluster defined" unless one_cluster + one_cluster.adddatastore(eid) + end - rc = 
one_cluster.adddatastore(eid)
-    end
+            one_cluster.adddatastore(eid)
+        end

+        def remove_default(id)
+            cid = 0
+            @info[:clusters][cid] ||= VCenterDriver::VIHelper
+                                      .one_item(
+                                          OpenNebula::Cluster,
+                                          cid.to_s,
+                                          false
+                                      )
+            @info[:clusters][cid].deldatastore(id.to_i)
+        end

-    def remove_default(id)
-        cid = 0
-        @info[:clusters][cid] ||= VCenterDriver::VIHelper.one_item(OpenNebula::Cluster, cid.to_s, false)
-        @info[:clusters][cid].deldatastore(id.to_i)
-    end
-
-    def import(selected)
-        inner = ->(object, auth) {
-            one = ""
+        def import(selected)
+            inner = lambda {|object, auth|
+                one = ''
                one << "VCENTER_HOST=\"#{auth[:host]}\"\n"
-            rc = object.update(one, true)
-        }
+                object.update(one, true)
+            }

-        opts = @info[selected[:ref]][:opts]
+            opts = @info[selected[:ref]][:opts]

-        # Datastore info comes in a pair (SYS, IMG)
-        pair = selected[:ds]
-        clusters = selected[:cluster]
-        clusters = opts["selected_clusters"].each.map(&:to_i) if opts && opts["selected_clusters"]
-
-        res = {id: [], name: selected[:simple_name]}
-        @info[:rollback] = []
-        pair.each do |ds|
-            create(ds[:one]) do |one_object, id|
-                res[:id] << id
-
-                add_clusters(id, clusters)
-
-                inner.call(one_object, @vi_client.get_host_credentials)
+            # Datastore info comes in a pair (SYS, IMG)
+            pair = selected[:ds]
+            clusters = selected[:cluster]
+            if opts && opts['selected_clusters']
+                clusters = opts['selected_clusters'].each.map(&:to_i)
            end
+
+            res = { :id => [], :name => selected[:simple_name] }
+            @info[:rollback] = []
+            pair.each do |ds|
+                create(ds[:one]) do |one_object, id|
+                    res[:id] << id
+
+                    add_clusters(id, clusters)
+
+                    inner.call(one_object, @vi_client.host_credentials)
+                end
+            end
+
+            res
        end
-        return res
-    end
-end
-
-class ImageImporter < VCenterDriver::VcImporter
-
-    def initialize(one_client, vi_client)
-        super(one_client, vi_client)
-        @one_class = OpenNebula::Image
    end

-    def get_list(args = {})
+    ##########################################################################
+    # Class ImageImporter
+    ##########################################################################
+    class ImageImporter < VCenterDriver::VcImporter

-        ds_ref = args[:datastore][:ds_ref]
-        one_ds = args[:datastore][:one_item]
-
-
-        raise "can't retrieve ref info from openNebula datastore" unless ds_ref
-
-        ds = VCenterDriver::Datastore.new_from_ref(ds_ref, @vi_client).tap do |spawn|
-            spawn.one_item = one_ds
+        def initialize(one_client, vi_client)
+            super(one_client, vi_client)
+            @one_class = OpenNebula::Image
        end

-        vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
-        one_ds_instance_id = one_ds['TEMPLATE/VCENTER_INSTANCE_ID']
+        def get_list(args = {})
+            ds_ref = args[:datastore][:ds_ref]
+            one_ds = args[:datastore][:one_item]

-        if one_ds_instance_id != vc_uuid
-            raise "Datastore is not in the same vCenter instance provided in credentials"
+            unless ds_ref
+                raise "can't retrieve ref info from OpenNebula datastore"
+            end
+
+            datastore = VCenterDriver::Datastore
+                        .new_from_ref(ds_ref, @vi_client)
+
+            ds = datastore.tap do |spawn|
+                spawn.one_item = one_ds
+            end
+
+            vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
+            one_ds_instance_id = one_ds['TEMPLATE/VCENTER_INSTANCE_ID']
+
+            if one_ds_instance_id != vc_uuid
+                raise 'Datastore is not in the same vCenter ' \
+                      'instance provided in credentials'
+            end
+
+            @list = ds.all_images
        end

-        @list = ds.get_images
-    end
+        def import(selected)
+            resource = VCenterDriver::VIHelper.new_one_item(@one_class)
+            message = 'Error creating the OpenNebula resource'
+            info = selected[:one]
+            dsid = selected[:dsid].to_i
+            name = selected[:name]

-    def 
import(selected) - resource = VCenterDriver::VIHelper.new_one_item(@one_class) - message = "Error creating the OpenNebula resource" - info = selected[:one] - dsid = selected[:dsid].to_i - name = selected[:name] + rc = resource.allocate(info, dsid, false) + VCenterDriver::VIHelper.check_error(rc, message) - rc = resource.allocate(info, dsid, false) - VCenterDriver::VIHelper.check_error(rc, message) + resource.info + id = resource['ID'] + @rollback << Raction.new(resource, :delete) - resource.info - id = resource['ID'] - @rollback << Raction.new(resource, :delete) + { :id => [id], :name => name } + end - return {id: [id], name: name} end end -end # module VCenterDriver +# module VCenterDriver diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb b/src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb index 8d797bf91a..d1817b6bf8 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb @@ -17,235 +17,339 @@ require 'fileutils' require 'tempfile' +############################################################################## +# Module VCenterDriver +############################################################################## module VCenterDriver -class FileHelper + ########################################################################## + # Class FileHelper + ########################################################################## + class FileHelper - def self.get_img_name(disk, vm_id, vm_name, instantiate_as_persistent=false) - if disk["PERSISTENT"] == "YES" || disk["TYPE"] == "CDROM" - return disk["SOURCE"] - else - disk_id = disk["DISK_ID"] - if disk["SOURCE"] - if instantiate_as_persistent && - disk["OPENNEBULA_MANAGED"] && - disk["OPENNEBULA_MANAGED"].upcase == "NO" - return disk["SOURCE"] # Treat this disk as if was persistent - else - image_name = disk["SOURCE"].split(".").first - return "#{image_name}-#{vm_id}-#{disk_id}.vmdk" - end + def self.get_img_name( + disk, + vm_id, + _vm_name, + instantiate_as_persistent = false + ) + if disk['PERSISTENT'] == 'YES' || disk['TYPE'] == 'CDROM' + disk['SOURCE'] else - ds_volatile_dir = disk["VCENTER_DS_VOLATILE_DIR"] || "one-volatile" - return "#{ds_volatile_dir}/#{vm_id}/one-#{vm_id}-#{disk_id}.vmdk" - end - end - end - - # REMOVE: no need to change... - def self.get_img_name_from_path(path, vm_id, disk_id) - # Note: This will probably fail if the basename contains '.' - return "#{path.split(".").first}-#{vm_id}-#{disk_id}.vmdk" - end - - def self.is_remote_or_needs_unpack?(file) - return !is_remote?(file).nil? || needs_unpack?(file) - end - - def self.is_remote?(file) - file.match(%r{^https?://}) - end - - def self.is_vmdk?(file) - type = %x{file #{file}} - - type.include? "VMware" - end - - def self.is_iso?(file) - type = %x{file #{file}} - - type.include? 
"ISO" - end - - def self.get_type(file) - type = %x{file -b --mime-type #{file}} - if $?.exitstatus != 0 - STDERR.puts "Can not read file #{file}" - exit(-1) - end - type.strip - end - - def self.needs_unpack?(file_path) - type = get_type(file_path) - type.gsub!(%r{^application/(x-)?}, '') - return %w{bzip2 gzip tar}.include?(type) - end - - def self.vcenter_file_info(file_path) - if File.directory?(file_path) - files = Dir["#{file_path}/*.vmdk"] - found = false - count = 0 - last = nil - - files.each do |f| - if get_type(f).strip == "text/plain" - file_path = f - found = true - break + disk_id = disk['DISK_ID'] + if disk['SOURCE'] + if instantiate_as_persistent && + disk['OPENNEBULA_MANAGED'] && + disk['OPENNEBULA_MANAGED'].upcase == 'NO' + disk['SOURCE'] # Treat this disk as if was persistent + else + image_name = disk['SOURCE'].split('.').first + "#{image_name}-#{vm_id}-#{disk_id}.vmdk" + end else - count += 1 - last = f - end - end - - if !found - if count == 1 - file_path = last - found = true - else - STDERR.puts "Could not find vmdk" - exit(-1) + ds_volatile_dir = + disk['VCENTER_DS_VOLATILE_DIR'] || 'one-volatile' + "#{ds_volatile_dir}/#{vm_id}/one-#{vm_id}-#{disk_id}.vmdk" end end end - case get_type(file_path).strip - when "application/octet-stream" - return { - :type => :standalone, - :file => file_path, - :dir => File.dirname(file_path) - } - when "application/x-iso9660-image" - return { - :type => :standalone, - :file => file_path, - :dir => File.dirname(file_path), - :extension => '.iso' - } - when "text/plain" - info = { - :type => :flat, - :file => file_path, - :dir => File.dirname(file_path) - } + # REMOVE: no need to change... + def self.get_img_name_from_path(path, vm_id, disk_id) + # Note: This will probably fail if the basename contains '.' + "#{path.split('.').first}-#{vm_id}-#{disk_id}.vmdk" + end - files_list = [] - descriptor = File.read(file_path).split("\n") - flat_files = descriptor.select {|l| l.start_with?("RW")} + def self.remote_or_needs_unpack?(file) + !remote?(file).nil? || needs_unpack?(file) + end - flat_files.each do |f| - files_list << info[:dir] + "/" + - f.split(" ")[3].chomp.chomp('"').reverse.chomp('"').reverse + def self.remote?(file) + file.match(%r{^https?://}) + end + + def self.vmdk?(file) + type = `file #{file}` + + type.include? 'VMware' + end + + def self.iso?(file) + type = `file #{file}` + + type.include? 
'ISO' + end + + def self.get_type(file) + type = `file -b --mime-type #{file}` + if $?.exitstatus != 0 # rubocop:disable Style/SpecialGlobalVars + STDERR.puts "Can not read file #{file}" + exit(-1) + end + type.strip + end + + def self.needs_unpack?(file_path) + type = get_type(file_path) + type.gsub!(%r{^application/(x-)?}, '') + %w[bzip2 gzip tar].include?(type) + end + + def self.vcenter_file_info(file_path) + if File.directory?(file_path) + files = Dir["#{file_path}/*.vmdk"] + found = false + count = 0 + last = nil + + files.each do |f| + if get_type(f).strip == 'text/plain' + file_path = f + found = true + break + else + count += 1 + last = f + end + end + + if !found + if count == 1 + file_path = last + found = true + else + STDERR.puts 'Could not find vmdk' + exit(-1) + end + end end - info[:flat_files] = files_list + case get_type(file_path).strip + when 'application/octet-stream' + { + :type => :standalone, + :file => file_path, + :dir => File.dirname(file_path) + } + when 'application/x-iso9660-image' + { + :type => :standalone, + :file => file_path, + :dir => File.dirname(file_path), + :extension => '.iso' + } + when 'text/plain' + info = { + :type => :flat, + :file => file_path, + :dir => File.dirname(file_path) + } - return info - else - STDERR.puts "Unrecognized file type" - exit(-1) - end - end + files_list = [] + descriptor = File.read(file_path).split("\n") + flat_files = descriptor.select {|l| l.start_with?('RW') } - def self.escape_path(path) - return path.gsub(" ", "%20") - end + flat_files.each do |f| + files_list << + info[:dir] + + '/' + + f + .split(' ')[3] + .chomp + .chomp('"') + .reverse + .chomp('"') + .reverse + end - def self.unescape_path(path) - return path.gsub("%20", " ") - end + info[:flat_files] = files_list - # Recursively downloads vmdk related files and returns filenames - def self.get_all_filenames_in_descriptor(descriptor_url, ds) - descriptor_filename = File.basename descriptor_url.path - # Build array of files to download - files_to_download = [descriptor_filename] - image_source = descriptor_url.host + descriptor_url.path - descriptor_content = ds.get_text_file image_source - flat_files = descriptor_content.select{|l| l.start_with?("RW")} - flat_files.each do |file| - # Get the filename from lines of type - # RW 2048000 VMFS "filename-flat.vdmdk" - file_to_download = file.split(" ")[3][1..-2] - files_to_download << file_to_download - image_path = File.dirname(descriptor_url.host+descriptor_url.path) - if ds.is_descriptor?(image_path + "/" + file_to_download) - files_to_download << download_all_filenames_in_descriptor(image_path + "/" + file_to_download) + info + else + STDERR.puts 'Unrecognized file type' + exit(-1) end end - return files_to_download - end - - - def self.download_vmdks(files_to_download, url_prefix, temp_folder, ds) - # Download files - url_prefix = url_prefix + "/" - - VCenterDriver::VIClient.in_silence do - files_to_download.each{|file| - ds.download_file(url_prefix + file, temp_folder + file) - } + def self.escape_path(path) + path.gsub(' ', '%20') end - end - # Receives a VMDK descriptor or file, downloads all - # related files, creates a tar.gz and dumps it in stdout - def self.dump_vmdk_tar_gz(vcenter_url, ds) - image_source = vcenter_url.host + vcenter_url.path - if ds.is_descriptor?(image_source) - files_to_download = self.get_all_filenames_in_descriptor(vcenter_url, ds) + def self.unescape_path(path) + path.gsub('%20', ' ') + end - descriptor_name = File.basename vcenter_url.path - temp_folder = VAR_LOCATION + 
"/vcenter/" + descriptor_name + "/" - FileUtils.mkdir_p(temp_folder) if !File.directory?(temp_folder) - - image_path = File.dirname(vcenter_url.host+vcenter_url.path) - self.download_vmdks(files_to_download, image_path, temp_folder, ds) + # Recursively downloads vmdk related files and returns filenames + def self.get_all_filenames_in_descriptor(descriptor_url, ds) + descriptor_filename = File.basename descriptor_url.path + # Build array of files to download + files_to_download = [descriptor_filename] + image_source = descriptor_url.host + descriptor_url.path + descriptor_content = ds.get_text_file image_source + flat_files = descriptor_content.select {|l| l.start_with?('RW') } + flat_files.each do |file| + # Get the filename from lines of type + # RW 2048000 VMFS "filename-flat.vdmdk" + file_to_download = file.split(' ')[3][1..-2] + files_to_download << file_to_download + image_path = + File + .dirname( + descriptor_url.host+descriptor_url.path + ) + next unless ds.descriptor?(image_path + '/' + file_to_download) - # Create tar.gz - rs = system("cd #{temp_folder} && tar czf #{descriptor_name}.tar.gz #{files_to_download.join(' ')} > /dev/null 2>&1") - (FileUtils.rm_rf(temp_folder) ; raise "Error creating tar file for #{descriptor_name}") unless rs + files_to_download << + download_all_filenames_in_descriptor( + image_path + '/' + file_to_download + ) + end - # Cat file to stdout - rs = system("cat #{temp_folder + descriptor_name}.tar.gz") - (FileUtils.rm_rf(temp_folder) ; raise "Error reading tar for #{descriptor_name}") unless rs + files_to_download + end + + def self.download_vmdks(files_to_download, url_prefix, temp_folder, ds) + # Download files + url_prefix += '/' + + VCenterDriver::VIClient.in_silence do + files_to_download.each do |file| + ds.download_file(url_prefix + file, temp_folder + file) + end + end + end + + # Receives a VMDK descriptor or file, downloads all + # related files, creates a tar.gz and dumps it in stdout + def self.dump_vmdk_tar_gz(vcenter_url, ds) + image_source = vcenter_url.host + vcenter_url.path + if ds.descriptor?(image_source) + files_to_download = + get_all_filenames_in_descriptor( + vcenter_url, + ds + ) - # Delete tar.gz - rs = system("cd #{temp_folder} && rm #{descriptor_name}.tar.gz #{files_to_download.join(' ')}") - (FileUtils.rm_rf(temp_folder) ; raise "Error removing tar for #{descriptor_name}") unless rs - else - # Setting "." 
as the source will read from the stdin - VCenterDriver::VIClient.in_stderr_silence do descriptor_name = File.basename vcenter_url.path - file_to_download = [vcenter_url.path] - temp_folder = VAR_LOCATION + "/vcenter/" + descriptor_name + "/" + temp_folder = VAR_LOCATION + '/vcenter/' + descriptor_name + '/' + unless File.directory?(temp_folder) + FileUtils + .mkdir_p( + temp_folder + ) + end - FileUtils.mkdir_p(temp_folder + File.dirname(vcenter_url.path) + "/") if !File.directory?(temp_folder + File.dirname(vcenter_url.path) + "/") - - self.download_vmdks(file_to_download, vcenter_url.host, temp_folder, ds) - - temp_folder = temp_folder + File.dirname(vcenter_url.path) + image_path = File.dirname(vcenter_url.host+vcenter_url.path) + download_vmdks(files_to_download, image_path, temp_folder, ds) # Create tar.gz - rs = system("cd #{temp_folder} && tar czf #{descriptor_name}.tar.gz #{descriptor_name} > /dev/null 2>&1") - (FileUtils.rm_rf(temp_folder) ; raise "Error creating tar file for #{descriptor_name}") unless rs + rs = system( + "cd #{temp_folder} \&& tar czf #{descriptor_name}.tar.gz \ + #{files_to_download.join(' ')} > /dev/null 2>&1" + ) + unless rs + FileUtils.rm_rf temp_folder + raise "Error creating tar file for #{descriptor_name}" + end # Cat file to stdout - rs = system("cat #{temp_folder + "/" + descriptor_name}.tar.gz") - (FileUtils.rm_rf(temp_folder) ; raise "Error reading tar for #{descriptor_name}") unless rs + rs = system("cat #{temp_folder + descriptor_name}.tar.gz") + unless rs + FileUtils.rm_rf temp_folder + raise "Error reading tar for #{descriptor_name}" + end # Delete tar.gz - rs = system("cd #{temp_folder} && rm #{descriptor_name}.tar.gz #{descriptor_name}") - (FileUtils.rm_rf(temp_folder) ; raise "Error removing tar for #{descriptor_name}") unless rs + rs = system( + "cd #{temp_folder} \ + && rm #{descriptor_name}.tar.gz #{ + files_to_download + .join(' ')}" + ) + unless rs + FileUtils.rm_rf temp_folder + raise "Error removing tar for #{descriptor_name}" + end + else + # Setting "." 
as the source will read from the stdin + VCenterDriver::VIClient.in_stderr_silence do + descriptor_name = File.basename vcenter_url.path + file_to_download = [vcenter_url.path] + temp_folder = + VAR_LOCATION + '/vcenter/' + descriptor_name + '/' + + unless File + .directory?( + temp_folder + File + .dirname( + vcenter_url + .path + ) + '/' + ) + FileUtils + .mkdir_p(temp_folder + File + .dirname( + vcenter_url + .path + ) + '/') + end + + download_vmdks( + file_to_download, + vcenter_url.host, + temp_folder, + ds + ) + + temp_folder += File.dirname(vcenter_url.path) + + # Create tar.gz + rs = system( + "cd #{temp_folder} && tar czf #{descriptor_name}.tar.gz\ + #{descriptor_name} > /dev/null 2>&1" + ) + unless rs + ( + FileUtils + .rm_rf( + temp_folder + ) + raise "Error creating tar \ + file for #{descriptor_name}") + end + + # Cat file to stdout + rs = system( + "cat #{temp_folder + '/' + descriptor_name}.tar.gz" + ) + unless rs + ( + FileUtils + .rm_rf( + temp_folder + ) + raise "Error reading tar for #{descriptor_name}") + end # rubocop:disable Style/Semicolon + + # Delete tar.gz + rs = system( + "cd #{temp_folder} \ + && rm #{descriptor_name}.tar.gz #{descriptor_name}" + ) + unless rs + ( + FileUtils + .rm_rf( + temp_folder + ) + raise "Error \ removing tar for #{descriptor_name}") + end # rubocop:disable Style/Semicolon + end end end + end + # class FileHelper -end # class FileHelper - -end # module VCenterDriver +end +# module VCenterDriver diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb index 96aee4fc5f..6a3a0ed6d0 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb @@ -14,1108 +14,1378 @@ # limitations under the License. # #--------------------------------------------------------------------------- # +############################################################################## +# Module VCenterDriver +############################################################################## module VCenterDriver require 'json' require 'nsx_driver' -class HostFolder - attr_accessor :item, :items + ########################################################################## + # Class HostFolder + ########################################################################## + class HostFolder - def initialize(item) - @item = item - @items = {} - end + attr_accessor :item, :items - def fetch_clusters! - VIClient.get_entities(@item, 'ClusterComputeResource').each do |item| - item_name = item._ref - @items[item_name.to_sym] = ClusterComputeResource.new(item) - end - end - - def get_cluster(ref) - if !@items[ref.to_sym] - rbvmomi_dc = RbVmomi::VIM::ClusterComputeResource.new(@item._connection, ref) - @items[ref.to_sym] = ClusterComputeResource.new(rbvmomi_dc) + def initialize(item) + @item = item + @items = {} end - @items[ref.to_sym] - end -end # class HostFolder - -class ClusterComputeResource - attr_accessor :item - attr_accessor :rp_list - - include Memoize - - def initialize(item, vi_client=nil) - @item = item - @vi_client = vi_client - @rp_list - end - - def fetch_resource_pools(rp, rp_array = []) - rp_array << rp - - rp.resourcePool.each do |child_rp| - fetch_resource_pools(child_rp, rp_array) - end - - rp_array - end - - def resource_pools - if @resource_pools.nil? 
-            @resource_pools = fetch_resource_pools(@item.resourcePool)
-        end
-
-        @resource_pools
-    end
-
-    def get_resource_pool_list(rp = @item.resourcePool, parent_prefix = "", rp_array = [])
-        current_rp = ""
-
-        if !parent_prefix.empty?
-            current_rp << parent_prefix
-            current_rp << "/"
-        end
-
-        resource_pool, name = rp.collect("resourcePool","name")
-        current_rp << name if name != "Resources"
-
-        resource_pool.each do |child_rp|
-            get_resource_pool_list(child_rp, current_rp, rp_array)
-        end
-
-        rp_info = {}
-        rp_info[:name] = current_rp
-        rp_info[:ref]  = rp._ref
-        rp_array << rp_info if !current_rp.empty?
-
-        rp_array
-    end
-
-    def get_nsx
-        nsx_info = ''
-        nsx_obj = {}
-        # In the future add more than one nsx manager
-        extension_list = []
-        extension_list = @vi_client.vim.serviceContent.extensionManager.extensionList
-        extension_list.each do |ext_list|
-            if ext_list.key == NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
-                nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
-                urlFull = ext_list.client[0].url
-                urlSplit = urlFull.split("/")
-                # protocol = "https://"
-                protocol = urlSplit[0] + "//"
-                # ipPort = ip:port
-                ipPort = urlSplit[2]
-                nsx_obj['url'] = protocol + ipPort
-                nsx_obj['version'] = ext_list.version
-                nsx_obj['label'] = ext_list.description.label
-            elsif ext_list.key == NSXDriver::NSXConstants::NSXT_EXTENSION_LIST
-                nsx_obj['type'] = NSXDriver::NSXConstants::NSXT
-                nsx_obj['url'] = ext_list.server[0].url
-                nsx_obj['version'] = ext_list.version
-                nsx_obj['label'] = ext_list.description.label
-            else
-                next
+        def fetch_clusters!
+            VIClient
+                .get_entities(
+                    @item,
+                    'ClusterComputeResource'
+                ).each do |item|
+                    item_name = item._ref
+                    @items[item_name.to_sym] = ClusterComputeResource.new(item)
             end
         end
-        unless nsx_obj.empty?
-            nsx_info << "NSX_MANAGER=\"#{nsx_obj['url']}\"\n"
-            nsx_info << "NSX_TYPE=\"#{nsx_obj['type']}\"\n"
-            nsx_info << "NSX_VERSION=\"#{nsx_obj['version']}\"\n"
-            nsx_info << "NSX_LABEL=\"#{nsx_obj['label']}\"\n"
+
+        def get_cluster(ref)
+            if !@items[ref.to_sym]
+                rbvmomi_dc =
+                    RbVmomi::VIM::ClusterComputeResource
+                    .new(
+                        @item._connection,
+                        ref
+                    )
+                @items[ref.to_sym] =
+                    ClusterComputeResource
+                    .new(
+                        rbvmomi_dc
+                    )
+            end
+
+            @items[ref.to_sym]
         end
-        nsx_info
+
     end
+    # class HostFolder

-    def nsx_ready?
-        @one_item = VCenterDriver::VIHelper
-                    .one_item(OpenNebula::Host,
-                              @vi_client.instance_variable_get(:@host_id).to_i)
+    ##########################################################################
+    # Class ClusterComputeResource
+    ##########################################################################
+    class ClusterComputeResource

-        # Check if NSX_MANAGER is into the host template
-        if [nil, ''].include?(@one_item['TEMPLATE/NSX_MANAGER'])
-            @nsx_status = "NSX_STATUS = \"Missing NSX_MANAGER\"\n"
-            return false
+        attr_accessor :item
+        attr_accessor :rp_list
+
+        include Memoize
+
+        def initialize(item, vi_client = nil)
+            @item = item
+            @vi_client = vi_client
+            @rp_list # rubocop:disable Lint/Void
         end

-        # Check if NSX_USER is into the host template
-        if [nil, ''].include?(@one_item['TEMPLATE/NSX_USER'])
-            @nsx_status = "NSX_STATUS = \"Missing NSX_USER\"\n"
-            return false
+        def fetch_resource_pools(rp, rp_array = [])
+            rp_array << rp
+
+            rp.resourcePool.each do |child_rp|
+                fetch_resource_pools(child_rp, rp_array)
+            end
+
+            rp_array
         end

-        # Check if NSX_PASSWORD is into the host template
-        if [nil, ''].include?(@one_item['TEMPLATE/NSX_PASSWORD'])
-            @nsx_status = "NSX_STATUS = \"Missing NSX_PASSWORD\"\n"
-            return false
+        def resource_pools
+            if @resource_pools.nil?
+                @resource_pools = fetch_resource_pools(@item.resourcePool)
+            end
+
+            @resource_pools
         end

-        # Check if NSX_TYPE is into the host template
-        if [nil, ''].include?(@one_item['TEMPLATE/NSX_TYPE'])
-            @nsx_status = "NSX_STATUS = \"Missing NSX_TYPE\"\n"
-            return false
+        def get_resource_pool_list(
+            rp = @item
+                 .resourcePool,
+            parent_prefix = '',
+            rp_array = []
+        )
+            current_rp = ''
+
+            if !parent_prefix.empty?
+                current_rp << parent_prefix
+                current_rp << '/'
+            end
+
+            resource_pool, name = rp.collect('resourcePool', 'name')
+            current_rp << name if name != 'Resources'
+
+            resource_pool.each do |child_rp|
+                get_resource_pool_list(child_rp, current_rp, rp_array)
+            end
+
+            rp_info = {}
+            rp_info[:name] = current_rp
+            rp_info[:ref]  = rp._ref
+            rp_array << rp_info unless current_rp.empty?
+
+            rp_array
         end

-        # Try a connection as part of NSX_STATUS
-        nsx_client = NSXDriver::NSXClient
-                     .new_from_id(@vi_client.instance_variable_get(:@host_id).to_i)
-
-        if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
-            # URL to test a connection
-            url = '/api/2.0/vdn/scopes'
-            begin
-                if nsx_client.get(url)
-                    @nsx_status = "NSX_STATUS = \"OK\"\n"
-                    return true
+        def nsx_get
+            nsx_info = ''
+            nsx_obj = {}
+            # In the future add more than one nsx manager
+            extension_list =
+                @vi_client
+                .vim
+                .serviceContent
+                .extensionManager
+                .extensionList
+            extension_list.each do |ext_list|
+                case ext_list.key
+                when NSXDriver::NSXConstants::NSXV_EXTENSION_LIST
+                    nsx_obj['type'] = NSXDriver::NSXConstants::NSXV
+                    url_full = ext_list.client[0].url
+                    url_split = url_full.split('/')
+                    # protocol = "https://"
+                    protocol = url_split[0] + '//'
+                    # ip_port = ip:port
+                    ip_port = url_split[2]
+                    nsx_obj['url'] = protocol + ip_port
+                    nsx_obj['version'] = ext_list.version
+                    nsx_obj['label'] = ext_list.description.label
+                when NSXDriver::NSXConstants::NSXT_EXTENSION_LIST
+                    nsx_obj['type'] = NSXDriver::NSXConstants::NSXT
+                    nsx_obj['url'] = ext_list.server[0].url
+                    nsx_obj['version'] = ext_list.version
+                    nsx_obj['label'] = ext_list.description.label
                 else
-                    @nsx_status = "NSX_STATUS = \"Response code incorrect\"\n"
-                    return false
+                    next
                 end
-            rescue StandardError => e
-                @nsx_status = 'NSX_STATUS = "Error connecting to ' \
-                              "NSX_MANAGER\"\n"
+            end
+            unless nsx_obj.empty?
+                nsx_info << "NSX_MANAGER=\"#{nsx_obj['url']}\"\n"
+                nsx_info << "NSX_TYPE=\"#{nsx_obj['type']}\"\n"
+                nsx_info << "NSX_VERSION=\"#{nsx_obj['version']}\"\n"
+                nsx_info << "NSX_LABEL=\"#{nsx_obj['label']}\"\n"
+            end
+            nsx_info
+        end
+
+        def nsx_ready?
+            @one_item =
+                VCenterDriver::VIHelper
+                .one_item(
+                    OpenNebula::Host,
+                    @vi_client
+                    .instance_variable_get(
+                        :@host_id
+                    ).to_i
+                )
+
+            # Check if NSX_MANAGER is into the host template
+            if [nil, ''].include?(@one_item['TEMPLATE/NSX_MANAGER'])
+                @nsx_status = "NSX_STATUS = \"Missing NSX_MANAGER\"\n"
                 return false
             end
-        end

-        if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXT
+            # Check if NSX_USER is into the host template
+            if [nil, ''].include?(@one_item['TEMPLATE/NSX_USER'])
+                @nsx_status = "NSX_STATUS = \"Missing NSX_USER\"\n"
+                return false
+            end
+
+            # Check if NSX_PASSWORD is into the host template
+            if [nil, ''].include?(@one_item['TEMPLATE/NSX_PASSWORD'])
+                @nsx_status = "NSX_STATUS = \"Missing NSX_PASSWORD\"\n"
+                return false
+            end
+
+            # Check if NSX_TYPE is into the host template
+            if [nil, ''].include?(@one_item['TEMPLATE/NSX_TYPE'])
+                @nsx_status = "NSX_STATUS = \"Missing NSX_TYPE\"\n"
+                return false
+            end
+
+            # Try a connection as part of NSX_STATUS
+            nsx_client = NSXDriver::NSXClient
+                         .new_from_id(
+                             @vi_client
+                             .instance_variable_get(
+                                 :@host_id
+                             ).to_i
+                         )
+
+            if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
+                # URL to test a connection
+                url = '/api/2.0/vdn/scopes'
+                begin
+                    if nsx_client.get(url)
+                        @nsx_status = "NSX_STATUS = \"OK\"\n"
+                        return true
+                    else
+                        @nsx_status =
+                            "NSX_STATUS = \"Response code incorrect\"\n"
+                        return false
+                    end
+                rescue StandardError
+                    @nsx_status = 'NSX_STATUS = "Error connecting to ' \
+                                  "NSX_MANAGER\"\n"
+                    return false
+                end
+            end
+
+            nsx_type = @one_item['TEMPLATE/NSX_TYPE']
+            unless nsx_type == NSXDriver::NSXConstants::NSXT; return; end
+
             # URL to test a connection
             url = '/api/v1/transport-zones'
             begin
                 if nsx_client.get(url)
                     @nsx_status = "NSX_STATUS = \"OK\"\n"
-                    return true
+                    true
                 else
-                    @nsx_status = "NSX_STATUS = \"Response code incorrect\"\n"
-                    return false
+                    @nsx_status =
+                        "NSX_STATUS = \"Response code incorrect\"\n"
+                    false
                 end
-            rescue StandardError => e
+            rescue StandardError
                 @nsx_status = 'NSX_STATUS = "Error connecting to '\
-                              "NSX_MANAGER\"\n"
-                return false
+                    "NSX_MANAGER\"\n"
+                false
             end
         end
-    end

-    def get_tz
-        @nsx_status = ''
-        if !nsx_ready?
+        def tz_get
+            @nsx_status = ''
+            if !nsx_ready?
+                tz_info = @nsx_status
             else
-            tz_info = "NSX_STATUS = OK\n"
-            tz_info << 'NSX_TRANSPORT_ZONES = ['
-
-            nsx_client = NSXDriver::NSXClient
-                         .new_from_id(@vi_client.instance_variable_get(:@host_id).to_i)
-            tz_object = NSXDriver::TransportZone.new_child(nsx_client)
-
-            # NSX request to get Transport Zones
-            if @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXV
-                tzs = tz_object.tzs
-                tzs.each do |tz|
-                    tz_info << tz.xpath('name').text << '="'
-                    tz_info << tz.xpath('objectId').text << '",'
-                end
-                tz_info.chomp!(',')
-            elsif @one_item['TEMPLATE/NSX_TYPE'] == NSXDriver::NSXConstants::NSXT
-                r = tz_object.tzs
-                r['results'].each do |tz|
-                    tz_info << tz['display_name'] << '="'
-                    tz_info << tz['id'] << '",'
-                end
-                tz_info.chomp!(',')
+                tz_info = "NSX_STATUS = OK\n"
+                tz_info << 'NSX_TRANSPORT_ZONES = ['
+
+                nsx_client =
+                    NSXDriver::NSXClient
+                    .new_from_id(
+                        @vi_client
+                        .instance_variable_get(
+                            :@host_id
+                        ).to_i
+                    )
+                tz_object = NSXDriver::TransportZone.new_child(nsx_client)
+
+                # NSX request to get Transport Zones
+                case @one_item['TEMPLATE/NSX_TYPE']
+                when NSXDriver::NSXConstants::NSXV
+                    tzs = tz_object.tzs
+                    tzs.each do |tz|
+                        tz_info << tz.xpath('name').text << '="'
+                        tz_info << tz.xpath('objectId').text << '",'
+                    end
+                    tz_info.chomp!(',')
+                when NSXDriver::NSXConstants::NSXT
+                    r = tz_object.tzs
+                    r['results'].each do |tz|
+                        tz_info << tz['display_name'] << '="'
+                        tz_info << tz['id'] << '",'
+                    end
+                    tz_info.chomp!(',')
                 else
-                raise "Unknown Port Group type #{@one_item['TEMPLATE/NSX_TYPE']}"
+                    raise "Unknown Port Group type \
+                    #{@one_item['TEMPLATE/NSX_TYPE']}"
                 end
-            tz_info << ']'
-            return tz_info
+                tz_info << ']'
+                return tz_info
             end
             tz_info
         end

-    def monitor
-        total_cpu,
-        num_cpu_cores,
-        effective_cpu,
-        total_memory,
-        effective_mem,
-        num_hosts,
-        num_eff_hosts,
-        overall_status,
-        drs_enabled,
-        ha_enabled= @item.collect("summary.totalCpu",
-                                  "summary.numCpuCores",
-                                  "summary.effectiveCpu",
-                                  "summary.totalMemory",
-                                  "summary.effectiveMemory",
-                                  "summary.numHosts",
-                                  "summary.numEffectiveHosts",
-                                  "summary.overallStatus",
-                                  "configuration.drsConfig.enabled",
-                                  "configuration.dasConfig.enabled"
-        )
-
-        mhz_core = total_cpu.to_f / num_cpu_cores.to_f
-        eff_core = effective_cpu.to_f / mhz_core
-
-        free_cpu = sprintf('%.2f', eff_core * 100).to_f
-        total_cpu = num_cpu_cores.to_f * 100
-        used_cpu = sprintf('%.2f', total_cpu - free_cpu).to_f
-
-        total_mem = total_memory.to_i / 1024
-        free_mem = effective_mem.to_i * 1024
-
-        str_info = ""
-
-        # Get cluster name for informative purposes (replace space with _ if any)
-        str_info << "VCENTER_NAME=" << self['name'].tr(" ", "_") << "\n"
-
-        # System
-        str_info << "HYPERVISOR=vcenter\n"
-        str_info << "TOTALHOST=" << num_hosts.to_s << "\n"
-        str_info << "AVAILHOST=" << num_eff_hosts.to_s << "\n"
-        str_info << "STATUS=" << overall_status << "\n"
-
-        # CPU
-        str_info << "CPUSPEED=" << mhz_core.to_s << "\n"
-        str_info << "TOTALCPU=" << total_cpu.to_s << "\n"
-        str_info << "USEDCPU=" << used_cpu.to_s << "\n"
-        str_info << "FREECPU=" << free_cpu.to_s << "\n"
-
-        # Memory
-        str_info << "TOTALMEMORY=" << total_mem.to_s << "\n"
-        str_info << "FREEMEMORY=" << free_mem.to_s << "\n"
-        str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s << "\n"
-
-        # DRS enabled
-        str_info << "VCENTER_DRS=" << drs_enabled.to_s << "\n"
-
-        # HA enabled
-        str_info << "VCENTER_HA=" << ha_enabled.to_s << "\n"
-
-        # NSX info
-        str_info << get_nsx
-        str_info << get_tz
-
-        str_info << monitor_resource_pools(mhz_core)
-
-
-    end
-
-    def monitor_resource_pools(mhz_core)
-
-        @rp_list = get_resource_pool_list
-
-        view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
-            container: @item, #View for RPs inside this cluster
-            type:      ['ResourcePool'],
-            recursive: true
-        })
-
-        pc = @vi_client.vim.serviceContent.propertyCollector
-
-        monitored_properties = [
-            "config.cpuAllocation.expandableReservation",
-            "config.cpuAllocation.limit",
-            "config.cpuAllocation.reservation",
-            "config.cpuAllocation.shares.level",
-            "config.cpuAllocation.shares.shares",
-            "config.memoryAllocation.expandableReservation",
-            "config.memoryAllocation.limit",
-            "config.memoryAllocation.reservation",
-            "config.memoryAllocation.shares.level",
-            "config.memoryAllocation.shares.shares"
-        ]
-
-        filterSpec = RbVmomi::VIM.PropertyFilterSpec(
-            :objectSet => [
-                :obj => view,
-                :skip => true,
-                :selectSet => [
-                    RbVmomi::VIM.TraversalSpec(
-                        :name => 'traverseEntities',
-                        :type => 'ContainerView',
-                        :path => 'view',
-                        :skip => false
-                    )
-                ]
-            ],
-            :propSet => [
-                { :type => 'ResourcePool', :pathSet => monitored_properties }
-            ]
-        )
-
-        result = pc.RetrieveProperties(:specSet => [filterSpec])
-
-        rps = {}
-        result.each do |r|
-            hashed_properties = r.to_hash
-            if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
-                rps[r.obj._ref] = hashed_properties
+        def monitor
+            total_cpu,
+                num_cpu_cores,
+                effective_cpu,
+                total_memory,
+                effective_mem,
+                num_hosts,
+                num_eff_hosts,
+                overall_status,
+                drs_enabled,
+                ha_enabled= @item.collect('summary.totalCpu',
+                                          'summary.numCpuCores',
+                                          'summary.effectiveCpu',
+                                          'summary.totalMemory',
+                                          'summary.effectiveMemory',
+                                          'summary.numHosts',
+                                          'summary.numEffectiveHosts',
+                                          'summary.overallStatus',
+                                          'configuration.drsConfig.enabled',
+                                          'configuration.dasConfig.enabled')
+
+            mhz_core = total_cpu.to_f / num_cpu_cores.to_f
+            eff_core = effective_cpu.to_f / mhz_core
+
+            free_cpu = format('%.2f', eff_core * 100).to_f # rubocop:disable Style/FormatStringToken
+            total_cpu = num_cpu_cores.to_f * 100
+            used_cpu = format('%.2f', total_cpu - free_cpu).to_f # rubocop:disable Style/FormatStringToken
+
+            total_mem = total_memory.to_i / 1024
+            free_mem = effective_mem.to_i * 1024
+
+            str_info = ''
+
+            # Get cluster name for informative purposes
+            # (replace space with _ if any)
+            str_info << 'VCENTER_NAME=' << self['name'].tr(' ', '_') << "\n"
+
+            # System
+            str_info << "HYPERVISOR=vcenter\n"
+            str_info << 'TOTALHOST=' << num_hosts.to_s << "\n"
+            str_info << 'AVAILHOST=' << num_eff_hosts.to_s << "\n"
+            str_info << 'STATUS=' << overall_status << "\n"
+
+            # CPU
+            str_info << 'CPUSPEED=' << mhz_core.to_s << "\n"
+            str_info << 'TOTALCPU=' << total_cpu.to_s << "\n"
+            str_info << 'USEDCPU=' << used_cpu.to_s << "\n"
+            str_info << 'FREECPU=' << free_cpu.to_s << "\n"
+
+            # Memory
+            str_info << 'TOTALMEMORY=' << total_mem.to_s << "\n"
+            str_info << 'FREEMEMORY=' << free_mem.to_s << "\n"
+            str_info << 'USEDMEMORY=' << (total_mem - free_mem).to_s << "\n"
+
+            # DRS enabled
+            str_info << 'VCENTER_DRS=' << drs_enabled.to_s << "\n"
+
+            # HA enabled
+            str_info << 'VCENTER_HA=' << ha_enabled.to_s << "\n"
+
+            # NSX info
+            str_info << nsx_get
+            str_info << tz_get
+
+            str_info << monitor_resource_pools(mhz_core)
+        end
+
+        def monitor_resource_pools(mhz_core)
+            @rp_list = get_resource_pool_list
+
+            view =
+                @vi_client
+                .vim
+                .serviceContent
+                .viewManager
+                .CreateContainerView(
+                    {
+                        :container => @item, # View for RPs inside this cluster
+                        :type => ['ResourcePool'],
+                        :recursive => true
+                    }
+                )
+
+            pc = @vi_client.vim.serviceContent.propertyCollector
+
+            monitored_properties = [
+                'config.cpuAllocation.expandableReservation',
+                'config.cpuAllocation.limit',
+                'config.cpuAllocation.reservation',
+                'config.cpuAllocation.shares.level',
+                'config.cpuAllocation.shares.shares',
+                'config.memoryAllocation.expandableReservation',
+                'config.memoryAllocation.limit',
+                'config.memoryAllocation.reservation',
+                'config.memoryAllocation.shares.level',
+                'config.memoryAllocation.shares.shares'
+            ]
+
+            filter_spec = RbVmomi::VIM.PropertyFilterSpec(
+                :objectSet => [
+                    { :obj => view,
+                      :skip => true,
+                      :selectSet => [
+                          RbVmomi::VIM.TraversalSpec(
+                              :name => 'traverseEntities',
+                              :type => 'ContainerView',
+                              :path => 'view',
+                              :skip => false
+                          )
+                      ] }
+                ],
+                :propSet => [
+                    {
+                        :type => 'ResourcePool',
+                        :pathSet => monitored_properties
+                    }
+                ]
+            )
+
+            result = pc.RetrieveProperties(:specSet => [filter_spec])
+
+            rps = {}
+            result.each do |r|
+                hashed_properties = r.to_hash
+                if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
+                    rps[r.obj._ref] = hashed_properties
+                end
             end
-        end
-
-        return "" if rps.empty?
-
-        rp_info = ""
-
-        rps.each{|ref, info|
-
-            # CPU
-            cpu_expandable = info["config.cpuAllocation.expandableReservation"] ? "YES" : "NO"
-            cpu_limit = info["config.cpuAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.cpuAllocation.limit"]
-            cpu_reservation = info["config.cpuAllocation.reservation"]
-            cpu_num = cpu_reservation.to_f / mhz_core
-            cpu_shares_level = info["config.cpuAllocation.shares.level"]
-            cpu_shares = info["config.cpuAllocation.shares.shares"]
-
-            # MEMORY
-            mem_expandable = info["config.memoryAllocation.expandableReservation"] ? "YES" : "NO"
-            mem_limit = info["config.memoryAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.memoryAllocation.limit"]
-            mem_reservation = info["config.memoryAllocation.reservation"].to_f
-            mem_shares_level = info["config.memoryAllocation.shares.level"]
-            mem_shares = info["config.memoryAllocation.shares.shares"]
-
-            rp_name = @rp_list.select { |item| item[:ref] == ref}.first[:name] rescue ""
-
-            rp_name = "Resources" if rp_name.empty?
-
-            rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
-            rp_info << "NAME=\"#{rp_name}\","
-            rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
-            rp_info << "CPU_LIMIT=#{cpu_limit},"
-            rp_info << "CPU_RESERVATION=#{cpu_reservation},"
-            rp_info << "CPU_RESERVATION_NUM_CORES=#{cpu_num},"
-            rp_info << "CPU_SHARES=#{cpu_shares},"
-            rp_info << "CPU_SHARES_LEVEL=#{cpu_shares_level},"
-            rp_info << "MEM_EXPANDABLE=#{mem_expandable},"
-            rp_info << "MEM_LIMIT=#{mem_limit},"
-            rp_info << "MEM_RESERVATION=#{mem_reservation},"
-            rp_info << "MEM_SHARES=#{mem_shares},"
-            rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
-            rp_info << "]"
-        }
-
-        view.DestroyView
-
-        return rp_info
-    end
-
-    def hostname_to_moref(hostname)
-        result = filter_hosts
-
-        moref = ""
-        result.each do |r|
-            if r.obj.name == hostname
-                moref = r.obj._ref
-                break
+
+            return '' if rps.empty?
+
+            rp_info = ''
+
+            rps.each do |ref, info|
+                # CPU
+                if info['config.cpuAllocation.expandableReservation']
+                    cpu_expandable = 'YES'
+                else
+                    cpu_expandable = 'NO'
+                end
+                if info['config.cpuAllocation.limit'] == '-1'
+                    cpu_limit = 'UNLIMITED'
+                else
+                    cpu_limit = info['config.cpuAllocation.limit']
+                end
+                cpu_reservation  = info['config.cpuAllocation.reservation']
+                cpu_num          = cpu_reservation.to_f / mhz_core
+                cpu_shares_level = info['config.cpuAllocation.shares.level']
+                cpu_shares       = info['config.cpuAllocation.shares.shares']
+
+                # MEMORY
+                if info['config.memoryAllocation.expandableReservation']
+                    mem_expandable = 'YES'
+                else
+                    mem_expandable = 'NO'
+                end
+                if info['config.memoryAllocation.limit'] == '-1'
+                    mem_limit = 'UNLIMITED'
+                else
+                    mem_limit = info['config.memoryAllocation.limit']
+                end
+                mem_reservation =
+                    info['config.memoryAllocation.reservation'].to_f
+                mem_shares_level =
+                    info['config.memoryAllocation.shares.level']
+                mem_shares =
+                    info['config.memoryAllocation.shares.shares']
+
+                rp_name =
+                    @rp_list
+                    .select do |item|
+                        item[:ref] == ref
+                    end.first[:name] rescue ''
+
+                rp_name = 'Resources' if rp_name.empty?
+
+                rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
+                rp_info << "NAME=\"#{rp_name}\","
+                rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
+                rp_info << "CPU_LIMIT=#{cpu_limit},"
+                rp_info << "CPU_RESERVATION=#{cpu_reservation},"
+                rp_info << "CPU_RESERVATION_NUM_CORES=#{cpu_num},"
+                rp_info << "CPU_SHARES=#{cpu_shares},"
+                rp_info << "CPU_SHARES_LEVEL=#{cpu_shares_level},"
+                rp_info << "MEM_EXPANDABLE=#{mem_expandable},"
+                rp_info << "MEM_LIMIT=#{mem_limit},"
+                rp_info << "MEM_RESERVATION=#{mem_reservation},"
+                rp_info << "MEM_SHARES=#{mem_shares},"
+                rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
+                rp_info << ']'
             end
+
+            view.DestroyView
+
+            rp_info
         end
-        raise "Host #{hostname} was not found" if moref.empty?
-        return moref
-    end

-    def filter_hosts
-        view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
-            container: @item, #View for Hosts inside this cluster
-            type:      ['HostSystem'],
-            recursive: true
-        })
+        def hostname_to_moref(hostname)
+            result = filter_hosts

-        pc = @vi_client.vim.serviceContent.propertyCollector
-
-        monitored_properties = [
-            "name",
-            "runtime.connectionState",
-            "summary.hardware.numCpuCores",
-            "summary.hardware.memorySize",
-            "summary.hardware.cpuModel",
-            "summary.hardware.cpuMhz",
-            "summary.quickStats.overallCpuUsage",
-            "summary.quickStats.overallMemoryUsage"
-        ]
-
-        filterSpec = RbVmomi::VIM.PropertyFilterSpec(
-            :objectSet => [
-                :obj => view,
-                :skip => true,
-                :selectSet => [
-                    RbVmomi::VIM.TraversalSpec(
-                        :name => 'traverseEntities',
-                        :type => 'ContainerView',
-                        :path => 'view',
-                        :skip => false
-                    )
-                ]
-            ],
-            :propSet => [
-                { :type => 'HostSystem', :pathSet => monitored_properties }
-            ]
-        )
-
-        result = pc.RetrieveProperties(:specSet => [filterSpec])
-        view.DestroyView # Destroy the view
-        return result
-    end
+            moref = ''
+            result.each do |r|
+                if r.obj.name == hostname
+                    moref = r.obj._ref
+                    break
+                end
+            end
+            raise "Host #{hostname} was not found" if moref.empty?
+
+            moref
+        end

-    def monitor_host_systems
-        host_info = ""
-        result = filter_hosts
-        hosts = {}
-        result.each do |r|
-            hashed_properties = r.to_hash
-            if r.obj.is_a?(RbVmomi::VIM::HostSystem)
-                hosts[r.obj._ref] = hashed_properties
+        def filter_hosts
+            view =
+                @vi_client
+                .vim
+                .serviceContent
+                .viewManager
+                .CreateContainerView(
+                    {
+                        # View for Hosts inside this cluster
+                        :container => @item,
+                        :type => ['HostSystem'],
+                        :recursive => true
+                    }
+                )
+
+            pc = @vi_client.vim.serviceContent.propertyCollector
+
+            monitored_properties = [
+                'name',
+                'runtime.connectionState',
+                'summary.hardware.numCpuCores',
+                'summary.hardware.memorySize',
+                'summary.hardware.cpuModel',
+                'summary.hardware.cpuMhz',
+                'summary.quickStats.overallCpuUsage',
+                'summary.quickStats.overallMemoryUsage'
+            ]
+
+            filter_spec = RbVmomi::VIM.PropertyFilterSpec(
+                :objectSet => [
+                    { :obj => view,
+                      :skip => true,
+                      :selectSet => [
+                          RbVmomi::VIM.TraversalSpec(
+                              :name => 'traverseEntities',
+                              :type => 'ContainerView',
+                              :path => 'view',
+                              :skip => false
+                          )
+                      ] }
+                ],
+                :propSet => [
+                    { :type => 'HostSystem', :pathSet => monitored_properties }
+                ]
+            )
+
+            result = pc.RetrieveProperties(:specSet => [filter_spec])
+            view.DestroyView # Destroy the view
+            result
+        end
+
+        def monitor_host_systems
+            host_info = ''
+            result = filter_hosts
+            hosts = {}
+            result.each do |r|
+                hashed_properties = r.to_hash
+                if r.obj.is_a?(RbVmomi::VIM::HostSystem)
+                    hosts[r.obj._ref] = hashed_properties
+                end
             end
-        end

-        hosts.each do |ref, info|
-            next if info["runtime.connectionState"] != "connected"
-
-            total_cpu = info["summary.hardware.numCpuCores"] * 100
-            used_cpu = (info["summary.quickStats.overallCpuUsage"].to_f / info["summary.hardware.cpuMhz"].to_f) * 100
-            used_cpu = sprintf('%.2f', used_cpu).to_f # Trim precission
-            free_cpu = total_cpu - used_cpu
-
-            total_memory = info["summary.hardware.memorySize"]/1024
-            used_memory = info["summary.quickStats.overallMemoryUsage"]*1024
-            free_memory = total_memory - used_memory
-
-            host_info << "\nHOST=["
-            host_info << "STATE=on,"
-            host_info << "HOSTNAME=\"" << info["name"].to_s << "\","
-            host_info << "MODELNAME=\"" << info["summary.hardware.cpuModel"].to_s << "\","
-            host_info << "CPUSPEED=" << info["summary.hardware.cpuMhz"].to_s << ","
-            host_info << "MAX_CPU=" << total_cpu.to_s << ","
-            host_info << "USED_CPU=" << used_cpu.to_s << ","
-            host_info << "FREE_CPU=" << free_cpu.to_s << ","
-            host_info << "MAX_MEM=" << total_memory.to_s << ","
-            host_info << "USED_MEM=" << used_memory.to_s << ","
-            host_info << "FREE_MEM=" << free_memory.to_s
-            host_info << "]"
-        end
-
-        return host_info
-    end
-
-    def monitor_vms(host_id, vm_type)
-        vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
-        cluster_name = self["name"]
-        cluster_ref = self["_ref"]
-
-        # Get info of the host where the VM/template is located
-        one_host = VCenterDriver::VIHelper.one_item(OpenNebula::Host, host_id, false)
-
-        esx_hosts = {}
-        @item.host.each do |esx_host|
-            esx_hosts[esx_host._ref] = {
-                :name => esx_host.name,
-                :cpu  => esx_host.summary.hardware.cpuMhz.to_f
-            }
-        end
-
-        monitored_vms = Set.new
-        str_info = ""
-
-        view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
-            container: @item, #View for VMs inside this cluster
-            type:      ['VirtualMachine'],
-            recursive: true
-        })
-
-        pc = @vi_client.vim.serviceContent.propertyCollector
-
-        monitored_properties = [
-            "name", #VM name
-            "config.template", #To filter out templates
-            "summary.runtime.powerState", #VM power state
-            "summary.quickStats.hostMemoryUsage", #Memory usage
-            "summary.quickStats.overallCpuUsage", #CPU used by VM
-            "runtime.host", #ESX host
-            "resourcePool", #RP
-            "guest.guestFullName",
-            "guest.net", #IP addresses as seen by guest tools,
-            "guest.guestState",
-            "guest.toolsVersion",
-            "guest.toolsRunningStatus",
-            "guest.toolsVersionStatus2", #IP addresses as seen by guest tools,
-            "config.extraConfig", #VM extraconfig info e.g opennebula.vm.running
-            "config.hardware.numCPU",
-            "config.hardware.memoryMB",
-            "config.annotation",
-            "datastore"
-        ]
-
-        filterSpec = RbVmomi::VIM.PropertyFilterSpec(
-            :objectSet => [
-                :obj => view,
-                :skip => true,
-                :selectSet => [
-                    RbVmomi::VIM.TraversalSpec(
-                        :name => 'traverseEntities',
-                        :type => 'ContainerView',
-                        :path => 'view',
-                        :skip => false
-                    )
-                ]
-            ],
-            :propSet => [
-                { :type => 'VirtualMachine', :pathSet => monitored_properties }
-            ]
-        )
-
-        result = pc.RetrieveProperties(:specSet => [filterSpec])
-
-        vms = {}
-        vm_objects = []
-        result.each do |r|
-            hashed_properties = r.to_hash
-            if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
-                #Only take care of VMs, not templates
-                if !hashed_properties["config.template"]
+            hosts.each do |_ref, info|
+                next if info['runtime.connectionState'] != 'connected'
+
+                total_cpu = info['summary.hardware.numCpuCores'] * 100
+                used_cpu =
+                    (
+                        info['summary.quickStats.overallCpuUsage']
+                        .to_f / info['summary.hardware.cpuMhz']
+                        .to_f
+                    ) * 100
+                # Trim precision
+                used_cpu = format('%.2f', used_cpu).to_f # rubocop:disable Style/FormatStringToken
+                free_cpu = total_cpu - used_cpu
+
+                total_memory =
+                    info['summary.hardware.memorySize']/1024
+                used_memory =
+                    info['summary.quickStats.overallMemoryUsage']*1024
+                free_memory = total_memory - used_memory
+
+                host_info << "\nHOST=["
+                host_info << 'STATE=on,'
+                host_info << 'HOSTNAME="' <<
+                    info['name'].to_s << '",'
+                host_info <<
+                    'MODELNAME="' <<
+                    info['summary.hardware.cpuModel'].to_s << '",'
+                host_info << 'CPUSPEED=' <<
+                    info['summary.hardware.cpuMhz'].to_s << ','
+                host_info << 'MAX_CPU=' << total_cpu.to_s << ','
+                host_info << 'USED_CPU=' << used_cpu.to_s << ','
+                host_info << 'FREE_CPU=' << free_cpu.to_s << ','
+                host_info << 'MAX_MEM=' << total_memory.to_s << ','
+                host_info << 'USED_MEM=' << used_memory.to_s << ','
+                host_info << 'FREE_MEM=' << free_memory.to_s
+                host_info << ']'
+            end
+
+            host_info
+        end
+
+        def monitor_vms(host_id, vm_type)
+            vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
+            cluster_name = self['name']
+            cluster_ref = self['_ref']
+
+            # Get info of the host where the VM/template is located
+            one_host =
+                VCenterDriver::VIHelper
+                .one_item(
+                    OpenNebula::Host,
+                    host_id
+                )
+            if !one_host
+                STDERR.puts "Failed to retrieve host with id #{host_id}"
+            end
+
+            esx_hosts = {}
+            @item.host.each do |esx_host|
+                esx_hosts[esx_host._ref] = {
+                    :name => esx_host.name,
+                    :cpu => esx_host.summary.hardware.cpuMhz.to_f
+                }
+            end
+
+            monitored_vms = Set.new
+            str_info = ''
+
+            view =
+                @vi_client
+                .vim
+                .serviceContent
+                .viewManager
+                .CreateContainerView(
+                    {
+                        :container => @item, # View for VMs inside this cluster
+                        :type => ['VirtualMachine'],
+                        :recursive => true
+                    }
+                )
+
+            pc = @vi_client.vim.serviceContent.propertyCollector
+
+            monitored_properties = [
+                'name', # VM name
+                'config.template', # To filter out templates
+                'summary.runtime.powerState', # VM power state
+                'summary.quickStats.hostMemoryUsage', # Memory usage
+                'summary.quickStats.overallCpuUsage', # CPU used by VM
+                'runtime.host', # ESX host
+                'resourcePool', # RP
+                'guest.guestFullName',
+                # IP addresses as seen by guest tools,
+                'guest.net',
+                'guest.guestState',
+                'guest.toolsVersion',
+                'guest.toolsRunningStatus',
+                # IP addresses as seen by guest tools,
+                'guest.toolsVersionStatus2',
+                # VM extraconfig info e.g opennebula.vm.running
+                'config.extraConfig',
+                'config.hardware.numCPU',
+                'config.hardware.memoryMB',
+                'config.annotation',
+                'datastore'
+            ]
+
+            filter_spec = RbVmomi::VIM.PropertyFilterSpec(
+                :objectSet => [
+                    { :obj => view,
+                      :skip => true,
+                      :selectSet => [
+                          RbVmomi::VIM.TraversalSpec(
+                              :name => 'traverseEntities',
+                              :type => 'ContainerView',
+                              :path => 'view',
+                              :skip => false
+                          )
+                      ] }
+                ],
+                :propSet => [
+                    {
+                        :type => 'VirtualMachine',
+                        :pathSet => monitored_properties
+                    }
+                ]
+            )
+
+            result = pc.RetrieveProperties(:specSet => [filter_spec])
+
+            vms = {}
+            vm_objects = []
+            result.each do |r|
+                hashed_properties = r.to_hash
+                next unless r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
+
+                # Only take care of VMs, not templates
+                if !hashed_properties['config.template']
                     vms[r.obj._ref] = hashed_properties
                     vm_objects << r.obj
                 end
             end
-        end

-        pm = @vi_client.vim.serviceContent.perfManager
+            pm = @vi_client.vim.serviceContent.perfManager

-        stats = {}
+            stats = {}

-        max_samples = 9
-        refresh_rate = 20 #Real time stats takes samples every 20 seconds
+            max_samples = 9
+            refresh_rate = 20 # Real time stats takes samples every 20 seconds

-        last_mon_time = one_host["TEMPLATE/VCENTER_LAST_PERF_POLL"]
+            last_mon_time = one_host['TEMPLATE/VCENTER_LAST_PERF_POLL']

-        if last_mon_time
-            interval = (Time.now.to_i - last_mon_time.to_i)
-            interval = 3601 if interval < 0
-            samples = (interval / refresh_rate)
-            samples = 1 if samples == 0
-            max_samples = interval > 3600 ? 9 : samples
-        end
+            if last_mon_time
+                interval = (Time.now.to_i - last_mon_time.to_i)
+                interval = 3601 if interval < 0
+                samples = (interval / refresh_rate)
+                samples = 1 if samples == 0
+                interval > 3600 ? max_samples = 9 : max_samples = samples
+            end

-        if !vm_objects.empty?
-            stats = pm.retrieve_stats(
-                vm_objects,
-                ['net.transmitted','net.bytesRx','net.bytesTx','net.received',
-                'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
-                'virtualDisk.read','virtualDisk.write'],
-                {max_samples: max_samples}
-            ) rescue {}
-        end
+            if !vm_objects.empty?
+                stats = pm.retrieve_stats(
+                    vm_objects,
+                    [
+                        'net.transmitted',
+                        'net.bytesRx',
+                        'net.bytesTx',
+                        'net.received',
+                        'virtualDisk.numberReadAveraged',
+                        'virtualDisk.numberWriteAveraged',
+                        'virtualDisk.read',
+                        'virtualDisk.write'
+                    ],
+                    {
+                        :max_samples => max_samples
+                    }
+                ) rescue {}
+            end

-        if !stats.empty?
-            last_mon_time = Time.now.to_i.to_s
-        end
+            if !stats.empty?
+                last_mon_time = Time.now.to_i.to_s
+            end

-        @rp_list = get_resource_pool_list if !@rp_list
+            @rp_list ||= get_resource_pool_list

-        vm_pool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualMachinePool, false)
-        # We filter to retrieve only those VMs running in the host that we are monitoring
-        host_vms = vm_pool.retrieve_xmlelements("/VM_POOL/VM[HISTORY_RECORDS/HISTORY/HID='#{host_id}']")
+            vm_pool =
+                VCenterDriver::VIHelper
+                .one_pool(
+                    OpenNebula::VirtualMachinePool
+                )
+            # We filter to retrieve only those VMs
+            # running in the host that we are monitoring
+            host_vms =
+                vm_pool
+                .retrieve_xmlelements(
+                    "/VM_POOL/VM[HISTORY_RECORDS/HISTORY/HID='#{host_id}']"
+                )

-        vms.each do |vm_ref,info|
-            vm_info = ''
-            begin
-                esx_host = esx_hosts[info["runtime.host"]._ref]
-                info[:esx_host_name] = esx_host[:name]
-                info[:esx_host_cpu] = esx_host[:cpu]
-                info[:cluster_name] = cluster_name
-                info[:cluster_ref] = cluster_ref
-                info[:vc_uuid] = vc_uuid
-                info[:host_id] = host_id
-                info[:rp_list] = @rp_list
-
-                # Check the running flag
-                running_flag = info["config.extraConfig"].select do |val|
-                    val[:key] == "opennebula.vm.running"
-                end
-
-                if !running_flag.empty? && running_flag.first
-                    running_flag = running_flag[0][:value]
-                end
-
-                next if running_flag == "no"
-
-                id = -1
-                # Find the VM by its deploy_id, which in the vCenter driver is
-                # the vCenter managed object reference
-                found_vm = host_vms.select{|vm| vm["DEPLOY_ID"].eql? vm_ref }.first
-                id = found_vm["ID"] if found_vm
-
-                # skip if it is a wild and we are looking for OpenNebula VMs
-                next if vm_type == 'ones' and id == -1
-                # skip if it is not a wild and we are looking for wilds
-                next if vm_type == 'wilds' and id != -1
-                # skip if already monitored
-                next if monitored_vms.include? vm_ref
-
-                monitored_vms << vm_ref
-
-                vm = VCenterDriver::VirtualMachine.new(@vi_client, vm_ref, id)
-                vm.vm_info = info
-                vm.monitor(stats)
-
-                vm_name = "#{info["name"]} - #{cluster_name}"
-                vm_info << "VM = [ ID=\"#{id}\", "
-                vm_info << "VM_NAME=\"#{vm_name}\", "
-                vm_info << "DEPLOY_ID=\"#{vm_ref}\", "
-
-                # if the machine does not exist in opennebula it means that is a wild:
-                unless vm.one_exist?
-                    vm_template_64 = Base64.encode64(vm.vm_to_one(vm_name)).gsub("\n","")
-                    vm_info << 'VCENTER_TEMPLATE="YES",'
-                    vm_info << "IMPORT_TEMPLATE=\"#{vm_template_64}\"]\n"
-                else
-                    mon_s64 = Base64.strict_encode64(vm.info)
-                    vm_info << "MONITOR=\"#{mon_s64}\"]\n"
-                end
-
-            rescue StandardError => e
-                vm_info = error_monitoring(e, id, vm_ref, vc_uuid, info)
-            end
-
-            str_info << vm_info
-        end
-
-        view.DestroyView # Destroy the view
-
-        return str_info, last_mon_time
-    end
-
-    def error_monitoring(e, id, vm_ref, vc_uuid, info = {})
-        error_info = ''
-        vm_name = info['name'] || nil
-        tmp_str = e.inspect
-        tmp_str << e.backtrace.join("\n")
-
-        error_info << "VM = [ ID=\"#{id}\", "
-        error_info << "VM_NAME=\"#{vm_name}\", "
-        error_info << "DEPLOY_ID=\"#{vm_ref}\", "
-        error_info << "ERROR=\"#{Base64.encode64(tmp_str).gsub("\n", '')}\"]\n"
-    end
-
-    def monitor_customizations
-        customizations = self['_connection'].serviceContent.customizationSpecManager.info
-
-        text = ''
-
-        customizations.each do |c|
-            t = "CUSTOMIZATION = [ "
-            t << %Q<NAME = "#{c.name}", >
-            t << %Q<TYPE = "#{c.type}" ]\n>
-
-            text << t
-        end
-
-        text
-    end
-
-    def get_dc
-        item = @item
-
-        while !item.instance_of? RbVmomi::VIM::Datacenter
-            item = item.parent
-            if item.nil?
-                raise "Could not find the parent Datacenter"
+            vms.each do |vm_ref, info|
+                vm_info = ''
+                begin
+                    esx_host = esx_hosts[info['runtime.host']._ref]
+                    info[:esx_host_name] = esx_host[:name]
+                    info[:esx_host_cpu] = esx_host[:cpu]
+                    info[:cluster_name] = cluster_name
+                    info[:cluster_ref] = cluster_ref
+                    info[:vc_uuid] = vc_uuid
+                    info[:host_id] = host_id
+                    info[:rp_list] = @rp_list
+
+                    # Check the running flag
+                    running_flag = info['config.extraConfig'].select do |val|
+                        val[:key] == 'opennebula.vm.running'
+                    end
+
+                    if !running_flag.empty? && running_flag.first
+                        running_flag = running_flag[0][:value]
+                    end
+
+                    next if running_flag == 'no'
+
+                    id = -1
+                    # Find the VM by its deploy_id,
+                    # which in the vCenter driver is
+                    # the vCenter managed object reference
+                    found_vm =
+                        host_vms
+                        .select do |vm|
+                            vm['DEPLOY_ID'].eql? vm_ref
+                        end.first
+                    id = found_vm['ID'] if found_vm
+
+                    # skip if it is a wild and
+                    # we are looking for OpenNebula VMs
+                    next if (vm_type == 'ones') && (id == -1)
+                    # skip if it is not a wild and we are looking for wilds
+                    next if (vm_type == 'wilds') && (id != -1)
+                    # skip if already monitored
+                    next if monitored_vms.include? vm_ref
+
+                    monitored_vms << vm_ref
+
+                    vm =
+                        VCenterDriver::VirtualMachine
+                        .new(
+                            @vi_client,
+                            vm_ref,
+                            id
+                        )
+                    vm.vm_info = info
+                    vm.monitor(stats)
+
+                    vm_name = "#{info['name']} - #{cluster_name}"
+                    vm_info << "VM = [ ID=\"#{id}\", "
+                    vm_info << "VM_NAME=\"#{vm_name}\", "
+                    vm_info << "DEPLOY_ID=\"#{vm_ref}\", "
+
+                    # if the machine does not exist in
+                    # opennebula it means that is a wild:
+                    if vm.one_exist?
+                        mon_s64 = Base64.strict_encode64(vm.info)
+                        vm_info << "MONITOR=\"#{mon_s64}\"]\n"
+                    else
+                        vm_template64 =
+                            Base64
+                            .encode64(
+                                vm.vm_to_one(vm_name)
+                            ).gsub("\n", '')
+                        vm_info << 'VCENTER_TEMPLATE="YES",'
+                        vm_info << "IMPORT_TEMPLATE=\"#{vm_template64}\"]\n"
+                    end
+                rescue StandardError => e
+                    vm_info = error_monitoring(e, id, vm_ref, vc_uuid, info)
+                end
+
+                str_info << vm_info
+            end
+
+            view.DestroyView # Destroy the view
+
+            [str_info, last_mon_time]
+        end
+
+        def error_monitoring(e, id, vm_ref, _vc_uuid, info = {})
+            error_info = ''
+            vm_name = info['name'] || nil
+            tmp_str = e.inspect
+            tmp_str << e.backtrace.join("\n")
+
+            error_info << "VM = [ ID=\"#{id}\", "
+            error_info << "VM_NAME=\"#{vm_name}\", "
+            error_info << "DEPLOY_ID=\"#{vm_ref}\", "
+            error_info <<
+                "ERROR=\"#{Base64.encode64(tmp_str).gsub("\n", '')}\"]\n"
+        end
+
+        def monitor_customizations
+            customizations =
+                self['_connection']
+                .serviceContent
+                .customizationSpecManager
+                .info
+
+            text = ''
+
+            customizations.each do |c|
+                t = 'CUSTOMIZATION = [ '
+                t << %(NAME = "#{c.name}", )
+                t << %(TYPE = "#{c.type}" ]\n)
+
+                text << t
+            end
+
+            text
+        end
+
+        def get_dc # rubocop:disable Naming/AccessorMethodName
+            item = @item
+
+            until item.instance_of? RbVmomi::VIM::Datacenter
+                item = item.parent
+                if item.nil?
+                    raise 'Could not find the parent Datacenter'
+                end
             end
-        end

-        Datacenter.new(item)
-    end
+            Datacenter.new(item)
+        end

-    def self.to_one(cluster, con_ops, rp, one_cluster_id)
-
-        one_host = VCenterDriver::VIHelper.new_one_item(OpenNebula::Host)
-
-        if OpenNebula.is_error?(one_host)
-            raise "Could not create host: #{one_host.message}"
-        end
+        def self.to_one(cluster, con_ops, rp, one_cluster_id)
+            one_host = VCenterDriver::VIHelper.new_one_item(OpenNebula::Host)

-        one_cluster_id = -1 if !one_cluster_id
+            if OpenNebula.is_error?(one_host)
+                raise "Could not create host: #{one_host.message}"
+            end

-        rc = one_host.allocate(cluster[:cluster_name], 'vcenter', 'vcenter', one_cluster_id.to_i)
+            one_cluster_id ||= -1

-        if OpenNebula.is_error?(rc)
-            raise "Could not allocate host: #{rc.message}"
-        end
+            rc = one_host
+                 .allocate(
+                     cluster[:cluster_name],
+                     'vcenter',
+                     'vcenter',
+                     one_cluster_id.to_i
+                 )

-        template = "VCENTER_HOST=\"#{con_ops[:host]}\"\n"\
-                   "VCENTER_PASSWORD=\"#{con_ops[:password]}\"\n"\
-                   "VCENTER_USER=\"#{con_ops[:user]}\"\n"\
-                   "VCENTER_CCR_REF=\"#{cluster[:cluster_ref]}\"\n"\
-                   "VCENTER_INSTANCE_ID=\"#{cluster[:vcenter_uuid]}\"\n"\
-                   "VCENTER_VERSION=\"#{cluster[:vcenter_version]}\"\n"\
+            if OpenNebula.is_error?(rc)
+                raise "Could not allocate host: #{rc.message}"
+            end

-        template << "VCENTER_RESOURCE_POOL=\"#{rp}\"" if rp
+            template = "VCENTER_HOST=\"#{con_ops[:host]}\"\n"\
+                       "VCENTER_PASSWORD=\"#{con_ops[:password]}\"\n"\
+                       "VCENTER_USER=\"#{con_ops[:user]}\"\n"\
+                       "VCENTER_CCR_REF=\"#{cluster[:cluster_ref]}\"\n"\
+                       "VCENTER_INSTANCE_ID=\"#{cluster[:vcenter_uuid]}\"\n"\
+                       "VCENTER_VERSION=\"#{cluster[:vcenter_version]}\"\n"\

-        template << "VCENTER_PORT=\"#{con_ops[:port]}\"" if con_ops[:port]
+            template << "VCENTER_RESOURCE_POOL=\"#{rp}\"" if rp

-        rc = one_host.update(template, false)
+            template << "VCENTER_PORT=\"#{con_ops[:port]}\"" if con_ops[:port]

-        if OpenNebula.is_error?(rc)
-            update_error = rc.message
-            rc = one_host.delete
+            rc = one_host.update(template, false)

             if OpenNebula.is_error?(rc)
+                update_error = rc.message
+                rc = one_host.delete
+
+                unless OpenNebula.is_error?(rc)
+                    raise "Could not update host: #{rc.message}"; end
+
                 raise "Could not update host: #{update_error} "\
-                      "and could not delete host: #{rc.message}"
-            else
-                raise "Could not update host: #{rc.message}"
+                    "and could not delete host: #{rc.message}"
             end
-        end

-        rc = one_host.offline
+            rc = one_host.offline

-        if OpenNebula.is_error?(rc)
-            update_error = rc.message
-            rc = one_host.delete
-
             if OpenNebula.is_error?(rc)
+                update_error = rc.message
+                rc = one_host.delete
+
+                unless OpenNebula.is_error?(rc)
+                    raise "Could not offline host: #{rc.message}"; end
+
                 raise "Could not offline host: #{update_error} "\
-                      "and could not delete host: #{rc.message}"
-            else
-                raise "Could not offline host: #{rc.message}"
+                    "and could not delete host: #{rc.message}"
             end
-        end

-        rc = one_host.enable
+            rc = one_host.enable

-        if OpenNebula.is_error?(rc)
-            update_error = rc.message
-            rc = one_host.delete
-
             if OpenNebula.is_error?(rc)
+                update_error = rc.message
+                rc = one_host.delete
+
+                unless OpenNebula.is_error?(rc)
+                    raise "Could not enable host: #{rc.message}"; end
+
                 raise "Could not enable host: #{update_error} "\
-                      "and could not delete host: #{rc.message}"
-            else
-                raise "Could not enable host: #{rc.message}"
+                    "and could not delete host: #{rc.message}"
             end
+
+            one_host
         end

-        return one_host
-    end
-
-    def self.new_from_ref(ref, vi_client)
-        self.new(RbVmomi::VIM::ClusterComputeResource.new(vi_client.vim, ref), vi_client)
-    end
-end # class ClusterComputeResource
-
-class ESXHost
-    attr_accessor :item
-
-    include Memoize
-
-    PG_CREATE_TIMEOUT = 240 # We will wait for 4 minutes for the pg creation
-
-    def initialize(item, vi_client=nil)
-        @net_rollback = []
-        @locking = true
-        @item = item
-        @vi_client = vi_client
-    end
-
-    def self.new_from_ref(ref, vi_client)
-        self.new(RbVmomi::VIM::HostSystem.new(vi_client.vim, ref), vi_client)
-    end
-
-    # Locking function. Similar to flock
-    def lock
-        hostlockname = @item['name'].downcase.tr(" ", "_")
-        if @locking
-            @locking_file = File.open("/tmp/vcenter-#{hostlockname}-lock","w")
-            @locking_file.flock(File::LOCK_EX)
+        def self.new_from_ref(ref, vi_client)
+            new(
+                RbVmomi::VIM::ClusterComputeResource
+                .new(
+                    vi_client.vim,
+                    ref
+                ),
+                vi_client
+            )
         end
-    end

-    # Unlock driver execution mutex
-    def unlock
-        if @locking
+    end
+    # class ClusterComputeResource
+
+    ##########################################################################
+    # Class ESXHost
+    ##########################################################################
+    class ESXHost
+
+        attr_accessor :item
+
+        include Memoize
+
+        PG_CREATE_TIMEOUT = 240 # We will wait for 4 minutes for the pg creation
+
+        def initialize(item, vi_client = nil)
+            @net_rollback = []
+            @locking = true
+            @item = item
+            @vi_client = vi_client
+        end
+
+        def self.new_from_ref(ref, vi_client)
+            new(RbVmomi::VIM::HostSystem.new(vi_client.vim, ref), vi_client)
+        end
+
+        # Locking function. Similar to flock
+        def lock
+            hostlockname = @item['name'].downcase.tr(' ', '_')
+
+            return unless @locking
+
+            @locking_file =
+                File
+                .open("/tmp/vcenter-#{hostlockname}-lock", 'w')
+            @locking_file.flock(File::LOCK_EX)
+        end
+
+        # Unlock driver execution mutex
+        def unlock
+            return unless @locking
+
             @locking_file.close
         end
-    end

-    ########################################################################
-    # Check if standard switch exists in host
-    ########################################################################
+        ########################################################################
+        # Check if standard switch exists in host
+        ########################################################################

-    def vss_exists(vswitch_name)
-        vswitches = @item.configManager.networkSystem.networkInfo.vswitch
-        return vswitches.select{|vs| vs.name == vswitch_name }.first rescue nil
-    end
+        def vss_exists(vswitch_name)
+            vswitches = @item.configManager.networkSystem.networkInfo.vswitch
+            vswitches.select {|vs| vs.name == vswitch_name }.first rescue nil
+        end

-    ########################################################################
-    # Create a standard vcenter switch in an ESX host
-    ########################################################################
+        ########################################################################
+        # Create a standard vcenter switch in an ESX host
+        ########################################################################

-    def create_vss(name, pnics=nil, num_ports=128, mtu=1500, pnics_available=nil)
-        # Get NetworkSystem
-        nws = self['configManager.networkSystem']
-        vswitchspec = nil
-        hostbridge = nil
-        nics = []
+        def create_vss(
+            name,
+            pnics = nil,
+            num_ports = 128,
+            mtu = 1500,
+            pnics_available = nil
+        )
+            # Get NetworkSystem
+            nws = self['configManager.networkSystem']
+            hostbridge = nil
+            nics = []

-        if pnics
-            pnics = pnics.split(",")
-            pnics.each do |pnic|
-                #Add nics if not in use
-                nics << pnic if pnics_available.include?(pnic)
+            if pnics
+                pnics = pnics.split(',')
+                pnics.each do |pnic|
+                    # Add nics if not in use
+                    nics << pnic if pnics_available.include?(pnic)
+                end
+
+                if !nics.empty?
+                    hostbridge =
+                        RbVmomi::VIM::HostVirtualSwitchBondBridge(
+                            :nicDevice => nics
+                        )
+                end
             end
-        end

-        if !nics.empty?
-            hostbridge = RbVmomi::VIM::HostVirtualSwitchBondBridge(:nicDevice => nics)
-        end
+            # Create spec
+            vswitchspec =
+                RbVmomi::VIM::HostVirtualSwitchSpec(
+                    :bridge => hostbridge,
+                    :mtu => mtu,
+                    :numPorts => num_ports
+                )

-        #Create spec
-        vswitchspec = RbVmomi::VIM::HostVirtualSwitchSpec(:bridge => hostbridge, :mtu => mtu, :numPorts => num_ports)
+            # add vSwitch to the host
+            begin
+                nws.AddVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
+            rescue StandardError => e
+                raise "The standard vSwitch #{name} could not be \
+                created. AddVirtualSwitch failed Reason: #{e.message}."
+            end

-        #add vSwitch to the host
-        begin
-            nws.AddVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
-        rescue Exception => e
-            raise "The standard vSwitch #{name} could not be created. AddVirtualSwitch failed Reason: #{e.message}."
-        end
+            @net_rollback << { :action => :delete_sw, :name => name }

-        @net_rollback << {:action => :delete_sw, :name => name}
-
-        return name
-    end
+            name
+        end

-    ########################################################################
-    # Update a standard vcenter switch in an ESX host
-    ########################################################################
-    def update_vss(switch, name, pnics, num_ports, mtu)
-        pnics = pnics.split(",") rescue []
-
-        #Backup switch spec for rollback
-        orig_spec = switch.spec
-
-        #Compare current configuration and return if switch hasn't changed
-        same_switch = false
-
-        switch_has_pnics = switch.spec.respond_to?(:bridge) && switch.spec.bridge.respond_to?(:nicDevice)
-
-
-
-        same_switch = switch.spec.respond_to?(:mtu) && switch.spec.mtu == mtu &&
-                      switch.spec.respond_to?(:numPorts) && switch.spec.numPorts == num_ports &&
-                      (!switch_has_pnics && pnics.empty? ||
-                       switch_has_pnics && switch.spec.bridge.nicDevice.uniq.sort == pnics.uniq.sort)
-        return if same_switch
-
-        # Let's create a new spec and update the switch
-        vswitchspec = nil
-        hostbridge = nil
-        nws = self['configManager.networkSystem']
-        hostbridge = RbVmomi::VIM::HostVirtualSwitchBondBridge(:nicDevice => pnics) if !pnics.empty?
-        vswitchspec = RbVmomi::VIM::HostVirtualSwitchSpec(:bridge => hostbridge, :mtu => mtu, :numPorts => num_ports)
-
-        begin
-            nws.UpdateVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
-        rescue Exception => e
-            raise "The standard switch with name #{name} could not be updated. Reason: #{e.message}"
-        end
-
-        @net_rollback << {:action => :update_sw, :name => name, :spec => orig_spec}
-    end
+        ########################################################################
+        # Update a standard vcenter switch in an ESX host
+        ########################################################################
+        def update_vss(switch, name, pnics, num_ports, mtu)
+            pnics = pnics.split(',') rescue []
+
+            # Backup switch spec for rollback
+            orig_spec = switch.spec
+
+            # Compare current configuration and return if switch hasn't changed
+            switch_has_pnics = switch
+                               .spec
+                               .respond_to?(
+                                   :bridge
+                               ) && switch
+                                    .spec
+                                    .bridge
+                                    .respond_to?(
+                                        :nicDevice
+                                    )
+
+            same_switch = switch.spec.respond_to?(:mtu) && switch
+                          .spec
+                          .mtu == mtu &&
+                          switch
+                          .spec
+                          .respond_to?(
+                              :numPorts
+                          ) && switch.spec.numPorts == num_ports &&
+                          (!switch_has_pnics && pnics.empty? ||
+                           switch_has_pnics && switch
+                           .spec
+                           .bridge
+                           .nicDevice
+                           .uniq
+                           .sort == pnics.uniq.sort)
+            return if same_switch
+
+            # Let's create a new spec and update the switch
+            hostbridge = nil
+            nws = self['configManager.networkSystem']
+            unless pnics.empty?
+                hostbridge =
+                    RbVmomi::VIM::HostVirtualSwitchBondBridge(
+                        :nicDevice => pnics
+                    )
+            end
+            vswitchspec =
+                RbVmomi::VIM::HostVirtualSwitchSpec(
+                    :bridge => hostbridge,
+                    :mtu => mtu,
+                    :numPorts => num_ports
+                )
+            begin
+                nws
+                    .UpdateVirtualSwitch(
+                        :vswitchName => name,
+                        :spec => vswitchspec
+                    )
+            rescue StandardError => e
+                raise "The standard switch with name #{name} \
+                could not be updated. Reason: #{e.message}"
+            end
+
+            @net_rollback << {
+                :action => :update_sw,
+                :name => name,
+                :spec => orig_spec
+            }
+        end
- - # Prepare spec for DVS reconfiguration - configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new - configSpec.name = switch_name - configSpec.configVersion = dvs['config.configVersion'] - - # Check if host is already assigned to distributed switch - operation = "add" - ##operation = "edit" if !dvs['config.host'].select { |host| host.config.host._ref == self['_ref'] }.empty? - - # Add host members to the distributed virtual switch - host_member_spec = RbVmomi::VIM::DistributedVirtualSwitchHostMemberConfigSpec.new - host_member_spec.host = @item - host_member_spec.operation = operation - host_member_spec.backing = RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicBacking.new - host_member_spec.backing.pnicSpec = [] - - # If pnics are needed assign pnics for uplinks - if pnics - pnics = pnics.split(",") - # Get uplink portgroup from dvswitch - uplink_key = dvs['config.uplinkPortgroup'].select{ - |ul| ul.name == "#{switch_name}-uplink-pg"}.first.key rescue nil - - raise "Cannot find the uplink portgroup for #{switch_name}" if !uplink_key - - pnics.each {|pnic| - pnicSpec = RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicSpec.new - pnicSpec.pnicDevice = pnic - pnicSpec.uplinkPortgroupKey = uplink_key - host_member_spec.backing.pnicSpec << pnicSpec + @net_rollback << { + :action => :update_sw, + :name => name, + :spec => orig_spec } end - configSpec.host = [host_member_spec] + ######################################################################## + # Remove a standard vswitch from the host + ######################################################################## + def remove_vss(vswitch_name) + nws = self['configManager.networkSystem'] - # The DVS must be reconfigured - dvs_reconfigure_task = dvs.ReconfigureDvs_Task(:spec => configSpec) - dvs_reconfigure_task.wait_for_completion - if dvs_reconfigure_task.info.state != 'success' - raise "It wasn't possible to assign host #{self["name"]} as a member of #{switch_name}'" + begin + nws.RemoveVirtualSwitch(:vswitchName => vswitch_name) + rescue RbVmomi::VIM::ResourceInUse + STDERR.puts "The standard switch #{vswitch_name} \ + is in use so it cannot be deleted" + return + rescue RbVmomi::VIM::NotFound + STDERR.puts "The standard switch #{vswitch_name} \ + was not found in vCenter" + return + rescue StandardError => e + raise "There was a failure while deleting a vcenter \ + standard switch #{vswitch_name}. 
Reason: #{e.message}"
+ end
+
+ vswitch_name
 end

- return dvs
- end
+ ########################################################################
+ # Get physical nics that are available in a host
+ ########################################################################
+ def available_pnics
+ pnics_in_use = []
+ pnics_available = []

- ########################################################################
- # Create a standard port group
- ########################################################################
+ # Get pnics in use in standard switches
+ @item.config.network.vswitch.each do |vs|
+ vs.pnic.each do |pnic|
+ next unless pnic.instance_of?(String)

- def create_pg(pgname, vswitch, vlan=0)
- spec = RbVmomi::VIM.HostPortGroupSpec(
- :name => pgname,
- :vlanId => vlan,
- :vswitchName => vswitch,
- :policy => RbVmomi::VIM.HostNetworkPolicy
- )
+ pnic.slice!('key-vim.host.PhysicalNic-')
+ pnics_in_use << pnic
+ end
+ end

- nws = self['configManager.networkSystem']
+ # Get pnics in host
+ self['config.network'].pnic.each do |pnic|
+ next if pnics_in_use.include?(pnic.device)

- begin
- nws.AddPortGroup(:portgrp => spec)
- rescue Exception => e
- raise "A port group with name #{pgname} could not be created. Reason: #{e.message}"
+ pnics_available << pnic.device
+ end
+
+ pnics_available
 end

- @net_rollback << {:action => :delete_pg, :name => pgname}
+ ########################################################################
+ # Get networks inside a host
+ ########################################################################
+ def pg_inside_host
+ pg_inside = {}

- # wait until the network is ready and we have a reference
- networks = @item['network'].select{ |net| net.name == pgname }
- (0..PG_CREATE_TIMEOUT).each do
- break if !networks.empty?
- networks = @item['network'].select{ |net| net.name == pgname }
- sleep 1
+ # Get pnics in use in standard switches
+ @item.config.network.vswitch.each do |vs|
+ pg_inside[vs.name] = []
+ vs.portgroup.each do |pg|
+ pg.slice!('key-vim.host.PortGroup-')
+ pg_inside[vs.name] << pg
+ end
+ end
+
+ pg_inside
 end

- raise "Cannot get VCENTER_NET_REF for new port group" if networks.empty?
+ ########################################################################
+ # Check if proxy switch exists in host for distributed virtual switch
+ ########################################################################

- return networks.first._ref
- end
+ def proxy_switch_exists(switch_name)
+ nws = self['configManager.networkSystem']
+ proxy_switches = nws.networkInfo.proxySwitch
+ proxy_switches.select {|ps| ps.dvsName == switch_name }.first rescue nil
+ end

- ########################################################################
- # Check if standard port group exists in host
- ########################################################################
+ ########################################################################
+ # Assign a host to a distributed vcenter switch (proxy switch)
+ ########################################################################

- def pg_exists(pg_name)
- nws = self['configManager.networkSystem']
- portgroups = nws.networkInfo.portgroup
- return portgroups.select{|pg| pg.spec.name == pg_name }.first rescue nil
- end
+ def assign_proxy_switch(dvs, switch_name, pnics, _pnics_available)
+ dvs = dvs.item
+ # Return if host is already assigned
+ return dvs unless dvs['config.host']
+ .select do |host|
+ host.config.host._ref == self['_ref']
+ end.empty? 
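+ # The membership test above compares hosts by their vCenter managed
+ # object reference (MORef): a non-empty selection means this ESX host
+ # is already attached to the DVS, so the method returns early and no
+ # ReconfigureDvs_Task is submitted.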
- ######################################################################## - # Is the switch for the pg different? - ######################################################################## + # Prepare spec for DVS reconfiguration + config_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new + config_spec.name = switch_name + config_spec.configVersion = dvs['config.configVersion'] - def pg_changes_sw?(pg, switch_name) - return pg.spec.respond_to?(:vswitchName) && pg.spec.vswitchName != switch_name - end + # Check if host is already assigned to distributed switch + operation = 'add' + # #operation = "edit" if !dvs['config.host'].select + # { |host| host.config.host._ref == self['_ref'] }.empty? - ######################################################################## - # Update a standard port group - ######################################################################## + # Add host members to the distributed virtual switch + host_member_spec = + RbVmomi::VIM::DistributedVirtualSwitchHostMemberConfigSpec + .new + host_member_spec.host = @item + host_member_spec.operation = operation + host_member_spec.backing = + RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicBacking + .new + host_member_spec.backing.pnicSpec = [] - def update_pg(pg, switch_name, vlan_id) + # If pnics are needed assign pnics for uplinks + if pnics + pnics = pnics.split(',') + # Get uplink portgroup from dvswitch + uplink_key = dvs['config.uplinkPortgroup'].select do |ul| + ul.name == "#{switch_name}-uplink-pg" + end.first.key rescue nil - if pg.spec.respond_to?(:vlanId) && pg.spec.vlanId != vlan_id + unless uplink_key + raise "Cannot find the uplink portgroup for #{switch_name}" + end + + pnics.each do |pnic| + pnic_spec = + RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicSpec + .new + pnic_spec.pnicDevice = pnic + pnic_spec.uplinkPortgroupKey = uplink_key + host_member_spec.backing.pnicSpec << pnic_spec + end + end + + config_spec.host = [host_member_spec] + + # The DVS must be reconfigured + dvs_reconfigure_task = dvs.ReconfigureDvs_Task(:spec => config_spec) + dvs_reconfigure_task.wait_for_completion + if dvs_reconfigure_task.info.state != 'success' + raise "It wasn't possible to assign host \ + #{self['name']} as a member of #{switch_name}'" + end + + dvs + end + + ######################################################################## + # Create a standard port group + ######################################################################## + + def create_pg(pgname, vswitch, vlan = 0) + spec = RbVmomi::VIM.HostPortGroupSpec( + :name => pgname, + :vlanId => vlan, + :vswitchName => vswitch, + :policy => RbVmomi::VIM.HostNetworkPolicy + ) + + nws = self['configManager.networkSystem'] + + begin + nws.AddPortGroup(:portgrp => spec) + rescue StandardError => e + raise "A port group with name #{pgname} \ + could not be created. Reason: #{e.message}" + end + + @net_rollback << { :action => :delete_pg, :name => pgname } + + # wait until the network is ready and we have a reference + networks = @item['network'].select {|net| net.name == pgname } + (0..PG_CREATE_TIMEOUT).each do + break unless networks.empty? + + networks = @item['network'].select {|net| net.name == pgname } + sleep 1 + end + + if networks.empty? 
+ raise 'Cannot get VCENTER_NET_REF for new port group'
+ end
+
+ networks.first._ref
+ end
+
+ ########################################################################
+ # Check if standard port group exists in host
+ ########################################################################
+
+ def pg_exists(pg_name)
+ nws = self['configManager.networkSystem']
+ portgroups = nws.networkInfo.portgroup
+ portgroups.select {|pg| pg.spec.name == pg_name }.first rescue nil
+ end
+
+ ########################################################################
+ # Is the switch for the pg different?
+ ########################################################################
+
+ def pg_changes_sw?(pg, switch_name)
+ pg.spec.respond_to?(:vswitchName) &&
+ pg.spec.vswitchName != switch_name
+ end
+
+ ########################################################################
+ # Update a standard port group
+ ########################################################################
+
+ def update_pg(pg, switch_name, vlan_id)
+ unless pg.spec.respond_to?(:vlanId) && pg.spec.vlanId != vlan_id
+ return
+ end
 # Backup original spec
 orig_spec = pg.spec
@@ -1134,88 +1404,112 @@ class ESXHost
 begin
 nws.UpdatePortGroup(:pgName => pg_name, :portgrp => spec)
- rescue Exception => e
- raise "A port group with name #{pg_name} could not be updated. Reason: #{e.message}"
+ rescue StandardError => e
+ raise "A port group with name #{pg_name} \
+ could not be updated. Reason: #{e.message}"
 end

 # Set rollback operation
- @net_rollback << {:action => :update_pg, :name => pg_name, :spec => orig_spec}
- end
- end
-
- ########################################################################
- # Remove a standard port group from the host
- ########################################################################
-
- def remove_pg(pgname)
- nws = self['configManager.networkSystem']
-
- swname = nil
- begin
- portgroups = nws.networkConfig.portgroup
- portgroups.each {|pg|
- if pg.spec.name == pgname
- swname = pg.spec.vswitchName
- break
- end
+ @net_rollback << {
+ :action => :update_pg,
+ :name => pg_name,
+ :spec => orig_spec
 }
- nws.RemovePortGroup(:pgName => pgname)
- rescue RbVmomi::VIM::ResourceInUse
- STDERR.puts "The standard portgroup #{pgname} is in use so it cannot be deleted"
- return nil
- rescue RbVmomi::VIM::NotFound
- STDERR.puts "The standard portgroup #{pgname} was not found in vCenter"
- return nil
- rescue Exception => e
- raise "There was a failure while deleting a standard portgroup #{pgname} in vCenter. Reason: #{e.message}"
 end

- return swname
- end
+ ########################################################################
+ # Remove a standard port group from the host
+ ########################################################################

- def network_rollback
- nws = self['configManager.networkSystem']
+ def remove_pg(pgname)
+ nws = self['configManager.networkSystem']

- @net_rollback.reverse_each do |nr|
+ swname = nil
+ begin
+ portgroups = nws.networkConfig.portgroup
+ portgroups.each do |pg|
+ if pg.spec.name == pgname
+ swname = pg.spec.vswitchName
+ break
+ end
+ end
+ nws.RemovePortGroup(:pgName => pgname)
+ rescue RbVmomi::VIM::ResourceInUse
+ STDERR.puts "The standard portgroup \
+ #{pgname} is in use so it cannot be deleted"
+ return
+ rescue RbVmomi::VIM::NotFound
+ STDERR.puts "The standard portgroup \
+ #{pgname} was not found in vCenter"
+ return
+ rescue StandardError => e
+ raise "There was a failure while \
+ deleting a standard portgroup #{pgname} \
+ in vCenter. 
Reason: #{e.message}" + end - case nr[:action] + swname + end + + def network_rollback + nws = self['configManager.networkSystem'] + + @net_rollback.reverse_each do |nr| + case nr[:action] when :update_pg begin - nws.UpdatePortGroup(:pgName => nr[:name], :portgrp => nr[:spec]) - rescue Exception => e - raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}" + nws + .UpdatePortGroup( + :pgName => nr[:name], + :portgrp => nr[:spec] + ) + rescue StandardError => e + raise "A rollback operation for standard \ + port group #{nr[:name]} could not \ + be performed. Reason: #{e.message}" end when :update_sw begin - nws.UpdateVirtualSwitch(:vswitchName => nr[:name], :spec => nr[:spec]) - rescue Exception => e - raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}" + nws + .UpdateVirtualSwitch( + :vswitchName => nr[:name], + :spec => nr[:spec] + ) + rescue StandardError => e + raise "A rollback operation for standard \ + switch #{nr[:name]} could not \ + be performed. Reason: #{e.message}" end when :delete_sw begin nws.RemoveVirtualSwitch(:vswitchName=> nr[:name]) rescue RbVmomi::VIM::ResourceInUse - return #Ignore if switch in use + next # Ignore if switch in use rescue RbVmomi::VIM::NotFound - return #Ignore if switch not found - rescue Exception => e - raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}" + next # Ignore if switch not found + rescue StandardError => e + raise "A rollback operation for standard \ + switch #{nr[:name]} could not \ + be performed. Reason: #{e.message}" end when :delete_pg begin nws.RemovePortGroup(:pgName => nr[:name]) rescue RbVmomi::VIM::ResourceInUse - return #Ignore if pg in use + next # Ignore if pg in use rescue RbVmomi::VIM::NotFound - return #Ignore if pg not found - rescue Exception => e - raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}" + next # Ignore if pg not found + rescue StandardError => e + raise "A rollback operation for \ + standard port group #{nr[:name]} could \ + not be performed. Reason: #{e.message}" end + end end end + end + # class ESXHost - -end # class ESXHost - -end # module VCenterDriver +end +# module VCenterDriver diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb b/src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb index cf0d3e5bea..cef2bb8135 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb @@ -14,10 +14,13 @@ # limitations under the License. # #--------------------------------------------------------------------------- # - +############################################################################## +# Module Memoize +############################################################################## module Memoize + def [](property) - @memoize = {} if !defined?(@memoize) + @memoize = {} unless defined?(@memoize) if (value = @memoize[property]) return value @@ -25,13 +28,13 @@ module Memoize current_item = @item - property_path = "" + property_path = '' - property.split(".").each do |elem| + property.split('.').each do |elem| if property_path.empty? property_path << elem else - property_path << "." << elem + property_path << '.' 
<< elem end if (val = @memoize[property_path]) @@ -39,7 +42,7 @@ module Memoize else begin current_item = current_item.send(elem) - rescue Exception => e + rescue StandardError current_item = nil end end @@ -47,14 +50,13 @@ module Memoize break if current_item.nil? @memoize[property_path] = current_item - end @memoize[property] = current_item end def clear(property) - @memoize = {} if !defined?(@memoize) + @memoize = {} unless defined?(@memoize) @memoize.clear[property] if @memoize[property] end @@ -63,8 +65,10 @@ module Memoize end def []=(property, value) - @memoize = {} if !defined?(@memoize) + @memoize = {} unless defined?(@memoize) @memoize[property] = value end -end # module Memoize + +end +# module Memoize diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/network.rb b/src/vmm_mad/remotes/lib/vcenter_driver/network.rb index 30a43b914b..f9816b8674 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/network.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/network.rb @@ -16,477 +16,622 @@ require 'digest' +############################################################################## +# Module VCenterDriver +############################################################################## module VCenterDriver -class NetworkFolder - attr_accessor :item, :items + ########################################################################## + # Class NetworkFolder + ########################################################################## + class NetworkFolder + + attr_accessor :item, :items + + def initialize(item) + @item = item + @items = {} + end + + ###################################################################### + # Builds a hash with Network-Ref / Network to be used as a cache + # @return [Hash] in the form + # { ds_ref [Symbol] => Network object } + ###################################################################### + def fetch! + VIClient.get_entities(@item, 'Network').each do |item| + item_name = item._ref + @items[item_name.to_sym] = PortGroup.new(item) + end + + VIClient + .get_entities( + @item, + 'DistributedVirtualPortgroup' + ).each do |item| + item_name = item._ref + @items[item_name.to_sym] = DistributedPortGroup.new(item) + end + + VIClient + .get_entities( + @item, + 'VmwareDistributedVirtualSwitch' + ).each do |item| + item_name = item._ref + @items[item_name.to_sym] = DistributedVirtualSwitch.new(item) + end + + VIClient.get_entities(@item, 'OpaqueNetwork').each do |item| + item_name = item._ref + @items[item_name.to_sym] = OpaqueNetwork.new(item) + end + end + + ######################################################################## + # Returns a Network. Uses the cache if available. + # @param ref [Symbol] the vcenter ref + # @return Network + ######################################################################## + def get(ref) + if !@items[ref.to_sym] + rbvmomi_net = RbVmomi::VIM::Network.new(@item._connection, ref) + @items[ref.to_sym] = Network.new(rbvmomi_net) + end + + @items[ref.to_sym] + end - def initialize(item) - @item = item - @items = {} end + # class NetworkFolder - ######################################################################## - # Builds a hash with Network-Ref / Network to be used as a cache - # @return [Hash] in the form - # { ds_ref [Symbol] => Network object } - ######################################################################## - def fetch! 
- VIClient.get_entities(@item, "Network").each do |item| - item_name = item._ref - @items[item_name.to_sym] = PortGroup.new(item) + ########################################################################## + # Class Network + ########################################################################## + class Network + + attr_accessor :item + + include Memoize + + NETWORK_TYPE_PG = 'Port Group' + NETWORK_TYPE_DPG = 'Distributed Port Group' + NETWORK_TYPE_NSXV = 'NSX-V' # "Virtual Wire" + NETWORK_TYPE_NSXT = 'Opaque Network' + NETWORK_TYPE_UNKNOWN = 'Unknown Network' + + def initialize(item, vi_client = nil) + begin + check_item(item, RbVmomi::VIM::Network) + rescue StandardError + check_item(item, RbVmomi::VIM::DistributedVirtualPortgroup) + end + + @vi_client = vi_client + @item = item end - VIClient.get_entities(@item, "DistributedVirtualPortgroup").each do |item| - item_name = item._ref - @items[item_name.to_sym] = DistributedPortGroup.new(item) + # Checks if a RbVmomi::VIM::VirtualDevice is a network interface + def self.nic?(device) + !device + .class + .ancestors + .index( + RbVmomi::VIM::VirtualEthernetCard + ).nil? end - VIClient.get_entities(@item, "VmwareDistributedVirtualSwitch").each do |item| - item_name = item._ref - @items[item_name.to_sym] = DistributedVirtualSwitch.new(item) + def self.vlanid(vid) + case vid + when -1 + 'error' + when 0 + 'disabled' + when 4095 + 'VGT' + else + vid.to_s + end end - VIClient.get_entities(@item, "OpaqueNetwork").each do |item| - item_name = item._ref - @items[item_name.to_sym] = OpaqueNetwork.new(item) + def self.retrieve_vlanid(network) + begin + name = network.name + id = network + .host + .first + .configManager + .networkSystem + .networkConfig + .portgroup + .select do |p| + p.spec.name == name + end.first.spec.vlanId + rescue StandardError + id = -1 + end + id end + + def self.generate_name(name, opts = {}) + vcenter_instance_name = opts[:vcenter_name] || nil + dc_name = opts[:dc_name] || nil + + hash_name = "#{name} - [#{vcenter_instance_name} - #{dc_name}]" + sha256 = Digest::SHA256.new + network_hash = sha256.hexdigest(hash_name)[0..11] + "#{name} - [#{vcenter_instance_name} - #{dc_name}]_#{network_hash}" + end + + def self.to_one_template(opts = {}) + one_tmp = {} + network_name = opts[:network_name] + network_ref = opts[:network_ref] + network_type = opts[:network_type] + sw_name = opts[:sw_name] + + vcenter_uuid = opts[:vcenter_uuid] + cluster_id = opts[:cluster_id] + + unmanaged = opts[:unmanaged] || nil + template_ref = opts[:template_ref] || nil + dc_ref = opts[:dc_ref] || nil + template_id = opts[:template_id] || nil + + nsx_id = opts[:nsx_id] || nil + nsx_vni = opts[:nsx_vni] || nil + nsx_tz_id = opts[:nsx_tz_id] || nil + + vlanid = opts[:vlanid] || nil + + bridge_name = network_name + network_name = network_name.gsub('/', '_') + + network_import_name = + VCenterDriver::VIHelper + .one_name( + OpenNebula::VirtualNetworkPool, + network_name, + network_ref+vcenter_uuid + ) + + one_tmp[:name] = bridge_name + one_tmp[:import_name] = network_import_name + one_tmp[:bridge] = bridge_name + one_tmp[:type] = network_type + one_tmp[:one_cluster_id] = cluster_id + one_tmp[:ref] = network_ref + + opts = { + :network_import_name => network_import_name, + :bridge_name => bridge_name, + :network_ref => network_ref, + :network_type => network_type, + :vcenter_uuid => vcenter_uuid, + :unmanaged => unmanaged, + :template_ref => template_ref, + :dc_ref => dc_ref, + :template_id => template_id, + :sw_name => sw_name, + :nsx_id => nsx_id, + 
:nsx_vni => nsx_vni, + :nsx_tz_id => nsx_tz_id, + :vlanid => vlanid + } + + one_tmp[:one] = to_one(opts) + one_tmp + end + + def self.to_one(opts) + template = "NAME=\"#{opts[:network_import_name]}\"\n"\ + "BRIDGE=\"#{opts[:bridge_name]}\"\n"\ + "VN_MAD=\"vcenter\"\n"\ + "VCENTER_PORTGROUP_TYPE=\"#{opts[:network_type]}\"\n"\ + "VCENTER_NET_REF=\"#{opts[:network_ref]}\"\n"\ + "VCENTER_INSTANCE_ID=\"#{opts[:vcenter_uuid]}\"\n"\ + "VCENTER_IMPORTED=\"YES\"\n" + + if opts[:unmanaged] == 'wild' + template += "VCENTER_FROM_WILD=\"#{opts[:template_id]}\"\n" + end + + if opts[:template_ref] + template += + "VCENTER_TEMPLATE_REF=\"#{opts[:template_ref]}\"\n" + end + + if opts[:sw_name] + template += + "VCENTER_SWITCH_NAME=\"#{opts[:sw_name]}\"\n" + end + + if opts[:nsx_id] + template += + "NSX_ID=\"#{opts[:nsx_id]}\"\n" + end + if opts[:nsx_vni] + template += + "NSX_VNI=\"#{opts[:nsx_vni]}\"\n" + end + if opts[:nsx_tz_id] + template += + "NSX_TZ_ID=\"#{opts[:nsx_tz_id]}\"\n" + end + + if opts[:vlanid] + template += + "VCENTER_VLAN_ID=\"#{opts[:vlanid]}\"\n" + end + + template + end + + REQUIRED_ATTRS = [:refs, :one_ids, :one_object] + def self.create_one_network(net_config) + # mandatory parameters: + REQUIRED_ATTRS.each do |attr| + if net_config[attr].nil? + raise "#{attr} required for importing nics operation!" + end + end + + one_vn = VCenterDriver::VIHelper + .new_one_item( + OpenNebula::VirtualNetwork + ) + + done = [] + (0..net_config[:refs].size-1).each do |i| + cl_id = net_config[:one_ids][i] + next if cl_id == -1 || done.include?(cl_id) + + if done.empty? + rc = one_vn.allocate(net_config[:one_object], cl_id.to_i) + VCenterDriver::VIHelper.check_error(rc, 'create network') + one_vn.info + else + one_cluster = VCenterDriver::VIHelper + .one_item( + OpenNebula::Cluster, + cl_id, + false + ) + rc = one_cluster.addvnet(one_vn['ID'].to_i) + VCenterDriver::VIHelper + .check_error(rc, 'addvnet to cluster') + end + done << cl_id + end + + one_vn + end + + def self.get_network_type(network) + case network + when RbVmomi::VIM::DistributedVirtualPortgroup + if network['name'] + .match(/^vxw-dvs-(.*)-virtualwire-(.*)-sid-(.*)/) + VCenterDriver::Network::NETWORK_TYPE_NSXV + else + VCenterDriver::Network::NETWORK_TYPE_DPG + end + when RbVmomi::VIM::OpaqueNetwork + VCenterDriver::Network::NETWORK_TYPE_NSXT + when RbVmomi::VIM::Network + VCenterDriver::Network::NETWORK_TYPE_PG + else + VCenterDriver::Network::NETWORK_TYPE_UNKNOWN + end + end + + # Get vSwitch of Standard PortGroup + # If there is differents vSwitches returns the first. + def self.virtual_switch(vc_pg) + vswitch = [] + vc_hosts = vc_pg.host + vc_hosts.each do |vc_host| + host_pgs = vc_host + .configManager + .networkSystem + .networkInfo + .portgroup + host_pgs.each do |pg| + if vc_pg.name == pg.spec.name + vswitch << pg.spec.vswitchName + end + end + end + vswitch.uniq! 
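+ # uniq! is used for its side effect only; it returns nil when no
+ # duplicates were removed, so its return value is deliberately not
+ # chained. If more than one distinct vSwitch survives deduplication,
+ # the port group is defined on several vSwitches and the sentinel
+ # string below is appended before joining.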
+ vswitch << 'Invalid configuration' if vswitch.length > 1 + vswitch.join(' / ') + end + + def self.remove_net_ref(network_id) + one_vnet = VCenterDriver::VIHelper + .one_item( + OpenNebula::VirtualNetwork, + network_id + ) + one_vnet.info + one_vnet.delete_element('TEMPLATE/VCENTER_NET_REF') + one_vnet.delete_element('TEMPLATE/VCENTER_INSTANCE_ID') + tmp_str = one_vnet.template_str + one_vnet.update(tmp_str) + one_vnet.info + end + + # This is never cached + def self.new_from_ref(ref, vi_client) + new(RbVmomi::VIM::Network.new(vi_client.vim, ref), vi_client) + end + end + # class Network - + ########################################################################## + # Class PortGroup + ########################################################################## + class PortGroup < Network - ######################################################################## - # Returns a Network. Uses the cache if available. - # @param ref [Symbol] the vcenter ref - # @return Network - ######################################################################## - def get(ref) - if !@items[ref.to_sym] - rbvmomi_net = RbVmomi::VIM::Network.new(@item._connection, ref) - @items[ref.to_sym] = Network.new(rbvmomi_net) - end - - @items[ref.to_sym] - end -end # class NetworkFolder - -class Network - attr_accessor :item - - include Memoize - - NETWORK_TYPE_PG = "Port Group" - NETWORK_TYPE_DPG = "Distributed Port Group" - NETWORK_TYPE_NSXV = "NSX-V" #"Virtual Wire" - NETWORK_TYPE_NSXT = "Opaque Network" - NETWORK_TYPE_UNKNOWN = "Unknown Network" - - def initialize(item, vi_client=nil) - begin + def initialize(item, vi_client = nil) check_item(item, RbVmomi::VIM::Network) - rescue - check_item(item, RbVmomi::VIM::DistributedVirtualPortgroup) + + @vi_client = vi_client + @item = item end - @vi_client = vi_client - @item = item - end - - # Checks if a RbVmomi::VIM::VirtualDevice is a network interface - def self.is_nic?(device) - !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil? 
- end - - def self.vlanid(vid) - case vid - when -1 - "error" - when 0 - "disabled" - when 4095 - "VGT" - else - "#{vid}" - end - end - - def self.retrieve_vlanid(network) - begin - name = network.name - id = network.host.first.configManager.networkSystem.networkConfig.portgroup.select{|p| - p.spec.name == name - }.first.spec.vlanId - rescue - id = -1 - end - return id - end - - - def self.generate_name(name, opts = {}) - vcenter_instance_name = opts[:vcenter_name] || nil - dc_name = opts[:dc_name] || nil - - hash_name = "#{name} - [#{vcenter_instance_name} - #{dc_name}]" - sha256 = Digest::SHA256.new - network_hash = sha256.hexdigest(hash_name)[0..11] - network_import_name = "#{name} - [#{vcenter_instance_name} - #{dc_name}]_#{network_hash}" - end - - def self.to_one_template(opts = {}) - - one_tmp = {} - network_name = opts[:network_name] - network_ref = opts[:network_ref] - network_type = opts[:network_type] - sw_name = opts[:sw_name] - - vcenter_uuid = opts[:vcenter_uuid] - vcenter_instance_name = opts[:vcenter_instance_name] - dc_name = opts[:dc_name] - cluster_id = opts[:cluster_id] - - unmanaged = opts[:unmanaged] || nil - template_ref = opts[:template_ref] || nil - dc_ref = opts[:dc_ref] || nil - template_id = opts[:template_id] || nil - - nsx_id = opts[:nsx_id] || nil - nsx_vni = opts[:nsx_vni] || nil - nsx_tz_id = opts[:nsx_tz_id] || nil - - vlanid = opts[:vlanid] || nil - - bridge_name = network_name - network_name = network_name.gsub("/","_") - - - network_import_name = VCenterDriver::VIHelper.one_name(OpenNebula::VirtualNetworkPool, network_name, network_ref+vcenter_uuid) - - one_tmp[:name] = bridge_name - one_tmp[:import_name] = network_import_name - one_tmp[:bridge] = bridge_name - one_tmp[:type] = network_type - one_tmp[:one_cluster_id] = cluster_id - one_tmp[:ref] = network_ref - - opts = { - :network_import_name => network_import_name, - :bridge_name => bridge_name, - :network_ref => network_ref, - :network_type => network_type, - :vcenter_uuid => vcenter_uuid, - :unmanaged => unmanaged, - :template_ref => template_ref, - :dc_ref => dc_ref, - :template_id => template_id, - :sw_name => sw_name, - :nsx_id => nsx_id, - :nsx_vni => nsx_vni, - :nsx_tz_id => nsx_tz_id, - :vlanid => vlanid, - } - - one_tmp[:one] = to_one(opts) - return one_tmp - end - - def self.to_one(opts) - - template = "NAME=\"#{opts[:network_import_name]}\"\n"\ - "BRIDGE=\"#{opts[:bridge_name]}\"\n"\ - "VN_MAD=\"vcenter\"\n"\ - "VCENTER_PORTGROUP_TYPE=\"#{opts[:network_type]}\"\n"\ - "VCENTER_NET_REF=\"#{opts[:network_ref]}\"\n"\ - "VCENTER_INSTANCE_ID=\"#{opts[:vcenter_uuid]}\"\n"\ - "VCENTER_IMPORTED=\"YES\"\n" - - if opts[:unmanaged] == "wild" - template += "VCENTER_FROM_WILD=\"#{opts[:template_id]}\"\n" - end - - template += "VCENTER_TEMPLATE_REF=\"#{opts[:template_ref]}\"\n" if opts[:template_ref] - - template += "VCENTER_SWITCH_NAME=\"#{opts[:sw_name]}\"\n" if opts[:sw_name] - - template += "NSX_ID=\"#{opts[:nsx_id]}\"\n" if opts[:nsx_id] - template += "NSX_VNI=\"#{opts[:nsx_vni]}\"\n" if opts[:nsx_vni] - template += "NSX_TZ_ID=\"#{opts[:nsx_tz_id]}\"\n" if opts[:nsx_tz_id] - - template += "VCENTER_VLAN_ID=\"#{opts[:vlanid]}\"\n" if opts[:vlanid] - - return template - end - - REQUIRED_ATTRS = [:refs, :one_ids, :one_object] - def self.create_one_network(net_config) - - # mandatory parameters: - REQUIRED_ATTRS.each do |attr| - raise "#{attr} required for importing nics operation!" if net_config[attr].nil? 
- end - - one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork) - - done = [] - for i in 0..net_config[:refs].size-1 - cl_id = net_config[:one_ids][i] - next if cl_id == -1 || done.include?(cl_id) - - if done.empty? - rc = one_vn.allocate(net_config[:one_object],cl_id.to_i) - VCenterDriver::VIHelper.check_error(rc, "create network") - one_vn.info - else - one_cluster = VCenterDriver::VIHelper.one_item(OpenNebula::Cluster, cl_id, false) - rc = one_cluster.addvnet(one_vn['ID'].to_i) - VCenterDriver::VIHelper.check_error(rc,"addvnet to cluster") + def clusters + net_clusters = {} + host_members =@item['host'] + host_members.each do |h| + if !net_clusters.key?(h.parent._ref.to_s) + net_clusters[h.parent._ref.to_s] = h.parent.name.to_s + end end - done << cl_id + net_clusters end - one_vn - end - - def self.get_network_type(network) - case network - when RbVmomi::VIM::DistributedVirtualPortgroup - if network['name'] - .match(/^vxw-dvs-(.*)-virtualwire-(.*)-sid-(.*)/) - VCenterDriver::Network::NETWORK_TYPE_NSXV - else - VCenterDriver::Network::NETWORK_TYPE_DPG - end - when RbVmomi::VIM::OpaqueNetwork - VCenterDriver::Network::NETWORK_TYPE_NSXT - when RbVmomi::VIM::Network + def network_type VCenterDriver::Network::NETWORK_TYPE_PG - else - VCenterDriver::Network::NETWORK_TYPE_UNKNOWN end + end + # class PortGroup - def self.remove_net_ref(network_id) - one_vnet = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualNetwork, network_id) - one_vnet.info - one_vnet.delete_element("TEMPLATE/VCENTER_NET_REF") - one_vnet.delete_element("TEMPLATE/VCENTER_INSTANCE_ID") - tmp_str = one_vnet.template_str - one_vnet.update(tmp_str) - one_vnet.info - end + ########################################################################## + # Class DistributedPortGroup + ########################################################################## + class DistributedPortGroup < Network - # This is never cached - def self.new_from_ref(ref, vi_client) - self.new(RbVmomi::VIM::Network.new(vi_client.vim, ref), vi_client) - end + def initialize(item, vi_client = nil) + check_item(item, RbVmomi::VIM::DistributedVirtualPortgroup) -end # class Network + @vi_client = vi_client + @item = item + end -class PortGroup < Network - - def initialize(item, vi_client=nil) - - check_item(item, RbVmomi::VIM::Network) - - @vi_client = vi_client - @item = item - end - - def clusters - net_clusters = {} - host_members =@item['host'] - host_members.each do |h| - if !net_clusters.key?(h.parent._ref.to_s) - net_clusters[h.parent._ref.to_s] = h.parent.name.to_s + def clusters + net_clusters = {} + # should have to work + # host_members =@item['host'] + host_members = + self['config.distributedVirtualSwitch.summary.hostMember'] + host_members.each do |h| + if !net_clusters.key?(h.parent._ref.to_s) + net_clusters[h.parent._ref.to_s] = h.parent.name.to_s + end end + net_clusters end - net_clusters + + def network_type + VCenterDriver::Network::NETWORK_TYPE_DPG + end + end + # class DistributedPortGroup - def network_type - VCenterDriver::Network::NETWORK_TYPE_PG - end -end # class PortGroup + ########################################################################## + # Class OpaqueNetwork + ########################################################################## + class OpaqueNetwork < Network -class DistributedPortGroup < Network + def initialize(item, vi_client = nil) + check_item(item, RbVmomi::VIM::OpaqueNetwork) - def initialize(item, vi_client=nil) + @vi_client = vi_client + @item = item + end - check_item(item, 
RbVmomi::VIM::DistributedVirtualPortgroup) - - @vi_client = vi_client - @item = item - end - - def clusters - net_clusters = {} - # should have to work - # host_members =@item['host'] - host_members = self['config.distributedVirtualSwitch.summary.hostMember'] - host_members.each do |h| - if !net_clusters.key?(h.parent._ref.to_s) - net_clusters[h.parent._ref.to_s] = h.parent.name.to_s + def clusters + net_clusters = {} + host_members =@item['host'] + host_members.each do |h| + if !net_clusters.key?(h.parent._ref.to_s) + net_clusters[h.parent._ref.to_s] = h.parent.name.to_s + end end + net_clusters end - net_clusters + + def network_type + VCenterDriver::Network::NETWORK_TYPE_NSXT + end + end + # class OpaqueNetwork + + ########################################################################## + # Class DistributedVirtualSwitch + ########################################################################## + class DistributedVirtualSwitch < Network + + def initialize(item, vi_client = nil) + check_item(item, RbVmomi::VIM::VmwareDistributedVirtualSwitch) + + @vi_client = vi_client + @item = item + end - def network_type - VCenterDriver::Network::NETWORK_TYPE_DPG end -end # class DistributedPortGroup + # class DistributedVirtualSwitch -class OpaqueNetwork < Network + ########################################################################## + # Class NetImporter + ########################################################################## + class NetImporter < VCenterDriver::VcImporter - def initialize(item, vi_client=nil) + def initialize(one_client, vi_client) + super(one_client, vi_client) + @one_class = OpenNebula::VirtualNetwork + @defaults = { :size => '255', :type => 'ether' } + end - check_item(item, RbVmomi::VIM::OpaqueNetwork) + def get_list(args = {}) + dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client) - @vi_client = vi_client - @item = item - end - - def clusters - net_clusters = {} - host_members =@item['host'] - host_members.each do |h| - if !net_clusters.key?(h.parent._ref.to_s) - net_clusters[h.parent._ref.to_s] = h.parent.name.to_s + # OpenNebula's VirtualNetworkPool + npool = VCenterDriver::VIHelper + .one_pool( + OpenNebula::VirtualNetworkPool, + false + ) + if npool.respond_to?(:message) + raise "Could not get \ + OpenNebula VirtualNetworkPool: #{npool.message}" end - end - net_clusters - end - def network_type - VCenterDriver::Network::NETWORK_TYPE_NSXT - end -end # class OpaqueNetwork - -class DistributedVirtualSwitch < Network - - def initialize(item, vi_client=nil) - - check_item(item, RbVmomi::VIM::VmwareDistributedVirtualSwitch) - - @vi_client = vi_client - @item = item - end -end # class DistributedVirtualSwitch - -class NetImporter < VCenterDriver::VcImporter - - def initialize(one_client, vi_client) - super(one_client, vi_client) - @one_class = OpenNebula::VirtualNetwork - @defaults = { size: "255", type: "ether" } - end - - def get_list(args = {}) - dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client) - - # OpenNebula's VirtualNetworkPool - npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false) - if npool.respond_to?(:message) - raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}" - end - - # Get OpenNebula's host pool - hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false) - if hpool.respond_to?(:message) - raise "Could not get OpenNebula HostPool: #{hpool.message}" - end - - rs = dc_folder.get_unimported_networks(npool, @vi_client.vc_name,hpool, args) - @list = rs - end - - def 
add_cluster(cid, eid) - one_cluster = @info[:clusters][cid] - raise "no cluster defined" unless one_cluster - - rc = one_cluster.addvnet(eid) - end - - def remove_default(id) - cid = 0 - @info[:clusters][cid] ||= VCenterDriver::VIHelper.one_item(OpenNebula::Cluster, cid.to_s, false) - @info[:clusters][cid].delvnet(id.to_i) - end - - def build_ar(opts) - str = "\nAR=[TYPE=\"" - type = opts[:type].downcase - - case type - when "4", "ip4", "ip" - str << "IP4\"" - opts[:ip] = "192.168.1.1" if opts[:ip].empty? - str << ",IP=\"#{opts[:ip]}\"" - when 'ip6' - str << "IP6\"" - str << ",GLOBAL_PREFIX=\"#{opts[:global_prefix]}\"" if opts[:global_prefix] - str << ",ULA_PREFIX=\"#{opts[:ula_prefix]}\"" if opts[:ula_prefix] - when 'ether', 'e' - str << "ETHER\"" - when 'ip6_static' - str << "IP6_STATIC\"" - str << ",IP6=\"#{opts[:ip6]}\"" if opts[:ip6] - str << ",PREFIX_LENGTH=\"#{opts[:prefix_length]}\"" if opts[:prefix_length] - end - - str << ",MAC=\"#{opts[:mac]}\"" if opts[:mac] - str << ",SIZE = \"#{opts[:size]}\"]" - - return str - end - - def import(selected) - opts = @info[selected[:ref]][:opts] - - net = VCenterDriver::Network.new_from_ref(selected[:ref], @vi_client) - vid = VCenterDriver::Network.retrieve_vlanid(net.item) if net - - # If type is NSX we need to update values - if selected[:type] == VCenterDriver::Network::NETWORK_TYPE_NSXV - host_id = @vi_client.instance_variable_get '@host_id' - nsx_client = NSXDriver::NSXClient.new_from_id(host_id) - nsx_net = NSXDriver::VirtualWire - .new_from_name(nsx_client, selected[:name]) - selected[:one] << "NSX_ID=\"#{nsx_net.ls_id}\"\n" - selected[:one] << "NSX_VNI=\"#{nsx_net.ls_vni}\"\n" - selected[:one] << "NSX_TZ_ID=\"#{nsx_net.tz_id}\"\n" - end - - if selected[:type] == VCenterDriver::Network::NETWORK_TYPE_NSXT - host_id = @vi_client.instance_variable_get '@host_id' - nsx_client = NSXDriver::NSXClient.new_from_id(host_id) - nsx_net = NSXDriver::OpaqueNetwork - .new_from_name(nsx_client, selected[:name]) - selected[:one] << "NSX_ID=\"#{nsx_net.ls_id}\"\n" - selected[:one] << "NSX_VNI=\"#{nsx_net.ls_vni}\"\n" - selected[:one] << "NSX_TZ_ID=\"#{nsx_net.tz_id}\"\n" - end - - if vid - vlanid = VCenterDriver::Network.vlanid(vid) - - # we have vlan id - if /\A\d+\z/.match(vlanid) - selected[:one] << "VCENTER_VLAN_ID=\"#{vlanid}\"\n" + # Get OpenNebula's host pool + hpool = VCenterDriver::VIHelper + .one_pool( + OpenNebula::HostPool, + false + ) + if hpool.respond_to?(:message) + raise "Could not get OpenNebula HostPool: #{hpool.message}" end + + rs = dc_folder + .get_unimported_networks( + npool, + @vi_client.vc_name, + hpool, + args + ) + @list = rs end - selected[:one] << build_ar(opts) - selected[:clusters][:one_ids] = opts["selected_clusters"].each.map(&:to_i) if opts["selected_clusters"] + def add_cluster(cid, eid) + one_cluster = @info[:clusters][cid] + raise 'no cluster defined' unless one_cluster - res = {id: [], name: selected[:name]} - create(selected[:one]) do |one_object, id| - res[:id] << id - add_clusters(id, selected[:clusters][:one_ids]) + one_cluster.addvnet(eid) + end + + def remove_default(id) + cid = 0 + @info[:clusters][cid] ||= + VCenterDriver::VIHelper + .one_item( + OpenNebula::Cluster, + cid.to_s, + false + ) + @info[:clusters][cid].delvnet(id.to_i) + end + + def build_ar(opts) + str = "\nAR=[TYPE=\"" + type = opts[:type].downcase + + case type + when '4', 'ip4', 'ip' + str << 'IP4"' + opts[:ip] = '192.168.1.1' if opts[:ip].empty? 
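+ # 192.168.1.1 is only a fallback so the generated AR is
+ # syntactically complete when no address was supplied.
+ # Illustrative result for a hypothetical opts hash, not
+ # taken from the driver:
+ #   build_ar(:type => 'ip4', :ip => '', :size => '255')
+ #   # => "\nAR=[TYPE=\"IP4\",IP=\"192.168.1.1\",SIZE = \"255\"]"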
+ str << ",IP=\"#{opts[:ip]}\"" + when 'ip6' + str << 'IP6"' + if opts[:global_prefix] + str << ",GLOBAL_PREFIX=\"#{opts[:global_prefix]}\"" + end + if opts[:ula_prefix] + str << ",ULA_PREFIX=\"#{opts[:ula_prefix]}\"" + end + when 'ether', 'e' + str << 'ETHER"' + when 'ip6_static' + str << 'IP6_STATIC"' + str << ",IP6=\"#{opts[:ip6]}\"" if opts[:ip6] + if opts[:prefix_length] + str << ",PREFIX_LENGTH=\"#{opts[:prefix_length]}\"" + end + end + + str << ",MAC=\"#{opts[:mac]}\"" if opts[:mac] + str << ",SIZE = \"#{opts[:size]}\"]" + + str + end + + def import(selected) + opts = @info[selected[:ref]][:opts] + + net = VCenterDriver::Network + .new_from_ref(selected[:ref], @vi_client) + if net + vid = VCenterDriver::Network + .retrieve_vlanid(net.item) + end + + # If type is NSX we need to update values + if selected[:type] == VCenterDriver::Network::NETWORK_TYPE_NSXV + host_id = @vi_client.instance_variable_get '@host_id' + nsx_client = NSXDriver::NSXClient.new_from_id(host_id) + nsx_net = NSXDriver::VirtualWire + .new_from_name(nsx_client, selected[:name]) + selected[:one] << "NSX_ID=\"#{nsx_net.ls_id}\"\n" + selected[:one] << "NSX_VNI=\"#{nsx_net.ls_vni}\"\n" + selected[:one] << "NSX_TZ_ID=\"#{nsx_net.tz_id}\"\n" + end + + if selected[:type] == VCenterDriver::Network::NETWORK_TYPE_NSXT + host_id = @vi_client.instance_variable_get '@host_id' + nsx_client = NSXDriver::NSXClient.new_from_id(host_id) + nsx_net = NSXDriver::OpaqueNetwork + .new_from_name(nsx_client, selected[:name]) + selected[:one] << "NSX_ID=\"#{nsx_net.ls_id}\"\n" + selected[:one] << "NSX_VNI=\"#{nsx_net.ls_vni}\"\n" + selected[:one] << "NSX_TZ_ID=\"#{nsx_net.tz_id}\"\n" + end + + if vid + vlanid = VCenterDriver::Network.vlanid(vid) + + # we have vlan id + if /\A\d+\z/.match(vlanid) + selected[:one] << "VCENTER_VLAN_ID=\"#{vlanid}\"\n" + end + end + + selected[:one] << build_ar(opts) + if opts['selected_clusters'] + selected[:clusters][:one_ids] = + opts['selected_clusters'] + .each + .map(&:to_i) + end + + res = { :id => [], :name => selected[:name] } + create(selected[:one]) do |_one_object, id| + res[:id] << id + add_clusters(id, selected[:clusters][:one_ids]) + end + + res end - return res end + end -end # module VCenterDriver +# module VCenterDriver diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb index 805acb77e2..17d7838ff1 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb @@ -14,7 +14,9 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # +############################################################################## # Module VCenterDriver +############################################################################## module VCenterDriver ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION) @@ -35,14 +37,16 @@ module VCenterDriver unless defined?(HOOK_LOCATION) end - # class VCImporter + ########################################################################## + # Class VcImporter + ########################################################################## class VcImporter attr_accessor :list - ####################################################################### + ###################################################################### # Constructors - ####################################################################### + ###################################################################### # # @@ -120,7 +124,6 @@ module VCenterDriver end end - # # Importer return value # # @ return [Hash{:sucess =>[[]] , :error => {}} diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb index 60269d4962..69b9384fe2 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb @@ -16,232 +16,264 @@ require 'openssl' require 'yaml' +############################################################################## +# Module VCenterDriver +############################################################################## module VCenterDriver -class VIClient - attr_accessor :vim - attr_accessor :rp - attr_accessor :vc_name - attr_accessor :ccr_ref + ########################################################################## + # Class VIClient + ########################################################################## + class VIClient - def initialize(opts, host_id = -1) - opts = {:insecure => true}.merge(opts) - @host_id = host_id - @vim = RbVmomi::VIM.connect(opts) - @vc_name = opts[:host] if opts[:host] + attr_accessor :vim + attr_accessor :rp + attr_accessor :vc_name + attr_accessor :ccr_ref + + def initialize(opts, host_id = -1) + opts = { :insecure => true }.merge(opts) + @host_id = host_id + @vim = RbVmomi::VIM.connect(opts) + @vc_name = opts[:host] if opts[:host] + + # Get ccr and get rp + @ccr_ref = opts.delete(:ccr) + + return unless @ccr_ref - # Get ccr and get rp - @ccr_ref = opts.delete(:ccr) - if @ccr_ref ccr = RbVmomi::VIM::ClusterComputeResource.new(@vim, @ccr_ref) - #Get ref for rp - if ccr - rp = opts.delete(:rp) - if rp - rp_list = get_resource_pools(ccr) - rp_ref = rp_list.select { |r| r[:name] == rp }.first[:ref] rescue nil - @rp = RbVmomi::VIM::ResourcePool(@vim, rp_ref) if rp_ref + # Get ref for rp + + return unless ccr + + rp = opts.delete(:rp) + + return unless rp + + rp_list = get_resource_pools(ccr) + rp_ref = + rp_list + .select {|r| r[:name] == rp }.first[:ref] rescue nil + @rp = RbVmomi::VIM::ResourcePool(@vim, rp_ref) if rp_ref + end + + def rp_confined? + !!@rp + end + + def host_credentials + raise 'no host id defined!' 
if @host_id == -1 + + host = + OpenNebula::Host + .new_with_id( + @host_id, + OpenNebula::Client.new + ) + rc = host.info + if OpenNebula.is_error?(rc) + raise "Could not get host info \ + for ID: #{host_id} - #{rc.message}" + end + + { :pass => host['TEMPLATE/VCENTER_PASSWORD'], + :user => host['TEMPLATE/VCENTER_USER'], + :host => @vc_name } + end + + def get_resource_pools(ccr, rp = nil, parent_prefix = '', rp_array = []) + current_rp = '' + + if !rp + rp = ccr.resourcePool + else + if !parent_prefix.empty? + current_rp << parent_prefix + current_rp << '/' + end + current_rp << rp.name + end + + if rp.resourcePool.empty? + rp_info = {} + rp_info[:name] = current_rp + rp_info[:ref] = rp._ref + rp_array << rp_info + else + rp.resourcePool.each do |child_rp| + get_resource_pools(ccr, child_rp, current_rp, rp_array) + end + rp_info = {} + rp_info[:name] = current_rp + rp_info[:ref] = rp._ref + rp_array << rp_info unless current_rp.empty? + end + + rp_array + end + + def close_connection + @vim.close + end + + # @return RbVmomi::VIM:: objects + def self.get_entities(folder, type, entities = []) + if folder == [] + return + end + + folder.childEntity.each do |child| + the_name, _junk = child.to_s.split('(') + case the_name + when 'Folder' + get_entities(child, type, entities) + when type + entities.push(child) end end - end - end - def rp_confined? - !!@rp - end - - def get_host_credentials() - raise "no host id defined!" if @host_id == -1 - - host = OpenNebula::Host.new_with_id(@host_id, OpenNebula::Client.new) - rc = host.info - if OpenNebula.is_error?(rc) - raise "Could not get host info for ID: #{host_id} - #{rc.message}" + entities end - {pass: host["TEMPLATE/VCENTER_PASSWORD"], - user: host["TEMPLATE/VCENTER_USER"], - host: @vc_name } + def self.new_from_host(host_id) + begin + client = OpenNebula::Client.new + host = OpenNebula::Host.new_with_id(host_id, client) + rc = host.info(true) + if OpenNebula.is_error?(rc) + raise "Could not get host info for \ + ID: #{host_id} - #{rc.message}" + end - end + connection = { + :host => host['TEMPLATE/VCENTER_HOST'], + :user => host['TEMPLATE/VCENTER_USER'], + :rp => host['TEMPLATE/VCENTER_RESOURCE_POOL'], + :ccr => host['TEMPLATE/VCENTER_CCR_REF'], + :password => host['TEMPLATE/VCENTER_PASSWORD'] + } - def get_resource_pools(ccr, rp = nil, parent_prefix = "", rp_array = []) + vc_port = host['TEMPLATE/VCENTER_PORT'] + connection[:port] = vc_port unless vc_port.nil? - current_rp = "" - - if !rp - rp = ccr.resourcePool - else - if !parent_prefix.empty? - current_rp << parent_prefix - current_rp << "/" - end - current_rp << rp.name - end - - if rp.resourcePool.size == 0 - rp_info = {} - rp_info[:name] = current_rp - rp_info[:ref] = rp._ref - rp_array << rp_info - else - rp.resourcePool.each do |child_rp| - get_resource_pools(ccr, child_rp, current_rp, rp_array) - end - rp_info = {} - rp_info[:name] = current_rp - rp_info[:ref] = rp._ref - rp_array << rp_info if !current_rp.empty? 
- end - - rp_array - end - - def close_connection - @vim.close - end - - # @return RbVmomi::VIM:: objects - def self.get_entities(folder, type, entities=[]) - if folder == [] - return nil - end - - folder.childEntity.each do |child| - the_name, junk = child.to_s.split('(') - case the_name - when "Folder" - get_entities(child, type, entities) - when type - entities.push(child) + new(connection, host_id) + rescue StandardError => e + raise e end end - return entities - end + def self.new_from_datastore(datastore_id) + begin + client = OpenNebula::Client.new + datastore = + OpenNebula::Datastore + .new_with_id( + datastore_id, + client + ) + rc = datastore.info + if OpenNebula.is_error?(rc) + raise "Could not get datastore info \ + for ID: #{datastore_id} - #{rc.message}" + end - def self.new_from_host(host_id) - begin - client = OpenNebula::Client.new - host = OpenNebula::Host.new_with_id(host_id, client) - rc = host.info(true) - if OpenNebula.is_error?(rc) - raise "Could not get host info for ID: #{host_id} - #{rc.message}" + vcenter_id = datastore['TEMPLATE/VCENTER_INSTANCE_ID'] + + host_pool = OpenNebula::HostPool.new(client) + rc = host_pool.info + if OpenNebula.is_error?(rc) + raise "Could not get hosts information - #{rc.message}" + end + + user = '' + password = '' + port = 0 + host_pool.each do |host| + vc_instance_id = host['TEMPLATE/VCENTER_INSTANCE_ID'] + next unless vc_instance_id == vcenter_id + + host_decrypted = + OpenNebula::Host + .new_with_id( + host['ID'], + client + ) + host_decrypted.info(true) + user = host_decrypted['TEMPLATE/VCENTER_USER'] + password = host_decrypted['TEMPLATE/VCENTER_PASSWORD'] + port = host_decrypted['TEMPLATE/VCENTER_PORT'] + end + if password.empty? || user.empty? + raise "Error getting \ + credentials for datastore #{datastore_id}" + end + + connection = { + :host => datastore['TEMPLATE/VCENTER_HOST'], + :user => user, + :password => password + } + + connection[:port] = port unless port.nil? + + new(connection) + rescue StandardError => e + raise e end - - connection = { - :host => host["TEMPLATE/VCENTER_HOST"], - :user => host["TEMPLATE/VCENTER_USER"], - :rp => host["TEMPLATE/VCENTER_RESOURCE_POOL"], - :ccr => host["TEMPLATE/VCENTER_CCR_REF"], - :password => host["TEMPLATE/VCENTER_PASSWORD"] - } - - connection[:port] = host["TEMPLATE/VCENTER_PORT"] unless host["TEMPLATE/VCENTER_PORT"].nil? - - self.new(connection, host_id) - - rescue Exception => e - raise e end - end - def self.new_from_datastore(datastore_id) - begin - client = OpenNebula::Client.new - datastore = OpenNebula::Datastore.new_with_id(datastore_id, client) - rc = datastore.info - if OpenNebula.is_error?(rc) - raise "Could not get datastore info for ID: #{datastore_id} - #{rc.message}" + def self.decrypt(msg, token) + begin + cipher = OpenSSL::Cipher.new('aes-256-cbc') + + cipher.decrypt + + # Truncate for Ruby 2.4 (in previous versions this was being + # automatically truncated) + cipher.key = token[0..31] + + msg = cipher.update(Base64.decode64(msg)) + msg << cipher.final + rescue StandardError + raise 'Error decrypting secret.' 
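+ # Any OpenSSL failure (wrong key, corrupt ciphertext, bad
+ # padding) is collapsed into this single generic message; the
+ # original exception is not propagated. Illustrative call with
+ # hypothetical values, not taken from the driver:
+ #   VIClient.decrypt(encoded_secret, one_auth_token)
+ # The token must be at least 32 bytes long, since only
+ # token[0..31] is used as the AES-256 key.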
end - vcenter_id = datastore["TEMPLATE/VCENTER_INSTANCE_ID"] + end - host_pool = OpenNebula::HostPool.new(client) - rc = host_pool.info - if OpenNebula.is_error?(rc) - raise "Could not get hosts information - #{rc.message}" + def self.in_silence + begin + orig_stderr = $stderr.clone + orig_stdout = $stdout.clone + $stderr.reopen File.new('/dev/null', 'w') + $stdout.reopen File.new('/dev/null', 'w') + retval = yield + rescue StandardError => e + $stdout.reopen orig_stdout + $stderr.reopen orig_stderr + raise e + ensure + $stdout.reopen orig_stdout + $stderr.reopen orig_stderr end + retval + end - user = "" - password = "" - port = 0 - host_pool.each do |host| - if host["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_id - host_decrypted = OpenNebula::Host.new_with_id(host["ID"], client) - host_decrypted.info(true) - user = host_decrypted["TEMPLATE/VCENTER_USER"] - password = host_decrypted["TEMPLATE/VCENTER_PASSWORD"] - port = host_decrypted["TEMPLATE/VCENTER_PORT"] - end + def self.in_stderr_silence + begin + orig_stderr = $stderr.clone + $stderr.reopen File.new('/dev/null', 'w') + retval = yield + rescue StandardError => e + $stderr.reopen orig_stderr + raise e + ensure + $stderr.reopen orig_stderr end - if password.empty? or user.empty? - raise "Error getting credentials for datastore #{datastore_id}" - end - - connection = { - :host => datastore["TEMPLATE/VCENTER_HOST"], - :user => user, - :password => password - } - - connection[:port] = port unless port.nil? - - self.new(connection) - - rescue Exception => e - raise e + retval end + end - def self.decrypt(msg, token) - begin - cipher = OpenSSL::Cipher.new("aes-256-cbc") - - cipher.decrypt - - # Truncate for Ruby 2.4 (in previous versions this was being - # automatically truncated) - cipher.key = token[0..31] - - msg = cipher.update(Base64::decode64(msg)) - msg << cipher.final - rescue - raise "Error decrypting secret." - end - end - - def self.in_silence - begin - orig_stderr = $stderr.clone - orig_stdout = $stdout.clone - $stderr.reopen File.new('/dev/null', 'w') - $stdout.reopen File.new('/dev/null', 'w') - retval = yield - rescue Exception => e - $stdout.reopen orig_stdout - $stderr.reopen orig_stderr - raise e - ensure - $stdout.reopen orig_stdout - $stderr.reopen orig_stderr - end - retval - end - - def self.in_stderr_silence - begin - orig_stderr = $stderr.clone - $stderr.reopen File.new('/dev/null', 'w') - retval = yield - rescue Exception => e - $stderr.reopen orig_stderr - raise e - ensure - $stderr.reopen orig_stderr - end - retval - end end - -end # module VCenterDriver +# module VCenterDriver diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb index 9e5863e37d..674a003fad 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb @@ -14,258 +14,279 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # +######################################################################## +# Module VCenterDriver +######################################################################## module VCenterDriver -class VIHelper + ######################################################################## + # Class VIHelper + ######################################################################## + class VIHelper - ETC_LOCATION = "/etc/one/" if !defined?(ETC_LOCATION) - VCENTER_DRIVER_DEFAULT = "#{ETC_LOCATION}/vcenter_driver.default" - VM_PREFIX_DEFAULT = "one-$i-" + ETC_LOCATION = '/etc/one/' unless defined?(ETC_LOCATION) + VCENTER_DRIVER_DEFAULT = "#{ETC_LOCATION}/vcenter_driver.default" + VM_PREFIX_DEFAULT = 'one-$i-' - def self.client - @@client ||= OpenNebula::Client.new - end - - def self.return_if_error(rc, item, exit_if_fail) - if OpenNebula::is_error?(rc) - raise rc.message if !exit_if_fail - - STDERR.puts rc.message - exit 1 - else - item + def self.client + @@client ||= OpenNebula::Client.new # rubocop:disable Style/ClassVars end - end - require 'scripts_common' - def self.check_error(rc, message, _exit=false) - if OpenNebula::is_error?(rc) - OpenNebula::error_message("\n Error #{message}: #{rc.message}\n") - exit 1 if (_exit) + def self.return_if_error(rc, item, exit_if_fail) + if OpenNebula.is_error?(rc) + raise rc.message unless exit_if_fail + + STDERR.puts rc.message + exit 1 + else + item + end + end + + require 'scripts_common' + def self.check_error(rc, message, exit_condition = false) + return unless OpenNebula.is_error?(rc) + + OpenNebula.error_message("\n Error #{message}: #{rc.message}\n") + exit 1 if exit_condition raise rc.message end - end - def self.get_cluster_id(clusters) - clusters.each do |id| - return id unless id == -1 + def self.get_cluster_id(clusters) + clusters.each do |id| + return id unless id == -1 + end + -1 end - return -1 - end - def self.one_managed?(object) - if object.class.ancestors.include?(OpenNebula::XMLElement) - managed = object["TEMPLATE/OPENNEBULA_MANAGED"] || object["USER_TEMPLATE/OPENNEBULA_MANAGED"] - return managed != "NO" + def self.one_managed?(object) + if object.class.ancestors.include?(OpenNebula::XMLElement) + managed = + object['TEMPLATE/OPENNEBULA_MANAGED'] || + object['USER_TEMPLATE/OPENNEBULA_MANAGED'] + return managed != 'NO' + end + false end - return false - end - def self.check_opts(opts, att_list) - att_list.each do |att| - raise "#{att} option is mandatory" if opts[att].nil? + def self.check_opts(opts, att_list) + att_list.each do |att| + raise "#{att} option is mandatory" if opts[att].nil? 
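+ # For example (hypothetical arguments, not from the driver),
+ #   VIHelper.check_opts({ :name => 'vm0' }, [:name, :ref])
+ # raises RuntimeError with "ref option is mandatory".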
+ end end - end - def self.one_item(the_class, id, exit_if_fail = true) - item = the_class.new_with_id(id, client) - rc = item.info - return_if_error(rc, item, exit_if_fail) - end - - def self.new_one_item(the_class) - item = the_class.new(the_class.build_xml, client) - return item - end - - def self.one_pool(the_class, exit_if_fail = true) - item = the_class.new(client) - - rc = nil - begin - rc = item.info_all - rescue + def self.one_item(the_class, id, exit_if_fail = true) + item = the_class.new_with_id(id, client) rc = item.info + return_if_error(rc, item, exit_if_fail) end - return_if_error(rc, item, exit_if_fail) - end + def self.new_one_item(the_class) + the_class.new(the_class.build_xml, client) + end + + def self.one_pool(the_class, exit_if_fail = true) + item = the_class.new(client) + + rc = nil + begin + rc = item.info_all + rescue StandardError + rc = item.info + end + + return_if_error(rc, item, exit_if_fail) + end + + def self.find_by_name(the_class, name, pool = nil, raise_if_fail = true) + pool = one_pool(the_class, raise_if_fail) if pool.nil? + element = pool.find {|e| e['NAME'] == name.to_s } + if element.nil? && raise_if_fail + raise "Could not find element '#{name}' in pool '#{the_class}'" + end - def self.find_by_name(the_class, name, pool = nil, raise_if_fail = true) - pool = one_pool(the_class, raise_if_fail) if pool.nil? - element = pool.find{|e| e['NAME'] == "#{name}" } - if element.nil? && raise_if_fail - raise "Could not find element '#{name}' in pool '#{the_class}'" - else element end - end - def self.generate_name(opts, nbytes) + def self.generate_name(opts, nbytes) + return opts[:name] if nbytes <= 0 - return opts[:name] if nbytes <= 0 + @sha256 ||= Digest::SHA256.new + chain = opts[:key] + hash = @sha256.hexdigest(chain)[0..nbytes-1] - @sha256 ||= Digest::SHA256.new - chain = opts[:key] - hash = @sha256.hexdigest(chain)[0..nbytes-1] - - return "#{opts[:name]}-#{hash}" - end - - def self.one_name(the_class, name, key, pool = nil, bytes = 0) - - # Remove \u007F character that comes from vcenter - name = name.tr("\u007F", "") - pool = one_pool(the_class) if pool.nil? - - import_name = generate_name({name: name, key: key}, bytes) - - begin - find_by_name(the_class, import_name, pool) - rescue RuntimeError => e - return import_name + "#{opts[:name]}-#{hash}" end - one_name(the_class, name, key, pool, bytes+2) - end + def self.one_name(the_class, name, key, pool = nil, bytes = 0) + # Remove \u007F character that comes from vcenter + name = name.tr("\u007F", '') + pool = one_pool(the_class) if pool.nil? - def self.get_ref_key(element, attribute, vcenter_uuid = nil) - key = element[attribute] + import_name = generate_name({ :name => name, :key => key }, bytes) - return nil if key.nil? + begin + find_by_name(the_class, import_name, pool) + rescue StandardError + return import_name + end - tvid = element["TEMPLATE/VCENTER_INSTANCE_ID"] - uvid = element["USER_TEMPLATE/VCENTER_INSTANCE_ID"] - - if tvid - key += tvid - elsif uvid - key += uvid - elsif vcenter_uuid - key += vcenter_uuid + one_name(the_class, name, key, pool, bytes+2) end - return key - end + def self.get_ref_key(element, attribute, vcenter_uuid = nil) + key = element[attribute] - def self.create_ref_hash(attribute, pool, vcenter_uuid = nil) - hash = {} + return if key.nil? 
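NOTE: one_name above derives import names deterministically: try the bare
name first, and on every pool collision retry with a SHA-256 suffix of the
key widened by two hex characters. The strategy in isolation (unique_name
and the taken array are illustrative stand-ins for the pool lookup):

    require 'digest'

    def unique_name(name, key, taken, nbytes = 0)
        suffix = nbytes > 0 ? "-#{Digest::SHA256.hexdigest(key)[0, nbytes]}" : ''
        candidate = "#{name}#{suffix}"
        return candidate unless taken.include?(candidate)

        # collision: widen the hash suffix and retry
        unique_name(name, key, taken, nbytes + 2)
    end

    p unique_name('ubuntu', 'vm-1234', [])         # => "ubuntu"
    p unique_name('ubuntu', 'vm-1234', ['ubuntu']) # => "ubuntu-" + 2 hex chars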
- pool.each_element(Proc.new do |e| - refkey = get_ref_key(e, attribute, vcenter_uuid) - hash[refkey] = e - end) + tvid = element['TEMPLATE/VCENTER_INSTANCE_ID'] + uvid = element['USER_TEMPLATE/VCENTER_INSTANCE_ID'] - hash - end + if tvid + key += tvid + elsif uvid + key += uvid + elsif vcenter_uuid + key += vcenter_uuid + end - def self.clean_ref_hash - @ref_hash = {} - end + key + end - def self.add_ref_hash(attr, one_object) - raise "cache is empty!" unless @ref_hash + def self.create_ref_hash(attribute, pool, vcenter_uuid = nil) + hash = {} - refkey = get_ref_key(one_object, attr) + pool.each_element(proc do |e| + refkey = get_ref_key(e, attribute, vcenter_uuid) + hash[refkey] = e + end) + + hash + end + + def self.clean_ref_hash + @ref_hash = {} + end + + def self.add_ref_hash(attr, one_object) + raise 'cache is empty!' unless @ref_hash + + refkey = get_ref_key(one_object, attr) + + return unless @ref_hash[attr] - if @ref_hash[attr] @ref_hash[attr][refkey] = one_object end - end - def self.remove_ref_hash(attr, one_object) - raise "cache is empty!" unless @ref_hash + def self.remove_ref_hash(attr, one_object) + raise 'cache is empty!' unless @ref_hash - refkey = get_ref_key(one_object, attr) + refkey = get_ref_key(one_object, attr) + + return unless @ref_hash[attr] - if @ref_hash[attr] @ref_hash[attr].delete(refkey) end - end - def self.find_by_ref(the_class, attribute, ref, vcenter_uuid, pool = nil) - pool = one_pool(the_class, false) if pool.nil? - @ref_hash ||= {} + def self.find_by_ref( + the_class, + attribute, + ref, + vcenter_uuid, + pool = nil + ) + pool = one_pool(the_class, false) if pool.nil? + @ref_hash ||= {} - if @ref_hash[attribute].nil? || @ref_hash[attribute] == {} - @ref_hash[attribute] = create_ref_hash(attribute, - pool, - vcenter_uuid) - end - - refkey = "" - refkey = ref if ref - refkey += vcenter_uuid if vcenter_uuid - - return @ref_hash[attribute][refkey] - end - - def self.find_image_by(att, the_class, path, ds_id, pool = nil) - pool = one_pool(the_class, false) if pool.nil? - element = pool.find{|e| - e[att] == path && - e["DATASTORE_ID"] == ds_id} - return element - end - - def self.find_persistent_image_by_source(source, pool) - element = pool.find{|e| - e["SOURCE"] == source && - e["PERSISTENT"] == "1" - } - - return element - end - - def self.find_vcenter_vm_by_name(one_vm, host, vi_client) - # Let's try to find the VM object only by its name - # Let's build the VM name - vm_prefix = host['TEMPLATE/VM_PREFIX'] - vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty? - vm_prefix.gsub!("$i", one_vm['ID']) - vm_name = vm_prefix + one_vm['NAME'] - - # We have no DEPLOY_ID, the VM has never been deployed - # let's use a view to try to find the VM from the root folder - view = vi_client.vim.serviceContent.viewManager.CreateContainerView({ - container: vi_client.vim.rootFolder, - type: ['VirtualMachine'], - recursive: true - }) - - vcenter_vm = view.view.find{ |v| v.name == vm_name } if !!view.view && !view.view.empty? - - view.DestroyView # Destroy the view - - return vcenter_vm - end - - def self.get_default(xpath) - begin - xml = OpenNebula::XMLElement.new - xml.initialize_xml(File.read(VCENTER_DRIVER_DEFAULT), 'VCENTER') - return xml[xpath] - rescue - return nil - end - end - - def self.get_location(item) - folders = [] - while !item.instance_of? RbVmomi::VIM::Datacenter - item = item.parent - if !item.instance_of? RbVmomi::VIM::Datacenter - folders << item.name if item.name != "host" + if @ref_hash[attribute].nil? 
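NOTE: get_ref_key and the @ref_hash code above implement a small identity
cache whose key is the vCenter moref concatenated with the vCenter
instance id, so the same moref coming from two different vCenters cannot
collide. A toy equivalent (plain hashes stand in for pool elements):

    cache = {}
    key = lambda {|ref, instance_id| "#{ref}#{instance_id}" }

    cache[key.call('network-12', 'uuid-A')] = { :name => 'net-on-A' }
    cache[key.call('network-12', 'uuid-B')] = { :name => 'net-on-B' }

    p cache[key.call('network-12', 'uuid-A')] # => {:name=>"net-on-A"}
    p cache[key.call('network-12', 'uuid-C')] # => nil, unknown vCenter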
|| @ref_hash[attribute] == {} + @ref_hash[attribute] = create_ref_hash(attribute, + pool, + vcenter_uuid) end - raise "Could not find the location" if item.nil? + + refkey = '' + refkey = ref if ref + refkey += vcenter_uuid if vcenter_uuid + + @ref_hash[attribute][refkey] + end + + def self.find_image_by(att, the_class, path, ds_id, pool = nil) + pool = one_pool(the_class, false) if pool.nil? + pool.find do |e| + e[att] == path && + e['DATASTORE_ID'] == ds_id + end + end + + def self.find_persistent_image_by_source(source, pool) + pool.find do |e| + e['SOURCE'] == source && + e['PERSISTENT'] == '1' + end + end + + def self.find_vcenter_vm_by_name(one_vm, host, vi_client) + # Let's try to find the VM object only by its name + # Let's build the VM name + vm_prefix = host['TEMPLATE/VM_PREFIX'] + vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty? + vm_prefix.gsub!('$i', one_vm['ID']) + vm_name = vm_prefix + one_vm['NAME'] + + # We have no DEPLOY_ID, the VM has never been deployed + # let's use a view to try to find the VM from the root folder + view = vi_client + .vim + .serviceContent + .viewManager + .CreateContainerView( + { + :container => vi_client.vim.rootFolder, + :type => ['VirtualMachine'], + :recursive => true + } + ) + + if !view.view.nil? && !view.view.empty? + vcenter_vm = view + .view + .find {|v| v.name == vm_name } + end + + view.DestroyView # Destroy the view + + vcenter_vm + end + + def self.get_default(xpath) + begin + xml = OpenNebula::XMLElement.new + xml.initialize_xml(File.read(VCENTER_DRIVER_DEFAULT), 'VCENTER') + xml[xpath] + rescue StandardError + nil + end + end + + def self.get_location(item) + folders = [] + until item.instance_of? RbVmomi::VIM::Datacenter + item = item.parent + if !item.instance_of? RbVmomi::VIM::Datacenter + folders << item.name if item.name != 'host' + end + raise 'Could not find the location' if item.nil? + end + location = folders.reverse.join('/') + location = '/' if location.empty? + + location end - location = folders.reverse.join("/") - location = "/" if location.empty? - return location end + # class VIHelper -end # class VIHelper - -end # module VCenterDriver +end +# module VCenterDriver diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb index 53ba45a994..1fc4aff9ee 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb @@ -198,7 +198,7 @@ module VCenterDriver ############################################################################ # @return Boolean whether the VM exists in vCenter - def is_new? + def new? one_item["DEPLOY_ID"].empty? 
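NOTE: find_vcenter_vm_by_name above creates a ContainerView over the root
folder and calls DestroyView once the search is done; if the search itself
raised, the server-side view would leak. A hedged variant that releases
the view in ensure (assumes a connected RbVmomi vim handle, as used
throughout this driver):

    def find_vm_by_name(vim, vm_name)
        view = vim.serviceContent.viewManager.CreateContainerView(
            :container => vim.rootFolder,
            :type      => ['VirtualMachine'],
            :recursive => true
        )
        view.view.find {|v| v.name == vm_name }
    ensure
        view.DestroyView if view # released even on error
    end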
end @@ -237,7 +237,7 @@ module VCenterDriver @vi_client.vim.serviceContent.about.instanceUuid end - def get_disk_keys + def disk_keys_get unmanaged_keys = {} @item.config.extraConfig.each do |val| u = val[:key].include?("opennebula.disk") @@ -672,7 +672,7 @@ module VCenterDriver end # Queries to OpenNebula the machine nics xml representation - def get_one_nics + def one_nics_get one_item.info if one_item.instance_of?(OpenNebula::VirtualMachine) one_item.retrieve_xmlelements("TEMPLATE/NIC") end @@ -682,7 +682,7 @@ module VCenterDriver # @param one_disk [XMLelement] The OpenNebula object representation of the disk # @param keys [Hash (String => String)] Hashmap with the unmanaged keys # @param vc_disks [Array (vcenter_disks)] Array of the machine real disks - # see get_vcenter_disks method + # see vcenter_disks_get method # # @return [vCenter_disk] the proper disk def query_disk(one_disk, keys, vc_disks) @@ -738,8 +738,8 @@ module VCenterDriver def info_nics @nics = {macs: {}} - vc_nics = get_vcenter_nics - one_nics = get_one_nics + vc_nics = vcenter_nics_get + one_nics = one_nics_get one_nics.each do |one_nic| index = one_nic["NIC_ID"] @@ -785,8 +785,8 @@ module VCenterDriver def info_disks @disks = {} - keys = get_disk_keys - vc_disks = get_vcenter_disks + keys = disk_keys_get + vc_disks = vcenter_disks_get one_disks = get_one_disks one_disks.each do |one_disk| @@ -834,7 +834,7 @@ module VCenterDriver raise "nic #{index} not found" unless one_nic - vc_nics = get_vcenter_nics + vc_nics = vcenter_nics_get vc_nic = query_nic(mac, vc_nics) if vc_nic @@ -858,8 +858,8 @@ module VCenterDriver raise "disk #{index} not found" unless one_disk - keys = opts[:keys].nil? ? get_disk_keys : opts[:keys] - vc_disks = opts[:disks].nil? ? get_vcenter_disks : opts[:disks] + keys = opts[:keys].nil? ? disk_keys_get : opts[:keys] + vc_disks = opts[:disks].nil? ? vcenter_disks_get : opts[:disks] vc_disk = query_disk(one_disk, keys, vc_disks) if vc_disk @@ -892,7 +892,7 @@ module VCenterDriver # for unmanaged nics begin if !unmanaged_nics.empty? - nics = get_vcenter_nics + nics = vcenter_nics_get select_net =->(ref){ device = nil @@ -1042,16 +1042,16 @@ module VCenterDriver key_prefix = managed ? "opennebula.mdisk" : "opennebula.disk" # Get vcenter VM disks to know real path of cloned disk - vcenter_disks = get_vcenter_disks + vcenter_disks = vcenter_disks_get # Create an array with the paths of the disks in vcenter template if !template_ref.nil? 
template = VCenterDriver::Template.new_from_ref(template_ref, vi_client) - template_disks = template.get_vcenter_disks + template_disks = template.vcenter_disks_get else # If we are dealing with a Wild VM, we simply use # what is available in the vCenter VM - template_disks = get_vcenter_disks + template_disks = vcenter_disks_get end template_disks_vector = [] template_disks.each do |d| @@ -1386,7 +1386,7 @@ module VCenterDriver # start in one, we want the next avaliable id card_num = 1 @item["config.hardware.device"].each do |dv| - card_num += 1 if is_nic?(dv) + card_num += 1 if VCenterDriver::Network.nic?(dv) end nic_card = Nic.nic_model_class(model) @@ -1413,7 +1413,7 @@ module VCenterDriver # grab the last unitNumber to ensure the nic to be added at the end if !unumber - @unic = @unic || get_vcenter_nics.map{|d| d.unitNumber}.max || 0 + @unic = @unic || vcenter_nics_get.map{|d| d.unitNumber}.max || 0 unumber = @unic += 1 else @unic = unumber @@ -1487,7 +1487,7 @@ module VCenterDriver card_num = 1 # start in one, we want the next available id @item["config.hardware.device"].each do |dv| - card_num += 1 if is_nic?(dv) + card_num += 1 if VCenterDriver::Network.nic?(dv) end nic_card = Nic.nic_model_class(model) @@ -1644,7 +1644,7 @@ module VCenterDriver def detach_disks_specs() detach_disk_array = [] extra_config = [] - keys = get_disk_keys.invert + keys = disk_keys_get.invert ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool) disks_each(:detached?) do |d| key = d.key.to_s @@ -1675,7 +1675,7 @@ module VCenterDriver extraconfig_new = [] # vCenter mob disks vc_disks = @item["config.hardware.device"].select do |vc_device| - is_disk?(vc_device) + disk?(vc_device) end return unless vc_disks # For each changed disk, compare with vcenter mob disk @@ -1740,8 +1740,8 @@ module VCenterDriver device_change = [] # Extract unmanaged_keys - unmanaged_keys = get_disk_keys - vc_disks = get_vcenter_disks + unmanaged_keys = disk_keys_get + vc_disks = vcenter_disks_get # Check if we're dealing with a StoragePod SYSTEM ds storpod = disk["VCENTER_DS_REF"].start_with?('group-') @@ -1832,7 +1832,7 @@ module VCenterDriver end vm.config.hardware.device.each do |disk| - if is_disk_or_cdrom?(disk) + if disk_or_cdrom?(disk) # Let's try to find if disks is persistent source_unescaped = disk.backing.fileName.sub(/^\[(.*?)\] /, "") rescue next source = VCenterDriver::FileHelper.escape_path(source_unescaped) @@ -1922,10 +1922,10 @@ module VCenterDriver @used_keys = [] unless @used_keys if type == "CDROM" - bound = "is_cdrom?" + bound = "cdrom?" key = 3000 else - bound = "is_disk?" + bound = "disk?" key = 2000 end @@ -2395,7 +2395,7 @@ module VCenterDriver ############################################################################ def shutdown - if !is_powered_off? + if !powered_off? begin if vm_tools? @item.ShutdownGuest @@ -2407,7 +2407,7 @@ module VCenterDriver raise e.message if error != 'InvalidPowerState' end timeout = CONFIG[:vm_poweron_wait_default] - wait_timeout(:is_powered_off?, timeout) + wait_timeout(:powered_off?, timeout) end end @@ -2442,14 +2442,14 @@ module VCenterDriver set_running(true, true) if set_running timeout = CONFIG[:vm_poweron_wait_default] - wait_timeout(:is_powered_on?, timeout) + wait_timeout(:powered_on?, timeout) end - def is_powered_on? + def powered_on? return @item.runtime.powerState == "poweredOn" end - def is_powered_off? + def powered_off? 
return @item.runtime.powerState == "poweredOff" end diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb index d161adf3d4..b8b73abac6 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb @@ -15,1046 +15,1290 @@ #--------------------------------------------------------------------------- # require 'ipaddr' +############################################################################## +# Module VCenterDriver +############################################################################## module VCenterDriver -class Template + ########################################################################## + # Class Template + ########################################################################## + class Template - attr_accessor :item + attr_accessor :item - include Memoize + include Memoize - def initialize(item=nil, vi_client=nil) - @item = item - check_item(@item, nil) if (@item) - @vi_client = vi_client - @locking = true - end - - # Locking function. Similar to flock - def lock - if @locking - @locking_file = File.open("/tmp/vcenter-importer-lock","w") - @locking_file.flock(File::LOCK_EX) + def initialize(item = nil, vi_client = nil) + @item = item + check_item(@item, nil) if @item + @vi_client = vi_client + @locking = true end - end - # Unlock driver execution mutex - def unlock - if @locking + # Locking function. Similar to flock + def lock + return unless @locking + + @locking_file = File.open('/tmp/vcenter-importer-lock', 'w') + @locking_file.flock(File::LOCK_EX) + end + + # Unlock driver execution mutex + def unlock + return unless @locking + @locking_file.close - if File.exist?("/tmp/vcenter-importer-lock") - File.delete("/tmp/vcenter-importer-lock") - end - end - end - - def vm? - self.class == VCenterDriver::VirtualMachine - end - - def online? - raise "vcenter item not found!" unless @item - - !@item["guest.net"].empty? - end - - def get_dc - item = @item - - trace = [] - while item && !item.instance_of?(RbVmomi::VIM::Datacenter) - rp = item.resourcePool rescue nil - if rp && rp.instance_of?(RbVmomi::VIM::VirtualApp) - trace << "rp:" + item.to_s - item = rp.parent rescue nil - else - trace << item.to_s - item = item.parent rescue nil + if File.exist?('/tmp/vcenter-importer-lock') + File.delete('/tmp/vcenter-importer-lock') end end - if item.nil? - trace = "[" + trace.join(", ") + "]" - raise "Could not find the parent Datacenter. Trace: #{trace}" + def vm? + self.class == VCenterDriver::VirtualMachine end - Datacenter.new(item) - end + def online? + raise 'vcenter item not found!' unless @item - def delete_template - @item.Destroy_Task.wait_for_completion - end + !@item['guest.net'].empty? + end - def get_vcenter_instance_uuid - @vi_client.vim.serviceContent.about.instanceUuid rescue nil - end + def get_dc + item = @item - def create_template_copy(template_name) - error = nil - template_ref = nil - - template_name = "one-#{self['name']}" if template_name.empty? 
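NOTE: the hunks above rename accessor-style methods (get_disk_keys to
disk_keys_get, get_vcenter_disks to vcenter_disks_get, is_powered_on? to
powered_on?, and so on) to satisfy Rubocop naming cops. Any out-of-tree
caller still using the old spelling breaks; if a transition shim were
wanted (it is not part of this diff), a logged delegator would do:

    module LegacyNames

        def get_disk_keys
            warn '[deprecated] get_disk_keys, use disk_keys_get'
            disk_keys_get
        end

    end

    # VCenterDriver::VirtualMachine.include(LegacyNames) would restore
    # the old spelling while logging each use.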
- - relocate_spec_params = {} - relocate_spec_params[:pool] = get_rp - relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params) - - clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({ - :location => relocate_spec, - :powerOn => false, - :template => false - }) - - template = nil - begin - template = @item.CloneVM_Task(:folder => @item.parent, - :name => template_name, - :spec => clone_spec).wait_for_completion - template_ref = template._ref - rescue StandardError => e - if !e.message.start_with?('DuplicateName') - error = "Could not create the template clone. Reason: #{e.message}" - return error, nil - end - - dc = get_dc - vm_folder = dc.vm_folder - vm_folder.fetch! - vm = vm_folder.items - .select{|k,v| v.item.name == template_name} - .values.first.item rescue nil - - if vm - begin - vm.Destroy_Task.wait_for_completion - template = @item.CloneVM_Task(:folder => @item.parent, - :name => template_name, - :spec => clone_spec).wait_for_completion - template_ref = template._ref - rescue - error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{e.message}" + trace = [] + while item && !item.instance_of?(RbVmomi::VIM::Datacenter) + rp = item.resourcePool rescue nil + if rp && rp.instance_of?(RbVmomi::VIM::VirtualApp) + trace << 'rp:' + item.to_s + item = rp.parent rescue nil + else + trace << item.to_s + item = item.parent rescue nil end - else - error = "Could not create the template clone. Reason: #{e.message}" end + + if item.nil? + trace = '[' + trace.join(', ') + ']' + raise "Could not find the parent Datacenter. Trace: #{trace}" + end + + Datacenter.new(item) end - return error, template_ref - end - - # Linked Clone over existing template - def create_delta_disks - - begin - disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk) - disk_without_snapshots = disks.select { |x| x.backing.parent.nil? } - rescue - error = "Cannot extract existing disks on template." - use_linked_clones = false - return error, use_linked_clones + def delete_template + @item.Destroy_Task.wait_for_completion end - if !disk_without_snapshots.empty? + def vcenter_instance_uuid + @vi_client.vim.serviceContent.about.instanceUuid rescue nil + end + def create_template_copy(template_name) + error = nil + template_ref = nil + + template_name = "one-#{self['name']}" if template_name.empty? + + relocate_spec_params = {} + relocate_spec_params[:pool] = rp_get + relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params) + + clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec({ + :location => relocate_spec, + :powerOn => false, + :template => false + }) + + template = nil begin - if self['config.template'] - @item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host']) - end + template = @item.CloneVM_Task(:folder => @item.parent, + :name => template_name, + :spec => clone_spec).wait_for_completion + template_ref = template._ref rescue StandardError => e - @item.MarkAsTemplate() - error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}" + if !e.message.start_with?('DuplicateName') + error = "Could not create the template clone. Reason: #{e.message}" + return error, nil + end + + dc = get_dc + vm_folder = dc.vm_folder + vm_folder.fetch! 
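NOTE: create_template_copy above drives the whole copy through a single
CloneVM_Task, with the relocate spec pinned to the source resource pool
and powerOn/template both forced off. The spec construction in isolation
(assumes vm is an RbVmomi::VIM::VirtualMachine and pool a resource pool;
the method name is illustrative):

    require 'rbvmomi'

    def cold_clone(vm, pool, name)
        relocate = RbVmomi::VIM.VirtualMachineRelocateSpec(:pool => pool)
        spec = RbVmomi::VIM.VirtualMachineCloneSpec(
            :location => relocate,
            :powerOn  => false, # never boot the copy
            :template => false  # converted to a template in a later step
        )
        vm.CloneVM_Task(
            :folder => vm.parent,
            :name   => name,
            :spec   => spec
        ).wait_for_completion
    end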
+ vm = vm_folder.items + .select {|_k, v| v.item.name == template_name } + .values.first.item rescue nil + + if vm + begin + vm.Destroy_Task.wait_for_completion + template = @item.CloneVM_Task(:folder => @item.parent, + :name => template_name, + :spec => clone_spec).wait_for_completion + template_ref = template._ref + rescue StandardError + error = "Could not delete the existing template, please remove it manually from vCenter. Reason: #{e.message}" + end + else + error = "Could not create the template clone. Reason: #{e.message}" + end + end + + [error, template_ref] + end + + # Linked Clone over existing template + def create_delta_disks + begin + disks = @item['config.hardware.device'].grep(RbVmomi::VIM::VirtualDisk) + disk_without_snapshots = disks.select {|x| x.backing.parent.nil? } + rescue StandardError + error = 'Cannot extract existing disks on template.' use_linked_clones = false return error, use_linked_clones end - begin - spec = {} - spec[:deviceChange] = [] + if !disk_without_snapshots.empty? - disk_without_snapshots.each do |disk| - remove_disk_spec = { :operation => :remove, :device => disk } - spec[:deviceChange] << remove_disk_spec + begin + if self['config.template'] + @item.MarkAsVirtualMachine(:pool => rp_get, :host => self['runtime.host']) + end + rescue StandardError => e + @item.MarkAsTemplate() + error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}" + use_linked_clones = false + return error, use_linked_clones + end - add_disk_spec = { :operation => :add, - :fileOperation => :create, - :device => disk.dup.tap { |x| + begin + spec = {} + spec[:deviceChange] = [] + + disk_without_snapshots.each do |disk| + remove_disk_spec = { :operation => :remove, :device => disk } + spec[:deviceChange] << remove_disk_spec + + add_disk_spec = { :operation => :add, + :fileOperation => :create, + :device => disk.dup.tap do |x| x.backing = x.backing.dup x.backing.fileName = "[#{disk.backing.datastore.name}]" x.backing.parent = disk.backing - } - } - spec[:deviceChange] << add_disk_spec - end - - @item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty? - rescue StandardError => e - error = "Cannot create the delta disks on top of the template. Reason: #{e.message}." - use_linked_clones = false - return error, use_linked_clones - end - - begin - @item.MarkAsTemplate() - rescue - error = "Cannot mark the VirtualMachine as a template. Not using linked clones." - use_linked_clones = false - return error, use_linked_clones - end - - error = nil - use_linked_clones = true - return error, use_linked_clones - else - # Template already has delta disks - error = nil - use_linked_clones = true - return error, use_linked_clones - end - end - - ######################################################################## - # Import vcenter disks - # @param type [object] contains the type of the object(:object) and identifier(:id) - # @return error, template_disks - ######################################################################## - def import_vcenter_disks(vc_uuid, dpool, ipool, type) - disk_info = "" - error = "" - images = [] - - begin - lock #Lock import operation, to avoid concurrent creation of images - - ##ccr_ref = self["runtime.host.parent._ref"] - dc = get_dc - dc_ref = dc.item._ref - - #Get disks and info required - vc_disks = get_vcenter_disks - vc_disks.sort_by! 
{|d| d[:device].unitNumber} - - # Track allocated images - allocated_images = [] - - vc_disks.each do |disk| - ds_ref = nil - begin - ds_ref = disk[:datastore]._ref - rescue - raise "The ISO #{disk[:path_wo_ds].name} cannot be found because the datastore was removed or deleted" - end - datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(ds_ref, - dc_ref, - vc_uuid, - dpool) - if datastore_found.nil? - error = "\n ERROR: datastore #{disk[:datastore].name}: has to be imported first as an image datastore!\n" - - #Rollback delete disk images - allocated_images.each do |i| - i.delete + end } + spec[:deviceChange] << add_disk_spec end - break + @item.ReconfigVM_Task(:spec => spec).wait_for_completion unless spec[:deviceChange].empty? + rescue StandardError => e + error = "Cannot create the delta disks on top of the template. Reason: #{e.message}." + use_linked_clones = false + return error, use_linked_clones end - opts = {:persistent => vm? ? "YES":"NO"} - image_import, image_name = VCenterDriver::Datastore.get_image_import_template(disk, ipool, type, datastore_found["ID"], opts, images) - #Image is already in the datastore - if image_import[:one] - # This is the disk info - disk_tmp = "" - disk_tmp << "DISK=[\n" - disk_tmp << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n" - disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n" - disk_tmp << "]\n" - disk_info << disk_tmp + begin + @item.MarkAsTemplate() + rescue StandardError + error = 'Cannot mark the VirtualMachine as a template. Not using linked clones.' + use_linked_clones = false + return error, use_linked_clones + end - elsif !image_import[:template].empty? + error = nil + use_linked_clones = true + [error, use_linked_clones] + else + # Template already has delta disks + error = nil + use_linked_clones = true + [error, use_linked_clones] + end + end - # Then the image is created as it's not in the datastore - one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image) - allocated_images << one_i - rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i, false) + ######################################################################## + # Import vcenter disks + # @param type [object] contains the type of the object(:object) and identifier(:id) + # @return error, template_disks + ######################################################################## + def import_vcenter_disks(vc_uuid, dpool, ipool, type) + disk_info = '' + error = '' + images = [] + + begin + lock # Lock import operation, to avoid concurrent creation of images + + # #ccr_ref = self["runtime.host.parent._ref"] + dc = get_dc + dc_ref = dc.item._ref + + # Get disks and info required + vc_disks = vcenter_disks_get + vc_disks.sort_by! {|d| d[:device].unitNumber } + + # Track allocated images + allocated_images = [] + + vc_disks.each do |disk| + ds_ref = nil + begin + ds_ref = disk[:datastore]._ref + rescue StandardError + raise "The ISO #{disk[:path_wo_ds].name} cannot be found because the datastore was removed or deleted" + end + datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(ds_ref, + dc_ref, + vc_uuid, + dpool) + if datastore_found.nil? 
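NOTE: import_vcenter_disks above appends every allocated image to
allocated_images so that a failure while importing disk N can delete disks
1..N-1 before surfacing the error. The same allocate-or-roll-back shape
reduced to plain lambdas (purely illustrative, no OpenNebula API):

    def allocate_all(specs, allocator, destroyer)
        done = []
        specs.each {|spec| done << allocator.call(spec) }
        done
    rescue StandardError
        done.each {|obj| destroyer.call(obj) } # undo partial work
        raise
    end

    made = []
    begin
        allocate_all([1, 2, 3],
                     ->(n) { raise 'boom' if n == 3; made << n; n },
                     ->(n) { made.delete(n) })
    rescue RuntimeError
        p made # => [] - the two successful allocations were rolled back
    end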
+ error = "\n ERROR: datastore #{disk[:datastore].name}: has to be imported first as an image datastore!\n" + + # Rollback delete disk images + allocated_images.each do |i| + i.delete + end - if OpenNebula.is_error?(rc) - error = " Error creating disk from template: #{rc.message}\n" break end - # Monitor image, we need READY state - one_i.info - start_time = Time.now + params = { + :disk => disk, + :ipool => ipool, + :_type => type, + :ds_id => datastore_found['ID'], + :opts => { + :persistent => vm? ? 'YES':'NO' + }, + :images => images + } - while one_i.state_str != "READY" and Time.now - start_time < 300 - sleep 1 + image_import, image_name = VCenterDriver::Datastore.get_image_import_template(params) + # Image is already in the datastore + if image_import[:one] + # This is the disk info + disk_tmp = '' + disk_tmp << "DISK=[\n" + disk_tmp << "IMAGE_ID=\"#{image_import[:one]['ID']}\",\n" + disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n" + disk_tmp << "]\n" + disk_info << disk_tmp + + elsif !image_import[:template].empty? + + # Then the image is created as it's not in the datastore + one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image) + allocated_images << one_i + rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i, false) + + if OpenNebula.is_error?(rc) + error = " Error creating disk from template: #{rc.message}\n" + break + end + + # Monitor image, we need READY state one_i.info + start_time = Time.now + + while (one_i.state_str != 'READY') && (Time.now - start_time < 300) + sleep 1 + one_i.info + end + + # Add info for One template + one_i.info + disk_info << "DISK=[\n" + disk_info << "IMAGE_ID=\"#{one_i['ID']}\",\n" + disk_info << "OPENNEBULA_MANAGED=\"NO\"\n" + disk_info << "]\n" + + images.push(image_name) + end + end + rescue StandardError => e + error = "\n There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}\n#{e.backtrace}" + ensure + unlock + if !error.empty? && allocated_images + # Rollback delete disk images + allocated_images.each do |i| + i.delete end - - - #Add info for One template - one_i.info - disk_info << "DISK=[\n" - disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n" - disk_info << "OPENNEBULA_MANAGED=\"NO\"\n" - disk_info << "]\n" - - images.push(image_name) end end - rescue StandardError => e - error = "\n There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}\n#{e.backtrace}" - ensure - unlock - if !error.empty? 
&& allocated_images - #Rollback delete disk images - allocated_images.each do |i| - i.delete + [error, disk_info, allocated_images] + end + + def create_ar(nic, with_id = false) + ar_tmp = '' + if nic[:mac] && nic[:ipv4] && nic[:ipv6] + ar_tmp << "AR=[\n" + ar_tmp << "AR_ID=0,\n" if with_id + ar_tmp << "TYPE=\"IP4_6_STATIC\",\n" + ar_tmp << "IP=\"#{nic[:ipv4]}\",\n" + ar_tmp << "MAC=\"#{nic[:mac]}\",\n" + ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n" + ar_tmp << "PREFIX_LENGTH=\"64\",\n" + ar_tmp << "SIZE=\"1\"\n" + ar_tmp << "]\n" + elsif nic[:mac] && nic[:ipv6] + ar_tmp << "AR=[\n" + ar_tmp << "AR_ID=0,\n" if with_id + ar_tmp << "TYPE=\"IP6_STATIC\",\n" + ar_tmp << "MAC=\"#{nic[:mac]}\",\n" + ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n" + ar_tmp << "PREFIX_LENGTH=\"64\",\n" + ar_tmp << "SIZE=\"1\"\n" + ar_tmp << "]\n" + elsif nic[:mac] && nic[:ipv4] + ar_tmp << "AR=[\n" + ar_tmp << "AR_ID=0,\n" if with_id + ar_tmp << "TYPE=\"IP4\",\n" + ar_tmp << "IP=\"#{nic[:ipv4]}\",\n" + ar_tmp << "MAC=\"#{nic[:mac]}\",\n" + ar_tmp << "SIZE=\"1\"\n" + ar_tmp << "]\n" + else + ar_tmp << "AR=[\n" + ar_tmp << "AR_ID=0,\n" if with_id + ar_tmp << "TYPE=\"ETHER\",\n" + ar_tmp << "MAC=\"#{nic[:mac]}\",\n" + ar_tmp << "SIZE=\"1\"\n" + ar_tmp << "]\n" + end + ar_tmp + end + + def save_ar_ids(network_found, nic, ar_ids, start_ids = false) + if start_ids + value = [] + arsNew = network_found.to_hash['VNET']['AR_POOL']['AR'] + arsNew = [arsNew] if arsNew.class.to_s.eql? 'Hash' + last_id = 0 + if ar_ids.has_key?(nic[:net_ref]) + ref = nic[:net_ref] + value = ar_ids[ref.to_s] + value.insert(value.length, last_id.to_s) + ar_ids.store(nic[:net_ref], value) + else + value.insert(value.length, last_id.to_s) + ar_ids.store(nic[:net_ref], value) + end + else + value = [] + arsNew = network_found.to_hash['VNET']['AR_POOL']['AR'] + arsNew = [arsNew] if arsNew.class.to_s.eql? 
'Hash' + last_id = arsNew.last['AR_ID'] + if ar_ids.has_key?(nic[:net_ref]) + ref = nic[:net_ref] + value = ar_ids[ref.to_s] + value.insert(value.length, last_id) + ar_ids.store(nic[:net_ref], value) + else + value.insert(value.length, last_id) + ar_ids.store(nic[:net_ref], value) end end + last_id end - return error, disk_info, allocated_images - end + def find_ips_in_network(network, vm_object, nic) + ipv4 = ipv6 = '' + return unless vm_object.is_a?(VCenterDriver::VirtualMachine) - def create_ar(nic, with_id = false) - ar_tmp = "" - if nic[:mac] && nic[:ipv4] && nic[:ipv6] - ar_tmp << "AR=[\n" - ar_tmp << "AR_ID=0,\n" if with_id - ar_tmp << "TYPE=\"IP4_6_STATIC\",\n" - ar_tmp << "IP=\"#{nic[:ipv4]}\",\n" - ar_tmp << "MAC=\"#{nic[:mac]}\",\n" - ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n" - ar_tmp << "PREFIX_LENGTH=\"64\",\n" - ar_tmp << "SIZE=\"1\"\n" - ar_tmp << "]\n" - elsif nic[:mac] && nic[:ipv6] - ar_tmp << "AR=[\n" - ar_tmp << "AR_ID=0,\n" if with_id - ar_tmp << "TYPE=\"IP6_STATIC\",\n" - ar_tmp << "MAC=\"#{nic[:mac]}\",\n" - ar_tmp << "IP6=\"#{nic[:ipv6]}\",\n" - ar_tmp << "PREFIX_LENGTH=\"64\",\n" - ar_tmp << "SIZE=\"1\"\n" - ar_tmp << "]\n" - elsif nic[:mac] && nic[:ipv4] - ar_tmp << "AR=[\n" - ar_tmp << "AR_ID=0,\n" if with_id - ar_tmp << "TYPE=\"IP4\",\n" - ar_tmp << "IP=\"#{nic[:ipv4]}\",\n" - ar_tmp << "MAC=\"#{nic[:mac]}\",\n" - ar_tmp << "SIZE=\"1\"\n" - ar_tmp << "]\n" - else - ar_tmp << "AR=[\n" - ar_tmp << "AR_ID=0,\n" if with_id - ar_tmp << "TYPE=\"ETHER\",\n" - ar_tmp << "MAC=\"#{nic[:mac]}\",\n" - ar_tmp << "SIZE=\"1\"\n" - ar_tmp << "]\n" - end - ar_tmp - end + network.info - def save_ar_ids(network_found, nic, ar_ids, start_ids = false) - if start_ids - value = [] - arsNew = network_found.to_hash["VNET"]["AR_POOL"]["AR"] - arsNew = [arsNew] if arsNew.class.to_s.eql? "Hash" - last_id = 0 - if ar_ids.has_key?(nic[:net_ref]) - ref = nic[:net_ref] - value = ar_ids[ref.to_s] - value.insert(value.length, last_id.to_s) - ar_ids.store(nic[:net_ref], value) - else - value.insert(value.length , last_id.to_s) - ar_ids.store(nic[:net_ref], value) - end - else - value = [] - arsNew = network_found.to_hash["VNET"]["AR_POOL"]["AR"] - arsNew = [arsNew] if arsNew.class.to_s.eql? "Hash" - last_id = arsNew.last["AR_ID"] - if ar_ids.has_key?(nic[:net_ref]) - ref = nic[:net_ref] - value = ar_ids[ref.to_s] - value.insert(value.length, last_id) - ar_ids.store(nic[:net_ref], value) - else - value.insert(value.length , last_id) - ar_ids.store(nic[:net_ref], value) - end - end - last_id - end - - def find_ips_in_network(network, vm_object, nic) - ipv4 = ipv6 = "" - return if !vm_object.is_a?(VCenterDriver::VirtualMachine) - network.info - - # Iterate over Retrieve vCenter VM NICs - unless vm_object.item.guest.net.empty? - vm_object.item.guest.net.each do |net| - mac = net.macAddress - if nic[:mac] == mac + # Iterate over Retrieve vCenter VM NICs + unless vm_object.item.guest.net.empty? + vm_object.item.guest.net.each do |net| + mac = net.macAddress + next unless nic[:mac] == mac next unless net.ipConfig next if net.ipConfig.ipAddress.empty? 
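NOTE: the image import above polls one_i.info until the state string is
READY, giving up after 300 seconds. That poll-with-deadline loop as a
generic helper (deadline matches the 300 s used above; the block below is
a toy condition so the sketch runs standalone):

    def wait_until(deadline_s, interval_s = 1)
        start = Time.now
        until yield
            return false if Time.now - start > deadline_s

            sleep interval_s
        end
        true
    end

    t0 = Time.now
    p wait_until(300, 0.1) { Time.now - t0 > 0.3 } # => true after ~0.3 s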
+ net.ipConfig.ipAddress.each do |ip_config| ip = IPAddr.new(ip_config.ipAddress) ar_array = network.to_hash['VNET']['AR_POOL']['AR'] ar_array = [ar_array] if ar_array.is_a?(Hash) ipv4, ipv6 = find_ip_in_ar(ip, ar_array) if ar_array - break if ipv4 !="" or ipv6 != "" + break if (ipv4 !='') || (ipv6 != '') end break end end + [ipv4, ipv6] end - return ipv4, ipv6 - end - def find_ip_in_ar(ip, ar_array) - ipv4 = ipv6 = "" - ar_array.each do |ar| - if ar.key?('IP') && ar.key?('IP_END') + def find_ip_in_ar(ip, ar_array) + ipv4 = ipv6 = '' + ar_array.each do |ar| + next unless ar.key?('IP') && ar.key?('IP_END') + start_ip = IPAddr.new(ar['IP']) end_ip = IPAddr.new(ar['IP_END']) - if ip.family == start_ip.family && - ip.family == end_ip.family - if ip > start_ip && ip < end_ip - ipv4 = ip.to_s if ip.ipv4? - ipv6 = ip.to_s if ip.ipv6? - end + next unless ip.family == start_ip.family && + ip.family == end_ip.family + + if ip > start_ip && ip < end_ip + ipv4 = ip.to_s if ip.ipv4? + ipv6 = ip.to_s if ip.ipv6? end end + [ipv4, ipv6] end - return ipv4, ipv6 - end - # Get vSwitch of Standard PortGroup - # If there is differents vSwitches returns the first. - def vSwitch(vc_pg) - vswitch = [] - vc_hosts = vc_pg.host - vc_hosts.each do |vc_host| - host_pgs = vc_host.configManager.networkSystem.networkInfo.portgroup rescue [] - host_pgs.each do |pg| - if vc_pg.name == pg.spec.name - vswitch << pg.spec.vswitchName + def import_vcenter_nics( + vi_client, + vc_uuid, + npool, + hpool, + vcenter_instance_name, + template_ref, + vm_object, + vm_id = nil, + dc_name = nil + ) + nic_info = '' + error = '' + ar_ids = {} + begin + lock # Lock import operation, to avoid concurrent creation of networks + + if !dc_name + dc = get_dc + dc_name = dc.item.name + dc_ref = dc.item._ref end - end - end - vswitch.uniq! - vswitch << 'Invalid configuration' if vswitch.length > 1 - vswitch.join(" / ") - end - def import_vcenter_nics(vi_client, vc_uuid, npool, hpool, vcenter_instance_name, - template_ref, vm_object, vm_id=nil, dc_name=nil) - nic_info = '' - error = '' - ar_ids = {} - begin - lock # Lock import operation, to avoid concurrent creation of networks + ccr_ref = self['runtime.host.parent._ref'] + ccr_name = self['runtime.host.parent.name'] - if !dc_name - dc = get_dc - dc_name = dc.item.name - dc_ref = dc.item._ref - end + # Get nics and info required + vc_nics = vcenter_nics_hash - ccr_ref = self["runtime.host.parent._ref"] - ccr_name = self["runtime.host.parent.name"] + # Track allocated networks for rollback + allocated_networks = [] - # Get nics and info required - vc_nics = vcenter_nics_hash + vc_nics.each do |nic| + # Check if the network already exists + network_found = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool, + 'TEMPLATE/VCENTER_NET_REF', + nic[:net_ref], + vc_uuid, + npool) + # Network is already in OpenNebula + if network_found + nic_tmp = "NIC=[\n" + nic_tmp << "NETWORK_ID=\"#{network_found['ID']}\",\n" - # Track allocated networks for rollback - allocated_networks = [] + if vm? + ipv4, ipv6 = find_ips_in_network(network_found, vm_object, nic) + ar_tmp = create_ar(nic) + network_found.add_ar(ar_tmp) + network_found.info + last_id = save_ar_ids(network_found, nic, ar_ids) - # Track port groups duplicated in this VM - duplicated_networks = [] + # This is the existing nic info + nic_tmp << "AR_ID=\"#{last_id}\",\n" + if nic[:mac] && ipv4.empty? && ipv6.empty? + nic_tmp << "MAC=\"#{nic[:mac]}\",\n" + end + nic_tmp << "IP=\"#{ipv4}\"," unless ipv4.empty? 
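NOTE: find_ip_in_ar above leans on IPAddr's comparable semantics: two
addresses of the same family order correctly with < and >, so an address
range test is two comparisons. The driver excludes both range ends
(strict < and >), which the sketch below reproduces as-is:

    require 'ipaddr'

    def in_range?(ip, first, last)
        ip, first, last = [ip, first, last].map {|a| IPAddr.new(a) }
        return false unless ip.family == first.family &&
                            ip.family == last.family

        ip > first && ip < last
    end

    p in_range?('10.0.0.5', '10.0.0.1', '10.0.0.10') # => true
    p in_range?('10.0.0.1', '10.0.0.1', '10.0.0.10') # => false (endpoint)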
+ nic_tmp << "IP=\"#{ipv6}\"," unless ipv6.empty? + if nic[:ipv4_additionals] + nic_tmp << + "VCENTER_ADDITIONALS_IP4=\"#{nic[:ipv4_additionals]}\",\n" + end + if nic[:ipv6] + nic_tmp << "VCENTER_IP6=\"#{nic[:ipv6]}\",\n" + end - vc_nics.each do |nic| - # Check if the network already exists - network_found = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool, - 'TEMPLATE/VCENTER_NET_REF', - nic[:net_ref], - vc_uuid, - npool) - # Network is already in OpenNebula - if network_found - nic_tmp = "NIC=[\n" - nic_tmp << "NETWORK_ID=\"#{network_found["ID"]}\",\n" + if nic[:ipv6_global] + nic_tmp << "IP6_GLOBAL=\"#{nic[:ipv6_global]}\",\n" + end - if vm? - ipv4, ipv6 = find_ips_in_network(network_found, vm_object, nic) - ar_tmp = create_ar(nic) - network_found.add_ar(ar_tmp) - network_found.info - last_id = save_ar_ids(network_found, nic, ar_ids) + if nic[:ipv6_ula] + nic_tmp << "IP6_ULA=\"#{nic[:ipv6_ula]}\",\n" + end - # This is the existing nic info - nic_tmp << "AR_ID=\"#{last_id}\",\n" - nic_tmp << "MAC=\"#{nic[:mac]}\",\n" if nic[:mac] and ipv4.empty? and ipv6.empty? - nic_tmp << "IP=\"#{ipv4}\"," if !ipv4.empty? - nic_tmp << "IP=\"#{ipv6}\"," if !ipv6.empty? - nic_tmp << "VCENTER_ADDITIONALS_IP4=\"#{nic[:ipv4_additionals]}\",\n" if nic[:ipv4_additionals] - nic_tmp << "VCENTER_IP6=\"#{nic[:ipv6]}\",\n" if nic[:ipv6] - nic_tmp << "IP6_GLOBAL=\"#{nic[:ipv6_global]}\",\n" if nic[:ipv6_global] - nic_tmp << "IP6_ULA=\"#{nic[:ipv6_ula]}\",\n" if nic[:ipv6_ula] - nic_tmp << "VCENTER_ADDITIONALS_IP6=\"#{nic[:ipv6_additionals]}\",\n" if nic[:ipv6_additionals] - end + if nic[:ipv6_additionals] + nic_tmp << + "VCENTER_ADDITIONALS_IP6\ + =\"#{nic[:ipv6_additionals]}\",\n" + end + end - nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n" - nic_tmp << "]\n" + nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n" + nic_tmp << "]\n" - nic_info << nic_tmp + nic_info << nic_tmp - # Network not found - else - config = {} - config[:refs] = nic[:refs] - - # Then the network has to be created as it's not in OpenNebula - one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork) - - # Let's get the OpenNebula hosts ids associated to the clusters references - config[:one_ids] = nic[:refs].map do |ref| - VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, - "TEMPLATE/VCENTER_CCR_REF", - ref, - vc_uuid, - hpool)["CLUSTER_ID"] rescue -1 - end - - if vm? 
- unmanaged = "wild" + # Network not found else - unmanaged = "template" - end + config = {} + config[:refs] = nic[:refs] - net = VCenterDriver::Network.new_from_ref(nic[:net_ref], vi_client) - vid = VCenterDriver::Network.retrieve_vlanid(net.item) if net + # # Then the network has to be created + # # as it's not in OpenNebula + # one_vn = + # VCenterDriver::VIHelper + # .new_one_item( + # OpenNebula::VirtualNetwork + # ) - case nic[:pg_type] - # Distributed PortGroups - when VCenterDriver::Network::NETWORK_TYPE_DPG - config[:sw_name] = nic[:network].config.distributedVirtualSwitch.name - # For DistributedVirtualPortgroups there is networks and uplinks - config[:uplink] = false - # NSX-V PortGroups - when VCenterDriver::Network::NETWORK_TYPE_NSXV - config[:sw_name] = nic[:network].config.distributedVirtualSwitch.name - # For NSX-V ( is the same as DistributedVirtualPortgroups ) - # there is networks and uplinks - config[:uplink] = false - - host_id = vi_client.instance_variable_get '@host_id' - - begin - nsx_client = NSXDriver::NSXClient.new_from_id(host_id) - rescue - nsx_client = nil + # Let's get the OpenNebula hosts ids + # associated to the clusters references + config[:one_ids] = nic[:refs].map do |ref| + VCenterDriver::VIHelper + .find_by_ref( + OpenNebula::HostPool, + 'TEMPLATE/VCENTER_CCR_REF', + ref, + vc_uuid, + hpool + )['CLUSTER_ID'] rescue -1 end - if nsx_client != nil - nsx_net = NSXDriver::VirtualWire.new_from_name(nsx_client, nic[:net_name]) - - config[:nsx_id] = nsx_net.ls_id - config[:nsx_vni] = nsx_net.ls_vni - config[:nsx_tz_id] = nsx_net.tz_id - end - # Standard PortGroups - when VCenterDriver::Network::NETWORK_TYPE_PG - # There is no uplinks for standard portgroups, so all Standard - # PortGroups are networks and no uplinks - config[:uplink] = false - config[:sw_name] = vSwitch(nic[:network]) - # NSX-T PortGroups - when VCenterDriver::Network::NETWORK_TYPE_NSXT - config[:sw_name] = \ - nic[:network].summary.opaqueNetworkType - # There is no uplinks for NSX-T networks, so all NSX-T networks - # are networks and no uplinks - config[:uplink] = false - - host_id = vi_client.instance_variable_get '@host_id' - - begin - nsx_client = NSXDriver::NSXClient.new_from_id(host_id) - rescue - nsx_client = nil + if vm? 
+ unmanaged = 'wild' + else + unmanaged = 'template' end - if nsx_client != nil - nsx_net = NSXDriver::OpaqueNetwork.new_from_name(nsx_client, nic[:net_name]) - - config[:nsx_id] = nsx_net.ls_id - config[:nsx_vni] = nsx_net.ls_vni - config[:nsx_tz_id] = nsx_net.tz_id + net = VCenterDriver::Network + .new_from_ref( + nic[:net_ref], + vi_client + ) + if net + vid = VCenterDriver::Network.retrieve_vlanid(net.item) end - else - raise "Unknown network type: #{nic[:pg_type]}" - end + case nic[:pg_type] + # Distributed PortGroups + when VCenterDriver::Network::NETWORK_TYPE_DPG + config[:sw_name] = + nic[:network] + .config + .distributedVirtualSwitch + .name + # For DistributedVirtualPortgroups + # there is networks and uplinks + config[:uplink] = false + # NSX-V PortGroups + when VCenterDriver::Network::NETWORK_TYPE_NSXV + config[:sw_name] = + nic[:network] + .config + .distributedVirtualSwitch + .name + # For NSX-V ( is the same as + # DistributedVirtualPortgroups ) + # there is networks and uplinks + config[:uplink] = false - import_opts = { - :network_name=> nic[:net_name], - :sw_name=> config[:sw_name], - :network_ref=> nic[:net_ref], - :network_type=> nic[:pg_type], - :ccr_ref=> ccr_ref, - :ccr_name=> ccr_name, - :vcenter_uuid=> vc_uuid, - :vcenter_instance_name=> vcenter_instance_name, - :dc_name=> dc_name, - :unmanaged=> unmanaged, - :template_ref=> template_ref, - :dc_ref=> dc_ref, - :template_id=> vm_id - } + host_id = vi_client.instance_variable_get '@host_id' - if nic[:pg_type] == VCenterDriver::Network::NETWORK_TYPE_NSXV || nic[:pg_type] == VCenterDriver::Network::NETWORK_TYPE_NSXT - import_opts[:nsx_id] = config[:nsx_id] - import_opts[:nsx_vni] = config[:nsx_vni] - import_opts[:nsx_tz_id] = config[:nsx_tz_id] - end + begin + nsx_client = + NSXDriver::NSXClient + .new_from_id( + host_id + ) + rescue StandardError + nsx_client = nil + end - if vid - vlanid = VCenterDriver::Network.vlanid(vid) + if !nsx_client.nil? + nsx_net = + NSXDriver::VirtualWire + .new_from_name( + nsx_client, + nic[:net_name] + ) - # we have vlan id - if /\A\d+\z/.match(vlanid) - import_opts[:vlanid] = vlanid + config[:nsx_id] = nsx_net.ls_id + config[:nsx_vni] = nsx_net.ls_vni + config[:nsx_tz_id] = nsx_net.tz_id + end + # Standard PortGroups + when VCenterDriver::Network::NETWORK_TYPE_PG + # There is no uplinks for standard portgroups, + # so all Standard + # PortGroups are networks and no uplinks + config[:uplink] = false + config[:sw_name] = + VCenterDriver::Network + .virtual_switch( + nic[:network] + ) + # NSX-T PortGroups + when VCenterDriver::Network::NETWORK_TYPE_NSXT + config[:sw_name] = \ + nic[:network].summary.opaqueNetworkType + # There is no uplinks for NSX-T networks, + # so all NSX-T networks + # are networks and no uplinks + config[:uplink] = false + + host_id = vi_client.instance_variable_get '@host_id' + + begin + nsx_client = + NSXDriver::NSXClient + .new_from_id( + host_id + ) + rescue StandardError + nsx_client = nil + end + + if !nsx_client.nil? 
+ nsx_net = + NSXDriver::OpaqueNetwork + .new_from_name( + nsx_client, + nic[:net_name] + ) + + config[:nsx_id] = nsx_net.ls_id + config[:nsx_vni] = nsx_net.ls_vni + config[:nsx_tz_id] = nsx_net.tz_id + end + else + raise "Unknown network type: #{nic[:pg_type]}" end + + import_opts = { + :network_name=> nic[:net_name], + :sw_name=> config[:sw_name], + :network_ref=> nic[:net_ref], + :network_type=> nic[:pg_type], + :ccr_ref=> ccr_ref, + :ccr_name=> ccr_name, + :vcenter_uuid=> vc_uuid, + :vcenter_instance_name=> vcenter_instance_name, + :dc_name=> dc_name, + :unmanaged=> unmanaged, + :template_ref=> template_ref, + :dc_ref=> dc_ref, + :template_id=> vm_id + } + + if nic[:pg_type] == + VCenterDriver::Network::NETWORK_TYPE_NSXV || + nic[:pg_type] == + VCenterDriver::Network::NETWORK_TYPE_NSXT + import_opts[:nsx_id] = config[:nsx_id] + import_opts[:nsx_vni] = config[:nsx_vni] + import_opts[:nsx_tz_id] = config[:nsx_tz_id] + end + + if vid + vlanid = VCenterDriver::Network.vlanid(vid) + + # we have vlan id + if /\A\d+\z/.match(vlanid) + import_opts[:vlanid] = vlanid + end + end + + # Prepare the Virtual Network template + one_vnet = + VCenterDriver::Network + .to_one_template( + import_opts + ) + + # always has to be created because of + # templates when they are instantiated + ar_tmp = '' + ar_tmp << "AR=[\n" + ar_tmp << "TYPE=\"ETHER\",\n" + ar_tmp << "SIZE=255\n" + ar_tmp << "]\n" + + if vm? + ar_tmp << create_ar(nic, true) + end + + one_vnet[:one] << ar_tmp + config[:one_object] = one_vnet[:one] + _cluster_id = + VCenterDriver::VIHelper + .get_cluster_id( + config[:one_ids] + ) + + one_vn = + VCenterDriver::Network + .create_one_network( + config + ) + allocated_networks << one_vn + VCenterDriver::VIHelper.clean_ref_hash + one_vn.info + + nic_tmp = "NIC=[\n" + nic_tmp << "NETWORK_ID=\"#{one_vn.id}\",\n" + + if vm? + last_id = save_ar_ids(one_vn, nic, ar_ids) + nic_tmp << "AR_ID=\"#{last_id}\",\n" + if nic[:mac] + nic_tmp << "MAC=\"#{nic[:mac]}\",\n" + end + if nic[:ipv4_additionals] + nic_tmp << + "VCENTER_ADDITIONALS_IP4\ + =\"#{nic[:ipv4_additionals]}\",\n" + end + if nic[:ipv6] + nic_tmp << + "VCENTER_IP6=\"#{nic[:ipv6]}\",\n" + end + if nic[:ipv6_global] + nic_tmp << + "IP6_GLOBAL=\"#{nic[:ipv6_global]}\",\n" + end + if nic[:ipv6_ula] + nic_tmp << + "IP6_ULA=\"#{nic[:ipv6_ula]}\",\n" + end + if nic[:ipv6_additionals] + nic_tmp << + "VCENTER_ADDITIONALS_IP6\ + =\"#{nic[:ipv6_additionals]}\",\n" + end + end + + nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n" + nic_tmp << "]\n" + nic_info << nic_tmp + + # Refresh npool + npool.info_all end - - # Prepare the Virtual Network template - one_vnet = VCenterDriver::Network.to_one_template(import_opts) - - # always has to be created because of templates when they are instantiated - ar_tmp = "" - ar_tmp << "AR=[\n" - ar_tmp << "TYPE=\"ETHER\",\n" - ar_tmp << "SIZE=255\n" - ar_tmp << "]\n" - - if vm? - ar_tmp << create_ar(nic, true) + end + rescue StandardError => e + error = "\n There was an error trying to create \ + a virtual network to repesent a \ + vCenter network for a VM or VM Template. \ + Reason: #{e.message}" + ensure + unlock + # Rollback, delete virtual networks + if !error.empty? 
&& allocated_networks + allocated_networks.each do |n| + n.delete end - - one_vnet[:one] << ar_tmp - config[:one_object] = one_vnet[:one] - cluster_id = VCenterDriver::VIHelper.get_cluster_id(config[:one_ids]) - - one_vn = VCenterDriver::Network.create_one_network(config) - allocated_networks << one_vn - VCenterDriver::VIHelper.clean_ref_hash() - one_vn.info - - nic_tmp = "NIC=[\n" - nic_tmp << "NETWORK_ID=\"#{one_vn.id}\",\n" - - if vm? - last_id = save_ar_ids(one_vn, nic, ar_ids) - nic_tmp << "AR_ID=\"#{last_id}\",\n" - nic_tmp << "MAC=\"#{nic[:mac]}\",\n" if nic[:mac] - nic_tmp << "VCENTER_ADDITIONALS_IP4=\"#{nic[:ipv4_additionals]}\",\n" if nic[:ipv4_additionals] - nic_tmp << "VCENTER_IP6=\"#{nic[:ipv6]}\",\n" if nic[:ipv6] - nic_tmp << "IP6_GLOBAL=\"#{nic[:ipv6_global]}\",\n" if nic[:ipv6_global] - nic_tmp << "IP6_ULA=\"#{nic[:ipv6_ula]}\",\n" if nic[:ipv6_ula] - nic_tmp << "VCENTER_ADDITIONALS_IP6=\"#{nic[:ipv6_additionals]}\",\n" if nic[:ipv6_additionals] - end - - nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n" - nic_tmp << "]\n" - nic_info << nic_tmp - - # Refresh npool - npool.info_all - end - end - rescue StandardError => e - error = "\n There was an error trying to create a virtual network to repesent a vCenter network for a VM or VM Template. Reason: #{e.message}" - ensure - unlock - #Rollback, delete virtual networks - if !error.empty? && allocated_networks - allocated_networks.each do |n| - n.delete end end + + [error, nic_info, ar_ids, allocated_networks] end - return error, nic_info, ar_ids, allocated_networks - end + def get_vcenter_disk_key(unit_number, controller_key) + key = nil - def get_vcenter_disk_key(unit_number, controller_key) + @item['config.hardware.device'].each do |device| + disk = {} - key = nil + next unless disk_or_iso?(device) - @item["config.hardware.device"].each do |device| - disk = {} + disk[:device] = device + next unless device.controllerKey == controller_key && + device.unitNumber == unit_number - if is_disk_or_iso?(device) - disk[:device] = device - if device.controllerKey == controller_key && - device.unitNumber == unit_number - - key = device.key - break - end + key = device.key + break end + + key end - return key - end + def vcenter_disks_get + disks = [] + ide_controlled = [] + sata_controlled = [] + scsi_controlled = [] + controller = {} - def get_vcenter_disks + @item['config.hardware.device'].each do |device| + disk = {} - disks = [] - ide_controlled = [] - sata_controlled = [] - scsi_controlled = [] - controller = {} + if device.is_a? RbVmomi::VIM::VirtualIDEController + ide_controlled.concat(device.device) + controller[device.key] = "ide#{device.busNumber}" + end - @item["config.hardware.device"].each do |device| - disk = {} + if device.is_a? RbVmomi::VIM::VirtualSATAController + sata_controlled.concat(device.device) + controller[device.key] = "sata#{device.busNumber}" + end - if device.is_a? RbVmomi::VIM::VirtualIDEController - ide_controlled.concat(device.device) - controller[device.key] = "ide#{device.busNumber}" - end + if device.is_a? RbVmomi::VIM::VirtualSCSIController + scsi_controlled.concat(device.device) + controller[device.key] = "scsi#{device.busNumber}" + end - if device.is_a? RbVmomi::VIM::VirtualSATAController - sata_controlled.concat(device.device) - controller[device.key] = "sata#{device.busNumber}" - end + next unless disk_or_iso?(device) - if device.is_a? 
RbVmomi::VIM::VirtualSCSIController - scsi_controlled.concat(device.device) - controller[device.key] = "scsi#{device.busNumber}" - end + disk[:device] = device - if is_disk_or_iso?(device) - disk[:device] = device + unless device.backing.datastore + raise "datastore not found for VM's device" + end - raise "datastore not found for VM's device" unless device.backing.datastore - disk[:datastore] = device.backing.datastore - disk[:path] = device.backing.fileName - disk[:path_wo_ds]= disk[:path].sub(/^\[(.*?)\] /, "") - disk[:type] = is_disk?(device) ? "OS" : "CDROM" - disk[:key] = device.key - disk[:prefix] = "hd" if ide_controlled.include?(device.key) - disk[:prefix] = "sd" if scsi_controlled.include?(device.key) - disk[:prefix] = "sd" if sata_controlled.include?(device.key) - disk[:tag] = "#{controller[device.controllerKey]}:#{device.unitNumber}" + disk[:datastore] = + device.backing.datastore + disk[:path] = + device.backing.fileName + disk[:path_wo_ds]= + disk[:path].sub(/^\[(.*?)\] /, '') + disk?(device) ? disk[:type] = 'OS' : disk[:type] = 'CDROM' + disk[:key] = + device.key + if ide_controlled.include?(device.key) + disk[:prefix] = 'hd' + end + if scsi_controlled.include?(device.key) + disk[:prefix] = 'sd' + end + if sata_controlled.include?(device.key) + disk[:prefix] = 'sd' + end + disk[:tag] = + "#{controller[device.controllerKey]}:#{device.unitNumber}" disks << disk end + + disks end - return disks - end - - def get_vcenter_nics - nics = [] - @item.config.hardware.device.each do |device| - nics << device if is_nic?(device) - end - - nics - end - - def identify_network(identifier, network) - if network.class == RbVmomi::VIM::DistributedVirtualPortgroup - if identifier == network.key - return network - else - return nil + def vcenter_nics_get + nics = [] + @item.config.hardware.device.each do |device| + nics << device if VCenterDriver::Network.nic?(device) end + + nics end - if network.class == RbVmomi::VIM::Network - if identifier == network - return network - else - return nil + def identify_network(identifier, network) + if network.class == RbVmomi::VIM::DistributedVirtualPortgroup + return network if identifier == network.key + + return end - end - if network.class == RbVmomi::VIM::OpaqueNetwork + if network.class == RbVmomi::VIM::Network + return network if identifier == network + + return + end + + return unless network.class == RbVmomi::VIM::OpaqueNetwork + if identifier == network.summary.opaqueNetworkId - return network + network else - return nil + nil end end - end - def retrieve_from_device(device) - deviceNetwork = nil - deviceNetworkId = nil - # First search network corresponding this device - # Distributed Networks and NSX-V Networks - if device.backing[:port] != nil - deviceNetworkId = device.backing.port.portgroupKey - # Standard Networks - elsif device.backing[:network] != nil - deviceNetworkId = device.backing[:network] - # NSX-T Opaque Networks - elsif device.backing[:opaqueNetworkId] != nil - deviceNetworkId = device.backing[:opaqueNetworkId] + def retrieve_from_device(device) + device_network = nil + device_network_id = nil + # First search network corresponding this device + # Distributed Networks and NSX-V Networks + if !device.backing[:port].nil? + device_network_id = device.backing.port.portgroupKey + # Standard Networks + elsif !device.backing[:network].nil? + device_network_id = device.backing[:network] + # NSX-T Opaque Networks + elsif !device.backing[:opaqueNetworkId].nil? 
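NOTE: vcenter_disks_get above first records which device keys hang off the
IDE, SATA and SCSI controllers, then labels each disk hd (IDE) or sd
(SATA/SCSI) and tags it controller:unit. The key-to-prefix step reduced to
data (the keys below are made up for the example):

    PREFIX_BY_BUS = { 'ide' => 'hd', 'sata' => 'sd', 'scsi' => 'sd' }.freeze

    controlled = { 2000 => 'scsi', 3000 => 'ide' } # device.key => bus
    p PREFIX_BY_BUS[controlled[2000]] # => "sd"
    p PREFIX_BY_BUS[controlled[3000]] # => "hd"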
+ device_network_id = device.backing[:opaqueNetworkId]
+ end
+
+ # Check if networkId exists
+ if device_network_id.nil?
+ raise "Invalid or unsupported network #{device.backing}"
+ end
+
+ # Matching between device and network objects
+ @item.network.each do |net|
+ device_network = identify_network(device_network_id, net)
+ break unless device_network.nil?
+ end
+
+ # Check network matching
+ if device_network.nil?
+ raise "\"#{device.deviceInfo.label}\" " \
+ 'does not match any known network'
+ end
+
+ res = {}
+
+ res[:refs] = device_network.host.map do |h|
+ h.parent._ref if h.parent
+ end
+
+ res[:net_name] =
+ device_network.name
+ res[:net_ref] =
+ device_network._ref
+ res[:pg_type] =
+ VCenterDriver::Network
+ .get_network_type(
+ device_network
+ )
+ res[:network] =
+ device_network
+
+ res
 end
- # Check if networkId exists
- if deviceNetworkId == nil
- raise "Invalid or not supported network #{device.backing}"
+ def vcenter_nics_hash
+ parse_live = lambda {|inets_raw|
+ h = nil
+ begin
+ h = inets_raw.to_h
+ rescue NoMethodError
+ h = {}
+ inets_raw.each do |nic_dev|
+ h[nic_dev[0]] = nic_dev[1]
+ end
+ end
+
+ return h
+ }
+
+ nics = []
+ inets_raw = nil
+ inets = {}
+
+ @item['config.hardware.device'].each do |device|
+ next unless VCenterDriver::Network.nic?(device)
+
+ nic = retrieve_from_device(device)
+ nic[:mac] = device.macAddress rescue nil
+ if vm?
+ if online?
+ inets_raw ||=
+ @item['guest.net']
+ .map
+ .with_index {|x, _| [x.macAddress, x] }
+ inets = parse_live.call(inets_raw) if inets.empty?
+
+ if !inets[nic[:mac]].nil?
+ ip_addresses =
+ inets[nic[:mac]]
+ .ipConfig
+ .ipAddress rescue nil
+ end
+
+ if !ip_addresses.nil? && !ip_addresses.empty?
+ nic[:ipv4],
+ nic[:ipv4_additionals] = nil
+ nic[:ipv6],
+ nic[:ipv6_ula],
+ nic[:ipv6_global],
+ nic[:ipv6_additionals] = nil
+ fill_nic(ip_addresses, nic)
+ end
+ end
+ end
+ nics << nic
+ end
+
+ nics
 end
- # Matching between device and network objects
- @item.network.each do |net|
- deviceNetwork = identify_network(deviceNetworkId, net)
- break unless deviceNetwork.nil?
+ def fill_nic(ip_addresses, nic)
+ (0...ip_addresses.length).each do |i|
+ ip = ip_addresses[i].ipAddress
+ if ip =~ Resolv::IPv4::Regex
+ if nic[:ipv4]
+ if nic[:ipv4_additionals]
+ nic[:ipv4_additionals] += ',' + ip
+ else
+ nic[:ipv4_additionals] = ip
+ end
+ else
+ nic[:ipv4] = ip
+ end
+ elsif ip_addresses[i].ipAddress =~ Resolv::IPv6::Regex
+ if get_ipv6_prefix(ip, 3) == '2000'
+ if nic[:ipv6_global]
+ if nic[:ipv6_additionals]
+ nic[:ipv6_additionals] += ',' + ip
+ else
+ nic[:ipv6_additionals] = ip
+ end
+ else
+ nic[:ipv6_global] = ip
+ end
+ elsif get_ipv6_prefix(ip, 10) == 'fe80'
+ nic[:ipv6] = ip
+ elsif get_ipv6_prefix(ip, 7) == 'fc00'
+ if nic[:ipv6_ula]
+ if nic[:ipv6_additionals]
+ nic[:ipv6_additionals] += ',' + ip
+ else
+ nic[:ipv6_additionals] = ip
+ end
+ else
+ nic[:ipv6_ula] = ip
+ end
+ end
+ end
+ end
 end
- # Check network matching
- if deviceNetwork.nil? 
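The `parse_live` lambda above builds a MAC-address lookup from `@item['guest.net']`: it tries `Array#to_h` on the `[mac, nic]` pairs first and falls back to manual insertion when the collection does not support `to_h`. The same pattern in isolation, with sample data standing in for the vCenter guest objects:

    pairs = [['aa:bb:cc:00:00:01', 'guest-nic-1'],
             ['aa:bb:cc:00:00:02', 'guest-nic-2']]

    lookup =
        begin
            pairs.to_h
        rescue NoMethodError
            h = {}
            pairs.each {|pair| h[pair[0]] = pair[1] }
            h
        end

    lookup['aa:bb:cc:00:00:01'] #=> 'guest-nic-1'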
- raise "\"#{device.deviceInfo.label}\" not match any known network" + def get_ipv6_prefix(ipv6, prefix_length) + ip_slice = ipv6.split(':').map {|elem| elem.hex }.map do |elem| + int, dec = elem.divmod(1) + bin = int.to_s(2).to_s + + while dec > 0 + int, dec = (dec * 2).divmod(1) + bin << int.to_s + end + + elem = bin + end.map {|elem| elem.rjust(16, '0') } + + ip_chain = ip_slice.join + prefix = ip_chain[0, prefix_length] + + cont = 0 + limit = prefix.length + index = 0 + slices = [] + + while cont < limit + slices[index] = prefix.slice(cont, 4) + slices[index] = slices[index].ljust(4, '0') + index +=1 + cont+=4 + end + + slices + .map do |elem| + '%0x' % elem.to_i(2) # rubocop:disable Style/FormatString, Style/FormatStringToken + end.join.ljust(4, '0') end - res = {} - - res[:refs] = deviceNetwork.host.map do |h| - h.parent._ref if h.parent + # Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom + def disk_or_cdrom?(device) + is_disk = + !device + .class + .ancestors + .index(RbVmomi::VIM::VirtualDisk).nil? + is_cdrom = + !device + .class + .ancestors + .index(RbVmomi::VIM::VirtualCdrom).nil? + is_disk || is_cdrom end - res[:net_name] = deviceNetwork.name - res[:net_ref] = deviceNetwork._ref - res[:pg_type] = VCenterDriver::Network.get_network_type(deviceNetwork) - res[:network] = deviceNetwork + # Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file + def disk_or_iso?(device) + is_disk = + !device + .class + .ancestors + .index(RbVmomi::VIM::VirtualDisk).nil? + is_iso = + device + .backing + .is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo + is_disk || is_iso + end - res - end + # Checks if a RbVmomi::VIM::VirtualDevice is a disk + def disk?(device) + !device.class.ancestors.index(RbVmomi::VIM::VirtualDisk).nil? + end - def vcenter_nics_hash - parse_live = ->(inets_raw) { - h = nil + def cdrom?(device) + device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo + end + + # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster + def rp_get + self['runtime.host.parent.resourcePool'] + end + + def esx_name + self['runtime.host.name'] + end + + def vm_to_one(vm_name) + str = "NAME = \"#{vm_name}\"\n"\ + "CPU = \"#{@vm_info['config.hardware.numCPU']}\"\n"\ + "vCPU = \"#{@vm_info['config.hardware.numCPU']}\"\n"\ + "MEMORY = \"#{@vm_info['config.hardware.memoryMB']}\"\n"\ + "HYPERVISOR = \"vcenter\"\n"\ + "CONTEXT = [\n"\ + " NETWORK = \"YES\",\n"\ + " SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\ + "]\n"\ + "VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\ + "VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n" + + str << "DEPLOY_ID =\"#{self['_ref']}\"\n" + @state = 'POWEROFF' if @state == 'd' + str << "IMPORT_STATE =\"#{@state}\"\n" + + # Get DS information + if !@vm_info['datastore'].nil? + !@vm_info['datastore'].last.nil? && + !@vm_info['datastore'].last._ref.nil? + ds_ref = vm_template_ds_ref + str << "VCENTER_DS_REF = \"#{ds_ref}\"\n" + end + + vnc_port = nil + keymap = + VCenterDriver::VIHelper + .get_default( + 'VM/TEMPLATE/GRAPHICS/KEYMAP' + ) + + @vm_info['config.extraConfig'].select do |xtra| + if xtra[:key].downcase=='remotedisplay.vnc.port' + vnc_port = xtra[:value] + end + + if xtra[:key].downcase=='remotedisplay.vnc.keymap' + keymap = xtra[:value] + end + end + + if !@vm_info['config.extraConfig'].empty? 
+ str << "GRAPHICS = [\n"\ + " TYPE =\"vnc\",\n" + str << " PORT =\"#{vnc_port}\",\n" if vnc_port + str << " KEYMAP =\"#{keymap}\",\n" if keymap + str << " LISTEN =\"0.0.0.0\"\n" + str << "]\n" + end + + if !@vm_info['config.annotation'] || @vm_info['config.annotation'] + .empty? + str << 'DESCRIPTION = "vCenter Template \ + imported by OpenNebula' \ + " from Cluster #{@vm_info['cluster_name']}\"\n" + else + notes = @vm_info['config.annotation'] + .gsub('\\', '\\\\') + .gsub('"', '\\"') + str << "DESCRIPTION = \"#{notes}\"\n" + end + + case @vm_info['guest.guestFullName'] + when /CentOS/i + str << "LOGO=images/logos/centos.png\n" + when /Debian/i + str << "LOGO=images/logos/debian.png\n" + when /Red Hat/i + str << "LOGO=images/logos/redhat.png\n" + when /Ubuntu/i + str << "LOGO=images/logos/ubuntu.png\n" + when /Windows XP/i + str << "LOGO=images/logos/windowsxp.png\n" + when /Windows/i + str << "LOGO=images/logos/windows8.png\n" + when /Linux/i + str << "LOGO=images/logos/linux.png\n" + end + + str + end + + # Gets MOREF from Datastore used by the VM. It validates + # the selected DS is not only used to host swap. + def vm_template_ds_ref begin - h = inets_raw.to_h - rescue NoMethodError - h = {} - inets_raw.each do |nic_dev| - h[nic_dev[0]] = nic_dev[1] - end - end - - return h - } - - nics = [] - inets_raw = nil - inets = {} - - @item['config.hardware.device'].each do |device| - next unless is_nic?(device) - - nic = retrieve_from_device(device) - nic[:mac] = device.macAddress rescue nil - if vm? - if online? - inets_raw ||= @item['guest.net'].map.with_index {|x, _| [x.macAddress, x]} - inets = parse_live.call(inets_raw) if inets.empty? - - if !inets[nic[:mac]].nil? - ipAddresses = inets[nic[:mac]].ipConfig.ipAddress rescue nil - end - - if !ipAddresses.nil? && !ipAddresses.empty? - nic[:ipv4], nic[:ipv4_additionals] = nil - nic[:ipv6], nic[:ipv6_ula], nic[:ipv6_global], nic[:ipv6_additionals] = nil - fill_nic(ipAddresses, nic) - end - end - end - nics << nic - end - - return nics - end - - def fill_nic(ipAddresses, nic) - for i in 0...ipAddresses.length - ip = ipAddresses[i].ipAddress - if ip =~ Resolv::IPv4::Regex - if nic[:ipv4] - if nic[:ipv4_additionals] - nic[:ipv4_additionals] += ',' + ip - else - nic[:ipv4_additionals] = ip - end - else - nic[:ipv4] = ip - end - elsif ipAddresses[i].ipAddress =~ Resolv::IPv6::Regex - if get_ipv6_prefix(ip, 3) == '2000' - if nic[:ipv6_global] - if nic[:ipv6_additionals] - nic[:ipv6_additionals] += ',' + ip - else - nic[:ipv6_additionals] = ip + ds_ref = nil + if @vm_info['datastore'].length > 1 + swap_path = '' + @vm_info['config.extraConfig'].each do |element| + if element.key == 'sched.swap.derivedName' + swap_path = element.value end - else - nic[:ipv6_global] = ip end - elsif get_ipv6_prefix(ip, 10) == 'fe80' - nic[:ipv6] = ip - elsif get_ipv6_prefix(ip, 7) == 'fc00' - if nic[:ipv6_ula] - if nic[:ipv6_additionals] - nic[:ipv6_additionals] += ',' + ip - else - nic[:ipv6_additionals] = ip + @vm_info['datastore'].each do |datastore| + path = datastore.summary.url.sub(%r{ds:///*}, '') + if !swap_path.include?(path) && !datastore._ref.nil? + ds_ref = datastore._ref + break end - else - nic[:ipv6_ula] = ip + end + elsif @vm_info['datastore'].length == 1 + if !@vm_info['datastore'].first._ref.nil? 
+ ds_ref = @vm_info['datastore'].first._ref end end - end - end - end - def get_ipv6_prefix(ipv6, prefix_length) - - ipSlice = ipv6.split(":").map{ |elem| elem.hex }.map{ |elem| - - int, dec = elem.divmod(1) - bin = "#{int.to_s(2)}" - - while dec > 0 - int, dec = (dec * 2).divmod(1) - bin << int.to_s - end - - elem = bin - }.map{ |elem| elem.rjust(16, '0') } - - ipChain = ipSlice.join - prefix = ipChain[0, prefix_length] - - cont = 0 - limit = prefix.length - index = 0 - slices = [] - - while cont < limit - slices[index] = prefix.slice(cont, 4) - slices[index] = slices[index].ljust(4, '0') - index +=1 - cont+=4 - end - - finalPrefix = slices.map{|elem| "%0x" % elem.to_i(2) }.join.ljust(4, '0') - return finalPrefix - end - - # Checks if a RbVmomi::VIM::VirtualDevice is a disk or a cdrom - def is_disk_or_cdrom?(device) - is_disk = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil? - is_cdrom = !(device.class.ancestors.index(RbVmomi::VIM::VirtualCdrom)).nil? - is_disk || is_cdrom - end - - # Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file - def is_disk_or_iso?(device) - is_disk = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil? - is_iso = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo - is_disk || is_iso - end - - # Checks if a RbVmomi::VIM::VirtualDevice is a disk - def is_disk?(device) - !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil? - end - - def is_cdrom?(device) - device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo - end - - # Checks if a RbVmomi::VIM::VirtualDevice is a network interface - def is_nic?(device) - !device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil? - end - - # @return RbVmomi::VIM::ResourcePool, first resource pool in cluster - def get_rp - self['runtime.host.parent.resourcePool'] - end - - def get_esx_name - self['runtime.host.name'] - end - - def vm_to_one(vm_name) - str = "NAME = \"#{vm_name}\"\n"\ - "CPU = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\ - "vCPU = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\ - "MEMORY = \"#{@vm_info["config.hardware.memoryMB"]}\"\n"\ - "HYPERVISOR = \"vcenter\"\n"\ - "CONTEXT = [\n"\ - " NETWORK = \"YES\",\n"\ - " SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\ - "]\n"\ - "VCENTER_INSTANCE_ID =\"#{@vm_info[:vc_uuid]}\"\n"\ - "VCENTER_CCR_REF =\"#{@vm_info[:cluster_ref]}\"\n" - - str << "DEPLOY_ID =\"#{self["_ref"]}\"\n" - @state = 'POWEROFF' if @state == 'd' - str << "IMPORT_STATE =\"#{@state}\"\n" - - # Get DS information - if !@vm_info["datastore"].nil? - !@vm_info["datastore"].last.nil? && - !@vm_info["datastore"].last._ref.nil? - ds_ref = vm_template_ds_ref - str << "VCENTER_DS_REF = \"#{ds_ref}\"\n" - end - - vnc_port = nil - keymap = VCenterDriver::VIHelper.get_default("VM/TEMPLATE/GRAPHICS/KEYMAP") - - @vm_info["config.extraConfig"].select do |xtra| - if xtra[:key].downcase=="remotedisplay.vnc.port" - vnc_port = xtra[:value] - end - - if xtra[:key].downcase=="remotedisplay.vnc.keymap" - keymap = xtra[:value] + ds_ref + rescue StandardError => e + "Could not find DATASTORE for this VM. Reason: #{e.message}" end end - if !@vm_info["config.extraConfig"].empty? 
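`vm_template_ds_ref` has to cope with VMs that span several datastores: it reads the swap file location from the `sched.swap.derivedName` extra-config key and returns the first datastore whose URL is not part of that path, so a swap-only datastore is never reported as the template's home. The selection logic in isolation, with hypothetical sample values:

    swap_path  = '/vmfs/volumes/ds-swap/one-42.vswp' # illustrative
    datastores = [
        { :ref => 'datastore-201', :url => 'ds:///vmfs/volumes/ds-swap/' },
        { :ref => 'datastore-105', :url => 'ds:///vmfs/volumes/ds-main/' }
    ]

    ds_ref = nil
    datastores.each do |ds|
        path = ds[:url].sub(%r{ds:///*}, '')
        next if swap_path.include?(path)

        ds_ref = ds[:ref]
        break
    end

    ds_ref #=> 'datastore-105'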
+ def self.template_to_one( + template, + vc_uuid, + ccr_ref, + ccr_name, + import_name + ) + num_cpu, memory, annotation, guest_fullname = + template + .item + .collect( + 'config.hardware.numCPU', + 'config.hardware.memoryMB', + 'config.annotation', + 'guest.guestFullName' + ) + + str = "NAME = \"#{import_name}\"\n"\ + "CPU = \"#{num_cpu}\"\n"\ + "vCPU = \"#{num_cpu}\"\n"\ + "MEMORY = \"#{memory}\"\n"\ + "HYPERVISOR = \"vcenter\"\n"\ + "CONTEXT = [\n"\ + " NETWORK = \"YES\",\n"\ + " SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\ + "]\n"\ + "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n" + + str << "VCENTER_TEMPLATE_REF =\"#{template['_ref']}\"\n" + str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n" + str << "GRAPHICS = [\n"\ " TYPE =\"vnc\",\n" - str << " PORT =\"#{vnc_port}\",\n" if vnc_port - str << " KEYMAP =\"#{keymap}\",\n" if keymap str << " LISTEN =\"0.0.0.0\"\n" str << "]\n" - end - if !@vm_info["config.annotation"] || @vm_info["config.annotation"].empty? - str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \ - " from Cluster #{@vm_info["cluster_name"]}\"\n" - else - notes = @vm_info["config.annotation"].gsub("\\", "\\\\").gsub("\"", "\\\"") - str << "DESCRIPTION = \"#{notes}\"\n" - end + if annotation.nil? || annotation.empty? + str << 'DESCRIPTION = "vCenter Template \ + imported by OpenNebula' \ + " from Cluster #{ccr_name}\"\n" + else + notes = annotation.gsub('\\', '\\\\').gsub('"', '\\"') + str << "DESCRIPTION = \"#{notes}\"\n" + end - case @vm_info["guest.guestFullName"] + case guest_fullname when /CentOS/i str << "LOGO=images/logos/centos.png\n" when /Debian/i @@ -1069,336 +1313,359 @@ class Template str << "LOGO=images/logos/windows8.png\n" when /Linux/i str << "LOGO=images/logos/linux.png\n" + end + + str end - return str - end + def self.get_xml_template( + template, + vcenter_uuid, + vi_client, + dc_name = nil, + rp_cache = {} + ) + begin + template_ref = template['_ref'] + template_name = template['name'] + template_ccr = template['runtime.host.parent'] + template_ccr_ref = template_ccr._ref + template_ccr_name = template_ccr.name - #Gets MOREF from Datastore used by the VM. It validates - #the selected DS is not only used to host swap. - def vm_template_ds_ref - begin - ds_ref = nil - if @vm_info["datastore"].length > 1 - swap_path = "" - @vm_info["config.extraConfig"].each do |element| - if element.key == "sched.swap.derivedName" - swap_path = element.value + # Get datacenter info + if !dc_name + dc = get_dc + dc_name = dc.item.name + end + + # Get resource pools and generate a list + if !rp_cache[template_ccr_name] + tmp_cluster = + VCenterDriver::ClusterComputeResource + .new_from_ref( + template_ccr_ref, + vi_client + ) + rp_list = tmp_cluster.get_resource_pool_list + rp = '' + if !rp_list.empty? + rp_name_list = [] + rp_list.each do |rp_hash| + rp_name_list << rp_hash[:name] + end + rp = 'O|list|Which resource pool \ + you want this VM to run in? ' + rp << "|#{rp_name_list.join(',')}" # List of RP + rp << "|#{rp_name_list.first}" # Default RP + end + rp_cache[template_ccr_name] = {} + rp_cache[template_ccr_name][:rp] = rp + rp_cache[template_ccr_name][:rp_list] = rp_list + end + rp = rp_cache[template_ccr_name][:rp] + rp_list = rp_cache[template_ccr_name][:rp_list] + + # Determine the location path for the template + vcenter_template = + VCenterDriver::VirtualMachine + .new_without_id( + vi_client, + template_ref + ) + item = vcenter_template.item + folders = [] + until item.instance_of? RbVmomi::VIM::Datacenter + item = item.parent + if !item.instance_of? 
RbVmomi::VIM::Datacenter + folders << item.name if item.name != 'vm' + end + if item.nil? + raise 'Could not find the templates parent location' end end - @vm_info["datastore"].each do |datastore| - path = datastore.summary.url.sub(/ds:\/\/\/*/, "") - if !swap_path.include?(path) && !datastore._ref.nil? - ds_ref = datastore._ref - break + location = folders.reverse.join('/') + location = '/' if location.empty? + + # Generate a crypto hash for the template + # name and take the first 12 chars + import_name = + VCenterDriver::VIHelper + .one_name( + OpenNebula::TemplatePool, + template_name, + template_ref+vcenter_uuid + ) + + template_name = template_name.tr("\u007F", '') + template_ccr_name = template_ccr_name.tr("\u007F", '') + + # Prepare the Hash that will be used by importers to display + # the object being imported + one_tmp = {} + one_tmp[:name] = import_name + one_tmp[:ref] = template_ref + one_tmp[:dc_name] = dc_name + one_tmp[:template_name] = template_name + one_tmp[:sunstone_template_name]= + "#{template_name} [ Cluster: #{template_ccr_name} \ + - Template location: #{location} ]" + one_tmp[:template_location] = location + one_tmp[:vcenter_ccr_ref] = template_ccr_ref + one_tmp[:vcenter_ref] = template_ref + one_tmp[:vcenter_instance_uuid] = vcenter_uuid + one_tmp[:cluster_name] = template_ccr_name + one_tmp[:rp] = rp + one_tmp[:rp_list] = rp_list + one_tmp[:template] = template + # By default we import disks and nics + one_tmp[:import_disks_and_nics] = true + + # Get the host ID of the OpenNebula host + # which represents the vCenter Cluster + one_host = + VCenterDriver::VIHelper + .find_by_ref( + OpenNebula::HostPool, + 'TEMPLATE/VCENTER_CCR_REF', + template_ccr_ref, + vcenter_uuid + ) + host_id = one_host['ID'] + unless host_id + raise "Could not find the host's ID associated \ + to template being imported" + end + + # Get the OpenNebula's template hash + one_tmp[:one] = + template_to_one( + template, + vcenter_uuid, + template_ccr_ref, + template_ccr_name, + import_name + ) + one_tmp + rescue StandardError + nil + end + end + + # TODO: check with uuid + def self.new_from_ref(ref, vi_client) + new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client) + end + + end + + ########################################################################## + # Class VmImporter + ########################################################################## + class VmImporter < VCenterDriver::VcImporter + + def initialize(one_client, vi_client) + super(one_client, vi_client) + @one_class = OpenNebula::Template + + @defaults = { + :linked_clone => '0', + :copy => '0', + :name => '', + :folder => '', + :resourcepool => [], + :type => '' + } + end + + def get_list(_args = {}) + dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client) + + # Get OpenNebula's templates pool + tpool = + VCenterDriver::VIHelper + .one_pool( + OpenNebula::TemplatePool, + false + ) + if tpool.respond_to?(:message) + raise "Could not get OpenNebula TemplatePool: #{tpool.message}" + end + + @list = dc_folder.get_unimported_templates(@vi_client, tpool) + end + + def rp_opts(type, rps) + str = '' + + return str if (type == 'default') || rps.empty? 
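The resource-pool strings built here use OpenNebula's user-input syntax, `mandatory|type|description|options|default`: `O` marks the input optional, `M` mandatory, and `list` restricts the value to the comma-separated options. Assembled by hand it looks roughly like this:

    rp_names = ['Resources', 'rp-gold', 'rp-silver'] # illustrative

    rp = 'O|list|Which resource pool you want this VM to run in? '
    rp << "|#{rp_names.join(',')}" # selectable options
    rp << "|#{rp_names.first}"     # preselected default

    rp #=> "O|list|Which resource pool you want this VM to run in? |Resources,rp-gold,rp-silver|Resources"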
+
+ if type == 'fixed'
+ str << "VCENTER_RESOURCE_POOL=\"#{rps}\"\n"
+ else
+ default = rps.first
+ rps_str = rps.join(',')
+
+ str << 'USER_INPUTS=['
+ str << "VCENTER_RESOURCE_POOL=\"M|list|resource " \
+ "pool list|#{rps_str}|#{default}\""
+ str << ']'
+ end
+
+ str
+ end
+
+ def import(selected)
+ opts = @info[selected[:ref]][:opts]
+ working_template = selected
+
+ vcenter = selected[:vcenter]
+ vc_uuid = selected[:vcenter_instance_uuid]
+ dc = selected[:dc_name]
+
+ linked_clone = opts[:linked_clone] == '1'
+ copy = opts[:copy] == '1'
+ deploy_in_folder = !opts[:folder].empty?
+
+ res = { :id => [], :name => selected[:name] }
+ dpool, ipool, npool, hpool = create_pools
+
+ template =
+ VCenterDriver::Template
+ .new_from_ref(
+ selected[:vcenter_ref],
+ @vi_client
+ )
+ # Linked clones and copy preparation
+ if linked_clone
+ # reached this point we need to delete
+ # the template if something goes wrong
+ if copy
+ error, template_copy_ref =
+ selected[:template]
+ .create_template_copy(
+ opts[:name]
+ )
+ unless template_copy_ref
+ raise 'There is a problem creating ' \
+ "your copy: #{error}"
+ end
+
+ template =
+ VCenterDriver::Template
+ .new_from_ref(
+ template_copy_ref,
+ @vi_client
+ )
+ @rollback <<
+ Raction
+ .new(
+ template,
+ :delete_template
+ )
+
+ one_template =
+ VCenterDriver::Template
+ .get_xml_template(
+ template,
+ vc_uuid,
+ @vi_client,
+ dc
+ )
+ unless one_template
+ raise 'There is a problem obtaining info ' \
+ "from your template's copy"
+ end
+
+ working_template = one_template
+ end
+
+ lc_error, use_lc = template.create_delta_disks
+ if lc_error
+ raise 'Something went wrong with the create ' \
+ 'delta disk operation'
+ end
+
+ if use_lc
+ working_template[:one] <<
+ "\nVCENTER_LINKED_CLONES=\"YES\"\n"
+ end
+ end
+
+ if deploy_in_folder
+ working_template[:one] <<
+ "VCENTER_VM_FOLDER=\"#{opts[:folder]}\"\n"
+ end
+
+ working_template[:one] <<
+ "VCENTER_TEMPLATE_NAME=\"#{selected[:name]}\"\n"
+
+ create(working_template[:one]) do |one_object, id|
+ res[:id] << id
+
+ type = { :object => 'template', :id => id }
+ error, template_disks, allocated_images =
+ template
+ .import_vcenter_disks(
+ vc_uuid,
+ dpool,
+ ipool,
+ type
+ )
+
+ if allocated_images
+ # rollback stack
+ allocated_images.reverse.each do |i|
+ @rollback.unshift(Raction.new(i, :delete))
 end
 end
- elsif @vm_info["datastore"].length == 1
- if !@vm_info["datastore"].first._ref.nil?
+ raise error unless error.empty?
+
+ working_template[:one] << template_disks
+
+ if template_copy_ref
+ template_moref = template_copy_ref
+ else
+ template_moref = selected[:vcenter_ref]
 end
- end
- return ds_ref
- rescue StandardError => e
- error = "Could not find DATASTORE for this VM. 
Reason: #{e.message}" + error, template_nics, _ar_ids, allocated_nets = + template + .import_vcenter_nics( + @vi_client, + vc_uuid, + npool, + hpool, + vcenter, + template_moref, + nil, + id, + dc + ) - return error - end - end - - - def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id) - - num_cpu, memory, annotation, guest_fullname = template.item.collect("config.hardware.numCPU","config.hardware.memoryMB","config.annotation","guest.guestFullName") - - str = "NAME = \"#{import_name}\"\n"\ - "CPU = \"#{num_cpu}\"\n"\ - "vCPU = \"#{num_cpu}\"\n"\ - "MEMORY = \"#{memory}\"\n"\ - "HYPERVISOR = \"vcenter\"\n"\ - "CONTEXT = [\n"\ - " NETWORK = \"YES\",\n"\ - " SSH_PUBLIC_KEY = \"$USER[SSH_PUBLIC_KEY]\"\n"\ - "]\n"\ - "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n" - - str << "VCENTER_TEMPLATE_REF =\"#{template["_ref"]}\"\n" - str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n" - - str << "GRAPHICS = [\n"\ - " TYPE =\"vnc\",\n" - str << " LISTEN =\"0.0.0.0\"\n" - str << "]\n" - - if annotation.nil? || annotation.empty? - str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \ - " from Cluster #{ccr_name}\"\n" - else - notes = annotation.gsub("\\", "\\\\").gsub("\"", "\\\"") - str << "DESCRIPTION = \"#{notes}\"\n" - end - - case guest_fullname - when /CentOS/i - str << "LOGO=images/logos/centos.png\n" - when /Debian/i - str << "LOGO=images/logos/debian.png\n" - when /Red Hat/i - str << "LOGO=images/logos/redhat.png\n" - when /Ubuntu/i - str << "LOGO=images/logos/ubuntu.png\n" - when /Windows XP/i - str << "LOGO=images/logos/windowsxp.png\n" - when /Windows/i - str << "LOGO=images/logos/windows8.png\n" - when /Linux/i - str << "LOGO=images/logos/linux.png\n" - end - - return str - end - - def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={}) - begin - template_ref = template['_ref'] - template_name = template["name"] - template_ccr = template['runtime.host.parent'] - template_ccr_ref = template_ccr._ref - template_ccr_name = template_ccr.name - - # Set vcenter instance name - vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name - - # Get datacenter info - if !dc_name - dc = get_dc - dc_name = dc.item.name - end - - #Get resource pools and generate a list - if !rp_cache[template_ccr_name] - tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client) - rp_list = tmp_cluster.get_resource_pool_list - rp = "" - if !rp_list.empty? - rp_name_list = [] - rp_list.each do |rp_hash| - rp_name_list << rp_hash[:name] + if allocated_nets + # rollback stack + allocated_nets.reverse.each do |n| + @rollback.unshift(Raction.new(n, :delete)) end - rp = "O|list|Which resource pool you want this VM to run in? " - rp << "|#{rp_name_list.join(",")}" #List of RP - rp << "|#{rp_name_list.first}" #Default RP end - rp_cache[template_ccr_name] = {} - rp_cache[template_ccr_name][:rp] = rp - rp_cache[template_ccr_name][:rp_list] = rp_list + raise error unless error.empty? + + working_template[:one] << template_nics + working_template[:one] << rp_opts( + opts[:type], + opts[:resourcepool] + ) + + one_object.update(working_template[:one]) end - rp = rp_cache[template_ccr_name][:rp] - rp_list = rp_cache[template_ccr_name][:rp_list] - - # Determine the location path for the template - vcenter_template = VCenterDriver::VirtualMachine.new_without_id(vi_client, template_ref) - item = vcenter_template.item - folders = [] - while !item.instance_of? 
RbVmomi::VIM::Datacenter - item = item.parent - if !item.instance_of? RbVmomi::VIM::Datacenter - folders << item.name if item.name != "vm" - end - raise "Could not find the templates parent location" if item.nil? - end - location = folders.reverse.join("/") - location = "/" if location.empty? - - # Generate a crypto hash for the template name and take the first 12 chars - import_name = VCenterDriver::VIHelper.one_name(OpenNebula::TemplatePool, template_name, template_ref+vcenter_uuid) - - template_name = template_name.tr("\u007F", "") - template_ccr_name = template_ccr_name.tr("\u007F", "") - - # Prepare the Hash that will be used by importers to display - # the object being imported - one_tmp = {} - one_tmp[:name] = import_name - one_tmp[:ref] = template_ref - one_tmp[:dc_name] = dc_name - one_tmp[:template_name] = template_name - one_tmp[:sunstone_template_name]= "#{template_name} [ Cluster: #{template_ccr_name} - Template location: #{location} ]" - one_tmp[:template_location] = location - one_tmp[:vcenter_ccr_ref] = template_ccr_ref - one_tmp[:vcenter_ref] = template_ref - one_tmp[:vcenter_instance_uuid] = vcenter_uuid - one_tmp[:cluster_name] = template_ccr_name - one_tmp[:rp] = rp - one_tmp[:rp_list] = rp_list - one_tmp[:template] = template - one_tmp[:import_disks_and_nics] = true # By default we import disks and nics - - - # Get the host ID of the OpenNebula host which represents the vCenter Cluster - host_id = nil - one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, - "TEMPLATE/VCENTER_CCR_REF", - template_ccr_ref, - vcenter_uuid) - host_id = one_host["ID"] - cluster_id = one_host["CLUSTER_ID"] - raise "Could not find the host's ID associated to template being imported" if !host_id - - # Get the OpenNebula's template hash - one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id) - return one_tmp - rescue StandardError => e - return nil + res + end + + def attr + 'TEMPLATE/VCENTER_TEMPLATE_REF' end - end - # TODO check with uuid - def self.new_from_ref(ref, vi_client) - self.new(RbVmomi::VIM::VirtualMachine.new(vi_client.vim, ref), vi_client) end end - -class VmImporter < VCenterDriver::VcImporter - - def initialize(one_client, vi_client) - super(one_client, vi_client) - @one_class = OpenNebula::Template - - @defaults = { - linked_clone: '0', - copy: '0', - name: '', - folder: '', - resourcepool: [], - type: '' - } - - end - - def get_list(args = {}) - dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client) - - # Get OpenNebula's templates pool - tpool = VCenterDriver::VIHelper.one_pool(OpenNebula::TemplatePool, false) - if tpool.respond_to?(:message) - raise "Could not get OpenNebula TemplatePool: #{tpool.message}" - end - - @list = dc_folder.get_unimported_templates(@vi_client, tpool) - end - - - def rp_opts(type, rps) - str = "" - - return str if (type == "default") || rps.empty? - - - if (type == "fixed") - str << "VCENTER_RESOURCE_POOL=\"#{rps}\"\n" - else - default = rps.first - rps_str = rps.join(',') - - str << "USER_INPUTS=[" - str << "VCENTER_RESOURCE_POOL=\"M|list|resource pool list|#{rps_str}|#{default}\"" - str << "]" - end - - return str - end - - def import(selected) - opts = @info[selected[:ref]][:opts] - working_template = selected - - vcenter = selected[:vcenter] - vc_uuid = selected[:vcenter_instance_uuid] - dc = selected[:dc_name] - - linked_clone = opts[:linked_clone] == '1' - copy = opts[:copy] == '1' - deploy_in_folder = !opts[:folder].empty? 
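Throughout `import`, every artifact allocated on the OpenNebula side (images, networks, the optional template copy) is unshifted onto the `@rollback` stack as a `Raction`, so on failure the undo actions replay newest-first and dependent objects are removed before the objects they depend on. The pattern reduced to its core, with a stand-in action class:

    UndoAction = Struct.new(:label) do
        def apply
            puts "undo #{label}"
        end
    end

    rollback = []
    ['image-0', 'image-1', 'net-0'].each do |res|
        rollback.unshift(UndoAction.new(res)) # last allocated, first undone
    end

    begin
        raise 'simulated import failure'
    rescue StandardError
        rollback.each(&:apply) # prints: undo net-0, undo image-1, undo image-0
    end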
- - res = {id: [], name: selected[:name]} - dpool, ipool, npool, hpool = create_pools - - template = VCenterDriver::Template.new_from_ref(selected[:vcenter_ref], @vi_client) - # Linked clones and copy preparation - if linked_clone - if copy # reached this point we need to delete the template if something go wrong - error, template_copy_ref = selected[:template].create_template_copy(opts[:name]) - raise "There is a problem creating creating your copy: #{error}" unless template_copy_ref - - template = VCenterDriver::Template.new_from_ref(template_copy_ref, @vi_client) - @rollback << Raction.new(template, :delete_template) - - one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, @vi_client, vcenter, dc) - raise "There is a problem obtaining info from your template's copy" unless one_template - working_template = one_template - end - - lc_error, use_lc = template.create_delta_disks - raise "Something was wront with create delta disk operation" if lc_error - working_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc - end - - working_template[:one] << "VCENTER_VM_FOLDER=\"#{opts[:folder]}\"\n" if deploy_in_folder - - working_template[:one] << "VCENTER_TEMPLATE_NAME=\"#{selected[:name]}\"\n" - - create(working_template[:one]) do |one_object, id| - res[:id] << id - - type = {:object => "template", :id => id} - error, template_disks, allocated_images = template.import_vcenter_disks(vc_uuid, dpool, ipool, type) - - if allocated_images - #rollback stack - allocated_images.reverse.each do |i| - @rollback.unshift(Raction.new(i, :delete)) - end - end - raise error if !error.empty? - - working_template[:one] << template_disks - - template_moref = template_copy_ref ? template_copy_ref : selected[:vcenter_ref] - - error, template_nics, ar_ids, allocated_nets = template.import_vcenter_nics(@vi_client, - vc_uuid, - npool, - hpool, - vcenter, - template_moref, - nil, - id, - dc) - - if allocated_nets - #rollback stack - allocated_nets.reverse.each do |n| - @rollback.unshift(Raction.new(n, :delete)) - end - end - raise error if !error.empty? - - working_template[:one] << template_nics - working_template[:one] << rp_opts(opts[:type], opts[:resourcepool]) - - one_object.update(working_template[:one]) - end - - return res - end - - def attr - "TEMPLATE/VCENTER_TEMPLATE_REF" - end -end - -end diff --git a/src/vmm_mad/remotes/vcenter/deploy b/src/vmm_mad/remotes/vcenter/deploy index 875ea66bff..a391c75856 100755 --- a/src/vmm_mad/remotes/vcenter/deploy +++ b/src/vmm_mad/remotes/vcenter/deploy @@ -92,7 +92,7 @@ begin end end - if vm.is_powered_off? + if vm.powered_off? vm.sync(deploy) # Only mark the VM as running if we are deploying it for the first time set_running = !deploy_id_valid?(deploy_id)
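One practical consequence of this series: predicates such as `is_powered_on?`, `has_snapshots?` and `get_available_pnics` are renamed in place, so any out-of-tree script calling the old names fails immediately after upgrading. Nothing in this patch adds one, but if a transition period were wanted a deprecation shim is cheap; a hedged sketch with an illustrative class:

    class VirtualMachine
        def initialize(power_state)
            @power_state = power_state
        end

        def powered_off?
            @power_state == 'poweredOff'
        end

        # Temporary alias so callers of the old name keep working.
        def is_powered_off? # rubocop:disable Naming/PredicateName
            warn '[DEPRECATED] use #powered_off? instead'
            powered_off?
        end
    end

    VirtualMachine.new('poweredOff').is_powered_off? #=> true (plus a warning)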