diff --git a/src/sunstone/routes/vcenter.rb b/src/sunstone/routes/vcenter.rb index 9a7aa582c2..adcb5a2588 100644 --- a/src/sunstone/routes/vcenter.rb +++ b/src/sunstone/routes/vcenter.rb @@ -23,8 +23,11 @@ end # TODO vcenter_driver should be stored in RUBY_LIB_LOCATION $: << REMOTES_LOCATION+"/vmm/vcenter/" +MAX_VCENTER_PASSWORD_LENGTH = 22 #This is the maximum length for a vCenter password + require 'vcenter_driver' + helpers do def vcenter_client hpref = "HTTP-" @@ -56,6 +59,22 @@ helpers do error 404, error.to_json end + if vpass.size > MAX_VCENTER_PASSWORD_LENGTH + begin + client = OpenNebula::Client.new + system = OpenNebula::System.new(client) + config = system.get_configuration + token = config["ONE_KEY"] + vpass = VCenterDriver::VIClient::decrypt(vpass, token) + + rescue Exception => e + msg = "I was unable to decrypt the vCenter password credentials" + logger.error("[vCenter] #{e.message}/#{e.backtrace}. " + msg) + error = Error.new(msg) + error 404, error.to_json + end + end + return VCenterDriver::VIClient.new({ :user => vuser, :password => vpass, @@ -127,7 +146,96 @@ get '/vcenter/templates' do end end -get '/vcenter/template/:vcenter_ref' do +post '/vcenter/image_rollback/:image_id' do + begin + image_id = params[:image_id] + one_image = VCenterDriver::VIHelper.one_item(OpenNebula::Image, image_id.to_s, false) + + if OpenNebula.is_error?(one_image) + raise "Error finding image #{image_id}: #{rc.message}\n" + end + + rc = one_image.delete + if OpenNebula.is_error?(rc) + raise "Error deleting image #{image_id}: #{rc.message}\n" + end + + [200, "Image #{image_id} deleted in rollback.".to_json] + rescue Exception => e + logger.error("[vCenter] " + e.message) + error = Error.new(e.message) + error 403, error.to_json + end +end + +post '/vcenter/network_rollback/:network_id' do + begin + network_id = params[:network_id] + one_vnet = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualNetwork, network_id.to_s, false) + + if OpenNebula.is_error?(one_vnet) + raise "Error finding network #{network_id}: #{rc.message}\n" + end + + rc = one_vnet.delete + if OpenNebula.is_error?(rc) + raise "Error deleting network #{network_id}: #{rc.message}\n" + end + + [200, "Network #{network_id} deleted in rollback.".to_json] + rescue Exception => e + logger.error("[vCenter] " + e.message) + error = Error.new(e.message) + error 403, error.to_json + end +end + +post '/vcenter/template_rollback/:template_id' do + begin + template_id = params[:template_id] + one_template = VCenterDriver::VIHelper.one_item(OpenNebula::Template, template_id.to_s, false) + + if OpenNebula.is_error?(one_template) + raise "Error finding template #{template_id}: #{rc.message}\n" + end + + rc = one_template.delete + if OpenNebula.is_error?(rc) + raise "Error deleting template #{template_id}: #{rc.message}\n" + end + + [200, "Template #{template_id} deleted in rollback.".to_json] + rescue Exception => e + logger.error("[vCenter] " + e.message) + error = Error.new(e.message) + error 403, error.to_json + end +end + +post '/vcenter/wild_rollback/:vm_id' do + begin + vm_id = params[:vm_id] + one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vm_id.to_s, false) + + if OpenNebula.is_error?(one_vm) + raise "Error finding VM #{vm_id}: #{rc.message}\n" + end + + rc = one_vm.delete + if OpenNebula.is_error?(rc) + raise "Error deleting VM #{vm_id}: #{rc.message}\n" + end + + [200, "VM #{vm_id} deleted in rollback.".to_json] + rescue Exception => e + logger.error("[vCenter] " + e.message) + error = Error.new(e.message) + 
error 403, error.to_json + end +end + + +get '/vcenter/template/:vcenter_ref/:template_id' do begin t = {} t[:one] = "" @@ -136,7 +244,8 @@ get '/vcenter/template/:vcenter_ref' do append = true lc_error = nil - ref = params[:vcenter_ref] + ref = params[:vcenter_ref] + template_id = params[:template_id] if !ref || ref.empty? msg = "No template ref specified" @@ -172,6 +281,14 @@ get '/vcenter/template/:vcenter_ref' do error 404, error.to_json end + hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool) + if hpool.respond_to?(:message) + msg = "Could not get OpenNebula HostPool: #{hpool.message}" + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + # POST params if @request_body && !@request_body.empty? body_hash = JSON.parse(@request_body) @@ -227,7 +344,7 @@ get '/vcenter/template/:vcenter_ref' do end # Create images or get disks information for template - error, template_disks = template.import_vcenter_disks(vc_uuid, dpool, ipool) + error, template_disks = template.import_vcenter_disks(vc_uuid, dpool, ipool, true, template_id) if !error.empty? append = false @@ -238,15 +355,23 @@ get '/vcenter/template/:vcenter_ref' do error 404, error.to_json end - t[:one] << template_disks + #t[:one] << template_disks + t[:disks] = template_disks template_moref = template_copy_ref ? template_copy_ref : ref + wild = false #We're importing templates not wild vms + # Create images or get nics information for template error, template_nics = template.import_vcenter_nics(vc_uuid, npool, + hpool, vcenter_client.vim.host, - template_moref) + template_moref, + wild, + true, + template["name"], + template_id) if !error.empty? append = false @@ -257,7 +382,8 @@ get '/vcenter/template/:vcenter_ref' do error 404, error.to_json end - t[:one] << template_nics + #t[:one] << template_nics + t[:nics] = template_nics t[:lc_error] = lc_error t[:append] = append @@ -271,6 +397,99 @@ get '/vcenter/template/:vcenter_ref' do end end +get '/vcenter/wild/:vcenter_ref' do + begin + t = {} + template = nil + vm_ref = params[:vcenter_ref] + sunstone = true + wild = true + + if !vm_ref || vm_ref.empty? 
+ msg = "No VM moref for Wild VM specified" + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + + vc_uuid = vcenter_client.vim.serviceContent.about.instanceUuid + + dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool) + if dpool.respond_to?(:message) + msg = "Could not get OpenNebula DatastorePool: #{dpool.message}" + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + + ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool) + if ipool.respond_to?(:message) + msg = "Could not get OpenNebula ImagePool: #{ipool.message}" + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + + npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool) + if npool.respond_to?(:message) + msg = "Could not get OpenNebula VirtualNetworkPool: #{npool.message}" + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + + hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool) + if hpool.respond_to?(:message) + msg = "Could not get OpenNebula HostPool: #{hpool.message}" + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + + vcenter_vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vcenter_client) + vm_name = vcenter_vm["name"] + + # Get disks information for template + error, template_disks = vcenter_vm.import_vcenter_disks(vc_uuid, dpool, ipool, sunstone) + + if !error.empty? + msg = error + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + + t[:disks] = template_disks + + # Get nics information for template + + # Create images or get nics information for template + error, template_nics = vcenter_vm.import_vcenter_nics(vc_uuid, + npool, + hpool, + vcenter_client.vim.host, + vm_ref, + wild, + sunstone, + vm_name) + + if !error.empty? + msg = error + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + + t[:nics] = template_nics + + [200, t.to_json] + rescue Exception => e + logger.error("[vCenter] " + e.message) + error = Error.new(e.message) + error 403, error.to_json + end +end + get '/vcenter/networks' do begin dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client) @@ -284,7 +503,16 @@ get '/vcenter/networks' do error 404, error.to_json end - networks = dc_folder.get_unimported_networks(npool,vcenter_client.vim.host) + hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false) + + if hpool.respond_to?(:message) + msg = "Could not get OpenNebula HostPool: #{hpool.message}" + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end + + networks = dc_folder.get_unimported_networks(npool,vcenter_client.vim.host,hpool) if networks.nil? 
msg = "No datacenter found" @@ -305,7 +533,16 @@ get '/vcenter/images/:ds_name' do begin one_ds = VCenterDriver::VIHelper.find_by_name(OpenNebula::DatastorePool, params[:ds_name]) - one_ds_ref = one_ds['TEMPLATE/VCENTER_DS_REF'] + one_ds_ref = one_ds['TEMPLATE/VCENTER_DS_REF'] + one_ds_instance_id = one_ds['TEMPLATE/VCENTER_INSTANCE_ID'] + vc_uuid = vcenter_client.vim.serviceContent.about.instanceUuid + + if one_ds_instance_id != vc_uuid + msg = "Datastore is not in the same vCenter instance provided in credentials" + logger.error("[vCenter] " + msg) + error = Error.new(msg) + error 404, error.to_json + end ds = VCenterDriver::Datastore.new_from_ref(one_ds_ref, vcenter_client) ds.one_item = one_ds @@ -340,7 +577,6 @@ get '/vcenter/datastores' do error 404, error.to_json end - hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false) if hpool.respond_to?(:message) @@ -350,7 +586,6 @@ get '/vcenter/datastores' do error 404, error.to_json end - datastores = dc_folder.get_unimported_datastores(dpool, vcenter_client.vim.host, hpool) if datastores.nil? msg = "No datacenter found" diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb b/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb index a02ebb18b0..d974f7e8ba 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb @@ -1,5 +1,6 @@ module VCenterDriver require 'set' +require 'digest' class DatacenterFolder attr_accessor :items @@ -46,41 +47,71 @@ class DatacenterFolder host_objects = {} vcenter_uuid = get_vcenter_instance_uuid - vcenter_version = get_vcenter_api_version - fetch! if @items.empty? #Get datacenters + fetch! if @items.empty? # Get datacenters + sha256 = Digest::SHA256.new # Prepare crypto hash generator + + # Loop through datacenters @items.values.each do |dc| dc_name = dc.item.name host_objects[dc_name] = [] + # Get clusters inside a datacenter host_folder = dc.host_folder host_folder.fetch_clusters! host_folder.items.values.each do |ccr| + # Check if the cluster is a host in OpenNebula's pool one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, "TEMPLATE/VCENTER_CCR_REF", ccr['_ref'], vcenter_uuid, hpool) + next if one_host - next if one_host #If the host has been already imported - + # Get a ClusterComputeResource object cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr['_ref'], @vi_client) + + # Obtain a list of resource pools found in the cluster rpools = cluster.get_resource_pool_list.select {|rp| !rp[:name].empty?} - host_info = {} - cluster_name = "[#{vcenter_instance_name}-#{dc_name}]_#{ccr['name']}" + # Determine a host location (folder and subfolders) + item = cluster.item + folders = [] + while !item.instance_of? RbVmomi::VIM::Datacenter + item = item.parent + if !item.instance_of? RbVmomi::VIM::Datacenter + folders << item.name if item.name != "host" + end + raise "Could not find the host's location" if item.nil? + end + location = folders.reverse.join("/") + location = "/" if location.empty? + + # Generate a crypto hash and take the first 12 characters to + # avoid name collisions. 
+ full_name = "#{ccr['name']}_[#{vcenter_instance_name}-#{dc_name}]_#{location}" + cluster_hash = sha256.hexdigest(full_name)[0..11] + + # Setting host import name and replace spaces and weird characters + cluster_name = "#{ccr['name']}_[#{vcenter_instance_name}-#{dc_name}]_#{cluster_hash}" cluster_name = cluster_name.tr(" ", "_") cluster_name = cluster_name.tr("\u007F", "") # Remove \u007F character that comes from vcenter + # Prepare hash for import tool + host_info = {} + host_info[:simple_name] = ccr['name'] host_info[:cluster_name] = cluster_name host_info[:cluster_ref] = ccr['_ref'] + host_info[:cluster_location] = location + host_info[:cluster_hash] = cluster_hash host_info[:vcenter_uuid] = vcenter_uuid host_info[:vcenter_version] = vcenter_version host_info[:rp_list] = rpools + # Add the hash to current datacenter host_objects[dc_name] << host_info end end @@ -96,34 +127,10 @@ class DatacenterFolder fetch! if @items.empty? #Get datacenters - one_clusters = {} - @items.values.each do |dc| + clusters_in_ds = {} dc_name = dc.item.name - - one_clusters[dc_name] = [] - - host_folder = dc.host_folder - host_folder.fetch_clusters! - - host_folder.items.values.each do |ccr| - cluster = {} - cluster[:ref] = ccr['_ref'] - cluster[:name] = ccr['name'] - attribute = "TEMPLATE/VCENTER_CCR_REF" - one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, - attribute, - ccr['_ref'], - vcenter_uuid, - hpool) - - if !!one_host - cluster[:host_id] = one_host['ID'] - one_clusters[dc_name] << cluster - end - end - - next if one_clusters[dc_name].empty? #No clusters imported, continue + dc_ref = dc.item._ref ds_objects[dc_name] = [] datastore_folder = dc.datastore_folder @@ -131,82 +138,103 @@ class DatacenterFolder datastore_folder.items.values.each do |ds| - name, capacity, freeSpace = ds.item.collect("name","summary.capacity","summary.freeSpace") + name, capacity, freeSpace = ds.item.collect("name", + "summary.capacity", + "summary.freeSpace") - ds_name = "[#{vcenter_instance_name} - #{dc_name}] #{name}" - ds_total_mb = ((capacity.to_i / 1024) / 1024) - ds_free_mb = ((freeSpace.to_i / 1024) / 1024) + ds_name = "#{name}" + ds_total_mb = ((capacity.to_i / 1024) / 1024) + ds_free_mb = ((freeSpace.to_i / 1024) / 1024) if ds.instance_of? 
VCenterDriver::Datastore - hosts_in_ds = ds['host'] - clusters_in_ds = {} + ds_hash = {} + ds_hash[:simple_name] = "#{ds_name}" + ds_hash[:total_mb] = ds_total_mb + ds_hash[:free_mb] = ds_free_mb + ds_hash[:ds] = [] + ds_hash[:cluster] = [] - hosts_in_ds.each do |host| + hosts = ds["host"] + hosts.each do |host| cluster_ref = host.key.parent._ref - if !clusters_in_ds[cluster_ref] - clusters_in_ds[cluster_ref] = host.key.parent.name + if !clusters_in_ds.key?(cluster_ref) + clusters_in_ds[cluster_ref] = nil + # Try to locate cluster ref in host's pool + one_cluster = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, + "TEMPLATE/VCENTER_CCR_REF", + cluster_ref, + vcenter_uuid, + hpool) + if one_cluster + ds_hash[:cluster] << one_cluster["CLUSTER_ID"].to_i + clusters_in_ds[cluster_ref] = one_cluster["CLUSTER_ID"].to_i + end + else + ds_hash[:cluster] << clusters_in_ds[cluster_ref] if clusters_in_ds[cluster_ref] && !ds_hash[:cluster].include?(clusters_in_ds[cluster_ref]) end end - clusters_in_ds.each do |ccr_ref, ccr_name| - ds_hash = {} + already_image_ds = VCenterDriver::Storage.exists_one_by_ref_dc_and_type?(ds["_ref"], dc_ref, vcenter_uuid, "IMAGE_DS", dpool) - ds_hash[:name] = "#{ds_name} - #{ccr_name.tr(" ", "_")}" - ds_hash[:total_mb] = ds_total_mb - ds_hash[:free_mb] = ds_free_mb - ds_hash[:cluster] = ccr_name - ds_hash[:ds] = [] - - already_image_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "IMAGE_DS", dpool) - - if !already_image_ds - object = ds.to_one_template(one_clusters[dc_name], ds_hash[:name], ccr_ref, "IMAGE_DS", vcenter_uuid) - ds_hash[:ds] << object if !object.nil? - end - - already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool) - - if !already_system_ds - object = ds.to_one_template(one_clusters[dc_name], ds_hash[:name], ccr_ref, "SYSTEM_DS", vcenter_uuid) - ds_hash[:ds] << object if !object.nil? - end - - ds_objects[dc_name] << ds_hash if !ds_hash[:ds].empty? + if !already_image_ds + ds_hash[:name] = "#{ds_name} [#{vcenter_instance_name} - #{dc_name}] (IMG)" + object = ds.to_one_template(ds_hash, vcenter_uuid, dc_name, dc_ref, "IMAGE_DS") + ds_hash[:ds] << object if !object.nil? end + + already_system_ds = VCenterDriver::Storage.exists_one_by_ref_dc_and_type?(ds["_ref"], dc_ref, vcenter_uuid, "SYSTEM_DS", dpool) + + if !already_system_ds + ds_hash[:name] = "#{ds_name} [#{vcenter_instance_name} - #{dc_name}] (SYS)" + object = ds.to_one_template(ds_hash, vcenter_uuid, dc_name, dc_ref, "SYSTEM_DS") + ds_hash[:ds] << object if !object.nil? + end + + ds_hash[:name] = "#{ds_name} [#{vcenter_instance_name} - #{dc_name}]" + + ds_objects[dc_name] << ds_hash if !ds_hash[:ds].empty? + end if ds.instance_of? 
VCenterDriver::StoragePod - clusters_in_spod = {} - ds_in_spod = ds['children'] + ds_hash = {} + ds_hash[:simple_name] = "#{ds_name}" + ds_hash[:total_mb] = ds_total_mb + ds_hash[:free_mb] = ds_free_mb + ds_hash[:ds] = [] + ds_hash[:cluster] = [] - ds_in_spod.each do |sp_ds| - hosts_in_ds = sp_ds.host - hosts_in_ds.each do |host| + ds['children'].each do |sp_ds| + hosts = sp_ds.host + hosts.each do |host| cluster_ref = host.key.parent._ref - if !clusters_in_spod[cluster_ref] - clusters_in_spod[cluster_ref] = host.key.parent.name + if !clusters_in_ds.include?(cluster_ref) + clusters_in_ds[cluster_ref] = nil + # Try to locate cluster ref in cluster's pool + one_cluster = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, + "TEMPLATE/VCENTER_CCR_REF", + cluster_ref, + vcenter_uuid, + hpool) + if one_cluster + ds_hash[:cluster] << one_cluster["CLUSTER_ID"].to_i + clusters_in_ds[cluster_ref] = one_cluster["CLUSTER_ID"].to_i + end + else + ds_hash[:cluster] << clusters_in_ds[cluster_ref] if clusters_in_ds[cluster_ref] && !ds_hash[:cluster].include?(clusters_in_ds[cluster_ref]) end end end - clusters_in_spod.each do |ccr_ref, ccr_name| - ds_hash = {} - ds_hash[:name] = "#{ds_name} - #{ccr_name.tr(" ", "_")}" - ds_hash[:total_mb] = ds_total_mb - ds_hash[:free_mb] = ds_free_mb - ds_hash[:cluster] = ccr_name - ds_hash[:ds] = [] + already_system_ds = VCenterDriver::Storage.exists_one_by_ref_dc_and_type?(ds["_ref"], dc_ref, vcenter_uuid, "SYSTEM_DS", dpool) - ds_hash[:ds] = [] - already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool) - - if !already_system_ds - object = ds.to_one_template(one_clusters[dc_name], ds_hash[:name], ccr_ref, "SYSTEM_DS", vcenter_uuid) - ds_hash[:ds] << object if !object.nil? - end - - ds_objects[dc_name] << ds_hash if !ds_hash[:ds].empty? + if !already_system_ds + ds_hash[:name] = "#{ds_name} [#{vcenter_instance_name} - #{dc_name}] (StorDRS)" + object = ds.to_one_template(ds_hash, vcenter_uuid, dc_name, dc_ref, "SYSTEM_DS") + ds_hash[:ds] << object if !object.nil? end + + ds_objects[dc_name] << ds_hash if !ds_hash[:ds].empty? end end end @@ -268,7 +296,6 @@ class DatacenterFolder view.DestroyView # Destroy the view - templates.each do |template| one_template = VCenterDriver::VIHelper.find_by_ref(OpenNebula::TemplatePool, @@ -288,7 +315,7 @@ class DatacenterFolder template_objects end - def get_unimported_networks(npool,vcenter_instance_name) + def get_unimported_networks(npool,vcenter_instance_name, hpool) network_objects = {} vcenter_uuid = get_vcenter_instance_uuid @@ -373,6 +400,36 @@ class DatacenterFolder clusters.each do |ref, info| + one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool, + "TEMPLATE/VCENTER_CCR_REF", + ref, + vcenter_uuid, + hpool) + if !one_host || !one_host['CLUSTER_ID'] + cluster_id = -1 + else + cluster_id = one_host['CLUSTER_ID'] + end + + one_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ref, @vi_client) + + # Determine a host location + item = one_cluster.item + folders = [] + while !item.instance_of? RbVmomi::VIM::Datacenter + item = item.parent + if !item.instance_of? RbVmomi::VIM::Datacenter + folders << item.name if item.name != "host" + end + + if item.nil? + raise "Could not find the host's location" + end + end + + location = folders.reverse.join("/") + location = "/" if location.empty? 
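For reference, a minimal sketch of the collision-free naming scheme the host and network import code above relies on. The values ccr_name, vcenter_instance_name, dc_name and location stand for the data already computed in these loops; the helper name is illustrative only, not part of the patch.

require 'digest'

# Compose "<cluster>_[<vcenter>-<dc>]_<12-char sha256>" so that clusters with
# the same name living in different vCenter folders do not collide on import.
def build_import_name(ccr_name, vcenter_instance_name, dc_name, location)
  full_name  = "#{ccr_name}_[#{vcenter_instance_name}-#{dc_name}]_#{location}"
  short_hash = Digest::SHA256.hexdigest(full_name)[0..11]

  name = "#{ccr_name}_[#{vcenter_instance_name}-#{dc_name}]_#{short_hash}"
  name.tr(" ", "_").delete("\u007F") # strip spaces and the stray \u007F vCenter adds
end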
+ network_obj = info['network'] network_obj.each do |n| @@ -388,13 +445,17 @@ class DatacenterFolder next if one_network #If the network has been already imported one_vnet = VCenterDriver::Network.to_one_template(network_name, - network_ref, - network_type, - ref, - info['name'], - vcenter_uuid, - vcenter_instance_name, - dc_name) + network_ref, + network_type, + ref, + info['name'], + vcenter_uuid, + vcenter_instance_name, + dc_name, + cluster_id, + location) + + network_objects[dc_name] << one_vnet end @@ -719,7 +780,7 @@ class Datacenter case nr[:action] when :update_dpg begin - nr[:dpg].ReconfigureDVPortgroupConfigSpec_Task(:spec => nr[:spec]) + nr[:dpg].ReconfigureDVPortgroup_Task(:spec => nr[:spec]) rescue Exception => e raise "A rollback operation for distributed port group #{nr[:name]} could not be performed. Reason: #{e.message}" end @@ -753,6 +814,13 @@ class Datacenter end end + ######################################################################## + # PowerOn VMs + ######################################################################## + def power_on_vm(vm) + @item.PowerOnMultiVM_Task({:vm => [vm]}).wait_for_completion + end + def self.new_from_ref(ref, vi_client) self.new(RbVmomi::VIM::Datacenter.new(vi_client.vim, ref), vi_client) end diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb b/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb index 3a2f32de6d..eba454cb06 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb @@ -1,5 +1,5 @@ module VCenterDriver - +require 'digest' class DatastoreFolder attr_accessor :item, :items @@ -68,7 +68,7 @@ class Storage end end - def self.get_image_import_template(ds_name, image_path, image_type, image_prefix, ipool) + def self.get_image_import_template(ds_name, image_path, image_type, image_prefix, ipool, template_id) one_image = {} one_image[:template] = "" @@ -77,9 +77,13 @@ class Storage # Get image name file_name = File.basename(image_path).gsub(/\.vmdk$/,"") - image_name = "#{file_name} - #{ds_name}" + if template_id + image_name = "#{file_name} - #{ds_name} [Template #{template_id}]" + else + image_name = "#{file_name} - #{ds_name}" + end - #Chek if the image has already been imported + #Check if the image has already been imported image = VCenterDriver::VIHelper.find_by_name(OpenNebula::ImagePool, image_name, ipool, @@ -100,7 +104,7 @@ class Storage return one_image end - def self.get_one_image_ds_by_ref_and_ccr(ref, ccr_ref, vcenter_uuid, pool = nil) + def self.get_one_image_ds_by_ref_and_dc(ref, dc_ref, vcenter_uuid, pool = nil) if pool.nil? pool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false) if pool.respond_to?(:message) @@ -111,7 +115,7 @@ class Storage element = pool.select do |e| e["TEMPLATE/TYPE"] == "IMAGE_DS" && e["TEMPLATE/VCENTER_DS_REF"] == ref && - e["TEMPLATE/VCENTER_CCR_REF"] == ccr_ref && + e["TEMPLATE/VCENTER_DC_REF"] == dc_ref && e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid end.first rescue nil @@ -136,7 +140,7 @@ class Storage "USED_MB=#{used_mb}\nFREE_MB=#{free_mb} \nTOTAL_MB=#{total_mb}" end - def self.exists_one_by_ref_ccr_and_type?(ref, ccr_ref, vcenter_uuid, type, pool = nil) + def self.exists_one_by_ref_dc_and_type?(ref, dc_ref, vcenter_uuid, type, pool = nil) if pool.nil? 
pool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false) if pool.respond_to?(:message) @@ -146,42 +150,28 @@ class Storage elements = pool.select do |e| e["TEMPLATE/TYPE"] == type && e["TEMPLATE/VCENTER_DS_REF"] == ref && - e["TEMPLATE/VCENTER_CCR_REF"] == ccr_ref && + e["TEMPLATE/VCENTER_DC_REF"] == dc_ref && e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid end return elements.size == 1 end - def to_one(ds_name, vcenter_uuid, ccr_ref, host_id) + def to_one(ds_hash, vcenter_uuid, dc_name, dc_ref) one = "" - one << "NAME=\"#{ds_name}\"\n" + one << "NAME=\"#{ds_hash[:name]}\"\n" one << "TM_MAD=vcenter\n" one << "VCENTER_INSTANCE_ID=\"#{vcenter_uuid}\"\n" - one << "VCENTER_CCR_REF=\"#{ccr_ref}\"\n" + one << "VCENTER_DC_REF=\"#{dc_ref}\"\n" + one << "VCENTER_DC_NAME=\"#{dc_name}\"\n" + one << "VCENTER_DS_NAME=\"#{ds_hash[:simple_name]}\"\n" one << "VCENTER_DS_REF=\"#{self['_ref']}\"\n" - one << "VCENTER_ONE_HOST_ID=\"#{host_id}\"\n" - return one end - def to_one_template(one_clusters, name, ccr_ref, type, vcenter_uuid) - - one_cluster = one_clusters.select { |ccr| ccr[:ref] == ccr_ref }.first rescue nil - - return nil if one_cluster.nil? - - ds_name = "" - - if type == "IMAGE_DS" - ds_name << "#{name} (IMG)" - else - ds_name << "#{name} (SYS)" - ds_name << " [StorDRS]" if self.class == VCenterDriver::StoragePod - end - + def to_one_template(ds_hash, vcenter_uuid, dc_name, dc_ref, type) one_tmp = { - :one => to_one(ds_name, vcenter_uuid, ccr_ref, one_cluster[:host_id]) + :one => to_one(ds_hash, vcenter_uuid, dc_name, dc_ref) } if type == "SYSTEM_DS" @@ -202,6 +192,8 @@ class Storage ds_name = self['name'] + disk_type = 'preallocated' if disk_type == 'thick' + vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec( :adapterType => adapter_type, :capacityKb => size.to_i*1024, @@ -538,10 +530,7 @@ class Datastore < Storage ds_id = nil ds_name = self['name'] - img_types = ["FloppyImageFileInfo", - "IsoImageFileInfo", - "VmDiskFileInfo"] - + # We need OpenNebula Images and Datastores pools ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool, false) if ipool.respond_to?(:message) raise "Could not get OpenNebula ImagePool: #{pool.message}" @@ -555,34 +544,45 @@ class Datastore < Storage ds_id = @one_item["ID"] begin + # Prepare sha256 crypto generator + sha256 = Digest::SHA256.new + # Create Search Spec search_params = get_search_params(ds_name) # Perform search task and return results - search_task = self['browser']. - SearchDatastoreSubFolders_Task(search_params) + search_task = self['browser'].SearchDatastoreSubFolders_Task(search_params) search_task.wait_for_completion + # Loop through search results search_task.info.result.each do |result| + # Remove [datastore] from file path folderpath = "" if result.folderPath[-1] != "]" folderpath = result.folderPath.sub(/^\[#{ds_name}\] /, "") end + # Loop through images in result.file result.file.each do |image| + image_path = "" # Skip not relevant files - next if !img_types.include? image.class.to_s + next if !["FloppyImageFileInfo", + "IsoImageFileInfo", + "VmDiskFileInfo"].include? image.class.to_s # Get image path and name image_path << folderpath << image.path image_name = File.basename(image.path).reverse.sub("kdmv.","").reverse - # Get image's disk and type + # Get image's type image_type = image.class.to_s == "VmDiskFileInfo" ? "OS" : "CDROM" - disk_type = image.class.to_s == "VmDiskFileInfo" ? 
image.diskType : nil + + # Get image's size + image_size = image.capacityKb / 1024 rescue nil + image_size = image.fileSize / 1024 / 1024 rescue nil if !image_size # Assign image prefix if known or assign default prefix controller = image.controllerType rescue nil @@ -593,23 +593,33 @@ class Datastore < Storage disk_prefix = VCenterDriver::VIHelper.get_default("IMAGE/TEMPLATE/DEV_PREFIX") end - #Set template - one_image = "NAME=\"#{image_name} - #{ds_name}\"\n" + # Generate a crypto hash and get the first 12 characters + # this hash is used to avoid name collisions + full_name = "#{image_name} - #{ds_name} [#{image_path}]" + image_hash = sha256.hexdigest(full_name)[0..11] + import_name = "#{image_name} - #{ds_name} [#{image_hash}]" + + # Set template + one_image = "NAME=\"#{import_name}\"\n" one_image << "PATH=\"vcenter://#{image_path}\"\n" one_image << "PERSISTENT=\"NO\"\n" one_image << "TYPE=\"#{image_type}\"\n" - one_image << "VCENTER_DISK_TYPE=\"#{disk_type}\"\n" if disk_type one_image << "VCENTER_IMPORTED=\"YES\"\n" one_image << "DEV_PREFIX=\"#{disk_prefix}\"\n" - if VCenterDriver::VIHelper.find_by_name(OpenNebula::ImagePool, - "#{image_name} - #{ds_name}", - ipool, - false).nil? + # Check image hasn't already been imported + vcenter_path = "vcenter://#{image_path}" + image_found = VCenterDriver::VIHelper.find_image_by_path(OpenNebula::ImagePool, + vcenter_path, + ds_id, + ipool) + + if !image_found + # Add template to image array img_templates << { - :name => "#{image_name} - #{ds_name}", + :name => import_name, :path => image_path, - :size => (image.fileSize / 1024).to_s, + :size => image_size.to_s, :type => image.class.to_s, :dsid => ds_id, :one => one_image diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/importer.rb b/src/vmm_mad/remotes/lib/vcenter_driver/importer.rb index fcd4678c9c..3a968c553d 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/importer.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/importer.rb @@ -2,6 +2,8 @@ module VCenterDriver class Importer +VNC_ESX_HOST_FOLDER = "/tmp" + def self.import_wild(host_id, vm_ref, one_vm, template) begin @@ -21,10 +23,18 @@ def self.import_wild(host_id, vm_ref, one_vm, template) if npool.respond_to?(:message) raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}" end + hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool) + if hpool.respond_to?(:message) + raise "Could not get OpenNebula HostPool: #{hpool.message}" + end vcenter_vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client) + vm_name = vcenter_vm["name"] - error, template_disks = vcenter_vm.import_vcenter_disks(vc_uuid, dpool, ipool) + wild = true + sunstone = false + + error, template_disks = vcenter_vm.import_vcenter_disks(vc_uuid, dpool, ipool, sunstone) return OpenNebula::Error.new(error) if !error.empty? template << template_disks @@ -32,8 +42,13 @@ def self.import_wild(host_id, vm_ref, one_vm, template) # Create images or get nics information for template error, template_nics = vcenter_vm.import_vcenter_nics(vc_uuid, npool, + hpool, + vc_name, vm_ref, - vc_name) + wild, + sunstone, + vm_name) + return OpenNebula::Error.new(error) if !error.empty? 
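The reworked import helpers return either OpenNebula template fragments (CLI path) or arrays of hashes (Sunstone path) depending on the sunstone flag. A rough sketch of the CLI-side usage, assuming vcenter_vm, vc_uuid, dpool and ipool are already set up as in import_wild above and template is the VM template string being built:

error, disks, allocated_images = vcenter_vm.import_vcenter_disks(vc_uuid, dpool,
                                                                 ipool, false)
return OpenNebula::Error.new(error) unless error.empty?

# With sunstone = false the second value is a string of DISK=[...] sections that
# can be appended directly to the VM template; allocated_images holds the
# OpenNebula::Image objects created along the way, kept around for rollback.
template << disks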
template << template_nics @@ -41,7 +56,8 @@ def self.import_wild(host_id, vm_ref, one_vm, template) rc = one_vm.allocate(template) return rc if OpenNebula.is_error?(rc) - one_vm.deploy(host_id, false) + rc = one_vm.deploy(host_id, false) + return rc if OpenNebula.is_error?(rc) # Set reference to template disks and nics in VM template vcenter_vm.one_item = one_vm @@ -53,10 +69,10 @@ def self.import_wild(host_id, vm_ref, one_vm, template) # Let's update the info to gather VNC port until vnc_port || elapsed_seconds > 30 - sleep(2) + sleep(1) one_vm.info vnc_port = one_vm["TEMPLATE/GRAPHICS/PORT"] - elapsed_seconds += 2 + elapsed_seconds += 1 end if vnc_port @@ -68,11 +84,17 @@ def self.import_wild(host_id, vm_ref, one_vm, template) vcenter_vm.item.ReconfigVM_Task(:spec => spec).wait_for_completion end + # Add VCENTER_ESX_HOST to MONITOR info so VNC works for running VMs F#4242 + esx_host = vcenter_vm["runtime.host.name"].to_s + f = File.open(File.join(VNC_ESX_HOST_FOLDER, "vcenter_vnc_#{one_vm.id}"), 'w') + f.write(esx_host) + f.close + return one_vm.id rescue Exception => e vi_client.close_connection if vi_client - return OpenNebula::Error.new(e.message) + return OpenNebula::Error.new("#{e.message}/#{e.backtrace}") end end @@ -91,6 +113,7 @@ def self.import_clusters(con_ops, options) dc_folder = VCenterDriver::DatacenterFolder.new(vi_client) vcenter_instance_name = vi_client.vim.host + vc_uuid = vi_client.vim.serviceContent.about.instanceUuid # OpenNebula's ClusterPool cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false) @@ -100,7 +123,7 @@ def self.import_clusters(con_ops, options) cluster_list = {} cpool.each do |c| - cluster_list[c["ID"]] = c["NAME"] + cluster_list[c["ID"]] = c["NAME"] if c["ID"].to_i != 0 end # Get OpenNebula's host pool @@ -129,32 +152,46 @@ def self.import_clusters(con_ops, options) one_cluster_id = nil rpool = nil if !use_defaults - STDOUT.print "\n * Import cluster #{cluster[:cluster_name]} (y/[n])? " + STDOUT.print "\n * vCenter cluster found:\n"\ + " - Name : \e[92m#{cluster[:simple_name]}\e[39m\n"\ + " - Location : #{cluster[:cluster_location]}\n"\ + " Import cluster (y/[n])? " next if STDIN.gets.strip.downcase != 'y' + end - if cluster_list.size > 1 - STDOUT.print "\n In which OpenNebula cluster do you want the vCenter cluster to be included?\n " + if cluster_list.size > 0 + STDOUT.print "\n In which OpenNebula cluster do you want the vCenter cluster to be included?\n " - cluster_list_str = "\n" - cluster_list.each do |key, value| - cluster_list_str << " - ID: " << key << " - NAME: " << value << "\n" + cluster_list_str = "\n" + cluster_list.each do |key, value| + cluster_list_str << " - \e[94mID: " << key << "\e[39m - NAME: " << value << "\n" + end + + STDOUT.print "\n #{cluster_list_str}" + STDOUT.print "\n Specify the ID of the cluster or press Enter if you want OpenNebula to create a new cluster for you: " + + answer = STDIN.gets.strip + if !answer.empty? + one_cluster_id = answer + else + one_cluster = VCenterDriver::VIHelper.new_one_item(OpenNebula::Cluster) + rc = one_cluster.allocate("#{cluster[:cluster_name]}") + if ::OpenNebula.is_error?(rc) + STDOUT.puts " Error creating OpenNebula cluster: #{rc.message}\n" + next end - - STDOUT.print "\n #{cluster_list_str}" - STDOUT.print "\n Specify the ID of the cluster or Enter to use the default cluster: " - - answer = STDIN.gets.strip - one_cluster_id = answer if !answer.empty? 
+ one_cluster_id = one_cluster.id end end + # Generate the template and create the host in the pool one_host = VCenterDriver::ClusterComputeResource.to_one(cluster, con_ops, rpool, one_cluster_id) - STDOUT.puts "\n OpenNebula host #{cluster[:cluster_name]} with"\ - " id #{one_host.id} successfully created." + STDOUT.puts "\n OpenNebula host \e[92m#{cluster[:cluster_name]}\e[39m with"\ + " ID \e[94m#{one_host.id}\e[39m successfully created." STDOUT.puts } } @@ -206,10 +243,20 @@ def self.import_templates(con_ops, options) if npool.respond_to?(:message) raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}" end + hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool) + if hpool.respond_to?(:message) + raise "Could not get OpenNebula HostPool: #{hpool.message}" + end # Get vcenter intance uuid as moref is unique for each vcenter vc_uuid = vi_client.vim.serviceContent.about.instanceUuid + # Init vars + allocated_images = [] + allocated_nets = [] + one_t = nil + template_copy_ref = nil + rs.each {|dc, tmps| if !use_defaults @@ -228,14 +275,17 @@ def self.import_templates(con_ops, options) if !use_defaults STDOUT.print "\n * VM Template found:\n"\ - " - Name : #{t[:name]}\n"\ - " - Moref : #{t[:vcenter_ref]}\n"\ - " - Cluster: #{t[:cluster_name]}\n"\ + " - Name : \e[92m#{t[:template_name]}\e[39m\n"\ + " - Cluster : \e[96m#{t[:cluster_name]}\e[39m\n"\ + " - Location : #{t[:template_location]}\n"\ " Import this VM template (y/[n])? " next if STDIN.gets.strip.downcase != 'y' end + allocated_images = [] + allocated_nets = [] + # Linked Clones if !use_defaults @@ -267,7 +317,7 @@ def self.import_templates(con_ops, options) template_name = STDIN.gets.strip.downcase STDOUT.print "\n WARNING!!! The cloning operation can take some time"\ - " depending on the size of disks. Please wait...\n" + " depending on the size of disks. \e[96mPlease wait...\e[39m\n" error, template_copy_ref = template.create_template_copy(template_name) @@ -303,7 +353,7 @@ def self.import_templates(con_ops, options) else # Create linked clones on top of the existing template # Create a VirtualMachine object from the template_copy_ref - STDOUT.print "\n Delta disks are being created, please be patient..." + STDOUT.print "\n Delta disks are being created, \e[96please be patient...\e[39m" lc_error, use_lc = template.create_delta_disks if lc_error @@ -329,38 +379,69 @@ def self.import_templates(con_ops, options) t[:one] << "VCENTER_VM_FOLDER=\"#{vcenter_vm_folder}\"\n" if !vcenter_vm_folder.empty? end + + # Create template object + one_t = VCenterDriver::VIHelper.new_one_item(OpenNebula::Template) + + rc = one_t.allocate(t[:one]) + + if OpenNebula.is_error?(rc) + STDOUT.puts " Error creating template: #{rc.message}\n" + template.delete_template if template_copy_ref + next + end + + one_t.info + ## Add existing disks to template (OPENNEBULA_MANAGED) STDOUT.print "\n The existing disks and networks in the template"\ - " are being imported, please be patient..." + " are being imported, \e[96mplease be patient...\e[39m\n" template = t[:template] if !template - error, template_disks = template.import_vcenter_disks(vc_uuid, - dpool, - ipool) + error, template_disks, allocated_images = template.import_vcenter_disks(vc_uuid, + dpool, + ipool, + false, + one_t["ID"]) if error.empty? t[:one] << template_disks else STDOUT.puts error + # Rollback template.delete_template if template_copy_ref + one_t.delete if one_t + one_t = nil next end template_moref = template_copy_ref ? 
template_copy_ref : t[:vcenter_ref] - error, template_nics = template.import_vcenter_nics(vc_uuid, - npool, - options[:vcenter], - template_moref, - dc) + wild = false # We are not importing from a Wild VM + error, template_nics, allocated_nets = template.import_vcenter_nics(vc_uuid, + npool, + hpool, + options[:vcenter], + template_moref, + wild, + false, + template["name"], + one_t["ID"], + dc) + if error.empty? t[:one] << template_nics else STDOUT.puts error + # Rollback + allocated_images.each do |i| i.delete end + allocated_images = [] template.delete_template if template_copy_ref + one_t.delete if one_t + one_t = nil next end @@ -450,15 +531,21 @@ def self.import_templates(con_ops, options) t[:one] << "]" end - one_t = VCenterDriver::VIHelper.new_one_item(OpenNebula::Template) + rc = one_t.update(t[:one]) - rc = one_t.allocate(t[:one]) - - if ::OpenNebula.is_error?(rc) + if OpenNebula.is_error?(rc) STDOUT.puts " Error creating template: #{rc.message}\n" + + # Rollback template.delete_template if template_copy_ref + allocated_images.each do |i| i.delete end + allocated_images = [] + allocated_nets.each do |n| n.delete end + allocated_nets = [] + one_t.delete if one_t + one_t = nil else - STDOUT.puts " OpenNebula template #{one_t.id} created!\n" + STDOUT.puts "\n OpenNebula template \e[92m#{t[:name]}\e[39m with ID \e[94m#{one_t.id}\e[39m created!\n" end } } @@ -466,7 +553,15 @@ def self.import_templates(con_ops, options) puts "\n" exit 0 #Ctrl+C rescue Exception => e - STDOUT.puts " Error: #{e.message}/\n#{e.backtrace}" + STDOUT.puts "There was an error trying to import a vcenter template: #{e.message}/\n#{e.backtrace}" + + # Rollback + allocated_images.each do |i| i.delete end + allocated_images = [] + allocated_nets.each do |n| n.delete end + allocated_nets = [] + one_t.delete if one_t + template.delete_template if template_copy_ref ensure vi_client.close_connection if vi_client end @@ -492,7 +587,13 @@ def self.import_networks(con_ops, options) raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}" end - rs = dc_folder.get_unimported_networks(npool,options[:vcenter]) + # Get OpenNebula's host pool + hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false) + if hpool.respond_to?(:message) + raise "Could not get OpenNebula HostPool: #{hpool.message}" + end + + rs = dc_folder.get_unimported_networks(npool,options[:vcenter],hpool) STDOUT.print "done!\n" @@ -510,12 +611,13 @@ def self.import_networks(con_ops, options) end tmps.each do |n| - one_cluster_id = nil if !use_defaults print_str = "\n * Network found:\n"\ - " - Name : #{n[:name]}\n"\ - " - Type : #{n[:type]}\n" - print_str << " - Cluster : #{n[:cluster]}\n" + " - Name : \e[92m#{n[:name]}\e[39m\n"\ + " - Type : #{n[:type]}\n"\ + " - Cluster : \e[96m#{n[:cluster]}\e[39m\n"\ + " - Cluster location : #{n[:cluster_location]}\n"\ + " - OpenNebula Cluster ID : #{n[:one_cluster_id]}\n" print_str << " Import this Network (y/[n])? " STDOUT.print print_str @@ -547,9 +649,10 @@ def self.import_networks(con_ops, options) if !use_defaults STDOUT.print " What type of Virtual Network"\ " do you want to create (IPv[4],IPv[6]"\ - ",[E]thernet) ?" + ",[E]thernet)?" type_answer = STDIN.gets.strip + type_answer = "e" if type_answer.empty? 
if ["4","6","e"].include?(type_answer.downcase) ar_type = type_answer.downcase else @@ -637,14 +740,14 @@ def self.import_networks(con_ops, options) one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork) - rc = one_vn.allocate(n[:one]) + rc = one_vn.allocate(n[:one],n[:one_cluster_id].to_i) if ::OpenNebula.is_error?(rc) STDOUT.puts "\n Error creating virtual network: " + " #{rc.message}\n" else - STDOUT.puts "\n OpenNebula virtual network " + - "#{one_vn.id} created with size #{size}!\n" + STDOUT.puts "\n OpenNebula virtual network \e[92m#{n[:import_name]}\e[39m " + + "with ID \e[94m#{one_vn.id}\e[39m created with size #{size}!\n" end end } @@ -677,7 +780,7 @@ def self.import_datastore(con_ops, options) raise "Could not get OpenNebula DatastorePool: #{dpool.message}" end - # Get OpenNebula's host pool + # OpenNebula's HostPool hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false) if hpool.respond_to?(:message) raise "Could not get OpenNebula HostPool: #{hpool.message}" @@ -689,7 +792,7 @@ def self.import_datastore(con_ops, options) rs.each {|dc, tmps| if !use_defaults - STDOUT.print "\nDo you want to process datacenter #{dc} (y/[n])? " + STDOUT.print "\nDo you want to process datacenter \e[95m#{dc}\e[39m (y/[n])? " next if STDIN.gets.strip.downcase != 'y' end @@ -702,29 +805,50 @@ def self.import_datastore(con_ops, options) tmps.each{ |d| if !use_defaults STDOUT.print "\n * Datastore found:\n"\ - " - Name : #{d[:name]}\n"\ - " - Total MB : #{d[:total_mb]}\n"\ - " - Free MB : #{d[:free_mb]}\n"\ - " - Cluster : #{d[:cluster]}\n"\ - " Import this as Datastore [y/n]? " + " - Name : \e[92m#{d[:simple_name]}\e[39m\n"\ + " - Total MB : #{d[:total_mb]}\n"\ + " - Free MB : #{d[:free_mb]}\n"\ + " - OpenNebula Cluster IDs: #{d[:cluster].join(',')}\n"\ + " Import this datastore [y/n]? " next if STDIN.gets.strip.downcase != 'y' - STDOUT.print "\n NOTE: For each vcenter datastore a SYSTEM and IMAGE datastore\n"\ + STDOUT.print "\n NOTE: For each vCenter datastore a SYSTEM and IMAGE datastore\n"\ " will be created in OpenNebula except for a StorageDRS which is \n"\ " represented as a SYSTEM datastore only.\n" end - ds_allocate_error = false d[:ds].each do |ds| one_d = VCenterDriver::VIHelper.new_one_item(OpenNebula::Datastore) rc = one_d.allocate(ds[:one]) if ::OpenNebula.is_error?(rc) STDOUT.puts " \n Error creating datastore: #{rc.message}" - ds_allocate_error = true else - STDOUT.puts " \n OpenNebula datastore #{one_d.id} created!\n" + # Update template with credentials + one = "" + one << "VCENTER_HOST=\"#{con_ops[:host]}\"\n" + one << "VCENTER_USER=\"#{con_ops[:user]}\"\n" + one << "VCENTER_PASSWORD=\"#{con_ops[:password]}\"\n" + + rc = one_d.update(one,true) + if ::OpenNebula.is_error?(rc) + STDOUT.puts " \n Error updating datastore: \e[91m#{rc.message}\e[39m" + else + STDOUT.puts " \n OpenNebula datastore \e[92m#{d[:name]}\e[39m with ID \e[94m#{one_d.id}\e[39m created!\n" + + # Let's add it to clusters + d[:cluster].each do |cid| + one_cluster = VCenterDriver::VIHelper.one_item(OpenNebula::Cluster, cid.to_s, false) + if ::OpenNebula.is_error?(one_cluster) + STDOUT.puts " \n Error retrieving cluster #{cid}: #{rc.message}" + end + rc = one_cluster.adddatastore(one_d.id) + if ::OpenNebula.is_error?(rc) + STDOUT.puts " \n Error adding datastore #{one_d.id} to OpenNebula cluster #{cid}: #{rc.message}. 
Yoy may have to place this datastore in the right cluster by hand" + end + end + end end end } @@ -758,6 +882,12 @@ def self.import_images(con_ops, ds_name, options) ds = VCenterDriver::Datastore.new_from_ref(one_ds_ref, vi_client) ds.one_item = one_ds #Store opennebula template for datastore + vc_uuid = vi_client.vim.serviceContent.about.instanceUuid + one_ds_instance_id = one_ds['TEMPLATE/VCENTER_INSTANCE_ID'] + + if one_ds_instance_id != vc_uuid + raise "Datastore is not in the same vCenter instance provided in credentials" + end images = ds.get_images @@ -770,6 +900,7 @@ def self.import_images(con_ops, ds_name, options) " - Name : #{i[:name]}\n"\ " - Path : #{i[:path]}\n"\ " - Type : #{i[:type]}\n"\ + " - Size (MB) : #{i[:size]}\n"\ " Import this Image (y/[n])? " next if STDIN.gets.strip.downcase != 'y' diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/network.rb b/src/vmm_mad/remotes/lib/vcenter_driver/network.rb index cbc1af160b..33a599c2f6 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/network.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/network.rb @@ -1,5 +1,5 @@ module VCenterDriver - +require 'digest' class NetworkFolder attr_accessor :item, :items @@ -70,38 +70,53 @@ class Network def self.to_one_template(network_name, network_ref, network_type, ccr_ref, ccr_name, vcenter_uuid, - vcenter_instance_name, dc_name, - unmanaged=false, template_ref=nil) + vcenter_instance_name, dc_name, cluster_id, + cluster_location, + unmanaged=nil, template_ref=nil, dc_ref=nil, + vm_or_template_name=nil, template_id=nil) one_tmp = {} if unmanaged - network_import_name = "#{network_name} [#{network_ref} - #{template_ref} - #{vcenter_uuid}]" + if unmanaged == "wild" + network_import_name = "#{network_name} [VM #{vm_or_template_name}]" + end + + if unmanaged == "template" + network_import_name = "#{network_name} [#{vm_or_template_name} - Template #{template_id}]" + end else - network_import_name = "[#{vcenter_instance_name} - #{dc_name}] #{network_name} - #{ccr_name.tr(" ", "_")}" + full_name = "#{network_name} - #{ccr_name.tr(" ", "_")} [#{vcenter_instance_name} - #{dc_name}]_#{cluster_location}" + sha256 = Digest::SHA256.new + network_hash = sha256.hexdigest(full_name)[0..11] + network_import_name = "#{network_name} - #{ccr_name.tr(" ", "_")} [#{vcenter_instance_name} - #{dc_name}]_#{network_hash}" end - one_tmp[:name] = network_import_name + one_tmp[:name] = network_name + one_tmp[:import_name] = network_import_name one_tmp[:bridge] = network_name one_tmp[:type] = network_type one_tmp[:cluster] = ccr_name + one_tmp[:cluster_location] = cluster_location one_tmp[:vcenter_ccr_ref] = ccr_ref + one_tmp[:one_cluster_id] = cluster_id one_tmp[:one] = to_one(network_import_name, network_name, network_ref, network_type, - ccr_ref, vcenter_uuid, unmanaged, template_ref) + ccr_ref, vcenter_uuid, unmanaged, template_ref, dc_ref) return one_tmp end def self.to_one(network_import_name, network_name, network_ref, network_type, - ccr_ref, vcenter_uuid, unmanaged, template_ref) + ccr_ref, vcenter_uuid, unmanaged, template_ref, dc_ref) + template = "NAME=\"#{network_import_name}\"\n"\ "BRIDGE=\"#{network_name}\"\n"\ "VN_MAD=\"dummy\"\n"\ "VCENTER_PORTGROUP_TYPE=\"#{network_type}\"\n"\ "VCENTER_NET_REF=\"#{network_ref}\"\n"\ - "VCENTER_CCR_REF=\"#{ccr_ref}\"\n"\ "VCENTER_INSTANCE_ID=\"#{vcenter_uuid}\"\n" - template += "OPENNEBULA_MANAGED=\"NO\"\n" if unmanaged + template += "VCENTER_CCR_REF=\"#{ccr_ref}\"\n" if !unmanaged + template += "OPENNEBULA_MANAGED=\"NO\"\n" if unmanaged template += 
"VCENTER_TEMPLATE_REF=\"#{template_ref}\"\n" if template_ref return template @@ -115,7 +130,7 @@ class Network end end - def self.get_unmanaged_vnet_by_ref(ref, ccr_ref, template_ref, vcenter_uuid, pool = nil) + def self.get_unmanaged_vnet_by_ref(ref, template_ref, vcenter_uuid, pool = nil) if pool.nil? pool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false) if pool.respond_to?(:message) @@ -124,7 +139,6 @@ class Network end element = pool.select do |e| e["TEMPLATE/VCENTER_NET_REF"] == ref && - e["TEMPLATE/VCENTER_CCR_REF"] == ccr_ref && e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid && e["TEMPLATE/VCENTER_TEMPLATE_REF"] == template_ref && e["TEMPLATE/OPENNEBULA_MANAGED"] == "NO" @@ -143,46 +157,21 @@ class Network one_vnet.info end - def self.vcenter_networks_to_be_removed(device_change_nics, vcenter_uuid) - - networks = {} - npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false) - if npool.respond_to?(:message) - raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}" - end - - device_change_nics.each do |nic| - if nic[:operation] == :remove - vnet_ref = nil - - # Port group - if nic[:device].backing.respond_to?(:network) - vnet_ref = nic[:device].backing.network._ref - end - - # Distributed port group - if nic[:device].backing.respond_to?(:port) && - nic[:device].backing.port.respond_to?(:portgroupKey) - vnet_ref = nic[:device].backing.port.portgroupKey - end - - # Find vnet_ref in OpenNebula's pool of networks - one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool, - "TEMPLATE/VCENTER_NET_REF", - vnet_ref, - vcenter_uuid, - npool) - next if !one_network - - # Add pg or dpg name that are in vcenter but not in - # OpenNebula's VM to a hash for later removal - if one_network["VN_MAD"] == "vcenter" && !networks.key?(one_network["BRIDGE"]) - networks[one_network["BRIDGE"]] = one_network - end + def self.get_unmanaged_vnet_by_ref(ref, template_ref, vcenter_uuid, pool = nil) + if pool.nil? 
+ pool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false) + if pool.respond_to?(:message) + raise "Could not get OpenNebula VirtualNetworkPool: #{pool.message}" end end + element = pool.select do |e| + e["TEMPLATE/VCENTER_NET_REF"] == ref && + e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid && + e["TEMPLATE/VCENTER_TEMPLATE_REF"] == template_ref && + e["TEMPLATE/OPENNEBULA_MANAGED"] == "NO" + end.first rescue nil - networks + return element end # This is never cached diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb index 3735ef3928..c18aec0e3c 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb @@ -87,36 +87,73 @@ class VIClient end def self.new_from_host(host_id) - client = OpenNebula::Client.new - host = OpenNebula::Host.new_with_id(host_id, client) - rc = host.info - if OpenNebula.is_error?(rc) - puts rc.message - exit -1 + begin + client = OpenNebula::Client.new + host = OpenNebula::Host.new_with_id(host_id, client) + rc = host.info + if OpenNebula.is_error?(rc) + raise "Could not get host info for ID: #{host_id} - #{rc.message}" + end + + password = host["TEMPLATE/VCENTER_PASSWORD"] + + system = OpenNebula::System.new(client) + config = system.get_configuration + if OpenNebula.is_error?(config) + raise "Error getting oned configuration : #{config.message}" + end + + token = config["ONE_KEY"] + + password = VIClient::decrypt(password, token) + + connection = { + :host => host["TEMPLATE/VCENTER_HOST"], + :user => host["TEMPLATE/VCENTER_USER"], + :rp => host["TEMPLATE/VCENTER_RESOURCE_POOL"], + :ccr => host["TEMPLATE/VCENTER_CCR_REF"], + :password => password + } + + self.new(connection) + + rescue Exception => e + raise e end + end - password = host["TEMPLATE/VCENTER_PASSWORD"] + def self.new_from_datastore(datastore_id) + begin + client = OpenNebula::Client.new + datastore = OpenNebula::Datastore.new_with_id(datastore_id, client) + rc = datastore.info + if OpenNebula.is_error?(rc) + raise "Could not get datastore info for ID: #{datastore_id} - #{rc.message}" + end - system = OpenNebula::System.new(client) - config = system.get_configuration - if OpenNebula.is_error?(config) - puts "Error getting oned configuration : #{config.message}" - exit -1 + password = datastore["TEMPLATE/VCENTER_PASSWORD"] + + system = OpenNebula::System.new(client) + config = system.get_configuration + if OpenNebula.is_error?(config) + raise "Error getting oned configuration : #{config.message}" + end + + token = config["ONE_KEY"] + + password = VIClient::decrypt(password, token) + + connection = { + :host => datastore["TEMPLATE/VCENTER_HOST"], + :user => datastore["TEMPLATE/VCENTER_USER"], + :password => password + } + + self.new(connection) + + rescue Exception => e + raise e end - - token = config["ONE_KEY"] - - password = VIClient::decrypt(password, token) - - connection = { - :host => host["TEMPLATE/VCENTER_HOST"], - :user => host["TEMPLATE/VCENTER_USER"], - :rp => host["TEMPLATE/VCENTER_RESOURCE_POOL"], - :ccr => host["TEMPLATE/VCENTER_CCR_REF"], - :password => password - } - - self.new(connection) end def self.decrypt(msg, token) @@ -132,8 +169,7 @@ class VIClient msg = cipher.update(Base64::decode64(msg)) msg << cipher.final rescue - puts "Error decrypting secret." - exit -1 + raise "Error decrypting secret." 
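Both the Sunstone helper and the new_from_host / new_from_datastore constructors recover the plain-text vCenter password the same way: fetch ONE_KEY from oned's configuration and hand it to VIClient.decrypt. A condensed sketch of that pattern, where encrypted_password is assumed to be the value stored in TEMPLATE/VCENTER_PASSWORD:

client = OpenNebula::Client.new
config = OpenNebula::System.new(client).get_configuration
raise "Error getting oned configuration: #{config.message}" if OpenNebula.is_error?(config)

token    = config["ONE_KEY"]
password = VCenterDriver::VIClient.decrypt(encrypted_password, token)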
end end diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb index 537e334422..615a44a1f9 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb @@ -4,6 +4,7 @@ class VIHelper ETC_LOCATION = "/etc/one/" if !defined?(ETC_LOCATION) VCENTER_DRIVER_DEFAULT = "#{ETC_LOCATION}/vcenter_driver.default" + VM_PREFIX_DEFAULT = "one-$i-" def self.client @@client ||= OpenNebula::Client.new @@ -58,6 +59,14 @@ class VIHelper return element end + def self.find_image_by_path(the_class, path, ds_id, pool = nil) + pool = one_pool(the_class, false) if pool.nil? + element = pool.select{|e| + e["PATH"] == path && + e["DATASTORE_ID"] == ds_id}.first rescue nil + return element + end + def self.find_persistent_image_by_source(source, pool) element = pool.select{|e| e["SOURCE"] == source && diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb index 90a6ce0b03..e9688e2ec7 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb @@ -1,5 +1,5 @@ module VCenterDriver - +require 'digest' class VirtualMachineFolder attr_accessor :item, :items @@ -215,14 +215,17 @@ class Template end end - def import_vcenter_disks(vc_uuid, dpool, ipool) + def import_vcenter_disks(vc_uuid, dpool, ipool, sunstone=false, template_id=nil) disk_info = "" error = "" + sunstone_disk_info = [] begin lock #Lock import operation, to avoid concurrent creation of images - ccr_ref = self["runtime.host.parent._ref"] + ##ccr_ref = self["runtime.host.parent._ref"] + dc = get_dc + dc_ref = dc.item._ref #Get disks and info required vc_disks = get_vcenter_disks @@ -231,8 +234,8 @@ class Template allocated_images = [] vc_disks.each do |disk| - datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_ccr(disk[:datastore]._ref, - ccr_ref, + datastore_found = VCenterDriver::Storage.get_one_image_ds_by_ref_and_dc(disk[:datastore]._ref, + dc_ref, vc_uuid, dpool) if datastore_found.nil? @@ -250,63 +253,86 @@ class Template disk[:path], disk[:type], disk[:prefix], - ipool) + ipool, + template_id) #Image is already in the datastore if image_import[:one] # This is the disk info - disk_info << "DISK=[\n" - disk_info << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n" - disk_info << "OPENNEBULA_MANAGED=\"NO\"\n" - disk_info << "]\n" - elsif !image_import[:template].empty? - # Then the image is created as it's not in the datastore - one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image) - - allocated_images << one_i - - rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i) - - if ::OpenNebula.is_error?(rc) - error = " Error creating disk from template: #{rc.message}. 
Cannot import the template\n" - - #Rollback delete disk images - allocated_images.each do |i| - i.delete - end - - break + disk_tmp = "" + disk_tmp << "DISK=[\n" + disk_tmp << "IMAGE_ID=\"#{image_import[:one]["ID"]}\",\n" + disk_tmp << "OPENNEBULA_MANAGED=\"NO\"\n" + disk_tmp << "]\n" + if sunstone + sunstone_disk = {} + sunstone_disk[:type] = "EXISTING_DISK" + sunstone_disk[:image_tmpl] = disk_tmp + sunstone_disk_info << sunstone_disk + else + disk_info << disk_tmp end - #Add info for One template - one_i.info - disk_info << "DISK=[\n" - disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n" - disk_info << "IMAGE_UNAME=\"#{one_i["UNAME"]}\",\n" - disk_info << "OPENNEBULA_MANAGED=\"NO\"\n" - disk_info << "]\n" + elsif !image_import[:template].empty? + + if sunstone + sunstone_disk = {} + sunstone_disk[:type] = "NEW_DISK" + sunstone_disk[:image_tmpl] = image_import[:template] + sunstone_disk[:ds_id] = datastore_found['ID'].to_i + sunstone_disk_info << sunstone_disk + else + # Then the image is created as it's not in the datastore + one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image) + allocated_images << one_i + rc = one_i.allocate(image_import[:template], datastore_found['ID'].to_i) + + if OpenNebula.is_error?(rc) + error = " Error creating disk from template: #{rc.message}\n" + break + end + + #Add info for One template + one_i.info + disk_info << "DISK=[\n" + disk_info << "IMAGE_ID=\"#{one_i["ID"]}\",\n" + disk_info << "IMAGE_UNAME=\"#{one_i["UNAME"]}\",\n" + disk_info << "OPENNEBULA_MANAGED=\"NO\"\n" + disk_info << "]\n" + end end end rescue Exception => e - error = "There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}" + error = "\n There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}\n#{e.backtrace}" ensure unlock + if !error.empty? 
+              #Rollback delete disk images
+              allocated_images.each do |i|
+                  i.delete
+              end
+          end
       end
-      return error, disk_info
+      return error, sunstone_disk_info, allocated_images if sunstone
+
+      return error, disk_info, allocated_images if !sunstone
+
   end

-    def import_vcenter_nics(vc_uuid, npool, vcenter_instance_name,
-                            template_ref, dc_name=nil)
+    def import_vcenter_nics(vc_uuid, npool, hpool, vcenter_instance_name,
+                            template_ref, wild, sunstone=false, vm_name=nil, vm_id=nil, dc_name=nil)
       nic_info = ""
       error = ""
+      sunstone_nic_info = []
       begin
-          lock #Lock import operation, to avoid concurrent creation of images
+          lock #Lock import operation, to avoid concurrent creation of networks
           if !dc_name
               dc = get_dc
               dc_name = dc.item.name
+              dc_ref = dc.item._ref
           end
           ccr_ref = self["runtime.host.parent._ref"]
@@ -315,30 +341,63 @@ class Template
           #Get disks and info required
           vc_nics = get_vcenter_nics
-          # Track allocated networks
+          # Track allocated networks for rollback
           allocated_networks = []
           vc_nics.each do |nic|
+              # Check if the network already exists
              network_found = VCenterDriver::Network.get_unmanaged_vnet_by_ref(nic[:net_ref],
-                                                                              ccr_ref,
                                                                               template_ref,
                                                                               vc_uuid,
                                                                               npool)
-              #Network is already in the datastore
+              #Network is already in OpenNebula
              if network_found
+                  # This is the existing nic info
-                  nic_info << "NIC=[\n"
-                  nic_info << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
-                  nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
-                  nic_info << "]\n"
+                  nic_tmp = ""
+                  nic_tmp << "NIC=[\n"
+                  nic_tmp << "NETWORK_ID=\"#{network_found["ID"]}\",\n"
+                  nic_tmp << "OPENNEBULA_MANAGED=\"NO\"\n"
+                  nic_tmp << "]\n"
+
+                  if sunstone
+                      sunstone_nic = {}
+                      sunstone_nic[:type] = "EXISTING_NIC"
+                      sunstone_nic[:network_tmpl] = nic_tmp
+                      sunstone_nic_info << sunstone_nic
+                  else
+                      nic_info << nic_tmp
+                  end
              else
                  # Then the network has to be created as it's not in OpenNebula
                  one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)
-                  allocated_networks << one_vn
-
+                  # We're importing unmanaged nics
                  unmanaged = true
+                  # Let's get the OpenNebula host associated with the cluster reference
+                  one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
+                                                                 "TEMPLATE/VCENTER_CCR_REF",
+                                                                 ccr_ref,
+                                                                 vc_uuid,
+                                                                 hpool)
+
+                  # Let's get the CLUSTER_ID from the OpenNebula host
+                  if !one_host || !one_host['CLUSTER_ID']
+                      cluster_id = -1
+                  else
+                      cluster_id = one_host['CLUSTER_ID']
+                  end
+
+                  # We have to know if we're importing nics from a wild vm
+                  # or from a template
+                  if wild
+                      unmanaged = "wild"
+                  else
+                      unmanaged = "template"
+                  end
+
+                  # Prepare the Virtual Network template
                  one_vnet = VCenterDriver::Network.to_one_template(nic[:net_name],
                                                                    nic[:net_ref],
                                                                    nic[:pg_type],
@@ -347,8 +406,13 @@ class Template
                                                                    vc_uuid,
                                                                    vcenter_instance_name,
                                                                    dc_name,
+                                                                   cluster_id,
+                                                                   nil,
                                                                    unmanaged,
-                                                                   template_ref)
+                                                                   template_ref,
+                                                                   dc_ref,
+                                                                   vm_name,
+                                                                   vm_id)
                  # By default add an ethernet range to network size 255
                  ar_str = ""
@@ -358,35 +422,47 @@ class Template
                  ar_str << "]\n"
                  one_vnet[:one] << ar_str
-                  rc = one_vn.allocate(one_vnet[:one])
+                  if sunstone
+                      sunstone_nic = {}
+                      sunstone_nic[:type] = "NEW_NIC"
+                      sunstone_nic[:network_tmpl] = one_vnet[:one]
+                      sunstone_nic[:one_cluster_id] = cluster_id.to_i
+                      sunstone_nic_info << sunstone_nic
+                  else
+                      # Allocate the Virtual Network
+                      allocated_networks << one_vn
+                      rc = one_vn.allocate(one_vnet[:one], cluster_id.to_i)
-                  if ::OpenNebula.is_error?(rc)
-                      error = " Error creating virtual network from template: #{rc.message}. Cannot import the template\n"
-
-                      #Rollback, delete virtual networks
-                      allocated_networks.each do |n|
-                          n.delete
+                      if OpenNebula.is_error?(rc)
+                          error = "\n    ERROR: Could not allocate virtual network due to #{rc.message}\n"
+                          break
                      end
-                      break
+                      # Add info for One template
+                      one_vn.info
+                      nic_info << "NIC=[\n"
+                      nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
+                      nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
+                      nic_info << "]\n"
                  end
-
-                  #Add info for One template
-                  one_vn.info
-                  nic_info << "NIC=[\n"
-                  nic_info << "NETWORK_ID=\"#{one_vn["ID"]}\",\n"
-                  nic_info << "OPENNEBULA_MANAGED=\"NO\"\n"
-                  nic_info << "]\n"
              end
          end
       rescue Exception => e
-          error = "There was an error trying to create a virtual network for network in vcenter template. Reason: #{e.message}"
+          error = "\n There was an error trying to create a virtual network for network in vcenter template. Reason: #{e.message}"
       ensure
           unlock
+          #Rollback, delete virtual networks
+          if !error.empty?
+              allocated_networks.each do |n|
+                  n.delete
+              end
+          end
       end
-      return error, nic_info
+      return error, nic_info, allocated_networks if !sunstone
+
+      return error, sunstone_nic_info, allocated_networks if sunstone
   end
   def get_vcenter_disk_key(unit_number, controller_key)
@@ -492,27 +568,6 @@ class Template
        self['runtime.host.parent.resourcePool']
    end
-    def to_one_template(template, cluster_ref, cluster_name, has_nics_and_disks, rp, rp_list, vcenter_uuid, vcenter_instance_name, dc_name)
-
-        template_ref = template['_ref']
-        template_name = template["name"]
-
-        one_tmp = {}
-        one_tmp[:name] = "[#{vcenter_instance_name} - #{dc_name}] #{template_name} - #{cluster_name}"
-        one_tmp[:template_name] = template_name
-        one_tmp[:vcenter_ccr_ref] = cluster_ref
-        one_tmp[:vcenter_ref] = template_ref
-        one_tmp[:vcenter_instance_uuid] = vcenter_uuid
-        one_tmp[:cluster_name] = cluster_name
-        one_tmp[:rp] = rp
-        one_tmp[:rp_list] = rp_list
-        one_tmp[:template] = template
-        one_tmp[:import_disks_and_nics] = has_nics_and_disks
-
-        one_tmp[:one] = to_one(true, vcenter_uuid, cluster_ref, cluster_name, vcenter_instance_name, dc_name)
-        return one_tmp
-    end
-
    def vm_to_one(vm_name)
        str = "NAME = \"#{vm_name}\"\n"\
@@ -581,34 +636,11 @@ class Template
        return str
    end
-    def to_one(template=false, vc_uuid=nil, ccr_ref=nil, ccr_name=nil, vcenter_instance_name, dc_name)
+    def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id)
-        if !ccr_ref && !ccr_name
-            cluster = @item["runtime.host"].parent
-            ccr_name = cluster.name
-            ccr_ref = cluster._ref
-        end
+        num_cpu, memory, annotation, guest_fullname = template.item.collect("config.hardware.numCPU","config.hardware.memoryMB","config.annotation","guest.guestFullName")
-        vc_uuid = self["_connection.serviceContent.about.instanceUuid"] if !vc_uuid
-
-        # Get info of the host where the VM/template is located
-        host_id = nil
-        one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
-                                                       "TEMPLATE/VCENTER_CCR_REF",
-                                                       ccr_ref,
-                                                       vc_uuid)
-
-        num_cpu, memory, extraconfig, annotation, guest_fullname = @item.collect("config.hardware.numCPU","config.hardware.memoryMB","config.extraConfig","config.annotation","guest.guestFullName")
-        host_id = one_host["ID"] if one_host
-
-        name = ""
-        if template
-            name << "[#{vcenter_instance_name} - #{dc_name}] #{self["name"]} - #{ccr_name.tr(" ", "_")}"
-        else
-            name << "#{self["name"]} - #{ccr_name.tr(" ", "_")}"
-        end
-
-        str = "NAME = \"#{name}\"\n"\
+        str = "NAME = \"#{import_name}\"\n"\
              "CPU = \"#{num_cpu}\"\n"\
              "vCPU = \"#{num_cpu}\"\n"\
              "MEMORY = \"#{memory}\"\n"\
@@ -620,40 +652,13 @@ class Template
              "]\n"\
              "VCENTER_INSTANCE_ID =\"#{vc_uuid}\"\n"
-        if !template
-            str << "IMPORT_VM_ID =\"#{self["_ref"]}\"\n"
-            str << "IMPORT_STATE =\"#{@state}\"\n"
-        end
+        str << "VCENTER_TEMPLATE_REF =\"#{template["_ref"]}\"\n"
+        str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"
-        if template
-            str << "VCENTER_TEMPLATE_REF =\"#{self['_ref']}\"\n"
-            str << "VCENTER_CCR_REF =\"#{ccr_ref}\"\n"
-        end
-
-        vnc_port = nil
-        keymap = nil
-
-        if !template
-            extraconfig.select do |xtra|
-
-                if xtra[:key].downcase=="remotedisplay.vnc.port"
-                    vnc_port = xtra[:value]
-                end
-
-                if xtra[:key].downcase=="remotedisplay.vnc.keymap"
-                    keymap = xtra[:value]
-                end
-            end
-        end
-
-        if !extraconfig.empty?
-            str << "GRAPHICS = [\n"\
-                   " TYPE =\"vnc\",\n"
-            str << " PORT =\"#{vnc_port}\",\n" if vnc_port
-            str << " KEYMAP =\"#{keymap}\",\n" if keymap
-            str << " LISTEN =\"0.0.0.0\"\n"
-            str << "]\n"
-        end
+        str << "GRAPHICS = [\n"\
+               " TYPE =\"vnc\",\n"
+        str << " LISTEN =\"0.0.0.0\"\n"
+        str << "]\n"
        if annotation.nil? || annotation.empty?
            str << "DESCRIPTION = \"vCenter Template imported by OpenNebula" \
@@ -686,28 +691,22 @@ class Template
    def self.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name=nil, dc_name=nil, rp_cache={})
        begin
-            template_ccr = template['runtime.host.parent']
-            template_ccr_ref = template_ccr._ref
-            template_ccr_name =template_ccr.name
+            template_ref = template['_ref']
+            template_name = template["name"]
+            template_ccr = template['runtime.host.parent']
+            template_ccr_ref = template_ccr._ref
+            template_ccr_name = template_ccr.name
+            # Set vcenter instance name
            vcenter_instance_name = vi_client.vim.host if !vcenter_instance_name
+            # Get datacenter info
            if !dc_name
                dc = get_dc
                dc_name = dc.item.name
            end
-            # Check if template has nics or disks to be imported later
-            has_nics_and_disks = true
-            ##template["config.hardware.device"].each do |device|
-            ##    if VCenterDriver::Storage.is_disk_or_iso?(device) ||
-            ##       VCenterDriver::Network.is_nic?(device)
-            ##        has_nics_and_disks = true
-            ##        break
-            ##    end
-            ##end
-
-            #Get resource pools
+            #Get resource pools and generate a list
            if !rp_cache[template_ccr_name]
                tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr_ref, vi_client)
                rp_list = tmp_cluster.get_resource_pool_list
@@ -728,18 +727,58 @@ class Template
            rp = rp_cache[template_ccr_name][:rp]
            rp_list = rp_cache[template_ccr_name][:rp_list]
-            object = template.to_one_template(template,
-                                              template_ccr_ref,
-                                              template_ccr_name,
-                                              has_nics_and_disks,
-                                              rp,
-                                              rp_list,
-                                              vcenter_uuid,
-                                              vcenter_instance_name,
-                                              dc_name)
-            return object
+            # Determine the location path for the template
+            vcenter_template = VCenterDriver::VirtualMachine.new_from_ref(template_ref, vi_client)
+            item = vcenter_template.item
+            folders = []
+            while !item.instance_of? RbVmomi::VIM::Datacenter
+                item = item.parent
+                if !item.instance_of? RbVmomi::VIM::Datacenter
+                    folders << item.name if item.name != "vm"
+                end
+                raise "Could not find the template's parent location" if item.nil?
+            end
+            location = folders.reverse.join("/")
+            location = "/" if location.empty?
+            # Generate a crypto hash for the template name and take the first 12 chars
+            sha256 = Digest::SHA256.new
+            full_name = "#{template_name} - #{template_ccr_name} [#{vcenter_instance_name} - #{dc_name}]_#{location}"
+            template_hash = sha256.hexdigest(full_name)[0..11]
+            template_name = template_name.tr("\u007F", "")
+            template_ccr_name = template_ccr_name.tr("\u007F", "")
+            import_name = "#{template_name} - #{template_ccr_name} [#{vcenter_instance_name} - #{dc_name}]_#{template_hash}"
+
+            # Prepare the Hash that will be used by importers to display
+            # the object being imported
+            one_tmp = {}
+            one_tmp[:name] = import_name
+            one_tmp[:template_name] = template_name
+            one_tmp[:sunstone_template_name]= "#{template_name} [ Cluster: #{template_ccr_name} - Template location: #{location} ]"
+            one_tmp[:template_hash] = template_hash
+            one_tmp[:template_location] = location
+            one_tmp[:vcenter_ccr_ref] = template_ccr_ref
+            one_tmp[:vcenter_ref] = template_ref
+            one_tmp[:vcenter_instance_uuid] = vcenter_uuid
+            one_tmp[:cluster_name] = template_ccr_name
+            one_tmp[:rp] = rp
+            one_tmp[:rp_list] = rp_list
+            one_tmp[:template] = template
+            one_tmp[:import_disks_and_nics] = true # By default we import disks and nics
+
+            # Get the host ID of the OpenNebula host which represents the vCenter Cluster
+            host_id = nil
+            one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
+                                                           "TEMPLATE/VCENTER_CCR_REF",
+                                                           template_ccr_ref,
+                                                           vcenter_uuid)
+            host_id = one_host["ID"]
+            raise "Could not find the host's ID associated with the template being imported" if !host_id
+
+            # Get the OpenNebula template hash
+            one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id)
+            return one_tmp
        rescue
            return nil
        end
@@ -930,7 +969,6 @@ class VirtualMachine < Template
    # @return RbVmomi::VIM::Datastore or nil
    def get_ds
        ##req_ds = one_item['USER_TEMPLATE/VCENTER_DS_REF']
-        ##TODO SCHED_DS_REQUIREMENTS??
        current_ds_id = one_item["HISTORY_RECORDS/HISTORY[last()]/DS_ID"]
        current_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, current_ds_id)
        current_ds_ref = current_ds['TEMPLATE/VCENTER_DS_REF']
@@ -1424,16 +1462,9 @@ class VirtualMachine < Template
            :extraConfig => extraconfig
        }
-        # prepare pg and sw for vcenter nics if any
-        configure_vcenter_network
-
        # device_change hash (nics)
        device_change += device_change_nics
-        # track pg or dpg in case they must be removed
-        vcenter_uuid = get_vcenter_instance_uuid
-        networks = VCenterDriver::Network.vcenter_networks_to_be_removed(device_change_nics, vcenter_uuid)
-
        # Now attach disks that are in OpenNebula's template but not in vcenter
        # e.g those that has been attached in poweroff
        device_change_ds, device_change_spod, device_change_spod_ids = device_attach_disks(disks_in_onevm_vector, vc_disks)
@@ -1448,9 +1479,6 @@ class VirtualMachine < Template
        spec_hash[:deviceChange] = device_change
        spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
        @item.ReconfigVM_Task(:spec => spec).wait_for_completion
-
-        #Remove switch and pg if NICs detached in poweroff
-        remove_poweroff_detached_vcenter_nets(networks) if !networks.empty?
    end
    def extraconfig_context
@@ -1546,7 +1574,6 @@ class VirtualMachine < Template
    # Returns an array of actions to be included in :deviceChange
    def calculate_add_nic_spec(nic)
-        #TODO include VCENTER_NET_MODEL usage it should be in one_item
        mac     = nic["MAC"]
        pg_name = nic["BRIDGE"]
        model   = nic["VCENTER_NET_MODEL"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/MODEL")
@@ -1554,15 +1581,15 @@ class VirtualMachine < Template
        backing = nil
        limit_in  = nic["INBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_PEAK_BW")
-        limit_out = nic["OUTBOUND_PEAK_BW"]
+        limit_out = nic["OUTBOUND_PEAK_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_PEAK_BW")
        limit     = nil
        if limit_in && limit_out
            limit=([limit_in.to_i, limit_out.to_i].min / 1024) * 8
        end
-        rsrv_in  = nic["INBOUND_AVG_BW"]
-        rsrv_out = nic["OUTBOUND_AVG_BW"]
+        rsrv_in  = nic["INBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/INBOUND_AVG_BW")
+        rsrv_out = nic["OUTBOUND_AVG_BW"] || VCenterDriver::VIHelper.get_default("VM/TEMPLATE/NIC/OUTBOUND_AVG_BW")
        rsrv     = nil
        if rsrv_in || rsrv_out
@@ -1644,207 +1671,6 @@ class VirtualMachine < Template
        }
    end
-    def vcenter_standard_network(nic, esx_host, vcenter_uuid)
-        pg_name     = nic["BRIDGE"]
-        switch_name = nic["VCENTER_SWITCH_NAME"]
-        pnics       = nic["PHYDEV"] || nil
-        mtu         = nic["MTU"] || 1500
-        vlan_id     = nic["VLAN_ID"] || nic["AUTOMATIC_VLAN_ID"] || 0
-        num_ports   = nic["VCENTER_SWITCH_NPORTS"] || 128
-
-        begin
-            esx_host.lock # Exclusive lock for ESX host operation
-
-            pnics_available = nil
-            pnics_available = esx_host.get_available_pnics if pnics
-
-            # Get port group if it exists
-            pg = esx_host.pg_exists(pg_name)
-
-            # Disallow changes of switch name for existing pg
-            if pg && esx_host.pg_changes_sw?(pg, switch_name)
-                raise "The port group's switch name can not be modified"\
-                      " for OpenNebula's virtual network, please revert"\
-                      " it back in its definition and create a different"\
-                      " virtual network instead."
-            end
-
-            if !pg
-                # Get standard switch if it exists
-                vs = esx_host.vss_exists(switch_name)
-
-                if !vs
-                    switch_name = esx_host.create_vss(switch_name, pnics, num_ports, mtu, pnics_available)
-                else
-                    #Update switch
-                    esx_host.update_vss(vs, switch_name, pnics, num_ports, mtu)
-                end
-
-                vnet_ref = esx_host.create_pg(pg_name, switch_name, vlan_id)
-
-                # We must update XML so the VCENTER_NET_REF is set
-                one_vnet = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualNetwork, nic["NETWORK_ID"])
-                one_vnet.delete_element("TEMPLATE/VCENTER_NET_REF") if one_vnet["TEMPLATE/VCENTER_NET_REF"]
-                one_vnet.delete_element("TEMPLATE/VCENTER_INSTANCE_ID") if one_vnet["TEMPLATE/VCENTER_INSTANCE_ID"]
-                rc = one_vnet.update("VCENTER_NET_REF = \"#{vnet_ref}\"\n"\
-                                     "VCENTER_INSTANCE_ID = \"#{vcenter_uuid}\"", true)
-                if OpenNebula.is_error?(rc)
-                    raise "Could not update VCENTER_NET_REF for virtual network"
-                end
-                one_vnet.info
-
-            else
-                # pg exist, update
-                esx_host.update_pg(pg, switch_name, vlan_id)
-
-                # update switch if needed
-                vs = esx_host.vss_exists(switch_name)
-                esx_host.update_vss(vs, switch_name, pnics, num_ports, mtu) if vs
-            end
-
-        rescue Exception => e
-            esx_host.network_rollback
-            raise e
-        ensure
-            esx_host.unlock if esx_host # Remove lock
-        end
-    end
-
-    def vcenter_distributed_network(nic, esx_host, vcenter_uuid, dc, net_folder)
-        pg_name     = nic["BRIDGE"]
-        switch_name = nic["VCENTER_SWITCH_NAME"]
-        pnics       = nic["PHYDEV"] || nil
-        mtu         = nic["MTU"] || 1500
-        vlan_id     = nic["VLAN_ID"] || nic["AUTOMATIC_VLAN_ID"] || 0
-        num_ports   = nic["VCENTER_SWITCH_NPORTS"] || 8
-
-        begin
-            # Get distributed port group if it exists
-            dpg = dc.dpg_exists(pg_name, net_folder)
-
-            # Disallow changes of switch name for existing pg
-            if dpg && dc.pg_changes_sw?(dpg, switch_name)
-                raise "The port group's switch name can not be modified"\
-                      " for OpenNebula's virtual network, please revert"\
-                      " it back in its definition and create a different"\
-                      " virtual network instead."
-            end
-
-            if !dpg
-                # Get distributed virtual switch if it exists
-                dvs = dc.dvs_exists(switch_name, net_folder)
-
-                if !dvs
-                    dvs = dc.create_dvs(switch_name, pnics, mtu)
-                else
-                    #Update switch
-                    dc.update_dvs(dvs, pnics, mtu)
-                end
-
-                vnet_ref = dc.create_dpg(dvs, pg_name, vlan_id, num_ports)
-
-                # We must connect portgroup to current host
-                begin
-                    esx_host.lock
-
-                    pnics_available = nil
-                    pnics_available = esx_host.get_available_pnics if pnics
-
-                    proxy_switch = esx_host.proxy_switch_exists(switch_name)
-
-                    esx_host.assign_proxy_switch(dvs, switch_name, pnics, pnics_available)
-
-                rescue Exception => e
-                    raise e
-                ensure
-                    esx_host.unlock if esx_host # Remove lock
-                end
-
-                # We must update XML so the VCENTER_NET_REF is set
-                one_vnet = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualNetwork, nic["NETWORK_ID"])
-                one_vnet.delete_element("TEMPLATE/VCENTER_NET_REF") if one_vnet["TEMPLATE/VCENTER_NET_REF"]
-                one_vnet.delete_element("TEMPLATE/VCENTER_INSTANCE_ID") if one_vnet["TEMPLATE/VCENTER_INSTANCE_ID"]
-                rc = one_vnet.update("VCENTER_NET_REF = \"#{vnet_ref}\"\n"\
-                                     "VCENTER_INSTANCE_ID = \"#{vcenter_uuid}\"", true)
-                if OpenNebula.is_error?(rc)
-                    raise "Could not update VCENTER_NET_REF for virtual network"
-                end
-                one_vnet.info
-            else
-                # pg exist, dpg update
-                dc.update_dpg(dpg, vlan_id, num_ports)
-
-                # update switch if needed
-                dvs = dc.dvs_exists(switch_name, net_folder)
-                dc.update_dvs(dvs, pnics, mtu) if dvs
-
-                # We must connect or update portgroup to current host (proxyswitch)
-                begin
-                    esx_host.lock
-
-                    pnics_available = nil
-                    pnics_available = esx_host.get_available_pnics if pnics
-
-                    proxy_switch = esx_host.proxy_switch_exists(switch_name)
-                    esx_host.assign_proxy_switch(dvs, switch_name, pnics, pnics_available)
-
-                rescue Exception => e
-                    raise e
-                ensure
-                    esx_host.unlock if esx_host # Remove lock
-                end
-            end
-
-        rescue Exception => e
-            dc.network_rollback
-            raise e
-        end
-
-    end
-
-    def configure_vcenter_network(nic_xml=nil)
-        nics = []
-        if nic_xml
-            nics << nic_xml
-        else
-            nics = one_item.retrieve_xmlelements("TEMPLATE/NIC[VN_MAD=\"vcenter\"]")
-        end
-
-        return if nics.empty?
-
-        vcenter_uuid = get_vcenter_instance_uuid
-        esx_host = VCenterDriver::ESXHost.new_from_ref(self['runtime'].host._ref, vi_client)
-
-        nics.each do |nic|
-
-            if nic["VCENTER_INSTANCE_ID"] && nic["VCENTER_INSTANCE_ID"] != vcenter_uuid
-                raise "The virtual network is not assigned to the right vcenter server, create a different virtual network instead"
-            end
-
-            if nic["VCENTER_PORTGROUP_TYPE"] == "Port Group"
-                vcenter_standard_network(nic, esx_host, vcenter_uuid)
-            end
-
-            if nic["VCENTER_PORTGROUP_TYPE"] == "Distributed Port Group"
-                dc = cluster.get_dc # Get datacenter
-                begin
-                    dc.lock
-
-                    # Explore network folder in search of dpg and dvs
-                    net_folder = dc.network_folder
-                    net_folder.fetch!
-
-                    vcenter_distributed_network(nic, esx_host, vcenter_uuid, dc, net_folder)
-                rescue Exception => e
-                    #TODO rollback
-                    raise e
-                ensure
-                    dc.unlock if dc
-                end
-            end
-        end
-    end
-
    # Add NIC to VM
    def attach_nic
        spec_hash = {}
@@ -1854,9 +1680,6 @@ class VirtualMachine < Template
        nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first
        begin
-            # Prepare network for vcenter networks
-            configure_vcenter_network(nic) if nic["VN_MAD"] == "vcenter"
-
            # A new NIC requires a vcenter spec
            attach_nic_array = []
            attach_nic_array << calculate_add_nic_spec(nic)
@@ -2301,63 +2124,6 @@ class VirtualMachine < Template
        end
    end
-    def remove_poweroff_detached_vcenter_nets(networks)
-        esx_host = VCenterDriver::ESXHost.new_from_ref(@item.runtime.host._ref, vi_client)
-        dc = cluster.get_dc # Get datacenter
-
-        networks.each do |pg_name, one|
-
-            if one["TEMPLATE/VCENTER_PORTGROUP_TYPE"] == "Port Group"
-                begin
-                    esx_host.lock # Exclusive lock for ESX host operation
-
-                    next if !esx_host.pg_exists(pg_name)
-                    swname = esx_host.remove_pg(pg_name)
-                    next if !swname
-
-                    # We must update XML so the VCENTER_NET_REF is unset
-                    VCenterDriver::Network.remove_net_ref(one["ID"])
-
-                    next if !esx_host.vss_exists(swname)
-                    swname = esx_host.remove_vss(swname)
-
-                rescue Exception => e
-                    raise e
-                ensure
-                    esx_host.unlock if esx_host # Remove lock
-                end
-            end
-
-            if one["TEMPLATE/VCENTER_PORTGROUP_TYPE"] == "Distributed Port Group"
-                begin
-                    dc.lock
-
-                    # Explore network folder in search of dpg and dvs
-                    net_folder = dc.network_folder
-                    net_folder.fetch!
-
-                    # Get distributed port group if it exists
-                    dpg = dc.dpg_exists(pg_name, net_folder)
-                    dc.remove_dpg(dpg) if dpg
-
-                    # We must update XML so the VCENTER_NET_REF is unset
-                    VCenterDriver::Network.remove_net_ref(one["ID"])
-
-                    # Get distributed virtual switch and try to remove it
-                    switch_name = one["TEMPLATE/VCENTER_SWITCH_NAME"]
-                    dvs = dc.dvs_exists(switch_name, net_folder)
-                    dc.remove_dvs(dvs) if dvs
-
-                rescue Exception => e
-                    dc.network_rollback
-                    raise e
-                ensure
-                    dc.unlock if dc
-                end
-            end
-        end
-    end
-
    def find_free_ide_controller(position=0)
        free_ide_controllers = []
@@ -2632,6 +2398,9 @@ class VirtualMachine < Template
    end
    def poweron
+        ## If needed in the future, you can power on VMs from the datacenter
+        ## dc = get_dc
+        ## dc.power_on_vm(@item)
        @item.PowerOnVM_Task.wait_for_completion
    end
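
For context, a minimal usage sketch (not part of this patch) of how a non-Sunstone caller could consume the reworked three-value returns of import_vcenter_disks and import_vcenter_nics. The `template` variable is assumed to be a VCenterDriver::Template instance, and the pools, refs and names below are illustrative placeholders taken from the signatures above:

# Hypothetical caller sketch: with sunstone=false both methods return
# (error, template_section, allocated_objects).
error, disk_info, allocated_images = template.import_vcenter_disks(vc_uuid, dpool, ipool)
raise error if !error.empty?

# wild=false because a vCenter template, not a wild VM, is being imported here.
error, nic_info, allocated_networks = template.import_vcenter_nics(vc_uuid, npool, hpool,
                                                                   vcenter_instance_name,
                                                                   template_ref, false)
if !error.empty?
    # import_vcenter_nics already deletes the networks it allocated when it fails,
    # so only the images created by the previous step need to be rolled back here.
    allocated_images.each {|image| image.delete }
    raise error
end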