
F #4913: Fix vCenter Instantiate as Persistent

This commit is contained in:
mcabrerizo 2017-07-06 21:34:35 +02:00
parent 706cd8d951
commit 95fc926722
4 changed files with 120 additions and 43 deletions

View File

@@ -91,10 +91,42 @@ begin
new_size = new_size > original_size ? new_size * 1024 : nil
end
unmanaged_disk = false
# Unmanaged disks are those with OPENNEBULA_MANAGED=NO or volatile disks
# that are created in StorageDRS clusters
unmanaged_disk = (!!disk['OPENNEBULA_MANAGED'] && disk['OPENNEBULA_MANAGED'].downcase == "no") ||
(target_ds_ref.start_with?('group-') && !!disk["TYPE"] && disk["TYPE"].downcase == "fs")
if (target_ds_ref.start_with?('group-') && !!disk["TYPE"] && disk["TYPE"].downcase == "fs")
unmanaged_disk = true
else
if (disk['OPENNEBULA_MANAGED'] && disk['OPENNEBULA_MANAGED'].downcase == "no")
# Let's check if the disk is really unmanaged
if one_vm["TEMPLATE/CLONING_TEMPLATE_ID"]
# In this case we're not cloning the disk. Although the disk
# is unmanaged, it is treated as persistent until the VM is
# terminated. That way the disk added to the new vCenter
# template is the one that OpenNebula created when the
# OpenNebula VM template was created.
unmanaged_disk = true
else
# What's the moref of the template linked to this VM?
template_ref = one_vm["USER_TEMPLATE/VCENTER_TEMPLATE_REF"]
# What's the ID of the image and the source
image_id = disk["IMAGE_ID"]
one_image = VCenterDriver::VIHelper.one_item(OpenNebula::Image, image_id)
one_source = one_image["SOURCE"]
# Let's inspect the disks inside the template:
# if the image source of the disk matches one of the template
# disks' backing paths, then it's really an unmanaged disk
vc_template = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
template_disks = vc_template.get_vcenter_disks
found_disks = template_disks.select { |d| d[:path_wo_ds] == one_source } rescue []
unmanaged_disk = !found_disks.empty?
end
end
end
if !unmanaged_disk
source_ds_vc = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)
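
For readability, the unmanaged-disk check added above can be condensed into a standalone sketch. This is illustrative only: the helper name unmanaged_disk? is not part of the commit, and one_vm, disk, target_ds_ref and vi_client are assumed to behave exactly as in the hunk.

# Sketch only: decide whether a disk should be treated as unmanaged.
def unmanaged_disk?(one_vm, disk, target_ds_ref, vi_client)
    # Volatile (TYPE=fs) disks on a StorageDRS cluster (moref "group-...")
    return true if target_ds_ref.start_with?('group-') &&
                   disk['TYPE'] && disk['TYPE'].downcase == 'fs'

    # Only OPENNEBULA_MANAGED=NO disks can be unmanaged
    return false unless disk['OPENNEBULA_MANAGED'] &&
                        disk['OPENNEBULA_MANAGED'].downcase == 'no'

    # VM created through "instantiate as persistent": the disk is not
    # cloned and is kept as-is until the VM is terminated
    return true if one_vm['TEMPLATE/CLONING_TEMPLATE_ID']

    # Otherwise the disk is unmanaged only if its image source matches one
    # of the backing paths of the vCenter template linked to this VM
    template_ref = one_vm['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
    one_image    = VCenterDriver::VIHelper.one_item(OpenNebula::Image, disk['IMAGE_ID'])
    vc_template  = VCenterDriver::Template.new_from_ref(template_ref, vi_client)
    vc_template.get_vcenter_disks.any? { |d| d[:path_wo_ds] == one_image['SOURCE'] }
rescue StandardError
    false
end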

View File

@@ -94,36 +94,39 @@ if path.match(/disk\.\d+$/)
begin
if !vm.has_snapshots?
# TODO: if the deploy has failed, the disks may exist, but the vm may
# not exist...
vm.one_item = one_vm
# detach the disk or cdrom
ds_ref, img_path = vm.detach_disk(disk)
# Don't do a detach if unmanaged disks and state is terminate (EPILOG)
unless one_vm["LCM_STATE"].to_i == 11 && disk["OPENNEBULA_MANAGED"] && disk["OPENNEBULA_MANAGED"].upcase == "NO"
# If disk was already detached we have no way to remove it
if ds_ref && img_path && !img_path.empty?
ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client)
# Detach disk
ds_ref, img_path = vm.detach_disk(disk)
# delete the disk if it's not a CDROM (CLONE=NO)
if disk["CLONE"].nil? || disk["CLONE"] == "YES"
# If disk was already detached we have no way to remove it
if ds_ref && img_path && !img_path.empty?
ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client)
search_params = ds.get_search_params(ds['name'],
File.dirname(img_path),
File.basename(img_path))
# delete the disk if it's not a CDROM (CLONE=NO)
if disk["CLONE"].nil? || disk["CLONE"] == "YES"
# Perform search task and return results
begin
search_task = ds['browser'].SearchDatastoreSubFolders_Task(search_params)
search_task.wait_for_completion
search_params = ds.get_search_params(ds['name'],
File.dirname(img_path),
File.basename(img_path))
ds.delete_virtual_disk(img_path)
img_dir = File.dirname(img_path)
ds.rm_directory(img_dir) if ds.dir_empty?(img_dir)
# Perform search task and return results
begin
search_task = ds['browser'].SearchDatastoreSubFolders_Task(search_params)
search_task.wait_for_completion
rescue Exception => e
if !e.message.start_with?('FileNotFound')
raise e.message # Ignore FileNotFound
ds.delete_virtual_disk(img_path)
img_dir = File.dirname(img_path)
ds.rm_directory(img_dir) if ds.dir_empty?(img_dir)
rescue Exception => e
if !e.message.start_with?('FileNotFound')
raise e.message # Ignore FileNotFound
end
end
end
end
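
Stripped of the datastore search that guards against FileNotFound, the detach path above boils down to roughly the following. This is a simplified sketch, not the committed code: LCM_STATE 11 corresponds to EPILOG, and vm, ds and disk are assumed to expose the same calls used in the hunk.

# Sketch only: skip the detach for unmanaged disks while the VM is in
# EPILOG (LCM_STATE == 11); otherwise detach and, unless it is a CDROM
# (CLONE=NO), delete the backing file and its directory if left empty.
epilog    = one_vm['LCM_STATE'].to_i == 11
unmanaged = disk['OPENNEBULA_MANAGED'] && disk['OPENNEBULA_MANAGED'].upcase == 'NO'

unless epilog && unmanaged
    ds_ref, img_path = vm.detach_disk(disk)

    if (disk['CLONE'].nil? || disk['CLONE'] == 'YES') &&
       ds_ref && img_path && !img_path.empty?
        ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client)
        ds.delete_virtual_disk(img_path)
        img_dir = File.dirname(img_path)
        ds.rm_directory(img_dir) if ds.dir_empty?(img_dir)
    end
end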
@@ -142,10 +145,11 @@ else
# Remove the VM
begin
# All OpenNebula managed disks have been detached. The VM may have still
# disks that belong to the template (OPENNEBULA_MANAGED disks). These disks
# will be deleted with the destroy operation. If the user wants to
# save them to a VM, it can be done using the disk-saveas operation.
# All OpenNebula managed disks have been detached, unless the VM has
# snapshots. The VM may still have unmanaged disks that belong to the
# template (OPENNEBULA_MANAGED disks). These disks will be deleted with
# the destroy operation. If the user wants to save them, it can be done
# using the disk-saveas operation.
vm.poweroff_hard if vm.is_powered_on?
@@ -153,30 +157,65 @@ else
# will try to detach persistent disks once we have removed all snapshots;
# that way they won't be removed. If the VM has been marked as a template,
# persistent disks shouldn't be detached
if vm.has_snapshots? && !vm.instantiated_as_persistent?
vm.remove_all_snapshots
disks = one_vm.retrieve_xmlelements("TEMPLATE/DISK[PERSISTENT=\"YES\"]")
vm.remove_all_snapshots if vm.has_snapshots?
disks = one_vm.retrieve_xmlelements("TEMPLATE/DISK")
if vm.has_snapshots? || vm.instantiated_as_persistent?
disks.each do |d|
vm.detach_disk(d)
if d["PERSISTENT"] == "YES"
vm.detach_disk(d)
else
# Remove any disk that is not unmanaged from the VM if
# instantiate as persistent was set
if vm.instantiated_as_persistent? &&
(!d["OPENNEBULA_MANAGED"] || d["OPENNEBULA_MANAGED"].upcase != "NO")
vm.detach_disk(d)
end
end
end
end
# If the VM was instantiated to persistent keep the VM
if vm.instantiated_as_persistent?
#Convert VM to template in vCenter
# Remove the MAC addresses so they cannot be in conflict
# with OpenNebula assigned mac addresses.
# We detach all nics from the VM
vm.detach_all_nics
# We attach new NICs where the MAC address is assigned by vCenter
nic_specs = []
nics = one_vm.retrieve_xmlelements("TEMPLATE/NIC")
nics.each do |nic|
if (nic["OPENNEBULA_MANAGED"] && nic["OPENNEBULA_MANAGED"].upcase == "NO")
nic_specs << vm.calculate_add_nic_spec_autogenerate_mac(nic)
end
end
# Reconfigure VM to add unmanaged nics
spec_hash = {}
spec_hash[:deviceChange] = nic_specs
spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
vm.item.ReconfigVM_Task(:spec => spec).wait_for_completion
# Convert VM to template in vCenter
vm.mark_as_template
# Create new OpenNebula template and set VCENTER_TEMPLATE_REF
# Edit the OpenNebula template
one_client = OpenNebula::Client.new
template_id = vm.one_item['TEMPLATE/TEMPLATE_ID']
new_template = OpenNebula::Template.new_with_id(template_id, one_client)
new_template.info
# Update the template reference
new_template.update("VCENTER_TEMPLATE_REF= #{vm.item._ref}", true)
else
# Destroy the VM and remaining disks
vm.destroy
end
# Destroy the VM unless the instantiate as persistent is used
vm.destroy if !vm.instantiated_as_persistent?
rescue Exception => e
message = "Error unregistering vm #{vmid} (#{vm_ref})."\
" Reason: #{e.message}\n#{e.backtrace}"
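
Taken together, the new branch for VMs instantiated as persistent performs the following sequence instead of destroying the VM. This is a condensed sketch of the hunk above, assuming the vm helper methods (detach_all_nics, calculate_add_nic_spec_autogenerate_mac, mark_as_template) keep the semantics shown there.

# Sketch only: condensed view of the instantiate-as-persistent branch.
if vm.instantiated_as_persistent?
    # Drop OpenNebula-assigned MACs and re-add unmanaged NICs so that
    # vCenter autogenerates new MAC addresses for the future template
    vm.detach_all_nics

    nics = one_vm.retrieve_xmlelements('TEMPLATE/NIC').select do |nic|
        nic['OPENNEBULA_MANAGED'] && nic['OPENNEBULA_MANAGED'].upcase == 'NO'
    end
    nic_specs = nics.map { |nic| vm.calculate_add_nic_spec_autogenerate_mac(nic) }

    spec = RbVmomi::VIM.VirtualMachineConfigSpec(:deviceChange => nic_specs)
    vm.item.ReconfigVM_Task(:spec => spec).wait_for_completion

    # Convert the VM into a vCenter template and point the OpenNebula
    # template at the new moref
    vm.mark_as_template

    one_client   = OpenNebula::Client.new
    template_id  = vm.one_item['TEMPLATE/TEMPLATE_ID']
    new_template = OpenNebula::Template.new_with_id(template_id, one_client)
    new_template.info
    new_template.update("VCENTER_TEMPLATE_REF=#{vm.item._ref}", true)
else
    vm.destroy
end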

View File

@@ -5,14 +5,20 @@ module VCenterDriver
class FileHelper
def self.get_img_name(disk, vm_id, vm_name)
def self.get_img_name(disk, vm_id, vm_name, instantiate_as_persistent=false)
if disk["PERSISTENT"] == "YES" || disk["TYPE"] == "CDROM"
return disk["SOURCE"]
else
disk_id = disk["DISK_ID"]
if disk["SOURCE"]
image_name = disk["SOURCE"].split(".").first
return "#{image_name}-#{vm_id}-#{disk_id}.vmdk"
if instantiate_as_persistent &&
disk["OPENNEBULA_MANAGED"] &&
disk["OPENNEBULA_MANAGED"].upcase == "NO"
return disk["SOURCE"] # Treat this disk as if was persistent
else
image_name = disk["SOURCE"].split(".").first
return "#{image_name}-#{vm_id}-#{disk_id}.vmdk"
end
else
ds_volatile_dir = disk["VCENTER_DS_VOLATILE_DIR"] || "one-volatile"
return "#{ds_volatile_dir}/#{vm_id}/one-#{vm_id}-#{disk_id}.vmdk"

View File

@@ -1878,7 +1878,7 @@ class VirtualMachine < Template
next
end
img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'])
img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
onevm_disks_vector << "#{img_name}"
end
@@ -2120,7 +2120,7 @@ class VirtualMachine < Template
return ds_ref, img_path
end
# Get vcenter device representing DISK object (hotplug)
# Get vcenter device representing DISK object (hotplug)
def disk_attached_to_vm(disk, unmanaged_keys, vc_disks)
img_name = ""
@@ -2136,7 +2136,7 @@ class VirtualMachine < Template
end
# Alright let's see if we can find other devices only with the expected image name
img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'])
img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
if d[:path_wo_ds] == "#{img_name}"
device_found = d
break
@@ -2147,7 +2147,7 @@ class VirtualMachine < Template
end
def calculate_add_disk_spec(disk, position=0)
img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'])
img_name = VCenterDriver::FileHelper.get_img_name(disk, one_item['ID'], self['name'], instantiated_as_persistent?)
type = disk["TYPE"]
size_kb = disk["SIZE"].to_i * 1024
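
All three call sites in this file now forward instantiated_as_persistent?. The predicate itself is not part of this diff; judging by the CLONING_TEMPLATE_ID check in the first hunk it presumably resembles the sketch below, but this is an assumption, not the actual implementation.

# Assumption only: the real predicate lives in VirtualMachine and may differ.
def instantiated_as_persistent?
    !!one_item['TEMPLATE/CLONING_TEMPLATE_ID']
rescue StandardError
    false
end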