
F #2387: vCenter tm refactor

==================================
F #2387: vCenter tm clone refactor
F #2387: vCenter tm delete refactor
sergio semedi 2018-09-05 15:19:52 +02:00 committed by Tino Vázquez
parent 331306b05b
commit 5e98b292d2
4 changed files with 93 additions and 220 deletions

View File

@@ -71,31 +71,26 @@ begin

Resulting code:

    search_params = ds.get_search_params(ds_name, img_path, img_name)

    # Perform search task and return results
    begin
        search_task = ds['browser'].SearchDatastoreSubFolders_Task(search_params)
        search_task.wait_for_completion

        if img_type != CDROM
            # delete the disk
            ds.delete_virtual_disk(img_src)
        else
            # delete the CDROM iso
            ds.delete_file(img_src)
        end

        ds.rm_directory(img_dir) if ds.dir_empty?(img_dir)
    end
rescue Exception => e
    if !e.message.start_with?('FileNotFound')
        message = "Error deleting virtual disk #{img_src}."\
                  " Reason: \"#{e.message}\"\n#{e.backtrace}"
        STDERR.puts error_message(message)
        exit -1
    end
ensure
    vi_client.close_connection if vi_client
end
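The refactor collapses the nested rescue into a single handler that treats FileNotFound as success and everything else as fatal. A minimal sketch of that pattern using plain Ruby file APIs (helper name and path are hypothetical, not the driver's API):

    # Sketch: deleting something that may already be gone. A missing file
    # means the work is already done; any other error propagates.
    def delete_if_present(path)
        File.delete(path)
    rescue Errno::ENOENT
        # already gone: treat as success
    end

    delete_if_present('/tmp/one-disk-0.vmdk') # hypothetical path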

View File

@@ -97,65 +97,26 @@ begin

Resulting code:

        new_size = new_size > original_size ? new_size * 1024 : nil
    end

    # TODO: we should think about what to do with swap_disks
    managed_disk = !(disk['OPENNEBULA_MANAGED'] && disk['OPENNEBULA_MANAGED'].downcase == "no")
    is_volatile = disk["TYPE"] && disk["TYPE"].downcase == "fs"
    is_storage_drs = target_ds_ref.start_with?('group-')

    # Unmanaged disks (OPENNEBULA_MANAGED=NO) and volatile disks created in
    # StorageDRS clusters are not cloned
    return 0 if !managed_disk || (is_storage_drs && is_volatile)

    raise "Non persistent images not supported for StorageDRS datastores" if is_storage_drs

    source_ds_vc = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)

    if source_ds_ref == target_ds_ref
        target_ds_vc = source_ds_vc
    else
        target_ds_vc = VCenterDriver::Storage.new_from_ref(target_ds_ref, vi_client)
    end

    target_ds_name_vc = target_ds_vc['name']

    source_ds_vc.copy_virtual_disk(src_path, target_ds_vc, target_path, new_size)
rescue Exception => e
    message = "Error cloning virtual disk #{src_path} in "\
              "datastore #{target_ds_name_vc}. "\
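The clone script now names its three predicates and exits early instead of nesting the unmanaged-disk detection. A standalone sketch of that guard-clause shape (disk is a plain Hash here, and copy! is a hypothetical stand-in for source_ds_vc.copy_virtual_disk):

    # Sketch of the early-exit flow under the same predicates
    def clone_disk(disk, target_ds_ref)
        managed  = disk['OPENNEBULA_MANAGED'].to_s.downcase != 'no'
        volatile = disk['TYPE'].to_s.downcase == 'fs'
        drs      = target_ds_ref.start_with?('group-')

        return 0 if !managed || (drs && volatile) # nothing to copy
        raise 'Non persistent images not supported for StorageDRS' if drs

        copy!(disk) # hypothetical: perform the actual copy
    end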

View File

@@ -39,51 +39,12 @@ require 'vcenter_driver'

Resulting code:

VM_PREFIX_DEFAULT = "one-$i-"

# Don't detach unmanaged disks (OPENNEBULA_MANAGED=NO) when the VM state
# is terminate (EPILOG), and never detach while the VM has snapshots
def can_detach(disk, vm)
    one_vm = vm.one_item

    detachable = !(one_vm["LCM_STATE"].to_i == 11 && disk["OPENNEBULA_MANAGED"] && disk["OPENNEBULA_MANAGED"].upcase == "NO")

    return detachable && !vm.has_snapshots?
end
path = ARGV[0]
@@ -108,6 +69,7 @@ vm = nil

# tm:delete INIT block:
begin
    @error_message = "error obtaining client and vm"

    vi_client = VCenterDriver::VIClient.new_from_host(host_id)

    if vm_ref && !vm_ref.empty?
@@ -124,121 +86,36 @@ begin

Resulting code:

    end
    vm.one_item = one_vm

    is_disk = path.match(/disk\.\d+$/)

    # Detach and remove the disk (if it is not a CDROM)
    if is_disk
        # Get DS ref
        dsid = img_path.split("/")[-3] # get dsid from path
        @error_message = "error deleting disk with #{img_path} in datastore: #{dsid}"
        one_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, dsid)
        ds_ref = one_ds['TEMPLATE/VCENTER_DS_REF']

        # Get disk info, destroy it if possible
        disk_id = img_path.split(".")[-1]
        disk = one_vm.retrieve_xmlelements("TEMPLATE/DISK[DISK_ID=#{disk_id}]").first

        if can_detach(disk, vm)
            VCenterDriver::Datastore.detach_and_destroy(disk, vm, disk_id, ds_ref, vi_client)
        end
    # It is not a disk, remove the VM
    else
        @error_message = "Error unregistering vm #{vmid} (#{vm_ref})."

        vm.poweroff_hard if vm.is_powered_on?

        # If the VM has snapshots the TM could not detach its disks, so
        # remove all snapshots first and then detach the persistent disks
        # below so they are not destroyed together with the VM
        vm.remove_all_snapshots if vm.has_snapshots?

        disks = one_vm.retrieve_xmlelements("TEMPLATE/DISK")
        disks.each do |d|
            managed_disk = !d["OPENNEBULA_MANAGED"] || d["OPENNEBULA_MANAGED"].upcase != "NO"
            vm.detach_disk(d) if d["PERSISTENT"] == "YES"
        end

        # All OpenNebula managed disks have been detached. The VM may still
        # have unmanaged disks that belong to the template; these are deleted
        # with the destroy operation. To keep them, use disk-saveas first.
        vm.destroy
    end
rescue Exception => e
    vi_client.close_connection if vi_client

    STDERR.puts "#{@error_message}. Reason: #{e.message}\n#{e.backtrace}"
    exit -1
end
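The whole script now hangs off a single begin/rescue, with @error_message updated as each phase starts so the final handler can report where the failure occurred. The pattern in isolation (the phase helpers are hypothetical):

    begin
        @error_message = 'error obtaining client and vm'
        client = connect_to_vcenter # hypothetical phase 1
        @error_message = 'error deleting disk'
        delete_disk(client)         # hypothetical phase 2
    rescue StandardError => e
        STDERR.puts "#{@error_message}. Reason: #{e.message}"
        exit(-1)
    end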

View File

@@ -668,6 +668,46 @@ class Datastore < Storage

    def self.new_from_ref(ref, vi_client)
        self.new(RbVmomi::VIM::Datastore.new(vi_client.vim, ref), vi_client)
    end

    # Detach the disk from the vCenter VM if possible, then destroy it on the FS
    def self.detach_and_destroy(disk, vm, disk_id, prev_ds_ref, vi_client)
        # CDROMs are not cloned (CLONE=NO) and must not be destroyed here
        is_cd = !(disk["CLONE"].nil? || disk["CLONE"] == "YES")

        begin
            # Detach the disk if possible (VM is reconfigured) and gather
            # vCenter info. Needed for powered-off machines too
            ds_ref, img_path = vm.detach_disk(disk)

            return if is_cd

            # Disk couldn't be detached, use OpenNebula info
            if !(ds_ref && img_path && !img_path.empty?)
                img_path = vm.disk_real_path(disk, disk_id)
                ds_ref = prev_ds_ref
            end

            # If the disk was already detached we have no way to remove it
            ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client)

            search_params = ds.get_search_params(ds['name'],
                                                 File.dirname(img_path),
                                                 File.basename(img_path))

            # Perform the search task and wait for completion
            search_task = ds['browser'].SearchDatastoreSubFolders_Task(search_params)
            search_task.wait_for_completion

            ds.delete_virtual_disk(img_path)
            img_dir = File.dirname(img_path)
            ds.rm_directory(img_dir) if ds.dir_empty?(img_dir)
        rescue Exception => e
            if !e.message.start_with?('FileNotFound')
                raise e.message # Ignore FileNotFound
            end
        end
    end
end # class Datastore
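Hoisting the logic into a class method lets the datastore and TM drivers share one implementation; the tm:delete hunk above reduces to a guarded call:

    # Call site shape, as used by tm:delete (all values come from the VM's
    # OpenNebula template and datastore)
    if can_detach(disk, vm)
        VCenterDriver::Datastore.detach_and_destroy(disk, vm, disk_id,
                                                    ds_ref, vi_client)
    end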
class DsImporter < VCenterDriver::VcImporter