diff --git a/src/tm_mad/vcenter/mv b/src/tm_mad/vcenter/mv
index dac4b13926..ab7e416d2b 100755
--- a/src/tm_mad/vcenter/mv
+++ b/src/tm_mad/vcenter/mv
@@ -45,6 +45,7 @@ begin

     one_client = OpenNebula::Client.new
     vm = OpenNebula::VirtualMachine.new_with_id(vmid, one_client)
     vm.info
+    src_ds = vm.retrieve_elements("HISTORY_RECORDS/HISTORY/DS_ID")[-2]

     if src_ds == dsid
@@ -54,8 +55,8 @@ begin

     end
rescue StandardError => e
-    message = "Cannot migrate for VM #{vmid}"\
-              'failed due to '\
+    message = "Cannot migrate for VM #{vmid}. "\
+              'Failed due to '\
               "\"#{e.message}\"\n"
    OpenNebula.log_error(message)
    STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information]
diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
index 4839ea5b0c..d5b6748640 100644
--- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
+++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
@@ -670,10 +670,9 @@ class VirtualMachine < VCenterDriver::Template
     # Create and reconfigure VM related methods
     ############################################################################

-    # This function creates a new VM from the @one_item XML and returns the
+    # This function creates a new VM from the driver_action XML and returns the
     # VMware ref
-    # @param one_item OpenNebula::VirtualMachine
-    # @param vi_client VCenterDriver::VIClient
+    # @param drv_action XML representing the deploy action
     # @return String vmware ref
     def clone_vm(drv_action)
         vcenter_name = get_vcenter_name
@@ -970,13 +969,11 @@

     # Queries to OpenNebula the machine disks xml representation
     def get_one_disks
-        one_item.info
         one_item.retrieve_xmlelements("TEMPLATE/DISK")
     end

     # Queries to OpenNebula the machine nics xml representation
     def get_one_nics
-        one_item.info
         one_item.retrieve_xmlelements("TEMPLATE/NIC")
     end

@@ -2900,10 +2897,10 @@

     # Migrate a VM to another cluster and/or datastore
     # @params [int] vm_id ID of the VM to be migrated
-    # params [String] src_host Name of the source cluster
-    # params [String] dst_host Name of the target cluster
+    #  params [String] src_host Name of the source cluster
+    #  params [String] dst_host Name of the target cluster
     # params [Bool] hot_ds Wether this is a DS migration with the VM running or not
-    # params [int] Destination datastore ID
+    # params [int] ds Destination datastore ID
     def self.migrate_routine(vm_id, src_host, dst_host, hot_ds = false, ds = nil)
         one_client = OpenNebula::Client.new
         pool = OpenNebula::HostPool.new(one_client)
@@ -2933,6 +2930,13 @@

         # required vcenter objects
         vc_vm = VCenterDriver::VirtualMachine.new_without_id(vi_client,
                                                              vm['/VM/DEPLOY_ID'])
+
+        vc_vm.vm_id = vm_id
+        error = !vc_vm.disks_each(:managed?).empty? && !ds.nil?
+        # We know this comes from a migration from poweroff state (not a poweroff migration)
+        # since all the other cases are treated in vmm drivers: save, migrate and shutdown
+        raise 'datastore migration from poweroff state with managed disks is not supported' if error
+
         ccr_ref = dst_host['/HOST/TEMPLATE/VCENTER_CCR_REF']
         vc_host = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client).item
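
The guard added to migrate_routine above only fires when both conditions hold: the VM owns at least one managed disk and a target datastore was actually requested (ds is non-nil). A standalone Ruby sketch of that logic follows; Disk, the disks_each helper and the IDs are illustrative stand-ins, not the driver's API:

    # Stand-in for VirtualMachine#disks_each(:managed?): collect the disks
    # for which the given predicate method returns true.
    Disk = Struct.new(:id, :managed) do
        def managed?
            managed
        end
    end

    def disks_each(disks, predicate)
        disks.select { |d| d.public_send(predicate) }
    end

    disks = [Disk.new(0, true), Disk.new(1, false)]
    ds    = 101 # requested target datastore ID; nil when no DS change is asked for

    # Refuse only the unsupported combination: managed disk(s) present
    # AND a datastore change requested while the VM is powered off.
    error = !disks_each(disks, :managed?).empty? && !ds.nil?
    puts(error ? 'refuse: managed disk + datastore change' : 'proceed')
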
diff --git a/src/vmm_mad/remotes/vcenter/cancel b/src/vmm_mad/remotes/vcenter/cancel
index 06e0134612..14d751a6e3 100755
--- a/src/vmm_mad/remotes/vcenter/cancel
+++ b/src/vmm_mad/remotes/vcenter/cancel
@@ -36,6 +36,10 @@ vm_id = ARGV[-2]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(STDIN.read), 'VMM_DRIVER_ACTION_DATA')

+lcm_state = drv_action['/VMM_DRIVER_ACTION_DATA/VM/LCM_STATE']
+check_valid(lcm_state, 'lcm_state')
+lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i]
+
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, host)
host_id = host['ID']

@@ -46,6 +50,17 @@ begin

     vm.one_item = drv_action.retrieve_xmlelements('VM').first

+    if (%{'SAVE_MIGRATE'}).include?(lcm_state_str)
+        vm.vm_id = vm_id
+        dst_ds = drv_action['VM/HISTORY_RECORDS/HISTORY/DS_ID']
+        src_ds = drv_action['DATASTORE/ID']
+
+        new_ds = dst_ds != src_ds
+
+        error = !vm.disks_each(:managed?).empty? && new_ds
+        raise 'cold datastore migration in poweroff hard with managed disk(s) is not supported' if error
+    end
+
     vm.poweroff_hard
rescue StandardError => e
    message = "Cancel VM #{vm_ref} failed due to "\
diff --git a/src/vmm_mad/remotes/vcenter/migrate b/src/vmm_mad/remotes/vcenter/migrate
index 420c08efb7..2c01e95421 100755
--- a/src/vmm_mad/remotes/vcenter/migrate
+++ b/src/vmm_mad/remotes/vcenter/migrate
@@ -29,7 +29,7 @@ $LOAD_PATH << File.dirname(__FILE__)

require 'vcenter_driver'

-d_id = ARGV[0]
+vm_ref = ARGV[0]
vm_id = ARGV[-2]
src_host = ARGV[-3]
dst_host = ARGV[-4]
@@ -42,11 +42,11 @@ src_ds = drv_action['DATASTORE/ID']
begin
    vi_client = VCenterDriver::VIClient.new_from_host(src_host)
-    vm = VCenterDriver::VirtualMachine.new(vi_client, d_id, vm_id)
+    vm = VCenterDriver::VirtualMachine.new(vi_client, vm_ref, vm_id)

    new_ds = dst_ds != src_ds

    error = !vm.disks_each(:managed?).empty? && new_ds
-    raise 'live migration with managed disk is not supported' if error
+    raise 'live datastore migration with managed disk is not supported' if error

    if new_ds
        VCenterDriver::VirtualMachine
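
The same new_ds test is now shared by cancel, save, shutdown and this migrate script: the destination datastore is taken from the VM's pending history record, the source from DATASTORE/ID. The sketch below mirrors the comparison with plain REXML so it runs standalone; the scripts themselves go through OpenNebula::XMLElement over the Base64-decoded driver action, and the XML here is a trimmed, hypothetical example:

    require 'rexml/document'

    action = REXML::Document.new(<<~XML)
      <VMM_DRIVER_ACTION_DATA>
        <VM>
          <HISTORY_RECORDS><HISTORY><DS_ID>101</DS_ID></HISTORY></HISTORY_RECORDS>
        </VM>
        <DATASTORE><ID>100</ID></DATASTORE>
      </VMM_DRIVER_ACTION_DATA>
    XML

    # Destination DS: the pending history record. Source DS: where the VM sits now.
    dst_ds = action.elements['VMM_DRIVER_ACTION_DATA/VM/HISTORY_RECORDS/HISTORY/DS_ID'].text
    src_ds = action.elements['VMM_DRIVER_ACTION_DATA/DATASTORE/ID'].text

    new_ds = dst_ds != src_ds # true here, so a managed disk would abort the action
    puts "datastore change requested: #{new_ds}"
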
diff --git a/src/vmm_mad/remotes/vcenter/save b/src/vmm_mad/remotes/vcenter/save
index c0ac3c6264..7bbb13712f 100755
--- a/src/vmm_mad/remotes/vcenter/save
+++ b/src/vmm_mad/remotes/vcenter/save
@@ -30,6 +30,7 @@ $LOAD_PATH << File.dirname(__FILE__)
require 'vcenter_driver'

vm_ref = ARGV[0]
+vm_id = ARGV[3]
vc_cluster_name = ARGV[-1]

host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
@@ -45,7 +46,7 @@ check_valid(lcm_state, 'lcm_state')
lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i]

if !(%{'SAVE_SUSPEND', 'SAVE_STOP', 'SAVE_MIGRATE'}).include?(lcm_state_str)
-    STDERR.puts "Wrong lcm state #{lcm_state_str} }when saving VM"
+    STDERR.puts "Wrong lcm state #{lcm_state_str} when saving VM"
    exit(-1)
end

@@ -54,6 +55,17 @@ begin

    vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm_ref)

+    if (%{'SAVE_MIGRATE'}).include?(lcm_state_str)
+        vm.vm_id = vm_id
+        dst_ds = drv_action['VM/HISTORY_RECORDS/HISTORY/DS_ID']
+        src_ds = drv_action['DATASTORE/ID']
+
+        new_ds = dst_ds != src_ds
+
+        error = !vm.disks_each(:managed?).empty? && new_ds
+        raise 'cold datastore migration with managed disk(s) is not supported' if error
+    end
+
    vm.suspend

rescue StandardError => e
diff --git a/src/vmm_mad/remotes/vcenter/shutdown b/src/vmm_mad/remotes/vcenter/shutdown
index 80f347f777..810795f9dc 100755
--- a/src/vmm_mad/remotes/vcenter/shutdown
+++ b/src/vmm_mad/remotes/vcenter/shutdown
@@ -45,7 +45,7 @@ lcm_state = drv_action['/VMM_DRIVER_ACTION_DATA/VM/LCM_STATE']
check_valid(lcm_state, 'lcm_state')
lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i]

-if !(%{'SHUTDOWN', 'SHUTDOWN_POWEROFF', 'SHUTDOWN_UNDEPLOY'}).include?(lcm_state_str)
+if !(%{'SAVE_MIGRATE', 'SHUTDOWN', 'SHUTDOWN_POWEROFF', 'SHUTDOWN_UNDEPLOY'}).include?(lcm_state_str)
    STDERR.puts "Wrong lcm state #{lcm_state_str} when shutting down VM"
    exit(-1)
end
@@ -62,6 +62,19 @@ begin

    vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm_ref)

+
+    if (%{'SAVE_MIGRATE'}).include?(lcm_state_str)
+        vm.vm_id = vm_id
+        dst_ds = drv_action['VM/HISTORY_RECORDS/HISTORY/DS_ID']
+        src_ds = drv_action['DATASTORE/ID']
+
+        new_ds = dst_ds != src_ds
+
+        error = !vm.disks_each(:managed?).empty? && new_ds
+        raise 'cold datastore migration in poweroff with managed disk(s) is not supported' if error
+    end
+
+
    vm.shutdown # Undeploy, Poweroff or Terminate
rescue StandardError => e
    message = "Shutdown of VM #{vm_ref} on vCenter cluster "\
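
One subtlety shared by these scripts: %{...} is an ordinary string literal, so the include? calls gating on LCM state do substring matching against the quoted names rather than set membership. A minimal sketch of the gate, assuming the opennebula gem is installed (in its LCM_STATE table index 7 should map to SAVE_MIGRATE):

    require 'opennebula'

    lcm_state     = '7' # as read from /VMM_DRIVER_ACTION_DATA/VM/LCM_STATE
    lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i] # 'SAVE_MIGRATE'

    # One string, not an array: include? matches 'SAVE_MIGRATE' as a substring.
    allowed = %{'SAVE_MIGRATE', 'SHUTDOWN', 'SHUTDOWN_POWEROFF', 'SHUTDOWN_UNDEPLOY'}

    unless allowed.include?(lcm_state_str)
        STDERR.puts "Wrong lcm state #{lcm_state_str} when shutting down VM"
        exit(-1)
    end
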