
B #2230: vCenter driver migrate feedback, esx bug fixed

======================================
B #2230: ruby functions from sh commons
B #2230: migrate routine added, now vCenter VMs migrate with relocate_spec
B #2230: cold migrate now works as expected, small ruby common fix
B #2230: vCenter cold/hot migrate fix
semedi 2018-07-12 09:45:03 +02:00 committed by Tino Vázquez
parent 715fcf1545
commit ff7104646d
5 changed files with 88 additions and 61 deletions
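In outline, both touched driver scripts now delegate to one shared class-level routine. A minimal sketch of the resulting call flow, using placeholder values (the host names and VM id below are purely illustrative):

    # Sketch only: the TM premigrate script and vmm/vcenter/migrate both end up here.
    require 'vcenter_driver'

    vm_id    = '42'           # OpenNebula VM id (placeholder)
    src_host = 'cluster-a'    # source OpenNebula host name (placeholder)
    dst_host = 'cluster-b'    # destination OpenNebula host name (placeholder)

    VCenterDriver::VirtualMachine.migrate_routine(vm_id, src_host, dst_host)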

View File

@@ -55,6 +55,23 @@ module OpenNebula
return error_str
end
def self.is_disk?(arg)
arg.match("disk\.[0-9]+$")
end
# Gets the host from an argument
def self.arg_host(arg)
result = arg.match("^\([^:]*\):.*$")
return result[1] if result
end
def self.arg_path(arg)
result = arg.match('^[^:]*:(.*)$')
return result[1] if result
end
# Executes a command, if it fails returns error message and exits
# If a second parameter is present it is used as the error message when
# the command fails
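For illustration, these helpers split the usual "host:path" arguments handed to TM drivers; the sample argument below is hypothetical:

    arg = 'esx-a:/var/lib/one/datastores/100/42/disk.0'   # made-up TM argument

    OpenNebula.arg_host(arg)                 #=> "esx-a"
    OpenNebula.arg_path(arg)                 #=> "/var/lib/one/datastores/100/42/disk.0"
    OpenNebula.is_disk?(OpenNebula.arg_path(arg))   #=> MatchData (truthy for disk.N paths)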

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2018, OpenNebula Project, OpenNebula Systems #
@@ -16,35 +16,39 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
SRC=$1
DST=$2
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
VMID=$3
DSID=$4
if [ -z "${ONE_LOCATION}" ]; then
TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh
MIGRATE_DRIVER=/var/lib/one/remotes/vmm/vcenter/migrate
if !ONE_LOCATION
RUBY_LIB_LOCATION="/usr/lib/one/ruby" if !defined?(RUBY_LIB_LOCATION)
else
TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh
MIGRATE_DRIVER=$ONE_LOCATION/var/remotes/vmm/vcenter/migrate
fi
RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" if !defined?(RUBY_LIB_LOCATION)
end
. $TMCOMMON
$: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
DST_PATH=`arg_path $DST`
require 'vcenter_driver'
HOST_ORIG=`arg_host $SRC`
HOST_DEST=`arg_host $DST`
src = ARGV[0]
dst = ARGV[1]
vmid = ARGV[2]
dsid = ARGV[3]
if [ `is_disk $DST_PATH` -eq 1 ]; then
exit 0
fi
begin
dst_path = OpenNebula.arg_path(dst)
if [ "$SRC" == "$DST" ]; then
exit 0
fi
host_orig = OpenNebula.arg_host(src)
host_dest = OpenNebula.arg_host(dst)
ruby $MIGRATE_DRIVER $HOST_DEST $HOST_ORIG $VMID $DSID
exit 0 if OpenNebula.is_disk?(dst_path)
exit 0 if src == dst
exit 0
VCenterDriver::VirtualMachine.migrate_routine(vmid, host_orig, host_dest)
rescue StandardError => e
message = "Cannot migrate for VM #{vmid}"\
'failed due to '\
"\"#{e.message}\"\n"
OpenNebula.log_error(message)
exit(-1)
end
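As a hedged illustration of what the rewritten script receives, assuming the usual TM argument layout of two host:path pairs followed by the VM and datastore ids (all values below are made up):

    # Hypothetical arguments as the TM subsystem would pass them:
    src, dst, vmid, dsid = ['esx-a:/var/lib/one/datastores/100/42',   # SRC (host:path)
                            'esx-b:/var/lib/one/datastores/100/42',   # DST (host:path)
                            '42',                                     # VMID
                            '100']                                    # DSID

    host_orig = OpenNebula.arg_host(src)   #=> "esx-a"
    host_dest = OpenNebula.arg_host(dst)   #=> "esx-b"
    # The script returns early for disk.N destinations or when src == dst;
    # otherwise it hands off to VCenterDriver::VirtualMachine.migrate_routine.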

View File

@@ -878,6 +878,10 @@ class Template
self['runtime.host.parent.resourcePool']
end
def get_esx_name
self['runtime.host.name']
end
def vm_to_one(vm_name)
str = "NAME = \"#{vm_name}\"\n"\
@@ -2927,7 +2931,14 @@ class VirtualMachine < Template
# retrieve host from DRS
resourcepool = config[:cluster].resourcePool
#relocate_spec_params = {}
#relocate_spec_params[:pool] = resourcepool
#relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params)
#@item.RelocateVM_Task(spec: relocate_spec, priority: "defaultPriority").wait_for_completion
@item.MigrateVM_Task(:pool=> resourcepool, :priority => "defaultPriority").wait_for_completion
return get_esx_name
rescue Exception => e
raise "Cannot migrate VM #{e.message}\n#{e.backtrace.join("\n")}"
end
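The commented-out lines keep the earlier relocate-spec path, while the new call drives the move through MigrateVM_Task. A minimal sketch of the two RbVmomi variants, assuming vm is an RbVmomi::VIM::VirtualMachine and resourcepool is the destination cluster's resource pool:

    # Relocate-spec variant (the approach retained here only as a comment):
    spec = RbVmomi::VIM.VirtualMachineRelocateSpec(:pool => resourcepool)
    vm.RelocateVM_Task(:spec => spec, :priority => 'defaultPriority').wait_for_completion

    # MigrateVM_Task variant (the approach the new code takes): the destination
    # resource pool is passed directly and vCenter/DRS picks the ESX host.
    vm.MigrateVM_Task(:pool => resourcepool, :priority => 'defaultPriority').wait_for_completion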
@@ -3337,7 +3348,7 @@
end
end
# STATIC MEMBERS AND CONSTRUCTORS
# STATIC MEMBERS, ROUTINES AND CONSTRUCTORS
###############################################################################################
def self.get_vm(opts = {})
@@ -3361,6 +3372,35 @@
return one_vm
end
def self.migrate_routine(vm_id, src_host, dst_host, ds = nil)
one_client = OpenNebula::Client.new
pool = OpenNebula::HostPool.new(one_client)
pool.info
src_id = pool["/HOST_POOL/HOST[NAME='#{src_host}']/ID"].to_i
dst_id = pool["/HOST_POOL/HOST[NAME='#{dst_host}']/ID"].to_i
vi_client = VCenterDriver::VIClient.new_from_host(src_id)
# required one objects
vm = OpenNebula::VirtualMachine.new_with_id(vm_id, one_client)
dst_host = OpenNebula::Host.new_with_id(dst_id, one_client)
# get info
vm.info
dst_host.info
# required vcenter objects
vc_vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm['/VM/DEPLOY_ID'])
ccr_ref = dst_host['/HOST/TEMPLATE/VCENTER_CCR_REF']
vc_host = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client).item
config = { :cluster => vc_host }
esx = vc_vm.migrate(config)
vm.replace({ 'VCENTER_CCR_REF' => ccr_ref, 'VCENTER_ESX_HOST' => esx })
end
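A hedged usage sketch of the new class-level routine, matching how the driver scripts further down invoke it (the id and host names are placeholders):

    # vm_id: OpenNebula VM id; src/dst: OpenNebula host names from the host pool
    VCenterDriver::VirtualMachine.migrate_routine('42', 'cluster-a', 'cluster-b')

    # The routine resolves both hosts with an XPath lookup on the host pool
    # (e.g. pool["/HOST_POOL/HOST[NAME='cluster-a']/ID"]), builds a VIClient from
    # the source host and finally updates VCENTER_CCR_REF / VCENTER_ESX_HOST on the VM.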
# Try to build the vcenterdriver virtualmachine without
# any opennebula id or object, this constructor can find
# inside the opennebula pool until match

View File

@@ -34,40 +34,11 @@ src_host = ARGV[-3]
dst_host = ARGV[-4]
begin
one_client = OpenNebula::Client.new
pool = OpenNebula::HostPool.new(one_client)
pool.info
src_id = pool["/HOST_POOL/HOST[NAME='#{src_host}']/ID"].to_i
dst_id = pool["/HOST_POOL/HOST[NAME='#{dst_host}']/ID"].to_i
vi_client = VCenterDriver::VIClient.new_from_host(src_id)
# required one objects
vm = OpenNebula::VirtualMachine.new_with_id(vm_id, one_client)
dst_host = OpenNebula::Host.new_with_id(dst_id, one_client)
# get info
vm.info
dst_host.info
# required vcenter objects
vc_vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm['/VM/DEPLOY_ID'])
ccr_ref = dst_host['/HOST/TEMPLATE/VCENTER_CCR_REF']
esx_host = dst_host['/HOST/TEMPLATE/HOST/HOSTNAME']
vc_host = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client).item
config = { :cluster => vc_host }
vc_vm.migrate(config)
vm.replace({ 'VCENTER_CCR_REF' => ccr_ref, 'VCENTER_ESX_HOST' => esx_host })
VCenterDriver::VirtualMachine.migrate_routine(vm_id, src_host, dst_host)
rescue StandardError => e
message = "Cannot migrate for VM #{vm_id}"\
'failed due to '\
"\"#{e.message}\"\n"
OpenNebula.log_error(message)
exit(-1)
ensure
vi_client.close_connection if vi_client
end

View File

@@ -44,12 +44,7 @@ lcm_state = drv_action['/VMM_DRIVER_ACTION_DATA/VM/LCM_STATE']
check_valid(lcm_state, 'lcm_state')
lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i]
if lcm_state_str == 'SAVE_MIGRATE'
STDERR.puts 'Migration between vCenters cluster not supported'
exit(-1)
end
if !(%{'SAVE_SUSPEND', 'SAVE_STOP'}).include?(lcm_state_str)
if !(%{'SAVE_SUSPEND', 'SAVE_STOP', 'SAVE_MIGRATE'}).include?(lcm_state_str)
STDERR.puts "Wrong lcm state #{lcm_state_str} }when saving VM"
exit(-1)
end
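For reference, the guard now also admits SAVE_MIGRATE. A small equivalent written with a plain word array, shown only as a sketch (behaviour is the same for these three states):

    lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i]

    unless %w[SAVE_SUSPEND SAVE_STOP SAVE_MIGRATE].include?(lcm_state_str)
      STDERR.puts "Wrong lcm state #{lcm_state_str} when saving VM"
      exit(-1)
    end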