Mirror of https://github.com/OpenNebula/one.git

Merge branch 'feature-4913'

commit f6f1905862
Jaime Melis, 2017-04-26 14:51:42 +02:00
107 changed files with 9304 additions and 4861 deletions

View File

@ -236,7 +236,8 @@ LIB_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/mads \
$LIB_LOCATION/sh \
$LIB_LOCATION/ruby/cli \
$LIB_LOCATION/ruby/cli/one_helper"
$LIB_LOCATION/ruby/cli/one_helper \
$LIB_LOCATION/ruby/vcenter_driver"
VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/im \
@ -258,6 +259,7 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/vnm/ebtables \
$VAR_LOCATION/remotes/vnm/fw \
$VAR_LOCATION/remotes/vnm/ovswitch \
$VAR_LOCATION/remotes/vnm/vcenter \
$VAR_LOCATION/remotes/tm/ \
$VAR_LOCATION/remotes/tm/dummy \
$VAR_LOCATION/remotes/tm/shared \
@ -381,6 +383,7 @@ INSTALL_FILES=(
AUTH_DUMMY_FILES:$VAR_LOCATION/remotes/auth/dummy
AUTH_PLAIN_FILES:$VAR_LOCATION/remotes/auth/plain
VMM_EXEC_LIB_FILES:$VAR_LOCATION/remotes/vmm/lib
VMM_EXEC_LIB_VCENTER_FILES:$LIB_LOCATION/ruby/vcenter_driver
VMM_EXEC_KVM_SCRIPTS:$VAR_LOCATION/remotes/vmm/kvm
VMM_EXEC_VCENTER_SCRIPTS:$VAR_LOCATION/remotes/vmm/vcenter
VMM_EXEC_EC2_SCRIPTS:$VAR_LOCATION/remotes/vmm/ec2
@ -413,6 +416,7 @@ INSTALL_FILES=(
NETWORK_EBTABLES_FILES:$VAR_LOCATION/remotes/vnm/ebtables
NETWORK_FW_FILES:$VAR_LOCATION/remotes/vnm/fw
NETWORK_OVSWITCH_FILES:$VAR_LOCATION/remotes/vnm/ovswitch
NETWORK_VCENTER_FILES:$VAR_LOCATION/remotes/vnm/vcenter
EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples
WEBSOCKIFY_SHARE_FILES:$SHARE_LOCATION/websockify
INSTALL_GEMS_SHARE_FILES:$SHARE_LOCATION
@ -519,6 +523,7 @@ INSTALL_ONEFLOW_ETC_FILES=(
INSTALL_ETC_FILES=(
ETC_FILES:$ETC_LOCATION
EC2_ETC_FILES:$ETC_LOCATION
VCENTER_ETC_FILES:$ETC_LOCATION
AZ_ETC_FILES:$ETC_LOCATION
VMM_EXEC_ETC_FILES:$ETC_LOCATION/vmm_exec
HM_ETC_FILES:$ETC_LOCATION/hm
@ -643,6 +648,22 @@ MADS_LIB_FILES="src/mad/sh/madcommon.sh \
VMM_EXEC_LIB_FILES="src/vmm_mad/remotes/lib/poll_common.rb"
#-------------------------------------------------------------------------------
# VMM Lib vcenter files, used by the vCenter Driver to be installed in
# $REMOTES_LOCATION/vmm/lib/vcenter
#-------------------------------------------------------------------------------
VMM_EXEC_LIB_VCENTER_FILES="src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb \
src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb \
src/vmm_mad/remotes/lib/vcenter_driver/importer.rb \
src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb \
src/vmm_mad/remotes/lib/vcenter_driver/host.rb \
src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb \
src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb \
src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb \
src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb \
src/vmm_mad/remotes/lib/vcenter_driver/network.rb"
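These Ruby files form the new vcenter_driver library installed under $LIB_LOCATION/ruby/vcenter_driver; the datastore and IM scripts later in this diff load it with require 'vcenter_driver'. The umbrella entry point itself is not part of this hunk, so the following require list is only a sketch assembled from the installed file names:

    # Hypothetical vcenter_driver.rb entry point; the shipped file is not
    # shown in this hunk, only the component files listed above.
    $: << File.dirname(__FILE__)

    require 'vcenter_driver/vi_client'
    require 'vcenter_driver/vi_helper'
    require 'vcenter_driver/memoize'
    require 'vcenter_driver/datacenter'
    require 'vcenter_driver/datastore'
    require 'vcenter_driver/host'
    require 'vcenter_driver/network'
    require 'vcenter_driver/virtual_machine'
    require 'vcenter_driver/file_helper'
    require 'vcenter_driver/importer'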
#-------------------------------------------------------------------------------
# VMM SH Driver KVM scripts, to be installed under $REMOTES_LOCATION/vmm/kvm
#-------------------------------------------------------------------------------
@ -692,8 +713,8 @@ VMM_EXEC_VCENTER_SCRIPTS="src/vmm_mad/remotes/vcenter/cancel \
src/vmm_mad/remotes/vcenter/poll \
src/vmm_mad/remotes/vcenter/shutdown \
src/vmm_mad/remotes/vcenter/reconfigure \
src/vmm_mad/remotes/vcenter/prereconfigure \
src/vmm_mad/remotes/vcenter/resize_disk"
src/vmm_mad/remotes/vcenter/preconfigure \
src/vmm_mad/remotes/vcenter/prereconfigure"
#------------------------------------------------------------------------------
# VMM Driver EC2 scripts, to be installed under $REMOTES_LOCATION/vmm/ec2
@ -763,7 +784,7 @@ IM_PROBES_KVM_PROBES_FILES="src/im_mad/remotes/kvm-probes.d/kvm.rb \
src/im_mad/remotes/common.d/version.sh \
src/im_mad/remotes/common.d/collectd-client-shepherd.sh"
IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/vcenter.rb"
IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/poll"
IM_PROBES_EC2_FILES="src/im_mad/remotes/ec2.d/poll"
@ -840,6 +861,10 @@ NETWORK_OVSWITCH_FILES="src/vnm_mad/remotes/ovswitch/clean \
src/vnm_mad/remotes/ovswitch/update_sg \
src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb"
NETWORK_VCENTER_FILES="src/vnm_mad/remotes/vcenter/pre \
src/vnm_mad/remotes/vcenter/post \
src/vnm_mad/remotes/vcenter/clean"
#-------------------------------------------------------------------------------
# IPAM drivers to be installed under $REMOTES_LOCATION/ipam
#-------------------------------------------------------------------------------
@ -1004,9 +1029,11 @@ TM_VCENTER_FILES="src/tm_mad/vcenter/clone \
src/tm_mad/vcenter/snap_delete \
src/tm_mad/vcenter/snap_revert \
src/tm_mad/vcenter/failmigrate \
src/datastore_mad/remotes/vcenter/monitor \
src/tm_mad/vcenter/delete \
src/tm_mad/vcenter/resize"
src/tm_mad/vcenter/context \
src/tm_mad/vcenter/monitor \
src/tm_mad/vcenter/mkimage \
src/tm_mad/vcenter/mkswap \
src/tm_mad/vcenter/delete"
TM_ISCSI_FILES="src/tm_mad/iscsi_libvirt/clone \
src/tm_mad/iscsi_libvirt/ln \
@ -1198,6 +1225,8 @@ EC2_ETC_FILES="src/vmm_mad/remotes/ec2/ec2_driver.conf \
AZ_ETC_FILES="src/vmm_mad/remotes/az/az_driver.conf \
src/vmm_mad/remotes/az/az_driver.default"
VCENTER_ETC_FILES="src/vmm_mad/remotes/lib/vcenter_driver/vcenter_driver.default"
#-------------------------------------------------------------------------------
# Virtualization drivers config. files, to be installed under $ETC_LOCATION
# - ssh, $ETC_LOCATION/vmm_exec

View File

@ -538,7 +538,7 @@ TM_MAD = [
DATASTORE_MAD = [
EXECUTABLE = "one_datastore",
ARGUMENTS = "-t 15 -d dummy,fs,lvm,ceph,dev,iscsi_libvirt,vcenter -s shared,ssh,ceph,fs_lvm"
ARGUMENTS = "-t 15 -d dummy,fs,lvm,ceph,dev,iscsi_libvirt,vcenter -s shared,ssh,ceph,fs_lvm,vcenter"
]
#*******************************************************************************
@ -770,7 +770,7 @@ VM_RESTRICTED_ATTR = "MEMORY_COST"
VM_RESTRICTED_ATTR = "DISK_COST"
VM_RESTRICTED_ATTR = "PCI"
VM_RESTRICTED_ATTR = "USER_INPUTS"
VM_RESTRICTED_ATTR = "DEPLOY_FOLDER"
VM_RESTRICTED_ATTR = "VCENTER_VM_FOLDER"
#VM_RESTRICTED_ATTR = "RANK"
#VM_RESTRICTED_ATTR = "SCHED_RANK"
@ -834,15 +834,30 @@ INHERIT_DATASTORE_ATTR = "GLUSTER_HOST"
INHERIT_DATASTORE_ATTR = "GLUSTER_VOLUME"
INHERIT_DATASTORE_ATTR = "DISK_TYPE"
INHERIT_DATASTORE_ATTR = "ADAPTER_TYPE"
INHERIT_DATASTORE_ATTR = "VCENTER_ADAPTER_TYPE"
INHERIT_DATASTORE_ATTR = "VCENTER_DS_REF"
INHERIT_DATASTORE_ATTR = "VCENTER_DS_IMAGE_DIR"
INHERIT_DATASTORE_ATTR = "VCENTER_DS_VOLATILE_DIR"
INHERIT_IMAGE_ATTR = "DISK_TYPE"
INHERIT_IMAGE_ATTR = "ADAPTER_TYPE"
INHERIT_IMAGE_ATTR = "VCENTER_ADAPTER_TYPE"
INHERIT_VNET_ATTR = "VLAN_TAGGED_ID"
INHERIT_VNET_ATTR = "FILTER_IP_SPOOFING"
INHERIT_VNET_ATTR = "FILTER_MAC_SPOOFING"
INHERIT_VNET_ATTR = "MTU"
INHERIT_VNET_ATTR = "INBOUND_AVG_BW"
INHERIT_VNET_ATTR = "INBOUND_PEAK_BW"
INHERIT_VNET_ATTR = "INBOUND_PEAK_KB"
INHERIT_VNET_ATTR = "OUTBOUND_AVG_BW"
INHERIT_VNET_ATTR = "OUTBOUND_PEAK_BW"
INHERIT_VNET_ATTR = "OUTBOUND_PEAK_KB"
INHERIT_VNET_ATTR = "VCENTER_NET_REF"
INHERIT_VNET_ATTR = "VCENTER_SWITCH_NAME"
INHERIT_VNET_ATTR = "VCENTER_PORTGROUP_TYPE"
INHERIT_VNET_ATTR = "VCENTER_CCR_REF"
INHERIT_VNET_ATTR = "VCENTER_INSTANCE_ID"
#*******************************************************************************
# Transfer Manager Driver Behavior Configuration
@ -958,7 +973,9 @@ DS_MAD_CONF = [
]
DS_MAD_CONF = [
NAME = "vcenter", REQUIRED_ATTRS = "VCENTER_CLUSTER", PERSISTENT_ONLY = "YES",
NAME = "vcenter",
REQUIRED_ATTRS = "VCENTER_ONE_HOST_ID, VCENTER_INSTANCE_ID, VCENTER_DS_REF, VCENTER_CCR_REF",
PERSISTENT_ONLY = "YES",
MARKETPLACE_ACTIONS = "export"
]
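The vcenter datastore driver now declares vCenter object references as REQUIRED_ATTRS instead of a cluster name. A minimal sketch of how a datastore action reads them, using the same XPaths as the rewritten scripts later in this diff:

    # Sketch: decode the driver action and read the new required attributes
    # (XPaths taken from the datastore scripts below).
    require 'base64'
    require 'opennebula'

    drv_action = OpenNebula::XMLElement.new
    drv_action.initialize_xml(Base64.decode64(ARGV[0]), 'DS_DRIVER_ACTION_DATA')

    host_id = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_ONE_HOST_ID"]
    ds_ref  = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_REF"]
    ccr_ref = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_CCR_REF"]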

View File

@ -248,10 +248,10 @@ VXLAN_IDS = [
# DEFAULT_CDROM_DEVICE_PREFIX: Same as above but for CDROM devices.
#
# DEFAULT_IMAGE_PERSISTENT: Control the default value for the PERSISTENT
# attribute on image creation (oneimage clone, onevm disk-saveas). If blank
# attribute on image creation (oneimage clone, onevm disk-saveas). If blank
# images will inherit the persistent attribute from the base image.
#
# DEFAULT_IMAGE_PERSISTENT_NEW: Control the default value for the PERSISTENT
# DEFAULT_IMAGE_PERSISTENT_NEW: Control the default value for the PERSISTENT
# attribute on image creation (oneimage create). By default images are no
# persistent if not set.
#*******************************************************************************
@ -569,7 +569,7 @@ TM_MAD = [
DATASTORE_MAD = [
EXECUTABLE = "one_datastore",
ARGUMENTS = "-t 15 -d dummy,fs,lvm,ceph,dev,iscsi_libvirt,vcenter -s shared,ssh,ceph,fs_lvm,qcow2"
ARGUMENTS = "-t 15 -d dummy,fs,lvm,ceph,dev,iscsi_libvirt,vcenter -s shared,ssh,ceph,fs_lvm,qcow2,vcenter"
]
#*******************************************************************************
@ -814,8 +814,8 @@ DEFAULT_UMASK = 177
VM_ADMIN_OPERATIONS = "migrate, delete, recover, retry, deploy, resched"
VM_MANAGE_OPERATIONS = "undeploy, hold, release, stop, suspend, resume, reboot,
poweroff, disk-attach, nic-attach, disk-snapshot, terminate, disk-resize,
VM_MANAGE_OPERATIONS = "undeploy, hold, release, stop, suspend, resume, reboot,
poweroff, disk-attach, nic-attach, disk-snapshot, terminate, disk-resize,
snapshot, updateconf, rename, resize, update, disk-saveas"
VM_USE_OPERATIONS = ""
@ -856,7 +856,7 @@ VM_RESTRICTED_ATTR = "EMULATOR"
VM_RESTRICTED_ATTR = "USER_INPUTS/CPU"
VM_RESTRICTED_ATTR = "USER_INPUTS/MEMORY"
VM_RESTRICTED_ATTR = "USER_INPUTS/VCPU"
VM_RESTRICTED_ATTR = "DEPLOY_FOLDER"
VM_RESTRICTED_ATTR = "VCENTER_VM_FOLDER"
#VM_RESTRICTED_ATTR = "RANK"
#VM_RESTRICTED_ATTR = "SCHED_RANK"
@ -920,10 +920,13 @@ INHERIT_DATASTORE_ATTR = "GLUSTER_HOST"
INHERIT_DATASTORE_ATTR = "GLUSTER_VOLUME"
INHERIT_DATASTORE_ATTR = "DISK_TYPE"
INHERIT_DATASTORE_ATTR = "ADAPTER_TYPE"
INHERIT_DATASTORE_ATTR = "VCENTER_ADAPTER_TYPE"
INHERIT_DATASTORE_ATTR = "VCENTER_DS_REF"
INHERIT_DATASTORE_ATTR = "VCENTER_DS_IMAGE_DIR"
INHERIT_DATASTORE_ATTR = "VCENTER_DS_VOLATILE_DIR"
INHERIT_IMAGE_ATTR = "DISK_TYPE"
INHERIT_IMAGE_ATTR = "ADAPTER_TYPE"
INHERIT_IMAGE_ATTR = "VCENTER_ADAPTER_TYPE"
INHERIT_VNET_ATTR = "VLAN_TAGGED_ID"
INHERIT_VNET_ATTR = "FILTER_IP_SPOOFING"
@ -936,6 +939,12 @@ INHERIT_VNET_ATTR = "OUTBOUND_AVG_BW"
INHERIT_VNET_ATTR = "OUTBOUND_PEAK_BW"
INHERIT_VNET_ATTR = "OUTBOUND_PEAK_KB"
INHERIT_VNET_ATTR = "VCENTER_NET_REF"
INHERIT_VNET_ATTR = "VCENTER_SWITCH_NAME"
INHERIT_VNET_ATTR = "VCENTER_PORTGROUP_TYPE"
INHERIT_VNET_ATTR = "VCENTER_CCR_REF"
INHERIT_VNET_ATTR = "VCENTER_INSTANCE_ID"
#*******************************************************************************
# Transfer Manager Driver Behavior Configuration
#*******************************************************************************
@ -1050,7 +1059,9 @@ DS_MAD_CONF = [
]
DS_MAD_CONF = [
NAME = "vcenter", REQUIRED_ATTRS = "VCENTER_CLUSTER", PERSISTENT_ONLY = "YES",
NAME = "vcenter",
REQUIRED_ATTRS = "VCENTER_ONE_HOST_ID, VCENTER_INSTANCE_ID, VCENTER_DS_REF, VCENTER_CCR_REF",
PERSISTENT_ONLY = "NO",
MARKETPLACE_ACTIONS = "export"
]

Binary file not shown.

View File

@ -354,11 +354,11 @@ EOT
:description => 'Sends READY=YES to OneGate, useful for OneFlow'
},
{
:name => 'deploy_folder',
:large => '--deploy_folder path',
:name => 'vcenter_vm_folder',
:large => '--vcenter_vm_folder path',
:format => String,
:description => "In a vCenter environment sets the the VMs and Template folder where the VM will be placed in." \
" The path uses slashes to separate folders. For example: --deploy_folder \"/Management/VMs\""
" The path uses slashes to separate folders. For example: --vcenter_vm_folder \"/Management/VMs\""
}
]
@ -1133,7 +1133,7 @@ EOT
template<<' ]' << "\n"
end
template<<"DEPLOY_FOLDER=#{options[:deploy_folder]}\n" if options[:deploy_folder]
template<<"VCENTER_VM_FOLDER=#{options[:vcenter_vm_folder]}\n" if options[:vcenter_vm_folder]
context=create_context(options)
template<<context if context

View File

@ -103,14 +103,22 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
:name => "disk_type",
:large => "--disk_type disk_type",
:description => "Type of the image \n"<<
" "*31<<"for KVM: BLOCK, CDROM, RBD or FILE \n"<<
" "*31<<"for vCenter: THIN, TICHK, ZEOREDTHICK " <<
" " * 31 << "BLOCK, CDROM, RBD or FILE \n" <<
"(for others, check the documentation) ",
:format => String
},
{
:name => "adapter_type",
:large => "--adapter_type adapter_type",
:name => "vcenter_disk_type",
:large => "--vcenter_disk_type vcenter_disk_type",
:description => "The vCenter Disk Type of the image \n"<<
" " * 31 <<
"for vCenter: THIN, THICK, ZEROEDTHICK " <<
"(for others, check the documentation) ",
:format => String
},
{
:name => "vcenter_adapter_type",
:large => "--vcenter_adapter_type vcenter_adapter_type",
:description => "Controller that will handle this image in " <<
"vCenter (lsiLogic, ide, busLogic). For other "<<
"values check the documentation",
@ -358,16 +366,16 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
end
def self.create_image_variables(options, name)
if Array===name
names=name
if Array === name
names = name
else
names=[name]
names = [name]
end
t=''
t = ''
names.each do |n|
if options[n]
t<<"#{n.to_s.upcase}=\"#{options[n]}\"\n"
t << "#{n.to_s.upcase}=\"#{options[n]}\"\n"
end
end
@ -375,16 +383,21 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
end
def self.create_image_template(options)
template_options=TEMPLATE_OPTIONS.map do |o|
template_options = TEMPLATE_OPTIONS.map do |o|
o[:name].to_sym
end
template=create_image_variables(
options, template_options-[:persistent, :dry, :prefix])
template = create_image_variables(
options,
template_options - [:persistent, :dry, :prefix ]
)
if options[:persistent]
template << "PERSISTENT=YES\n"
end
template<<"PERSISTENT=YES\n" if options[:persistent]
if options[:prefix]
template<<"DEV_PREFIX=\"#{options[:prefix]}\"\n"
template << "DEV_PREFIX=\"#{options[:prefix]}\"\n"
end
[0, template]
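For reference, a sketch of what these helpers emit for the renamed vCenter options; the option names come from the flags defined above and the output format from create_image_variables:

    # Illustration of create_image_variables output for the new options.
    options = {
        :vcenter_disk_type    => 'THIN',
        :vcenter_adapter_type => 'lsiLogic'
    }

    t = ''
    [:vcenter_disk_type, :vcenter_adapter_type].each do |n|
        t << "#{n.to_s.upcase}=\"#{options[n]}\"\n" if options[n]
    end

    puts t
    # VCENTER_DISK_TYPE="THIN"
    # VCENTER_ADAPTER_TYPE="lsiLogic"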

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -65,9 +65,9 @@ cmd=CommandParser::CmdParser.new(ARGV) do
helper.set_client(options)
end
########################################################################
############################################################################
# Global Options
########################################################################
############################################################################
cmd_options=CommandParser::OPTIONS-[CommandParser::VERBOSE]
set :option, cmd_options+OpenNebulaHelper::CLIENT_OPTIONS
@ -93,6 +93,13 @@ cmd=CommandParser::CmdParser.new(ARGV) do
:format => String
}
USE_DEFAULTS = {
:name => "defaults",
:large => "--use-defaults",
:description => "Use defaults for answers to questions",
:format => String
}
############################################################################
# Import clusters
############################################################################
@ -100,470 +107,80 @@ cmd=CommandParser::CmdParser.new(ARGV) do
Import vCenter clusters as OpenNebula hosts
EOT
command :hosts, hosts_desc, :options=>[ VCENTER, USER, PASS ] do
command :hosts, hosts_desc, :options=>[ VCENTER, USER, PASS, USE_DEFAULTS ] do
con_ops = connection_options("Hosts", options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
vc = VCenterDriver::VIClient.new_connection(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Exploring vCenter resources..."
rs = vc.hierarchy
STDOUT.print "done!\n\n"
rs.each {|dc, cluster|
STDOUT.print "Do you want to process datacenter #{dc} [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
if cluster.empty?
STDOUT.puts " No new clusters found in #{dc}..."
next
end
cluster.each{ |c|
STDOUT.print " * Import cluster #{c} [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
r, m = VCenterDriver::VCenterHost.to_one(c, vc)
if r == 0
STDOUT.puts " OpenNebula host #{c} with id #{m}"\
" successfully created."
else
STDOUT.puts " Error: #{m}"
end
STDOUT.puts
}
}
rescue Exception => e
STDOUT.puts "error: #{e.message}"
exit -1
end
VCenterDriver::Importer.import_clusters(con_ops, options)
exit 0
end
############################################################################
# Import templates
############################################################################
templates_desc = <<-EOT.unindent
Import vCenter VM Templates into OpenNebula
EOT
command :templates, templates_desc, :options=>[ VCENTER, USER, PASS ] do
command :templates, templates_desc, :options=>[ VCENTER, USER, PASS, USE_DEFAULTS ] do
con_ops = connection_options("VM Templates", options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
vc = VCenterDriver::VIClient.new_connection(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Looking for VM Templates..."
rs = vc.vm_templates
STDOUT.print "done!\n"
rs.each {|dc, tmps|
STDOUT.print "\nDo you want to process datacenter #{dc} [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
if tmps.empty?
STDOUT.print " No new VM Templates found in #{dc}...\n\n"
next
end
tmps.each{ |t|
STDOUT.print "\n * VM Template found:\n"\
" - Name : #{t[:name]}\n"\
" - UUID : #{t[:uuid]}\n"\
" - Cluster: #{t[:host]}\n"\
" Import this VM template [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
ds_input = ""
rp_input = ""
# Datastores
STDOUT.print "\n This template is currently set to be "\
"deployed in datastore #{t[:default_ds]}."\
"\n Press y to keep the default, n to select"\
" a new datastore or d to delegate the choice"\
" to the user [y/n/d]? "
answer = STDIN.gets.strip.downcase
case answer
when 'd'
ds_split = t[:ds].split("|")
list_of_ds = ds_split[-2]
default_ds = ds_split[-1]
ds_input = ds_split[0] + "|" + ds_split[1] + "|" +
ds_split[2] + "|"
# Available list of datastores
input_str = " The list of available datastores to be"\
" presented to the user are \"#{list_of_ds}\""
input_str+= "\n Press y to agree, or input a comma"\
" separated list of datastores to edit "\
"[y/comma separated list] "
STDOUT.print input_str
answer = STDIN.gets.strip
if answer.downcase == 'y'
ds_input += ds_split[3] + "|"
else
ds_input += answer + "|"
end
# Default
input_str = " The default datastore presented to "\
"the end user is set to \"#{default_ds}\"."
input_str+= "\n Press y to agree, or input a new "\
"datastore [y/datastore name] "
STDOUT.print input_str
answer = STDIN.gets.strip
if answer.downcase == 'y'
ds_input += ds_split[4]
else
ds_input += answer
end
when 'n'
ds_split = t[:ds].split("|")
list_of_ds = ds_split[-2]
input_str = " The list of available datastores is"\
" \"#{list_of_ds}\"."
input_str+= "\n Please input the new default datastore: "
STDOUT.print input_str
answer = STDIN.gets.strip
t[:one] += "VCENTER_DATASTORE=\"#{answer}\"\n"
end
# Resource Pools
rp_split = t[:rp].split("|")
if rp_split.size > 3
STDOUT.print "\n This template is currently set to "\
"launch VMs in the default resource pool."\
"\n Press y to keep this behaviour, n to select"\
" a new resource pool or d to delegate the choice"\
" to the user [y/n/d]? "
answer = STDIN.gets.strip.downcase
case answer
when 'd'
list_of_rp = rp_split[-2]
default_rp = rp_split[-1]
rp_input = rp_split[0] + "|" + rp_split[1] + "|" +
rp_split[2] + "|"
# Available list of resource pools
input_str = " The list of available resource pools "\
"to be presented to the user are "\
"\"#{list_of_rp}\""
input_str+= "\n Press y to agree, or input a comma"\
" separated list of resource pools to edit "\
"[y/comma separated list] "
STDOUT.print input_str
answer = STDIN.gets.strip
if answer.downcase == 'y'
rp_input += rp_split[3] + "|"
else
rp_input += answer + "|"
end
# Default
input_str = " The default resource pool presented "\
"to the end user is set to"\
" \"#{default_rp}\"."
input_str+= "\n Press y to agree, or input a new "\
"resource pool [y/resource pool name] "
STDOUT.print input_str
answer = STDIN.gets.strip
if answer.downcase == 'y'
rp_input += rp_split[4]
else
rp_input += answer
end
when 'n'
list_of_rp = rp_split[-2]
input_str = " The list of available resource pools is"\
" \"#{list_of_rp}\"."
input_str+= "\n Please input the new default resource pool: "
STDOUT.print input_str
answer = STDIN.gets.strip
t[:one] += "RESOURCE_POOL=\"#{answer}\"\n"
end
end
if ds_input != "" ||
rp_input != ""
t[:one] += "USER_INPUTS=["
t[:one] += "VCENTER_DATASTORE=\"#{ds_input}\"," if ds_input != ""
t[:one] += "RESOURCE_POOL=\"#{rp_input}\"," if rp_input != ""
t[:one] = t[:one][0..-2]
t[:one] += "]"
end
one_t = ::OpenNebula::Template.new(
::OpenNebula::Template.build_xml, vc.one)
rc = one_t.allocate(t[:one])
if ::OpenNebula.is_error?(rc)
STDOUT.puts " Error creating template: #{rc.message}\n"
else
STDOUT.puts " OpenNebula template #{one_t.id} created!\n"
end
}
}
rescue Exception => e
STDOUT.puts "error: #{e.message}"
exit -1
end
VCenterDriver::Importer.import_templates(con_ops, options)
exit 0
end
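The interactive prompts removed above encoded datastore and resource pool choices in a pipe-delimited USER_INPUTS value: fields 0..2 are copied through unchanged, the next-to-last field carries the option list, and the last field the default. A parsing sketch (the sample string itself is hypothetical):

    # Field layout inferred from the ds_split/rp_split handling above.
    encoded = 'O|list|Which datastore?|ds1,ds2,ds3|ds1'

    fields  = encoded.split('|')
    header  = fields[0..2].join('|')  # kept verbatim by the old code
    choices = fields[-2]              # comma-separated options shown to the user
    default = fields[-1]              # preselected answer

    puts "choices: #{choices}, default: #{default}"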
############################################################################
# Import vms (deprecated)
############################################################################
vms_desc = <<-EOT.unindent
Deprecated action in onevcenter, please use onehost importvm instead
EOT
command :vms, vms_desc, :options=>[ VCENTER, USER, PASS ] do
command :vms, vms_desc, :options=>[ VCENTER, USER, PASS, USE_DEFAULTS ] do
STDERR.puts "Deprecated action in onevcenter, please use onehost "\
"importvm instead"
exit -1
end
############################################################################
# Import networks
############################################################################
network_desc = <<-EOT.unindent
Import vCenter networks into OpenNebula
EOT
command :networks, network_desc, :options=>[ VCENTER, USER, PASS ] do
command :networks, network_desc, :options=>[ VCENTER, USER, PASS, USE_DEFAULTS ] do
con_ops = connection_options("Networks", options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
vc = VCenterDriver::VIClient.new_connection(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Looking for vCenter networks..."
rs = vc.vcenter_networks
STDOUT.print "done!\n"
rs.each {|dc, tmps|
STDOUT.print "\nDo you want to process datacenter #{dc} [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
if tmps.empty?
STDOUT.print " No new Networks found in #{dc}...\n\n"
next
end
tmps.each{ |n|
print_str = "\n * Network found:\n"\
" - Name : #{n[:name]}\n"\
" - Type : #{n[:type]}\n"
print_str += " - VLAN ID : #{n[:vlan]}\n" if n[:vlan]
print_str += " - Cluster : #{n[:cluster]}\n"
print_str += " Import this Network [y/n]? "
STDOUT.print print_str
next if STDIN.gets.strip.downcase != 'y'
# Size
STDOUT.print " How many VMs are you planning"\
" to fit into this network [255]? "
size = STDIN.gets.strip
size = "255" if size.to_i.to_s != size
# Type
STDOUT.print " What type of Virtual Network"\
" do you want to create (IPv[4],IPv[6]"\
",[E]thernet) ?"
type = STDIN.gets.strip
ar_str = "\nAR=[TYPE=\""
case type.downcase
when "4"
ar_str += "IP4\""
STDOUT.print " Please input the first IP "\
"in the range: "
ip = STDIN.gets.strip
ar_str += ",IP=" + ip
STDOUT.print " Please input the first MAC "\
"in the range [Enter for default]: "
mac = STDIN.gets.strip
ar_str += ",MAC=" + mac if !mac.empty?
when "6"
ar_str += "IP6\""
STDOUT.print " Please input the first MAC "\
"in the range [Enter for default]: "
mac = STDIN.gets.strip
ar_str += ",MAC=" + mac if !mac.empty?
STDOUT.print " Please input the GLOBAL PREFIX "\
"[Enter for default]: "
gp = STDIN.gets.strip
ar_str += ",GLOBAL_PREFIX=" + gp if !gp.empty?
STDOUT.print " Please input the ULA PREFIX "\
"[Enter for default]: "
up = STDIN.gets.strip
ar_str += ",ULA_PREFIX=" + up if !up.empty?
when "e"
ar_str += "ETHER\""
STDOUT.print " Please input the first MAC "\
"in the range [Enter for default]: "
mac = STDIN.gets.strip
ar_str += ",MAC=" + mac if !mac.empty?
else
STDOUT.puts " Type [#{type}] not supported,"\
" defaulting to Ethernet."
ar_str += "ETHER\""
STDOUT.print " Please input the first MAC "\
"in the range [Enter for default]: "
mac = STDIN.gets.strip
ar_str += ",MAC=" + mac if !mac.empty?
end
ar_str += ",SIZE = \"#{size}\"]"
one_vn = ::OpenNebula::VirtualNetwork.new(
::OpenNebula::Template.build_xml, vc.one)
vnet_template = n[:one] + ar_str
rc = one_vn.allocate(vnet_template)
if ::OpenNebula.is_error?(rc)
STDOUT.puts " Error creating virtual network: " +
" #{rc.message}\n"
else
STDOUT.puts " OpenNebula virtual network " +
"#{one_vn.id} created with size #{size}!\n"
end
}
}
rescue Exception => e
STDOUT.puts "error: #{e.message}"
exit -1
end
VCenterDriver::Importer.import_networks(con_ops, options)
exit 0
end
############################################################################
# Import datastores
############################################################################
datastores_desc = <<-EOT.unindent
Import vCenter Datastores into OpenNebula
EOT
command :datastores, datastores_desc, :options=>[ VCENTER, USER, PASS ] do
command :datastores, datastores_desc, :options=>[ VCENTER, USER, PASS, USE_DEFAULTS ] do
con_ops = connection_options("Datastores", options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
vc = VCenterDriver::VIClient.new_connection(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Looking for Datastores..."
rs = vc.vcenter_datastores
STDOUT.print "done!\n"
rs.each {|dc, tmps|
STDOUT.print "\nDo you want to process datacenter #{dc} [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
if tmps.empty?
STDOUT.print " No new Datastores found in #{dc}...\n\n"
next
end
tmps.each{ |d|
STDOUT.print "\n * Datastore found:\n"\
" - Name : #{d[:name]}\n"\
" - Total MB : #{d[:total_mb]}\n"\
" - Free MB : #{d[:free_mb]}\n"\
" - Cluster : #{d[:cluster]}\n"\
" Import this Datastore [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
one_d = ::OpenNebula::Datastore.new(
::OpenNebula::Datastore.build_xml, vc.one)
rc = one_d.allocate(d[:one])
if ::OpenNebula.is_error?(rc)
STDOUT.puts " Error creating datastore: #{rc.message}\n"\
" One datastore can exist only once, and "\
"can be used in any vCenter Cluster that "\
"has access to it. Also, no spaces allowed "\
"in datastore name (rename it in vCenter "\
"and try again)"
else
STDOUT.puts " OpenNebula datastore #{one_d.id} created!\n"
end
}
}
rescue Exception => e
STDOUT.puts "error: #{e.message}"
exit -1
end
VCenterDriver::Importer.import_datastore(con_ops, options)
exit 0
end
############################################################################
# Import images
############################################################################
images_desc = <<-EOT.unindent
Import vCenter Images into OpenNebula
EOT
command :images, images_desc, :ds_name, :options=>[ VCENTER, USER, PASS ] do
command :images, images_desc, :ds_name, :options=>[ VCENTER, USER, PASS, USE_DEFAULTS ] do
ds_name = args[0]
if !ds_name
@ -573,49 +190,7 @@ cmd=CommandParser::CmdParser.new(ARGV) do
con_ops = connection_options("Images", options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
vc = VCenterDriver::VIClient.new_connection(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Looking for Images..."
images = vc.vcenter_images(ds_name)
STDOUT.print "done!\n"
images.each{ |i|
STDOUT.print "\n * Image found:\n"\
" - Name : #{i[:name]}\n"\
" - Path : #{i[:path]}\n"\
" - Type : #{i[:type]}\n"\
" Import this Image [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
one_i = ::OpenNebula::Image.new(
::OpenNebula::Image.build_xml, vc.one)
rc = one_i.allocate(i[:one], i[:dsid].to_i)
if ::OpenNebula.is_error?(rc)
STDOUT.puts "Error creating image: #{rc.message}\n"
if rc.message == "[ImageAllocate] Not enough space "\
"in datastore"
STDOUT.puts "Please disable DATASTORE_CAPACITY_"\
"CHECK in /etc/one/oned.conf and "\
"restart OpenNebula."
end
else
STDOUT.puts " OpenNebula image #{one_i.id} created!\n"
end
}
rescue Exception => e
STDOUT.puts "error: #{e.message}"
exit -1
end
VCenterDriver::Importer.import_images(con_ops, ds_name, options)
exit 0
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -16,10 +16,6 @@
# limitations under the License. #
# ---------------------------------------------------------------------------- #
###############################################################################
# This script is used retrieve the file size of a disk
###############################################################################
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
if !ONE_LOCATION
@ -32,7 +28,6 @@ $: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
require 'digest'
drv_action_enc = ARGV[0]
id = ARGV[1]
@ -40,26 +35,43 @@ id = ARGV[1]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'DS_DRIVER_ACTION_DATA')
ds_name = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/NAME"]
hostname = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_CLUSTER"]
img_path = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/PATH"]
target_ds_ref = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_REF"]
host_id = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_ONE_HOST_ID"]
ds_image_dir = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_IMAGE_DIR"] || "one"
src_path = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/PATH"]
src_img_id = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/CLONING_ID"]
if ds_name.nil? || hostname.nil? || img_path.nil?
STDERR.puts "Not enough information to clone the image, missing datastore"\
" name or vcenter cluster name or image path."
exit -1
end
check_valid src_img_id, "cloning id"
check_valid host_id, "vcenter cluster"
check_valid src_path, "image path"
check_valid target_ds_ref, "target ds ref"
# Get source ds_ref
source_img = VCenterDriver::VIHelper.one_item(OpenNebula::Image, src_img_id)
source_ds_id = source_img['DATASTORE_ID']
source_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, source_ds_id)
source_ds_ref = source_ds['TEMPLATE/VCENTER_DS_REF']
# Generate target path
str_for_target_path = Time.now.to_s + id.to_s
target_path = Digest::MD5.hexdigest(str_for_target_path) + ".vmdk"
target_path = "#{ds_image_dir}/#{id}/one-#{id}.vmdk"
begin
host_id = VCenterDriver::VIClient.translate_hostname(hostname)
vi_client = VCenterDriver::VIClient.new host_id
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
ds = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)
target_ds_vc = VCenterDriver::Datastore.new_from_ref(target_ds_ref, vi_client)
target_ds_name = target_ds_vc['name']
puts ds.copy_virtual_disk(src_path, target_ds_vc, target_path)
puts vi_client.copy_virtual_disk(img_path, ds_name, target_path)
rescue Exception => e
STDERR.puts "Error cloning img #{img_path} size. Reason: #{e.message}"
message = "Error cloning img #{src_path} to #{target_ds_name}"\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end
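The check_valid guard called by these rewritten datastore scripts is provided by the vcenter_driver library and is not part of this hunk; a plausible minimal implementation, sketched from how the scripts use it:

    # Hypothetical sketch of check_valid; the real helper ships with the
    # vcenter_driver library outside this diff.
    def check_valid(parameter, label)
        if parameter.nil? || parameter.to_s.empty?
            STDERR.puts "Not enough information to execute the action, " \
                        "missing: #{label}."
            exit(-1)
        end
    end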

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -16,10 +16,6 @@
# limitations under the License. #
# ---------------------------------------------------------------------------- #
###############################################################################
# This script is used retrieve the file size of a disk
###############################################################################
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
if !ONE_LOCATION
@ -33,123 +29,7 @@ end
$: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'opennebula'
require 'fileutils'
require 'tempfile'
def is_remote?(file)
file.match(%r{^https?://})
end
def is_vmdk?(file)
type = %x{file #{file}}
type.include? "VMware"
end
def get_type(file)
type = %x{file -b --mime-type #{file}}
if $?.exitstatus != 0
STDERR.puts "Can not read file #{file}"
exit(-1)
end
type.strip
end
def needs_unpack?(file)
temp = Tempfile.new('one-')
temp.close
file_path = file
if is_remote?(file)
rc = system("curl --fail -sS -k -L #{file_path} | head -c 1024 > #{temp.path}")
if !rc
STDERR.puts "Can not download file #{file_path}"
exit(-1)
end
file_path = temp.path
end
type = get_type(file_path)
type.gsub!(%r{^application/(x-)?}, '')
unpack = %w{bzip2 gzip tar}.include?(type)
temp.unlink
unpack
end
def vmdk_info(file)
file_path = file
if File.directory?(file_path)
files = Dir["#{file_path}/*.vmdk"]
found = false
count = 0
last = nil
files.each do |f|
if get_type(f).strip == "text/plain"
file_path = f
found = true
break
else
count += 1
last = f
end
end
if !found
if count == 1
file_path = last
found = true
else
STDERR.puts "Could not find vmdk"
exit(-1)
end
end
end
case get_type(file_path).strip
when "application/octet-stream"
return {
:type => :standalone,
:file => file_path,
:dir => File.dirname(file_path)
}
when "application/x-iso9660-image"
return {
:type => :standalone,
:file => file_path,
:dir => File.dirname(file_path),
:extension => '.iso'
}
when "text/plain"
info = {
:type => :flat,
:file => file_path,
:dir => File.dirname(file_path)
}
files_list = []
descriptor = File.read(file_path).split("\n")
flat_files = descriptor.select {|l| l.start_with?("RW")}
flat_files.each do |f|
files_list << info[:dir] + "/" +
f.split(" ")[3].chomp.chomp('"').reverse.chomp('"').reverse
end
info[:flat_files] = files_list
return info
else
STDERR.puts "Unrecognized file type"
exit(-1)
end
end
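The descriptor parsing in the removed vmdk_info (moved into VCenterDriver::FileHelper by this commit) pulls flat-extent file names out of lines such as the following; the sample line is illustrative:

    # Example VMDK descriptor extent line: the extent file name is the
    # fourth whitespace-separated field, wrapped in double quotes.
    line = 'RW 4192256 SPARSE "disk-flat.vmdk"'
    file = line.split(' ')[3].delete('"')
    puts file  # => disk-flat.vmdk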
require 'vcenter_driver'
drv_action_enc = ARGV[0]
id = ARGV[1]
@ -157,65 +37,61 @@ id = ARGV[1]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'DS_DRIVER_ACTION_DATA')
img_path = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/PATH"]
md5 = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/MD5"]
sha1 = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/SHA1"]
nodecomp = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/NO_DECOMPRESS"]
limit_bw = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/LIMIT_TRANSFER_BW"]
hostname = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_CLUSTER"]
ds_name = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/NAME"]
img_path = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/PATH"]
host_id = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_ONE_HOST_ID"]
ds_ref = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_REF"]
ds_image_dir = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_IMAGE_DIR"] || "one"
md5 = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/MD5"]
sha1 = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/SHA1"]
nodecomp = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/NO_DECOMPRESS"]
limit_bw = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/LIMIT_TRANSFER_BW"]
check_valid img_path, "img_path"
if img_path.nil?
STDERR.puts "Not enough information to register the image,"\
" missing image path."
exit(-1)
end
# if image is already in a vCenter datastore return the path
if img_path.start_with? "vcenter://"
# File already in the vCenter datastore
puts img_path.sub("vcenter://","")
exit(0)
end
downsh_args = " "
downsh_args += "--md5 #{md5} " if md5 and !md5.empty? and !md5.eql? "-"
downsh_args += "--sha1 #{sha1} " if sha1 and !sha1.empty?
downsh_args += "--nodecomp " if nodecomp and !nodecomp.empty?
downsh_args += "--limit #{limit_bw} " if limit_bw and !limit_bw.empty?
temp_file = nil
filename = File.basename(img_path)
target_path = "#{ds_image_dir}/#{id}"
downloader = "#{File.dirname(__FILE__)}/../downloader.sh #{downsh_args}"
# If image is in a remote http location it has to be downloaded
# or if is a zipped file it has to be unzipped in a temp folder
# Generate target path
str_for_target_path = Time.now.to_s + id.to_s
target_path = Digest::MD5.hexdigest(str_for_target_path)
if VCenterDriver::FileHelper.is_remote_or_needs_unpack?(img_path)
temp_folder = File.join(VAR_LOCATION, "vcenter/#{target_path}")
temp_file = File.join(temp_folder, File.basename(img_path))
files_to_upload = Array.new
# Create tmp directory
FileUtils.mkdir_p(temp_folder)
file_path = img_path
skip_download = false
delete_file = false
files_to_upload = []
# Specify downloader args
downsh_args = " "
downsh_args << "--md5 #{md5} " if md5 && !md5.empty? && !md5.eql?("-")
downsh_args << "--sha1 #{sha1} " if sha1 && !sha1.empty?
downsh_args << "--nodecomp " if nodecomp && !nodecomp.empty?
downsh_args << "--limit #{limit_bw} " if limit_bw && !limit_bw.empty?
if is_remote?(file_path) || needs_unpack?(file_path)
temp_folder = File.join(VAR_LOCATION, "vcenter")
temp_file = File.join(temp_folder, File.basename(target_path))
FileUtils.mkdir_p(temp_folder) if !File.directory?(temp_folder)
downloader = "#{File.dirname(__FILE__)}/../downloader.sh #{downsh_args}"
rc = system("#{downloader} #{img_path} #{temp_file}")
rc = system("#{downloader} #{file_path} #{temp_file}")
if !rc
STDERR.puts "Error downloading #{file_path}"
STDERR.puts "Error downloading #{img_path}"
FileUtils.rm_rf(temp_file)
exit(-1)
exit -1
end
delete_file = true
original_path = File.basename(file_path)
file_path = temp_file
img_path = temp_file
end
info = vmdk_info(file_path)
# Time to upload files to vCenter
files_to_upload = []
info = VCenterDriver::FileHelper.vcenter_file_info(img_path)
extension = info[:extension] || ''
case info[:type]
@ -227,34 +103,40 @@ when :flat
end
files_to_upload.each_with_index do |f, index|
path = "#{target_path}/#{File.basename(f)}"
path = "#{target_path}/#{File.basename(f)}"
# Change path for gzipped standalone file
if(target_path == File.basename(f))
path = "#{target_path}/#{original_path}"
path = "#{target_path}/#{filename}"
# remove gz or bz2 if part of filename
if path.end_with?("gz") and is_vmdk?(f)
path.gsub!(/gz$/,'')
if path.end_with?("gz") && VCenterDriver::FileHelper.is_vmdk?(f)
path.gsub!(/gz$/,'')
end
if path.end_with?("bz2") and is_vmdk?(f)
path.gsub!(/bz2$/,'')
end
end
if path.end_with?("bz2") && VCenterDriver::FileHelper.is_vmdk?(f)
path.gsub!(/bz2$/,'')
end
end
# Change path if vmdk is part of filename
# but it's not the extension'
if /[^.]+vmdk$/.match(path) and is_vmdk?(f)
# Change path if vmdk is part of filename but it's not the extension
if /[^.]+vmdk$/.match(path) && VCenterDriver::FileHelper.is_vmdk?(f)
path.gsub!(/vmdk$/,'')
extension = '.vmdk'
extension = '.vmdk'
end
# Add iso extension if file is an ISO file
if VCenterDriver::FileHelper.is_iso?(f)
path = "#{File.dirname(path)}/#{File.basename(path,".*")}"
extension = '.iso'
end
if index == files_to_upload.size - 1
uploader_args = hostname + " " + ds_name + " " +
uploader_args = host_id + " " + ds_ref + " " +
"#{path}#{extension}" + " " + f
else
uploader_args = hostname + " " + ds_name + " " +
uploader_args = host_id + " " + ds_ref + " " +
path + " " + f + " &> /dev/null"
end
@ -262,12 +144,11 @@ files_to_upload.each_with_index do |f, index|
rc = system(cmd)
if !rc
STDERR.puts "Can not upload file #{f}"
FileUtils.rm_rf(temp_file) if delete_file
STDERR.puts "Cannot upload file #{f}"
FileUtils.rm_rf(temp_file) if temp_file
exit(-1)
end
end
FileUtils.rm_rf(temp_file) if delete_file
FileUtils.rm_rf(temp_file) if temp_file

View File

@ -17,7 +17,7 @@
# ---------------------------------------------------------------------------- #
###############################################################################
# This script is used retrieve the file size of a disk
# This script is used retrieve the file size of a disk
###############################################################################
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
@ -39,21 +39,20 @@ id = ARGV[1]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'DS_DRIVER_ACTION_DATA')
img_source = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/SOURCE"]
img_size = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/SIZE"]
md5 = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/MD5"]
md5 = md5.nil? ? "-" : md5
hostname = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_CLUSTER"]
hostname = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_ONE_HOST_ID"]
ds_name = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/NAME"]
if img_source.nil?
STDERR.puts "Not enough information to export the image,"\
" missing image source."
message = "Not enough information to export the image,"\
" missing image source."
STDERR.puts error_message(message)
exit -1
end

View File

@ -17,7 +17,7 @@
# ---------------------------------------------------------------------------- #
###############################################################################
# This script is used retrieve the file size of a disk
# This script is used retrieve the file size of a disk
###############################################################################
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
@ -36,37 +36,48 @@ require 'vcenter_driver'
drv_action_enc = ARGV[0]
id = ARGV[1]
drv_action =OpenNebula::XMLElement.new
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'DS_DRIVER_ACTION_DATA')
ds_name = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/NAME"]
hostname = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_CLUSTER"]
adapter_type = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/ADAPTER_TYPE"]
disk_type = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/DISK_TYPE"]
size = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/SIZE"]
img_name = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/NAME"]
ds_ref = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_REF"]
host_id = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_ONE_HOST_ID"]
ds_image_dir = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_IMAGE_DIR"] || "one"
img_id = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/ID"]
adapter_type = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/VCENTER_ADAPTER_TYPE"] ||
VCenterDriver::VIHelper.get_default("IMAGE/TEMPLATE/VCENTER_ADAPTER_TYPE")
disk_type = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/VCENTER_DISK_TYPE"] ||
VCenterDriver::VIHelper.get_default("IMAGE/TEMPLATE/VCENTER_DISK_TYPE")
size = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/SIZE"]
fs_type = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE"]
if ds_name.nil? ||
hostname.nil? ||
adapter_type.nil? ||
disk_type.nil? ||
size.nil? ||
img_name.nil?
STDERR.puts "Not enough information to create the image."
exit -1
check_valid img_id, "img_id"
check_valid host_id, "vcenter_cluster"
check_valid adapter_type, "adapter_type"
check_valid disk_type, "disk_type"
check_valid size, "size"
check_valid ds_ref, "ds_ref"
# TODO path in vCenter? choose a specific directory
img_name = "#{ds_image_dir}/#{img_id}/one-#{img_id}"
if fs_type == "save_as"
puts img_name + ".vmdk"
exit 0
end
begin
host_id = VCenterDriver::VIClient.translate_hostname(hostname)
vi_client = VCenterDriver::VIClient.new host_id
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client)
puts ds.create_virtual_disk(img_name, size, adapter_type, disk_type)
puts vi_client.create_virtual_disk(img_name,
ds_name,
size,
adapter_type,
disk_type)
rescue Exception => e
STDERR.puts "Error creating virtual disk in #{ds_name}."\
" Reason: #{e.message}"
message = "Error creating virtual disk #{img_name}."\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end
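VCenterDriver::VIHelper.get_default, used above for the adapter and disk type fallbacks, is assumed to resolve an XPath against the vcenter_driver.default file that VCENTER_ETC_FILES installs earlier in this diff; the file layout and method body below are guesses, not the shipped code:

    # Hedged sketch of a get_default-style lookup (hypothetical file layout).
    require 'rexml/document'

    def get_default(xpath)
        xml  = REXML::Document.new(File.read('/etc/one/vcenter_driver.default'))
        elem = REXML::XPath.first(xml, xpath)
        elem ? elem.text.strip : nil
    end

    get_default('IMAGE/TEMPLATE/VCENTER_ADAPTER_TYPE')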

View File

@ -39,21 +39,26 @@ id = ARGV[1]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'DS_DRIVER_ACTION_DATA')
ds_name = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/NAME"]
hostname = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_CLUSTER"]
host_id = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_ONE_HOST_ID"]
ds_ref = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_REF"]
check_valid ds_ref, "ds_ref"
if ds_name.nil? ||
hostname.nil?
if host_id.nil? || ds_ref.nil?
STDERR.puts "Not enough information to monitor the image."
exit -1
end
begin
host_id = VCenterDriver::VIClient.translate_hostname(hostname)
vi_client = VCenterDriver::VIClient.new host_id
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
storage = VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
puts storage.monitor
puts vi_client.monitor_ds(ds_name)
rescue Exception => e
STDERR.puts "Error monitoring #{ds_name}. Reason: #{e.message}"
message = "Error monitoring host #{id}."\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -17,7 +17,7 @@
# ---------------------------------------------------------------------------- #
###############################################################################
# This script is used retrieve the file size of a disk
# This script is used retrieve the file size of a disk
###############################################################################
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
@ -39,25 +39,43 @@ id = ARGV[1]
drv_action =OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'DS_DRIVER_ACTION_DATA')
ds_name = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/NAME"]
hostname = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_CLUSTER"]
img_src = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/SOURCE"]
ds_ref = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_REF"]
host_id = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_ONE_HOST_ID"]
img_src = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/SOURCE"]
imported = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TEMPLATE/VCENTER_IMPORTED"]
if ds_name.nil? ||
hostname.nil? ||
img_src.nil?
STDERR.puts "Not enough information to delete the image."
exit -1
end
check_valid ds_ref, "ds_ref"
check_valid host_id, "vcenter_cluster"
check_valid img_src, "img_src"
CDROM = "1"
begin
host_id = VCenterDriver::VIClient.translate_hostname(hostname)
vi_client = VCenterDriver::VIClient.new host_id
#TODO should imported disks be deleted?
if imported.nil? || imported.empty?
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vi_client.delete_virtual_disk(img_src,
ds_name)
ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client)
img_dir = img_src.split('/')[0..-2].join('/')
img_type = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/TYPE"]
if img_type != CDROM
# delete the disk
ds.delete_virtual_disk(img_src)
else
# delete the CDROM iso
ds.delete_file(img_src)
end
ds.rm_directory(img_dir) if ds.dir_empty?(img_dir)
end
rescue Exception => e
STDERR.puts "Error delete virtual disk #{img_src} in #{ds_name}."\
" Reason: #{e.message}"
message = "Error deleting virtual disk #{img_src}."\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -17,7 +17,7 @@
# ---------------------------------------------------------------------------- #
###############################################################################
# This script is used retrieve the file size of a disk
# This script is used retrieve the file size of a disk
###############################################################################
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
@ -39,32 +39,31 @@ id = ARGV[1]
drv_action =OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'DS_DRIVER_ACTION_DATA')
ds_name = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/NAME"]
hostname = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_CLUSTER"]
ds_ref = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_DS_REF"]
host_id = drv_action["/DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VCENTER_ONE_HOST_ID"]
img_path = drv_action["/DS_DRIVER_ACTION_DATA/IMAGE/PATH"]
if ds_name.nil? ||
hostname.nil? ||
img_path.nil?
STDERR.puts "Not enough information to stat the image."
exit -1
end
check_valid ds_ref, "ds_ref"
check_valid host_id, "vcenter_cluster"
check_valid img_path, "img_path"
if img_path.start_with? "vcenter://"
begin
img_path = img_path.sub("vcenter://","")
host_id = VCenterDriver::VIClient.translate_hostname(hostname)
vi_client = VCenterDriver::VIClient.new host_id
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client)
puts ds.stat(img_path.sub("vcenter://",""))
puts vi_client.stat(ds_name, img_path)
rescue Exception => e
STDERR.puts "Error calculating image #{img_path} size."\
" Reason: #{e.message}"
message = "Error calculating image #{img_path} size."\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end
else
cmd = "#{File.dirname(__FILE__)}/../fs/stat #{drv_action_enc}"
system(cmd)
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -34,28 +34,24 @@ require 'uri'
require 'cgi'
require 'fileutils'
vcenter_url = ARGV[0]
vcenter_url = ARGV[0]
u = URI.parse(vcenter_url)
params = CGI.parse(u.query)
u = URI.parse(vcenter_url)
params = CGI.parse(u.query)
hostname = params["param_host"][0]
ds_name = params["param_dsname"][0]
img_src = u.host + u.path
vc_cluster_name = params["param_host"][0]
ds_name = params["param_dsname"][0]
img_src = u.host + u.path
begin
host_id = VCenterDriver::VIClient.translate_hostname(hostname)
vi_client = VCenterDriver::VIClient.new host_id
ds = vi_client.get_datastore(ds_name)
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
if ds.is_a? RbVmomi::VIM::StoragePod
STDERR.puts "Cannot download images from StoragePod #{ds_name} on #{hostname}."\
"Reason: Not supported"
exit(-1)
end
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
ds = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)
if ds.is_descriptor? img_src
if ds.is_descriptor?(img_src)
descriptor_name = File.basename u.path
temp_folder = VAR_LOCATION + "/vcenter/" + descriptor_name + "/"
FileUtils.mkdir_p(temp_folder) if !File.directory?(temp_folder)
@ -73,7 +69,7 @@ begin
VCenterDriver::VIClient.in_silence do
files_to_download.each{|file|
ds.download(url_prefix + file, temp_folder + file)
ds.download_file(url_prefix + file, temp_folder + file)
}
end
@ -91,11 +87,14 @@ begin
else
# Setting "." as the source will read from the stdin
VCenterDriver::VIClient.in_stderr_silence do
ds.download_to_stdout img_src
ds.download_to_stdout(img_src)
end
end
rescue Exception => e
STDERR.puts "Cannot download image #{u.path} from datastore #{ds_name} "\
"on #{hostname}. Reason: #{e.message}"
"on #{hostname}. Reason: \"#{e.message}\"\n#{e.backtrace}"
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -29,62 +29,31 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
hostname = ARGV[0]
ds_name = ARGV[1]
target_path = ARGV[2]
source_path = ARGV[3]
host_id = ARGV[0]
target_ds_ref = ARGV[1]
target_path = ARGV[2]
source_path = ARGV[3]
begin
host_id = VCenterDriver::VIClient.translate_hostname(hostname)
vi_client = VCenterDriver::VIClient.new host_id
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
ds = VCenterDriver::Datastore.new_from_ref(target_ds_ref, vi_client)
ds = vi_client.get_datastore(ds_name)
# Setting "." as the source will read from the stdin
source_path = "." if source_path.nil?
if ds.is_a? RbVmomi::VIM::StoragePod
STDERR.puts "Cannot upload image to StoragePod #{ds_name} on #{hostname}."\
"Reason: Not supported"
exit(-1)
end
# Monkey path datastore objects. Can not be done patching the class
# as the library redefines it when a new object is created. Using both
# the database vmodl.db and the Datastore.rb
#
# This patch fixes a bug in rbvmomi. It does not specify the path of the
# datacenter. If it is inside a folder it could not be found.
class <<ds
def get_ds_path
p = datacenter.parent
path = [datacenter.name]
while p.class == RbVmomi::VIM::Folder
path.unshift(p.name)
p = p.parent
end
path.delete_at(0) # The first folder is the root "Datacenters"
path.join('/')
end
def mkuripath path
"/folder/#{URI.escape path}?dcPath=#{URI.escape get_ds_path}&dsName=#{URI.escape name}"
end
end
directory = File.dirname(target_path)
vi_client.create_directory(directory, ds_name)
ds.create_directory(File.dirname(target_path))
VCenterDriver::VIClient.in_silence do
if source_path
ds.upload(target_path, source_path)
else
# Setting "." as the source will read from the stdin
ds.upload(target_path, ".")
end
ds.upload_file(source_path, target_path)
end
puts target_path
rescue Exception => e
STDERR.puts "Cannot upload image to datastore #{ds_name} on #{hostname}."\
"Reason: #{e.message}"
STDERR.puts "Cannot upload image to datastore #{ds_name} "\
"Reason: \"#{e.message}\"\n#{e.backtrace}"
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -29,24 +29,46 @@ $: << RUBY_LIB_LOCATION
require 'vcenter_driver'
host_id = ARGV[4]
check_valid host_id, "host_id"
if !host_id
exit -1
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
# Get CCR reference
client = OpenNebula::Client.new
host = OpenNebula::Host.new_with_id(host_id, client)
rc = host.info
if OpenNebula::is_error? rc
STDERR.puts rc.message
exit 1
end
ccr_ref = host["TEMPLATE/VCENTER_CCR_REF"]
# Get vCenter Cluster
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client)
# Print monitoring info
puts cluster.monitor
puts cluster.monitor_host_systems
vm_monitor_info = cluster.monitor_vms
if !vm_monitor_info.empty?
puts "VM_POLL=YES"
puts vm_monitor_info
end
puts cluster.monitor_customizations
dc = cluster.get_dc
ds_folder = dc.datastore_folder
ds_folder.fetch!
puts ds_folder.monitor
rescue Exception => e
STDERR.puts "IM poll for vcenter cluster #{host_id} failed due to "\
"\"#{e.message}\"\n#{e.backtrace}"
exit(-1)
ensure
vi_client.close_connection if vi_client
end
vi_client = VCenterDriver::VIClient.new host_id
vcenter_host = VCenterDriver::VCenterHost.new vi_client
cluster_info = vcenter_host.monitor_cluster
cluster_info << vcenter_host.monitor_host_systems
vm_monitor_info = vcenter_host.monitor_vms
cluster_info << "\nVM_POLL=YES"
cluster_info << "#{vm_monitor_info}" if !vm_monitor_info.empty?
cluster_info << "\n"
cluster_info << vcenter_host.monitor_customizations
cluster_info << vcenter_host.get_available_ds
puts cluster_info

View File

@ -156,6 +156,9 @@ void OpenNebulaTemplate::set_multiple_conf_default()
set_conf_ds("shared", "", "NO");
set_conf_ds("ssh", "", "NO");
set_conf_ds("vmfs", "BRIDGE_LIST", "NO");
set_conf_ds("vcenter",
"VCENTER_ONE_HOST_ID, VCENTER_INSTANCE_ID, VCENTER_DS_REF, VCENTER_CCR_REF",
"NO");
set_conf_ds("ceph",
"DISK_TYPE,BRIDGE_LIST,CEPH_HOST,CEPH_USER,CEPH_SECRET",
"NO");

View File

@ -371,7 +371,7 @@ IM_MAD = [
#
# type : driver type, supported drivers: xen, kvm, xml
#
# keep_snapshots: do not remove snapshots on power on/off cycles and live
# keep_snapshots: do not remove snapshots on power on/off cycles and live
# migrations if the hypervisor supports that.
#
# imported_vms_actions : comma-separated list of actions supported
@ -833,10 +833,10 @@ INHERIT_DATASTORE_ATTR = "GLUSTER_HOST"
INHERIT_DATASTORE_ATTR = "GLUSTER_VOLUME"
INHERIT_DATASTORE_ATTR = "DISK_TYPE"
INHERIT_DATASTORE_ATTR = "ADAPTER_TYPE"
INHERIT_DATASTORE_ATTR = "VCENTER_ADAPTER_TYPE"
INHERIT_IMAGE_ATTR = "DISK_TYPE"
INHERIT_IMAGE_ATTR = "ADAPTER_TYPE"
INHERIT_IMAGE_ATTR = "VCENTER_ADAPTER_TYPE"
INHERIT_VNET_ATTR = "VLAN_TAGGED_ID"
INHERIT_VNET_ATTR = "FILTER_IP_SPOOFING"
@ -957,7 +957,9 @@ DS_MAD_CONF = [
]
DS_MAD_CONF = [
NAME = "vcenter", REQUIRED_ATTRS = "VCENTER_CLUSTER", PERSISTENT_ONLY = "YES",
NAME = "vcenter",
REQUIRED_ATTRS = "VCENTER_ONE_HOST_ID, VCENTER_INSTANCE_ID, VCENTER_DS_REF, VCENTER_CCR_REF",
PERSISTENT_ONLY = "YES",
MARKETPLACE_ACTIONS = "export"
]
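
Note: a minimal sketch of registering a datastore that satisfies the REQUIRED_ATTRS above through the Ruby OCA; every VCENTER_* value is a placeholder (in practice the managed object references and the instance UUID are filled in by the importer):

require 'opennebula'
include OpenNebula

client = Client.new # credentials taken from ONE_AUTH

template = <<-EOT
  NAME                = "vcenter-ds-example"
  DS_MAD              = "vcenter"
  TM_MAD              = "vcenter"
  VCENTER_ONE_HOST_ID = "0"             # OpenNebula ID of the vCenter host
  VCENTER_INSTANCE_ID = "aaaa-bbbb"     # vCenter instance UUID (placeholder)
  VCENTER_DS_REF      = "datastore-15"  # datastore managed object ref (placeholder)
  VCENTER_CCR_REF     = "domain-c7"     # cluster managed object ref (placeholder)
EOT

ds = Datastore.new(Datastore.build_xml, client)
rc = ds.allocate(template)
puts OpenNebula.is_error?(rc) ? rc.message : "Datastore ID: #{ds.id}"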

View File

@ -223,12 +223,23 @@ module OpenNebula
xml = OpenNebula::VirtualMachine.build_xml
vm = OpenNebula::VirtualMachine.new(xml, @client)
rc = vm.allocate(template)
# vCenter wild VMs have a different process: image and vnet objects
# representing their existing disks and nics must be created and
# referenced
vcenter_wild_vm = wild.key? "VCENTER_TEMPLATE"
if vcenter_wild_vm
require 'vcenter_driver'
host_id = self["ID"]
vm_ref = wild["DEPLOY_ID"]
return VCenterDriver::Importer.import_wild(host_id, vm_ref, vm, template)
else
rc = vm.allocate(template)
return rc if OpenNebula.is_error?(rc)
return rc if OpenNebula.is_error?(rc)
vm.deploy(id, false)
return vm.id
vm.deploy(id, false)
return vm.id
end
end
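
Note: a minimal usage sketch for the branch above, assuming it lives in OpenNebula::Host#import_wild as the surrounding diff suggests (the host ID and VM name are placeholders):

require 'opennebula'
include OpenNebula

client = Client.new
host   = Host.new_with_id(0, client) # host ID 0 is a placeholder
rc     = host.info
raise rc.message if OpenNebula.is_error?(rc)

# For a vCenter host the wild VM carries VCENTER_TEMPLATE, so the call is
# routed to VCenterDriver::Importer.import_wild; for other hypervisors the
# VM is allocated and deployed as before.
result = host.import_wild("wild-vm-name") # placeholder name
puts OpenNebula.is_error?(result) ? result.message : "Imported VM ID: #{result}"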
#######################################################################

View File

@ -349,7 +349,7 @@ module OpenNebula
ds = OpenNebula::Datastore.new_with_id(ds_id, @client)
rc = ds.info
return rc if OpenNebula.is_error?(rc)
self.update("VCENTER_DATASTORE=#{ds['/DATASTORE/NAME']}", true)
self.update("VCENTER_DS_REF=#{ds['/DATASTORE/VCENTER_DS_REF']}", true)
end
return call(VM_METHODS[:deploy],
@ -744,13 +744,6 @@ module OpenNebula
self['DEPLOY_ID']
end
# Returns the deploy_id of the VirtualMachine (numeric value)
def keep_disks?
!self['USER_TEMPLATE/KEEP_DISKS_ON_DONE'].nil? &&
self['USER_TEMPLATE/KEEP_DISKS_ON_DONE'].downcase=="yes"
end
# Clones the VM's source Template, replacing the disks with live snapshots
# of the current disks. The VM capacity and NICs are also preserved
#

View File

@ -45,6 +45,7 @@ module OpenNebula
end
end
end
@xml
end
# Builds a XML document
@ -167,6 +168,23 @@ module OpenNebula
end
# Iterates over every Element in the XPath and returns an array
# of XMLElements
# @return [Array<XMLElement>]
def retrieve_xmlelements(xpath_str)
collection = []
if NOKOGIRI
@xml.xpath(xpath_str).each { |pelem|
collection << XMLElement.new(pelem)
}
else
@xml.elements.each(xpath_str) { |pelem|
collection << XMLElement.new(pelem)
}
end
collection
end
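
Note: a short usage sketch for the new helper; it returns one XMLElement per match instead of plain text (the VM ID and XPath are illustrative):

require 'opennebula'
include OpenNebula

client = Client.new
vm = VirtualMachine.new_with_id(7, client) # placeholder VM ID
rc = vm.info
raise rc.message if OpenNebula.is_error?(rc)

# Each returned element supports the usual XMLElement accessors.
vm.retrieve_xmlelements("TEMPLATE/DISK").each do |disk|
  puts "image=#{disk['IMAGE']} ds_ref=#{disk['VCENTER_DS_REF']}"
end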
# Gets an attribute from an element
# key:: _String_ xpath for the element
# name:: _String_ name of the attribute

View File

@ -211,8 +211,8 @@ class OpenNebulaVNC
vnc_port = vm_resource['TEMPLATE/GRAPHICS/PORT']
vnc_pw = vm_resource['TEMPLATE/GRAPHICS/PASSWD']
if vm_resource['MONITORING/ESX_HOST'] # It is behind a vCenter
host = vm_resource['MONITORING/ESX_HOST']
if vm_resource['MONITORING/VCENTER_ESX_HOST'] # It is behind a vCenter
host = vm_resource['MONITORING/VCENTER_ESX_HOST']
end
# Generate token random_str: host:port

View File

@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Template path/folder where
# a vCenter VM will be deployed to
vcenter_deploy_folder: false
vcenter_vm_folder: false
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings

View File

@ -17,8 +17,8 @@ enabled_tabs:
- datastores-tab
- images-tab
#- files-tab
#- marketplaces-tab
#- marketplaceapps-tab
- marketplaces-tab
- marketplaceapps-tab
- network-top-tab
- vnets-tab
- vnets-topology-tab
@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Template path/folder where
# a vCenter VM will be deployed to
vcenter_deploy_folder: true
vcenter_vm_folder: true
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings
@ -345,7 +345,7 @@ tabs:
VM.resize: true
VM.attachdisk: true
VM.detachdisk: true
VM.disk_saveas: false
VM.disk_saveas: true
VM.attachnic: true
VM.detachnic: true
VM.snapshot_create: true
@ -544,7 +544,7 @@ tabs:
#- 13 # Search data
actions:
Datastore.refresh: true
Datastore.create_dialog: true
Datastore.create_dialog: false
Datastore.import_dialog: true
Datastore.addtocluster: true
Datastore.rename: true

View File

@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Template path/folder where
# a vCenter VM will be deployed to
vcenter_deploy_folder: false
vcenter_vm_folder: false
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings

View File

@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Template path/folder where
# a vCenter VM will be deployed to
vcenter_deploy_folder: false
vcenter_vm_folder: false
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings

View File

@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Template path/folder where
# a vCenter VM will be deployed to
vcenter_deploy_folder: false
vcenter_vm_folder: false
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings

View File

@ -183,7 +183,6 @@ define(function(require) {
break;
case "vcenter":
$('input#system_ds_type', dialog).attr('disabled', 'disabled');
$('input#file_ds_type', dialog).attr('disabled', 'disabled');
_selectvCenter(dialog);
break;

View File

@ -67,9 +67,18 @@ define(function(require) {
function _html() {
var renameTrHTML = RenameTr.html(TAB_ID, RESOURCE, this.element.NAME);
var templateTableHTML = TemplateTable.html(
this.element.TEMPLATE, RESOURCE,
Locale.tr("Attributes"));
var strippedTemplate = {};
var strippedTemplateVcenter = {};
$.each(this.element.TEMPLATE, function(key, value) {
if (key.match(/^VCENTER_*/)){
strippedTemplateVcenter[key] = value;
}
else {
strippedTemplate[key] = value;
}
});
var templateTableHTML = TemplateTable.html(strippedTemplate, RESOURCE, Locale.tr("Attributes"), true);
var templateTableVcenterHTML = TemplateTable.html(strippedTemplateVcenter, RESOURCE, Locale.tr("Vcenter information"), false);
var permissionsTableHTML = PermissionsTable.html(TAB_ID, RESOURCE, this.element);
var capacityBar = DatastoreCapacityBar.html(this.element);
var stateStr = OpenNebulaDatastore.stateStr(this.element.STATE);
@ -84,6 +93,7 @@ define(function(require) {
'element': this.element,
'renameTrHTML': renameTrHTML,
'templateTableHTML': templateTableHTML,
'templateTableVcenterHTML': templateTableVcenterHTML,
'permissionsTableHTML': permissionsTableHTML,
'capacityBar': capacityBar,
'stateStr': stateStr,
@ -94,7 +104,20 @@ define(function(require) {
function _setup(context) {
RenameTr.setup(TAB_ID, RESOURCE, this.element.ID, context);
TemplateTable.setup(this.element.TEMPLATE, RESOURCE, this.element.ID, context);
var strippedTemplate = {};
var strippedTemplateVcenter = {};
$.each(this.element.TEMPLATE, function(key, value) {
if (key.match(/^VCENTER_*/)){
strippedTemplateVcenter[key] = value;
}
else {
strippedTemplate[key] = value;
}
});
if($.isEmptyObject(strippedTemplateVcenter)){
$('.vcenter', context).hide();
}
TemplateTable.setup(strippedTemplate, RESOURCE, this.element.ID, context);
PermissionsTable.setup(TAB_ID, RESOURCE, this.element, context);
return false;
}
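
Note: the split applied in _html and _setup above is just a key-prefix partition; the same idea in a few lines of Ruby (the sample keys are made up):

template = {
  "VCENTER_DS_REF"      => "datastore-15", # placeholder values
  "VCENTER_INSTANCE_ID" => "aaaa-bbbb",
  "TYPE"                => "IMAGE_DS"
}

vcenter, rest = template.partition { |k, _| k.start_with?("VCENTER_") }.map(&:to_h)
# vcenter -> rendered in the read-only "Vcenter information" table
# rest    -> rendered in the editable "Attributes" table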

View File

@ -58,6 +58,9 @@
{{{permissionsTableHTML}}}
</div>
</div>
<div class="row vcenter">
<div class="large-9 columns">{{{templateTableVcenterHTML}}}</div>
</div>
<div class="row">
<div class="large-9 columns">{{{templateTableHTML}}}</div>
</div>

View File

@ -65,12 +65,17 @@ define(function(require) {
// in the template table. Unshown values are stored in the unshownTemplate
// object to be used when the host info is updated.
that.unshownTemplate = {};
that.strippedTemplateVcenter = {};
that.strippedTemplate = {};
var unshownKeys = ['HOST', 'VM', 'WILDS', 'ZOMBIES', 'RESERVED_CPU', 'RESERVED_MEM'];
$.each(that.element.TEMPLATE, function(key, value) {
if ($.inArray(key, unshownKeys) > -1) {
that.unshownTemplate[key] = value;
} else {
that.unshownTemplate[key] = value;
}
else if (key.match(/^VCENTER_*/)){
that.strippedTemplateVcenter[key] = value;
}
else {
that.strippedTemplate[key] = value;
}
});
@ -92,6 +97,10 @@ define(function(require) {
this.strippedTemplate,
RESOURCE,
Locale.tr("Attributes"));
var templateTableVcenterHTML = TemplateTable.html(
this.strippedTemplateVcenter,
RESOURCE,
Locale.tr("Vcenter information"),false);
var renameTrHTML = RenameTr.html(TAB_ID, RESOURCE, this.element.NAME);
var clusterTrHTML = ClusterTr.html(this.element.CLUSTER);
var permissionsTableHTML = PermissionsTable.html(TAB_ID, RESOURCE, this.element);
@ -106,6 +115,7 @@ define(function(require) {
'renameTrHTML': renameTrHTML,
'clusterTrHTML': clusterTrHTML,
'templateTableHTML': templateTableHTML,
'templateTableVcenterHTML': templateTableVcenterHTML,
'permissionsTableHTML': permissionsTableHTML,
'cpuBars': cpuBars,
'memoryBars': memoryBars,
@ -137,6 +147,10 @@ define(function(require) {
TemplateTable.setup(this.strippedTemplate, RESOURCE, this.element.ID, context, this.unshownTemplate);
PermissionsTable.setup(TAB_ID, RESOURCE, this.element, context);
if($.isEmptyObject(this.strippedTemplateVcenter)){
$('.vcenter', context).hide();
}
//.off and .on prevent multiple clicks events
$(document).off('click', '.update_reserved_hosts').on("click", '.update_reserved', function(){
var reservedCPU = parseInt(document.getElementById('change_bar_cpu_hosts').value);

View File

@ -107,6 +107,9 @@
</table>
</div>
</div>
<div class="row vcenter">
<div class="large-9 columns">{{{templateTableVcenterHTML}}}</div>
</div>
<div class="row">
<div class="large-9 columns">{{{templateTableHTML}}}</div>
</div>

View File

@ -205,8 +205,8 @@ define(function(require) {
});
// Custom Adapter Type
var custom_attrs = ["adapter_type",
"disk_type",
var custom_attrs = ["vcenter_adapter_type",
"vcenter_disk_type",
"img_dev_prefix",
"img_driver"];
@ -361,10 +361,10 @@ define(function(require) {
}
document.getElementById( 'percent_progress' ).textContent = 'Completed: ' + (this.progress().toFixed(3)*100).toFixed(1) +'%';
$('div.progressbar', $('div[id="' + fileName + 'progressBar"]')).html(
ProgressBar.html(this.progress(), 1, fileName) );
ProgressBar.html(this.progress(), 1, fileName) );
});
}
return false;
}
@ -414,12 +414,12 @@ define(function(require) {
if (target)
img_json["TARGET"] = target;
var adapter_type = WizardFields.retrieveInput($('#adapter_type', context));
if (adapter_type) {
if (adapter_type == "custom") {
adapter_type = WizardFields.retrieveInput($('#custom_adapter_type', context));
var vcenter_adapter_type = WizardFields.retrieveInput($('#vcenter_adapter_type', context));
if (vcenter_adapter_type) {
if (vcenter_adapter_type == "custom") {
vcenter_adapter_type = WizardFields.retrieveInput($('#custom_vcenter_adapter_type', context));
}
img_json["ADAPTER_TYPE"] = adapter_type;
img_json["VCENTER_ADAPTER_TYPE"] = vcenter_adapter_type;
}
switch ($('#src_path_select input:checked', context).val()){
@ -431,12 +431,12 @@ define(function(require) {
size = WizardFields.retrieveInput($('#img_size', context));
if (size) img_json["SIZE"] = size;
var disk_type = WizardFields.retrieveInput($('#disk_type', context));
if (disk_type) {
if (disk_type == "custom"){
disk_type = WizardFields.retrieveInput($('#custom_disk_type', context));
var vcenter_disk_type = WizardFields.retrieveInput($('#vcenter_disk_type', context));
if (vcenter_disk_type) {
if (vcenter_disk_type == "custom"){
vcenter_disk_type = WizardFields.retrieveInput($('#custom_vcenter_disk_type', context));
}
img_json["DISK_TYPE"] = disk_type;
img_json["VCENTER_DISK_TYPE"] = vcenter_disk_type;
}
break;

View File

@ -104,10 +104,10 @@
<input type="text" name="img_size" id="img_size" />
</div>
<div class="medium-6 columns only_vcenter">
<label for="disk_type">
<label for="vcenter_disk_type">
{{tr "Disk provisioning type"}}
</label>
<select name="disk_type" id="disk_type">
<select name="vcenter_disk_type" id="vcenter_disk_type">
<option value="" selected="selected"></option>
<option value="thin">Thin</option>
<option value="thick">Thick</option>
@ -115,7 +115,7 @@
<option value="custom">custom</option>
</select>
<div>
<input type="text" id="custom_disk_type" name="custom_disk_type" />
<input type="text" id="custom_vcenter_disk_type" name="custom_vcenter_disk_type" />
</div>
</div>
</div>
@ -126,10 +126,10 @@
<div class="medium-6 columns">
<div class="row only_vcenter">
<div class="large-12 columns">
<label for="adapter_type">
<label for="vcenter_adapter_type">
{{tr "Bus adapter controller"}}
</label>
<select name="adapter_type" id="adapter_type">
<select name="vcenter_adapter_type" id="vcenter_adapter_type">
<option value="" selected="selected"></option>
<option value="lsiLogic">lsiLogic</option>
<option value="ide">ide</option>
@ -137,7 +137,7 @@
<option value="custom">custom</option>
</select>
<div>
<input type="text" id="custom_adapter_type" name="custom_adapter_type" />
<input type="text" id="custom_vcenter_adapter_type" name="custom_vcenter_adapter_type" />
</div>
</div>
</div>

View File

@ -31,19 +31,21 @@
<label for="vcenter_password">{{tr "Password"}}</label>
<input type="password" name="vcenter_password" id="vcenter_password" required />
</div>
<div class="large-2 medium-6 columns">
<div class="large-5 medium-6 columns">
<label>{{tr "Datastore"}}</label>
<div id="vcenter_datastore_wrapper"/>
</div>
<div class="large-3 medium-6 columns">
<label>&nbsp;</label>
<button type="submit" class="button radius">
{{tr "Get Images"}}
</button>
</div>
<div class="row">
<div style="float:right">
<label>&nbsp;</label>
<button type="submit" class="button radius">
{{tr "Get Images"}}
</button>
</div>
</div>
</form>
<div class="row collapse">
{{{vCenterImagesHTML}}}
</div>
</div>
</div>

View File

@ -165,14 +165,8 @@ define(function(require) {
// part of an array, and it is filled in different tabs, the $.extend deep
// merge can't work. We define an auxiliary attribute for it.
if (templateJSON["VCENTER_PUBLIC_CLOUD"]) {
if (templateJSON['PUBLIC_CLOUD'] == undefined) {
templateJSON['PUBLIC_CLOUD'] = [];
}
templateJSON['PUBLIC_CLOUD'].push(templateJSON["VCENTER_PUBLIC_CLOUD"]);
delete templateJSON["VCENTER_PUBLIC_CLOUD"];
if (templateJSON['PUBLIC_CLOUD'] == undefined) {
templateJSON['PUBLIC_CLOUD'] = [];
}
// PCI with TYPE=NIC is not defined in the 'other' tab. Because it is

View File

@ -191,8 +191,8 @@ define(function(require) {
var customization = WizardFields.retrieveInput($('input.vcenter_customizations_value', context));
if (customization) {
templateJSON["VCENTER_PUBLIC_CLOUD"] = {
CUSTOMIZATION_SPEC : customization
templateJSON["USER_TEMPLATE"] = {
VCENTER_CUSTOMIZATION_SPEC : customization
};
}
} else {
@ -259,8 +259,8 @@ define(function(require) {
if(this["TYPE"] == "vcenter"){
$("input#context_type_vcenter", context).click();
if(this["CUSTOMIZATION_SPEC"]){
WizardFields.fillInput($('input.vcenter_customizations_value', context), this["CUSTOMIZATION_SPEC"]);
if(this["VCENTER_CUSTOMIZATION_SPEC"]){
WizardFields.fillInput($('input.vcenter_customizations_value', context), this["VCENTER_CUSTOMIZATION_SPEC"]);
} else if(userInputsJSON || contextJSON) {
$("input#context_type_opennebula", context).click();
}
@ -286,6 +286,7 @@ define(function(require) {
}
if (contextJSON) {
$("input#context_type_opennebula", context).click();
var file_ds_regexp = /FILE\[IMAGE=(\w+?)\W+IMAGE_UNAME=(\w+?)\]/g;
var net_regexp = /^NETWORK$/;
var ssh_regexp = /^SSH_PUBLIC_KEY$/;
@ -332,7 +333,7 @@ define(function(require) {
that.contextFilesTable.selectResourceTableSelect(selectedResources);
}
});
} else if ("START_SCRIPT_BASE64" == key) {
$(".ENCODE_START_SCRIPT", context).prop('checked', 'checked');
$(".START_SCRIPT", context).val(atob(value));

View File

@ -160,9 +160,13 @@ define(function(require) {
$(".only_" + this.value).show();
if (this.value == "vcenter"){
$("#vcenter_template_uuid", context).attr("required", "");
$("#vcenter_template_ref", context).attr("required", "");
$("#vcenter_instance_id", context).attr("required", "");
$("#vcenter_ccr_ref", context).attr("required", "");
} else {
$("#vcenter_template_uuid", context).removeAttr("required");
$("#vcenter_template_ref", context).removeAttr("required");
$("#vcenter_instance_id", context).removeAttr("required");
$("#vcenter_ccr_ref", context).removeAttr("required");
}
// There is another listener in context.js setup
});
@ -187,15 +191,13 @@ define(function(require) {
if(templateJSON["MEMORY_UNIT_COST"] == "GB")
templateJSON["MEMORY_COST"] = templateJSON["MEMORY_COST"] * 1024;
if (templateJSON["HYPERVISOR"] == 'vcenter') {
templateJSON["VCENTER_PUBLIC_CLOUD"] = {
'TYPE': 'vcenter',
'VM_TEMPLATE': WizardFields.retrieveInput($("#vcenter_template_uuid", context))
};
templateJSON["VCENTER_TEMPLATE_REF"] = WizardFields.retrieveInput($("#vcenter_template_ref", context));
templateJSON["VCENTER_CCR_REF"] = WizardFields.retrieveInput($("#vcenter_ccr_ref", context));
templateJSON["VCENTER_INSTANCE_ID"] = WizardFields.retrieveInput($("#vcenter_instance_id", context));
if (Config.isFeatureEnabled("vcenter_deploy_folder")) {
templateJSON["DEPLOY_FOLDER"] = WizardFields.retrieveInput($("#vcenter_deploy_folder", context))
if (Config.isFeatureEnabled("vcenter_vm_folder")) {
templateJSON["VCENTER_VM_FOLDER"] = WizardFields.retrieveInput($("#vcenter_vm_folder", context))
}
templateJSON["KEEP_DISKS_ON_DONE"] = $("#KEEP_DISKS", context).is(':checked')?"YES":"NO"
}
var sunstone_template = {};
@ -210,27 +212,6 @@ define(function(require) {
var userInputs = {};
// Retrieve Datastore Attribute
var dsInput = $(".vcenter_datastore_input", context);
if (dsInput.length > 0) {
var dsModify = WizardFields.retrieveInput($('.modify_datastore', dsInput));
var dsInitial = WizardFields.retrieveInput($('.initial_datastore', dsInput));
var dsParams = WizardFields.retrieveInput($('.available_datastores', dsInput));
if (dsModify === 'fixed' && dsInitial !== '') {
templateJSON['VCENTER_DATASTORE'] = dsInitial;
} else if (dsModify === 'list' && dsParams !== '') {
var dsUserInputsStr = UserInputs.marshall({
type: 'list',
description: Locale.tr("Which datastore you want this VM to run on?"),
initial: dsInitial,
params: dsParams
});
userInputs['VCENTER_DATASTORE'] = dsUserInputsStr;
}
}
// Retrieve Resource Pool Attribute
var rpInput = $(".vcenter_rp_input", context);
if (rpInput.length > 0) {
@ -239,7 +220,7 @@ define(function(require) {
var rpParams = WizardFields.retrieveInput($('.available_rps', rpInput));
if (rpModify === 'fixed' && rpInitial !== '') {
templateJSON['RESOURCE_POOL'] = rpInitial;
templateJSON['VCENTER_RESOURCE_POOL'] = rpInitial;
} else if (rpModify === 'list' && rpParams !== '') {
var rpUserInputs = UserInputs.marshall({
type: 'list',
@ -248,7 +229,7 @@ define(function(require) {
params: WizardFields.retrieveInput($('.available_rps', rpInput))
});
userInputs['RESOURCE_POOL'] = rpUserInputs;
userInputs['VCENTER_RESOURCE_POOL'] = rpUserInputs;
}
}
@ -275,24 +256,17 @@ define(function(require) {
delete sunstone_template["NETWORK_SELECT"];
}
if (templateJSON["HYPERVISOR"] == 'vcenter' &&
templateJSON["KEEP_DISKS_ON_DONE"] &&
templateJSON["KEEP_DISKS_ON_DONE"].toLowerCase() == "yes" ) {
$("#KEEP_DISKS", context).attr("checked", "checked");
}
delete templateJSON["KEEP_DISKS_ON_DONE"];
if (Config.isFeatureEnabled("vcenter_deploy_folder")) {
if (Config.isFeatureEnabled("vcenter_vm_folder")) {
if (templateJSON["HYPERVISOR"] == 'vcenter' &&
templateJSON["DEPLOY_FOLDER"]) {
WizardFields.fillInput($("#vcenter_deploy_folder", context), templateJSON["DEPLOY_FOLDER"]);
templateJSON["VCENTER_VM_FOLDER"]) {
WizardFields.fillInput($("#vcenter_vm_folder", context), templateJSON["VCENTER_VM_FOLDER"]);
}
} else {
$(".vcenter_deploy_folder_input", context).remove();
$(".vcenter_vm_folder_input", context).remove();
}
delete templateJSON["DEPLOY_FOLDER"];
delete templateJSON["VCENTER_VM_FOLDER"];
if (templateJSON["HYPERVISOR"] == 'vcenter') {
var publicClouds = templateJSON["PUBLIC_CLOUD"];
@ -304,7 +278,7 @@ define(function(require) {
$.each(publicClouds, function(){
if(this["TYPE"] == "vcenter"){
WizardFields.fillInput($("#vcenter_template_uuid", context), this["VM_TEMPLATE"]);
WizardFields.fillInput($("#vcenter_template_ref", context), this["VCENTER_TEMPLATE_REF"]);
return false;
}
});
@ -317,37 +291,38 @@ define(function(require) {
}
if (templateJSON["USER_INPUTS"]) {
if (templateJSON["USER_INPUTS"]["VCENTER_DATASTORE"]) {
var ds = UserInputs.unmarshall(templateJSON["USER_INPUTS"]["VCENTER_DATASTORE"]);
$('.modify_datastore', context).val('list');
$('.initial_datastore', context).val(ds.initial);
$('.available_datastores', context).val(ds.params);
delete templateJSON["USER_INPUTS"]["VCENTER_DATASTORE"];
}
if (templateJSON["USER_INPUTS"]["RESOURCE_POOL"]) {
var rp = UserInputs.unmarshall(templateJSON["USER_INPUTS"]["RESOURCE_POOL"]);
if (templateJSON["USER_INPUTS"]["VCENTER_RESOURCE_POOL"]) {
var rp = UserInputs.unmarshall(templateJSON["USER_INPUTS"]["VCENTER_RESOURCE_POOL"]);
$('.modify_rp', context).val('list');
$('.initial_rp', context).val(rp.initial);
$('.available_rps', context).val(rp.params);
delete templateJSON["USER_INPUTS"]["RESOURCE_POOL"];
delete templateJSON["USER_INPUTS"]["VCENTER_RESOURCE_POOL"];
}
}
if (templateJSON["VCENTER_DATASTORE"]) {
$('.modify_datastore', context).val('fixed');
WizardFields.fillInput($('.initial_datastore', context), templateJSON["VCENTER_DATASTORE"]);
delete templateJSON["VCENTER_DATASTORE"];
if (templateJSON["VCENTER_RESOURCE_POOL"]) {
$('.modify_rp', context).val('fixed');
WizardFields.fillInput($('.initial_rp', context), templateJSON["VCENTER_RESOURCE_POOL"]);
delete templateJSON["VCENTER_RESOURCE_POOL"];
}
if (templateJSON["RESOURCE_POOL"]) {
$('.modify_rp', context).val('fixed');
WizardFields.fillInput($('.initial_rp', context), templateJSON["RESOURCE_POOL"]);
if(templateJSON["VCENTER_TEMPLATE_REF"]){
WizardFields.fillInput($("#vcenter_template_ref", context), templateJSON["VCENTER_TEMPLATE_REF"]);
delete templateJSON["VCENTER_TEMPLATE_REF"];
}
delete templateJSON["RESOURCE_POOL"];
if(templateJSON["VCENTER_CCR_REF"]){
WizardFields.fillInput($("#vcenter_ccr_ref", context), templateJSON["VCENTER_CCR_REF"]);
delete templateJSON["VCENTER_CCR_REF"];
}
if(templateJSON["VCENTER_INSTANCE_ID"]){
WizardFields.fillInput($("#vcenter_instance_id", context), templateJSON["VCENTER_INSTANCE_ID"]);
delete templateJSON["VCENTER_INSTANCE_ID"];
}
CapacityCreate.fill($("div.capacityCreate", context), templateJSON);

View File

@ -56,77 +56,56 @@
<legend>{{tr "vCenter"}}</legend>
<div class="row hypervisor only_vcenter" style="display: none;">
<div class="medium-6 columns">
<label for="vcenter_template_uuid">
{{tr "vCenter Template UUID"}}
<label for="vcenter_template_ref">
{{tr "vCenter Template Ref"}}
</label>
<input type="text" id="vcenter_template_uuid"/>
</div>
<div class="medium-6 columns">
<div class="row">
<div class="large-6 columns">
<label>
{{tr "VM disks"}}
{{{tip (tr "If selected, VM disks won't be deleted after the VM finishes its lifecycle.")}}}
</label>
<input type="checkbox" id="KEEP_DISKS" name="KEEP_DISKS"/>
<label for="KEEP_DISKS">{{tr "Keep"}}</label>
</div>
</div>
<input type="text" id="vcenter_template_ref"/>
</div>
</div>
<div class="vcenter_datastore_input row">
<div class="large-3 medium-6 columns">
<label>
{{tr "Default Datastore"}}
<input type="text" class="initial_datastore" value="{{datastore.initial}}"/>
<div class="row hypervisor only_vcenter" style="display: none;">
<div class="medium-6 columns">
<label for="vcenter_ccr_ref">
{{tr "vCenter Cluster Ref"}}
</label>
<input type="text" id="vcenter_ccr_ref"/>
</div>
<div class="large-3 medium-6 columns">
<label>
{{tr "Type"}}
<select class="modify_datastore">
<option value="fixed">{{tr "Fixed"}}</option>
<option value="list">{{tr "Provide on instantiation"}}</option>
</select>
</label>
</div>
<div class="large-6 medium-12 columns">
<label>
{{tr "Available Datastores"}}
<input type="text" class="available_datastores" value="{{datastore.params}}"/>
<div class="medium-6 columns">
<label for="vcenter_instance_id">
{{tr "vCenter Instance ID"}}
</label>
<input type="text" id="vcenter_instance_id"/>
</div>
</div>
<div class="vcenter_rp_input row">
<div class="large-3 medium-6 columns">
<label>
{{tr "Default Resource Pool"}}
<input type="text" class="initial_rp" value="{{resourcePool.initial}}"/>
</label>
</div>
<div class="large-3 medium-6 columns">
<label>
{{tr "Type"}}
<select class="modify_rp">
<option value="fixed">{{tr "Fixed"}}</option>
<option value="list">{{tr "Provide on instantiation"}}</option>
</select>
</label>
</div>
<div class="large-6 medium-12 columns">
<label>
{{tr "Available Resource Pools"}}
<input type="text" class="available_rps" value="{{resourcePool.params}}"/>
</label>
</div>
</div>
<div class="vcenter_deploy_folder_input row">
<div class="large-3 medium-6 columns">
<label>
{{tr "Default Resource Pool"}}
<input type="text" class="initial_rp" value="{{resourcePool.initial}}"/>
</label>
</div>
<div class="large-3 medium-6 columns">
<label>
{{tr "Type"}}
<select class="modify_rp">
<option value="fixed">{{tr "Fixed"}}</option>
<option value="list">{{tr "Provide on instantiation"}}</option>
</select>
</label>
</div>
<div class="large-6 medium-12 columns">
<label>
{{tr "Available Resource Pools"}}
<input type="text" class="available_rps" value="{{resourcePool.params}}"/>
</label>
</div>
</div>
<div class="vcenter_vm_folder_input row">
<div class="medium-6 columns">
<label for="vcenter_deploy_folder">
{{tr "Deployment Folder"}}
{{{tip (tr "If specified, the the VMs and Template folder path where the VM will be created inside the data center. The path is delimited by slashes e.g /Management/VMs. If no path is set the VM will be deployed in the same folder where the template exists.")}}}
<label for="vcenter_vm_folder">
{{tr "vCenter VM Folder"}}
{{{tip (tr "If specified, the the VMs and Template folder path where the VM will be created inside the data center. The path is delimited by slashes e.g /Management/VMs. If no path is set the VM will be placed in the same folder where the template is located.")}}}
</label>
<input type="text" id="vcenter_deploy_folder"/>
<input type="text" id="vcenter_vm_folder"/>
</div>
</div>
</fieldset>

View File

@ -47,6 +47,14 @@
<input type="text" wizard_field="NETWORK_UNAME" id="NETWORK_UNAME" name="NETWORK_UNAME"/>
</div>
</div>
<div class="row" style="display: none">
<div class="medium-6 columns">
<label>
{{tr "OPEN NEBULA MANAGEMENT"}}
</label>
<input type="text" wizard_field="OPENNEBULA_MANAGED" id="OPENNEBULA_MANAGED" name="OPENNEBULA_MANAGED"/>
</div>
</div>
</fieldset>
<fieldset>
<legend>{{tr "Override Network Values IPv4"}}</legend>

View File

@ -74,19 +74,25 @@
<label for="TYPE">
{{tr "Disk type"}}
</label>
<select wizard_field="TYPE" id="TYPE" name="type">
<select class="hypervisor only_kvm" wizard_field="TYPE" id="TYPE" name="type">
<option value="fs">{{tr "FS"}}</option>
<option value="swap">{{tr "Swap"}}</option>
</select>
<select class="hypervisor only_vcenter" style="display: none" wizard_field="TYPE" id="TYPE" name="type">
<option value="fs">{{tr "FS"}}</option>
</select>
</div>
<div class="medium-6 columns">
<label for="FORMAT">
{{tr "Filesystem format"}}
</label>
<select wizard_field="FORMAT" name="format" id="FORMAT">
<select class="hypervisor only_kvm" wizard_field="FORMAT" name="format" id="FORMAT">
<option value="raw" selected="selected">raw</option>
<option value="qcow2">qcow2</option>
</select>
<select class="hypervisor only_vcenter" style="display: none" wizard_field="FORMAT" name="format" id="FORMAT">
<option value="raw" selected="selected">raw</option>
</select>
</div>
</div>
<br>

View File

@ -1,5 +1,5 @@
<div class="row">
<div class="medium-6 columns">
<div class="medium-6 columns hypervisor only_kvm">
<label for="TARGET">
{{tr "Target device"}}
{{{tip (tr "Device to map image disk. If set, it will overwrite the default device mapping.")}}}
@ -20,7 +20,7 @@
</div>
</div>
<div class="row">
<div class="medium-6 columns">
<div class="medium-6 columns hypervisor only_kvm">
<label>
{{tr "BUS"}}
<select id="disk_dev_prefix" name="disk_dev_prefix">
@ -35,7 +35,7 @@
<input type="text" id="custom_disk_dev_prefix" name="custom_disk_dev_prefix" />
</div>
</div>
<div class="medium-6 columns">
<div class="medium-6 columns hypervisor only_kvm">
<label for="READONLY">
{{tr "Read-only"}}
</label>
@ -45,6 +45,30 @@
<option value="no">{{tr "no"}}</option>
</select>
</div>
<div class="medium-6 columns hypervisor only_vcenter">
<label for="vcenter_adapter_type">
{{tr "Bus adapter controller"}}
</label>
<select wizard_field="VCENTER_ADAPTER_TYPE" name="vcenter_adapter_type" id="vcenter_adapter_type">
<option value="" selected="selected"></option>
<option value="lsiLogic">lsiLogic</option>
<option value="ide">ide</option>
<option value="busLogic">busLogic</option>
<option value="custom">custom</option>
</select>
</div>
<div class="medium-6 columns only_vcenter">
<label for="vcenter_disk_type">
{{tr "Disk provisioning type"}}
</label>
<select wizard_field="VCENTER_DISK_TYPE" name="vcenter_disk_type" id="vcenter_disk_type">
<option value="" selected="selected"></option>
<option value="thin">Thin</option>
<option value="thick">Thick</option>
<option value="eagerZeroedThick">Eager Zeroed Thick</option>
<option value="custom">custom</option>
</select>
</div>
</div>
<div class="row vm_param">
<div class="medium-6 columns hypervisor only_kvm">

View File

@ -32,7 +32,7 @@ define(function(require) {
var DisksResize = require('utils/disks-resize');
var NicsSection = require('utils/nics-section');
var VMGroupSection = require('utils/vmgroup-section');
var DeployFolder = require('utils/deploy-folder');
var VcenterVMFolder = require('utils/vcenter-vm-folder');
var CapacityInputs = require('tabs/templates-tab/form-panels/create/wizard-tabs/general/capacity-inputs');
var Config = require('sunstone-config');
@ -214,10 +214,10 @@ define(function(require) {
tmp_json.PCI = pcis;
}
if (Config.isFeatureEnabled("vcenter_deploy_folder")){
if(!$.isEmptyObject(original_tmpl.TEMPLATE.PUBLIC_CLOUD.TYPE) &&
original_tmpl.TEMPLATE.PUBLIC_CLOUD.TYPE === 'vcenter'){
$.extend(tmp_json, DeployFolder.retrieveChanges($(".deployFolderContext" + template_id)));
if (Config.isFeatureEnabled("vcenter_vm_folder")){
if(!$.isEmptyObject(original_tmpl.TEMPLATE.HYPERVISOR) &&
original_tmpl.TEMPLATE.HYPERVISOR === 'vcenter'){
$.extend(tmp_json, VcenterVMFolder.retrieveChanges($(".vcenterVMFolderContext" + template_id)));
}
}
@ -279,9 +279,9 @@ define(function(require) {
VMGroupSection.insert(template_json,
$(".vmgroupContext"+ template_json.VMTEMPLATE.ID, context));
deployFolderContext = $(".deployFolderContext" + template_json.VMTEMPLATE.ID, context);
DeployFolder.setup(deployFolderContext);
DeployFolder.fill(deployFolderContext, template_json.VMTEMPLATE);
vcenterVMFolderContext = $(".vcenterVMFolderContext" + template_json.VMTEMPLATE.ID, context);
VcenterVMFolder.setup(vcenterVMFolderContext);
VcenterVMFolder.fill(vcenterVMFolderContext, template_json.VMTEMPLATE);
var inputs_div = $(".template_user_inputs" + template_json.VMTEMPLATE.ID, context);

View File

@ -68,8 +68,8 @@
</div>
</div>
<div class="row">
<div class="medium-6 small-12 columns deployFolderContext{{element.ID}}">
<div class="provision_deploy_folder_selector">
<div class="medium-6 small-12 columns vcenterVMFolderContext{{element.ID}}">
<div class="provision_vcenter_vm_folder_selector">
</div>
</div>
</div>

View File

@ -91,17 +91,24 @@ define(function(require) {
// Get rid of the unwanted (for show) SCHED_* keys
var that = this;
var strippedTemplate = {};
var strippedTemplateVcenter = {};
var unshownValues = {};
$.each(that.element.USER_TEMPLATE, function(key, value) {
if (key.match(/^SCHED_*/) || key == "USER_INPUTS") {
unshownValues[key] = value;
} else {
}
else if (key.match(/^VCENTER_*/)){
strippedTemplateVcenter[key] = value;
}
else {
strippedTemplate[key] = value;
}
});
var templateTableHTML = TemplateTable.html(strippedTemplate, RESOURCE, Locale.tr("Attributes"));
var templateTableHTML = TemplateTable.html(strippedTemplate, RESOURCE, Locale.tr("Attributes"), true);
var templateTableVcenterHTML = TemplateTable.html(strippedTemplateVcenter, RESOURCE, Locale.tr("Vcenter information"), false);
var monitoring = $.extend({}, this.element.MONITORING);
delete monitoring.CPU;
@ -127,6 +134,7 @@ define(function(require) {
'IP': IP,
'resched': resched,
'permissionsTableHTML': permissionsTableHTML,
'templateTableVcenterHTML': templateTableVcenterHTML,
'templateTableHTML': templateTableHTML,
'monitoringTableContentHTML': monitoringTableContentHTML,
'vrouterHTML': vrouterHTML
@ -139,14 +147,22 @@ define(function(require) {
// Get rid of the unwanted (for show) SCHED_* keys
var that = this;
var strippedTemplate = {};
var strippedTemplateVcenter = {};
var unshownValues = {};
$.each(that.element.USER_TEMPLATE, function(key, value) {
if (!key.match(/^SCHED_*/)) {
strippedTemplate[key] = value;
} else {
$.each(that.element.USER_TEMPLATE, function(key, value) {
if (key.match(/^SCHED_*/) || key == "USER_INPUTS") {
unshownValues[key] = value;
}
})
else if (key.match(/^VCENTER_*/)){
strippedTemplateVcenter[key] = value;
}
else {
strippedTemplate[key] = value;
}
});
if($.isEmptyObject(strippedTemplateVcenter)){
$('.vcenter', context).hide();
}
TemplateTable.setup(strippedTemplate, RESOURCE, this.element.ID, context, unshownValues);
}

View File

@ -81,6 +81,9 @@
</div>
<div class="large-6 columns">{{{permissionsTableHTML}}}</div>
</div>
<div class="row vcenter">
<div class="large-9 columns">{{{templateTableVcenterHTML}}}</div>
</div>
<div class="row">
<div class="large-9 columns">{{{templateTableHTML}}}</div>
</div>

View File

@ -436,6 +436,10 @@ define(function(require) {
context.on('click', '#attach_disk', function() {
var dialog = Sunstone.getDialog(ATTACH_DISK_DIALOG_ID);
dialog.setElement(that.element);
if(that.element.USER_TEMPLATE.HYPERVISOR && that.element.USER_TEMPLATE.HYPERVISOR == 'vcenter'){
$('.hypervisor.only_kvm').hide();
$('.hypervisor.only_vcenter').show();
}
dialog.show();
return false;
});
@ -445,7 +449,6 @@ define(function(require) {
context.off('click', '.detachdisk');
context.on('click', '.detachdisk', function() {
var disk_id = $(this).parents('tr').attr('disk_id');
Sunstone.getDialog(CONFIRM_DIALOG_ID).setParams({
//header :
headerTabId: TAB_ID,
@ -596,7 +599,7 @@ define(function(require) {
return false;
});
}
Tree.setup(context);
}

View File

@ -126,7 +126,8 @@ define(function(require) {
$("div.mode_param [wizard_field]", context).prop('wizard_field_disabled', true);
$('input#vn_mad', context).removeAttr('required');
$('input#vn_mad', context).removeAttr('value');
$('#vcenter_switch_name', context).removeAttr('required');
switch ($(this).val()) {
case "dummy":
$("div.mode_param.dummy", context).show();
@ -164,6 +165,16 @@ define(function(require) {
$('input#bridge', context).attr('required', '');
break;
case "vcenter":
$("div.mode_param.vcenter", context).show();
$("div.mode_param.vcenter [wizard_field]", context).prop('wizard_field_disabled', false);
$('input#bridge', context).attr('value', $('#name', context).val());
$('#vcenter_switch_name', context).attr('required', '');
$('input#vn_mad', context).attr('required', '');
$('input#vn_mad', context).attr('value', 'vcenter');
$('#div_vn_mad', context).hide();
break;
case "custom":
$("div.mode_param.custom", context).show();
$("div.mode_param.custom [wizard_field]", context).prop('wizard_field_disabled', false);

View File

@ -87,10 +87,11 @@
<option value="802.1Q">{{tr "802.1Q"}}</option>
<option value="vxlan">{{tr "VXLAN"}}</option>
<option value="ovswitch">{{tr "Open vSwitch"}}</option>
<option value="vcenter">{{tr "vCenter"}}</option>
<option value="custom">{{tr "Custom"}}</option>
</select>
</div>
<div class="large-3 medium-6 columns mode_param custom">
<div class="large-3 medium-6 columns mode_param vcenter custom" id="div_vn_mad">
<label for="vn_mad">
{{tr "Network Driver (VN_MAD)"}}
</label>
@ -115,6 +116,9 @@
<div class="network_mode_description" value="ovswitch">
{{tr "Open vSwitch, restrict network access with Open vSwitch Virtual Switch. Security Groups are not applied."}}
</div>
<div class="network_mode_description" value="vcenter">
{{tr "vSphere standard switches or distributed switches with port groups. Security Groups are not applied."}}
</div>
<div class="network_mode_description" value="custom">
{{tr "Custom, use a custom virtual network driver."}}
</div>
@ -139,7 +143,7 @@
</div>
</div>
<div class="row">
<div class="medium-3 columns left mode_param 8021Q vxlan ovswitch custom">
<div class="medium-3 columns left mode_param 8021Q vxlan ovswitch vcenter custom">
<label>
{{tr "VLAN ID"}}
<select wizard_field="AUTOMATIC_VLAN_ID">
@ -159,13 +163,45 @@
</label>
<input type="text" wizard_field="PHYDEV" name="phydev" id="phydev" />
</div>
<div class="medium-3 columns left mode_param 8021Q vxlan custom">
<div class="medium-3 columns left mode_param vcenter">
<label for="phydev">
{{tr "Physical device"}}
<span class="tip">
{{tr "Physical NIC names for uplinks. Use comma to separate values (e.g vmnic1,vmnic2)"}}
</span>
</label>
<input type="text" wizard_field="PHYDEV" name="phydev" id="phydev" />
</div>
<div class="medium-3 columns left mode_param 8021Q vxlan vcenter custom">
<label for="mtu">
{{tr "MTU of the interface"}}
</label>
<input type="text" wizard_field="MTU" name="mtu" id="mtu" />
</div>
</div>
<div class="row">
<div class="medium-3 columns left mode_param vcenter">
<label for="vcenter_switch_name">
{{tr "Switch name"}}
</label>
<input type="text" wizard_field="VCENTER_SWITCH_NAME" name="vcenter_switch_name" id="vcenter_switch_name" maxlength="32" />
</div>
<div class="medium-3 columns left mode_param vcenter">
<label>
{{tr "Number of ports"}}
</label>
<input type="number" wizard_field="VCENTER_SWITCH_NPORTS" name="vcenter_switch_nports" id="vcenter_switch_nports" />
</div>
<div class="medium-3 columns left mode_param vcenter">
<label>
{{tr "Port group type"}}
<select wizard_field="VCENTER_PORTGROUP_TYPE">
<option value="Port Group">{{tr "Port group"}}</option>
<option value="Distributed Port Group">{{tr "Distributed Port Group"}}</option>
</select>
</label>
</div>
</div>
</div>
<div class="tabs-panel" id="vnetCreateARTab">
<div class="row collapse" id="vnetCreateARTabCreate">

View File

@ -78,14 +78,22 @@ define(function(require) {
"OUTBOUND_PEAK_BW",
"OUTBOUND_PEAK_KB" ];
var strippedTemplate = $.extend({}, this.element.TEMPLATE);
$.each(hiddenKeys, function(i, key){
delete strippedTemplate[key];
var strippedTemplate = {};
var strippedTemplateVcenter = {};
$.each(this.element.TEMPLATE, function(key, value) {
if (!($.inArray(key, hiddenKeys) > -1)) {
if (key.match(/^VCENTER_*/)){
strippedTemplateVcenter[key] = value;
}
else {
strippedTemplate[key] = value;
}
}
});
var templateTableHTML = TemplateTable.html(strippedTemplate, RESOURCE,
Locale.tr("Attributes"));
var templateTableVcenterHTML = TemplateTable.html(strippedTemplateVcenter, RESOURCE,
Locale.tr("Vcenter information"), false);
//====
// TODO: move to util?
@ -114,7 +122,8 @@ define(function(require) {
'renameTrHTML': renameTrHTML,
'reservationTrHTML': reservationTrHTML,
'permissionsTableHTML': permissionsTableHTML,
'templateTableHTML': templateTableHTML
'templateTableHTML': templateTableHTML,
'templateTableVcenterHTML': templateTableVcenterHTML
});
}
@ -134,20 +143,25 @@ define(function(require) {
"OUTBOUND_PEAK_BW",
"OUTBOUND_PEAK_KB" ];
var strippedTemplate = $.extend({}, this.element.TEMPLATE);
$.each(hiddenKeys, function(i, key){
delete strippedTemplate[key];
});
var hiddenValues = {};
$.each(hiddenKeys, function(i, key){
if (that.element.TEMPLATE[key] != undefined){
hiddenValues[key] = that.element.TEMPLATE[key];
var strippedTemplate = {};
var strippedTemplateVcenter = {};
$.each(that.element.TEMPLATE, function(key, value) {
if ($.inArray(key, hiddenKeys) > -1) {
hiddenValues[key] = value;
}
if (key.match(/^VCENTER_*/)){
strippedTemplateVcenter[key] = value;
}
else {
strippedTemplate[key] = value;
}
});
if($.isEmptyObject(strippedTemplateVcenter)){
$('.vcenter', context).hide();
}
TemplateTable.setup(strippedTemplate, RESOURCE, this.element.ID, context, hiddenValues);
//===

View File

@ -96,6 +96,11 @@
</table>
</div>
</div>
<div class="row vcenter">
<div class="large-9 columns">
{{{templateTableVcenterHTML}}}
</div>
</div>
<div class="row">
<div class="large-9 columns">
{{{templateTableHTML}}}

View File

@ -31,7 +31,7 @@ define(function(require) {
@param {String} tableName Header of the table (i.e: Locale.tr("Attributes"))
@returns {String} HTML table
*/
function _html(templateJSON, resourceType, tableName) {
function _html(templateJSON, resourceType, tableName, modify=true) {
var str =
'<table id="' + resourceType.toLowerCase() + '_template_table" class="dataTable configuration_attrs">\
<thead>\
@ -41,13 +41,15 @@ define(function(require) {
'</th>\
</tr>\
</thead>' +
fromJSONtoHTMLTable(templateJSON, resourceType) +
'<tr>\
fromJSONtoHTMLTable(templateJSON, resourceType, undefined, undefined,modify);
if (modify) {
str += '<tr>\
<td class="key_td"><input type="text" name="new_key" id="new_key" /></td>\
<td class="value_td"><textarea type="text" name="new_value" id="new_value"></textarea></td>\
<td class="text-right"><button type="button" id="button_add_value" class="button small secondary"><i class="fa fa-lg fa-plus-circle"></i></button>\</td>\
</tr>\
</table>'
</tr>';
}
str += '</table>'
return str;
}
@ -359,7 +361,7 @@ define(function(require) {
}
// Returns an HTML string with the json keys and values
function fromJSONtoHTMLTable(templateJSON, resourceType, vectorial, ocurrence) {
function fromJSONtoHTMLTable(templateJSON, resourceType, vectorial, ocurrence, modify) {
var str = ""
if (!templateJSON) { return "Not defined";}
var field = null;
@ -370,14 +372,15 @@ define(function(require) {
templateJSON[field],
resourceType,
vectorial,
ocurrence);
ocurrence,
modify);
}
return str;
}
// Helper for fromJSONtoHTMLTable function
function fromJSONtoHTMLRow(field, value, resourceType, vectorial_key, ocurrence) {
function fromJSONtoHTMLRow(field, value, resourceType, vectorial_key, ocurrence, modify) {
var str = "";
// value can be an array
@ -391,28 +394,32 @@ define(function(require) {
if (typeof current_value == 'object') {
str += '<tr id="' + resourceType.toLowerCase() + '_template_table_' + field + '">\
<td class="key_td key_vectorial_td">' + Locale.tr(field) + '</td>\
<td class="value_vectorial_td"></td>\
<td class="text-right nowrap">\
<span id="div_add_vectorial">\
<a id="div_add_vectorial_' + field + '" class="add_vectorial_a ocurrence_' + it + ' vectorial_key_' + field + '" href="#"><i class="fa fa-plus-sign"/></a>\
</span>&emsp;\
<span id="div_minus">\
<a id="div_minus_' + field + '" class="remove_vectorial_x ocurrence_' + it + '" href="#"><i class="fa fa-pencil-square-o"/><i class="fa fa-trash-o"/></a>\
</span>\
</td>\
</tr>'
<td class="value_vectorial_td"></td>';
if (modify) {
str += '<td class="text-right nowrap">\
<span id="div_add_vectorial">\
<a id="div_add_vectorial_' + field + '" class="add_vectorial_a ocurrence_' + it + ' vectorial_key_' + field + '" href="#"><i class="fa fa-plus-sign"/></a>\
</span>&emsp;\
<span id="div_minus">\
<a id="div_minus_' + field + '" class="remove_vectorial_x ocurrence_' + it + '" href="#"><i class="fa fa-pencil-square-o"/><i class="fa fa-trash-o"/></a>\
</span>\
</td>';
}
str += '</tr>'
str += fromJSONtoHTMLTable(current_value,
resourceType,
field,
it);
it,
modify);
} else {
// if it is a single value, create the row for this occurrence of the key
str += fromJSONtoHTMLRow(field,
current_value,
resourceType,
false,
it);
it,
modify);
}
}
} else // or value can be a string
@ -425,48 +432,55 @@ define(function(require) {
if (vectorial_key) {
str += '<tr>\
<td class="key_td key_vectorial_td">&emsp;&emsp;' + Locale.tr(field) + '</td>\
<td class="value_td value_vectorial_td value_td_input_' + field + ocurrence_str + ' vectorial_key_' + vectorial_key + '" id="value_td_input_' + field + '">' + TemplateUtils.htmlEncode(value) + '</td>\
<td class="text-right nowrap">\
<span id="div_edit_vectorial">\
<a id="div_edit_' + field + '" class="edit_e' + ocurrence_str + ' vectorial_key_' + vectorial_key + '" href="#"><i class="fa fa-pencil-square-o"/></a>\
</span>&emsp;\
<span id="div_minus_vectorial">\
<a id="div_minus_' + field + '" class="remove_x' + ocurrence_str + ' vectorial_key_' + vectorial_key + '" href="#"><i class="fa fa-trash-o"/></a>\
</span>\
</td>\
</tr>';
<td class="value_td value_vectorial_td value_td_input_' + field + ocurrence_str + ' vectorial_key_' + vectorial_key + '" id="value_td_input_' + field + '">' + TemplateUtils.htmlEncode(value) + '</td>';
if (modify) {
str += '<td class="text-right nowrap">\
<span id="div_edit_vectorial">\
<a id="div_edit_' + field + '" class="edit_e' + ocurrence_str + ' vectorial_key_' + vectorial_key + '" href="#"><i class="fa fa-pencil-square-o"/></a>\
</span>&emsp;\
<span id="div_minus_vectorial">\
<a id="div_minus_' + field + '" class="remove_x' + ocurrence_str + ' vectorial_key_' + vectorial_key + '" href="#"><i class="fa fa-trash-o"/></a>\
</span>\
</td>'
}
str += '</tr>';
} else {
// If it is not coming from a vectorial parent key, it can still be vectorial itself
if (typeof value == 'object') {
str += '<tr id="' + resourceType.toLowerCase() + '_template_table_' + field + '">\
<td class="key_td key_vectorial_td">' + Locale.tr(field) + '</td>\
<td class="value_vectorial_td"></td>\
<td class="text-right nowrap">\
<span id="div_add_vectorial">\
<a id="div_add_vectorial_' + field + '" class="add_vectorial_a' + ocurrence_str + ' vectorial_key_' + field + '" href="#"><i class="fa fa-plus-sign"/></a>\
</span>&emsp;\
<span id="div_minus">\
<a id="div_minus_' + field + '" class="remove_vectorial_x' + ocurrence_str + '" href="#"><i class="fa fa-trash-o"/></a>\
</span>\
</td>'
<td class="value_vectorial_td"></td>';
if (modify) {
str += '<td class="text-right nowrap">\
<span id="div_add_vectorial">\
<a id="div_add_vectorial_' + field + '" class="add_vectorial_a' + ocurrence_str + ' vectorial_key_' + field + '" href="#"><i class="fa fa-plus-sign"/></a>\
</span>&emsp;\
<span id="div_minus">\
<a id="div_minus_' + field + '" class="remove_vectorial_x' + ocurrence_str + '" href="#"><i class="fa fa-trash-o"/></a>\
</span>\
</td>'
}
str += fromJSONtoHTMLTable(value,
resourceType,
field,
ocurrence);
ocurrence,
modify);
} else // or, just a single value
{
str += '<tr>\
<td class="key_td">' + Locale.tr(field) + '</td>\
<td class="value_td" id="value_td_input_' + field + '">' + TemplateUtils.htmlEncode(value) + '</td>\
<td class="text-right nowrap">\
<span id="div_edit">\
<a id="div_edit_' + field + '" class="edit_e' + ocurrence_str + '" href="#"><i class="fa fa-pencil-square-o"/></a>\
</span>&emsp;\
<span id="div_minus">\
<a id="div_minus_' + field + '" class="remove_x' + ocurrence_str + '" href="#"><i class="fa fa-trash-o"/></a>\
</span>\
</td>\
</tr>';
<td class="value_td" id="value_td_input_' + field + '">' + TemplateUtils.htmlEncode(value) + '</td>';
if (modify) {
str += '<td class="text-right nowrap">\
<span id="div_edit">\
<a id="div_edit_' + field + '" class="edit_e' + ocurrence_str + '" href="#"><i class="fa fa-pencil-square-o"/></a>\
</span>&emsp;\
<span id="div_minus">\
<a id="div_minus_' + field + '" class="remove_x' + ocurrence_str + '" href="#"><i class="fa fa-trash-o"/></a>\
</span>\
</td>';
}
str += '</tr>';
}
}

View File

@ -21,7 +21,7 @@ define(function(require){
var OpenNebulaImage = require('opennebula/image');
var UserInputs = require('utils/user-inputs');
var WizardFields = require('utils/wizard-fields');
var DeployFolderTemplate = require('hbs!./deploy-folder/html');
var VcenterVMFolderTemplate = require('hbs!./vcenter-vm-folder/html');
return {
'setup': _setup,
@ -30,27 +30,27 @@ define(function(require){
};
function _setup(context) {
if (!Config.isFeatureEnabled("vcenter_deploy_folder")){
if (!Config.isFeatureEnabled("vcenter_vm_folder")){
$(context).remove();
}
}
function _fill(context, element) {
if (Config.isFeatureEnabled("vcenter_deploy_folder")){
var deployFolderContext = context;
var template_public_cloud_type = element.TEMPLATE.PUBLIC_CLOUD.TYPE
if (Config.isFeatureEnabled("vcenter_vm_folder")){
var vcenterVMFolderContext = context;
var template_public_cloud_type = element.TEMPLATE.HYPERVISOR
if ($.isEmptyObject(template_public_cloud_type)) {
deployFolderContext.html("");
vcenterVMFolderContext.html("");
} else {
if (template_public_cloud_type === 'vcenter') {
var deploy_folder = element.TEMPLATE.DEPLOY_FOLDER
deployFolderContext.html(DeployFolderTemplate());
$("#deploy_folder_input", deployFolderContext).val(deploy_folder);
$("#deploy_folder_input", deployFolderContext).data("original_value",deploy_folder);
var vcenter_vm_folder = element.TEMPLATE.VCENTER_VM_FOLDER
vcenterVMFolderContext.html(VcenterVMFolderTemplate());
$("#vcenter_vm_folder_input", vcenterVMFolderContext).val(vcenter_vm_folder);
$("#vcenter_vm_folder_input", vcenterVMFolderContext).data("original_value",vcenter_vm_folder);
} else {
deployFolderContext.html("");
vcenterVMFolderContext.html("");
}
}
}

View File

@ -19,11 +19,11 @@
<i class="fa fa-folder"></i> {{tr "vCenter Deployment"}}
</legend>
<div class="deployContainer">
<label for="deploy_folder_input">
{{tr "Deployment Folder"}}
{{{tip (tr "If specified, the VMs and Template folder path where the VM will be created inside the data center. The path is delimited by slashes e.g /Management/VMs. If no path is set the VM will be deployed in the same folder where the template exists.")}}}
<label for="vcenter_vm_folder_input">
{{tr "vCenter VM Folder"}}
{{{tip (tr "If specified, the VMs and Template folder path where the VM will be created inside the data center. The path is delimited by slashes e.g /Management/VMs. If no path is set the VM will be placed in the same folder where the template is located.")}}}
</label>
<input type="text" name="deploy_folder_input" id="deploy_folder_input" wizard_field="DEPLOY_FOLDER"/>
<input type="text" name="vcenter_vm_folder_input" id="vcenter_vm_folder_input" wizard_field="VCENTER_VM_FOLDER"/>
</div>
</fieldset>

View File

@ -100,11 +100,19 @@ define(function(require) {
var newdiv = $(content).appendTo($(".vcenter_datacenter_list", context));
var tbody = $('#' + tableId + ' tbody', context);
$.each(elements, function(id, cluster_name) {
$.each(elements, function(id, cluster) {
var cluster_name = cluster.cluster_name;
var rp_list = '<select class="select_rp"><option></option>';
if(cluster.rp_list.length > 0){
for(var i = 0; i < cluster.rp_list.length; i++){
rp_list += '<option>' + cluster.rp_list[i].name + '</option>';
}
}
rp_list += '</select>';
var opts = { name: cluster_name };
var trow = $(RowTemplate(opts)).appendTo(tbody);
$(".check_item", trow).data("cluster_name", cluster_name);
$(".check_item", trow).data("cluster", cluster);
});
var elementsTable = new DomDataTable(
@ -165,7 +173,7 @@ define(function(require) {
var host_json = {
"host": {
"name": $(this).data("cluster_name"),
"name": $(this).data("cluster").cluster_name,
"vm_mad": "vcenter",
"vnm_mad": "dummy",
"im_mad": "vcenter",
@ -173,6 +181,10 @@ define(function(require) {
}
};
var cluster_ref = $(this).data("cluster").cluster_ref;
var vcenter_uuid = $(this).data("cluster").vcenter_uuid;
var vcenter_version = $(this).data("cluster").vcenter_version;
OpenNebulaHost.create({
timeout: true,
data: host_json,
@ -183,9 +195,12 @@ define(function(require) {
});
var template_raw =
"VCENTER_USER=\"" + that.opts.vcenter_user + "\"\n" +
"VCENTER_PASSWORD=\"" + that.opts.vcenter_password + "\"\n" +
"VCENTER_HOST=\"" + that.opts.vcenter_host + "\"\n";
"VCENTER_USER=\"" + that.opts.vcenter_user + "\"\n" +
"VCENTER_PASSWORD=\"" + that.opts.vcenter_password + "\"\n" +
"VCENTER_HOST=\"" + that.opts.vcenter_host + "\"\n" +
"VCENTER_INSTANCE_ID=\"" + vcenter_uuid + "\"\n" +
"VCENTER_CCR_REF=\"" + cluster_ref + "\"\n" +
"VCENTER_VERSION=\"" + vcenter_version + "\"\n";
Sunstone.runAction("Host.update_template", response.HOST.ID, template_raw);
},
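
Note: the import flow above first creates the host and then appends the vCenter connection attributes through Host.update_template. A rough Ruby OCA equivalent (credentials and references are placeholders; the allocate signature is assumed from the 5.x OCA):

require 'opennebula'
include OpenNebula

client = Client.new
host   = Host.new(Host.build_xml, client)

# im_mad and vm_mad are "vcenter", as in the host_json above.
rc = host.allocate("vcenter-cluster-name", "vcenter", "vcenter")
raise rc.message if OpenNebula.is_error?(rc)

host.update(<<-EOT, true) # true: merge with the existing template
  VCENTER_USER        = "administrator@vsphere.local"
  VCENTER_PASSWORD    = "secret"
  VCENTER_HOST        = "vcenter.example.com"
  VCENTER_INSTANCE_ID = "aaaa-bbbb"  # placeholder
  VCENTER_CCR_REF     = "domain-c7"  # placeholder
  VCENTER_VERSION     = "6.5"
EOT
puts "Host ID: #{host.id}"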

View File

@ -94,7 +94,7 @@ define(function(require) {
columns : [
'<input type="checkbox" class="check_all"/>',
Locale.tr("Name"),
Locale.tr("Datacenter"),
Locale.tr("Cluster"),
""
]
});
@ -106,7 +106,7 @@ define(function(require) {
var opts = { name: element.name, cluster: element.cluster };
var trow = $(RowTemplate(opts)).appendTo(tbody);
$(".check_item", trow).data("one_template", element.one)
$(".check_item", trow).data("one_template", element.ds)
});
var elementsTable = new DomDataTable(
@ -159,29 +159,30 @@ define(function(require) {
var row_context = $(this).closest("tr");
VCenterCommon.importLoading({context : row_context});
var datastore_json = {
"datastore": {
"datastore_raw": $(this).data("one_template")
},
"cluster_id" : -1
};
OpenNebulaDatastore.create({
timeout: true,
data: datastore_json,
success: function(request, response) {
VCenterCommon.importSuccess({
context : row_context,
message : Locale.tr("Datastore created successfully. ID: %1$s", response.DATASTORE.ID)
});
},
error: function (request, error_json) {
VCenterCommon.importFailure({
context : row_context,
message : (error_json.error.message || Locale.tr("Cannot contact server: is it running and reachable?"))
});
}
var one_template = $(this).data("one_template");
$.each(one_template, function(id, element){
var datastore_json = {
"datastore": {
"datastore_raw": this.one
},
"cluster_id" : -1
};
OpenNebulaDatastore.create({
timeout: true,
data: datastore_json,
success: function(request, response) {
VCenterCommon.importSuccess({
context : row_context,
message : Locale.tr("Datastore created successfully. ID: %1$s", response.DATASTORE.ID)
});
},
error: function (request, error_json) {
VCenterCommon.importFailure({
context : row_context,
message : (error_json.error.message || Locale.tr("Cannot contact server: is it running and reachable?"))
});
}
});
});
});
});

View File

@ -186,6 +186,23 @@ define(function(require) {
'</label>' +
'</div>';
break;
case 'IP6_STATIC':
net_form_str =
'<div class="large-6 medium-6 columns end">' +
'<label>' + Locale.tr("IPv6 address") +
'<input type="text" class="six_static_net"/>' +
'</label>' +
'</div>'+'<div class="large-4 medium-6 columns end">' +
'<label>' + Locale.tr("Prefix length") +
'<input type="text" class="six_prefix_net"/>' +
'</label>' +
'</div>'+
'<div class="large-6 medium-6 columns end">' +
'<label>' + Locale.tr("MAC") +
'<input type="text" class="eth_mac_net" placeholder="' + Locale.tr("Optional") + '"/>' +
'</label>' +
'</div>';
break;
}
$('.net_options', row_context).html(net_form_str);
@ -251,6 +268,21 @@ define(function(require) {
ar_array.push("ULA_PREFIX=" + ula);
}
break;
case 'IP6_STATIC':
var mac = $('.six_mac_net', row_context).val();
var ip6_static = $('.six_static_net', row_context).val();
var prefix = $('.six_prefix_net', row_context).val();
if (mac) {
ar_array.push("MAC=" + mac);
}
if (ip6_static) {
ar_array.push("IP6=" + ip6_static);
}
if (prefix) {
ar_array.push("PREFIX_LENGTH=" + prefix);
}
break;
}
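
Note: the IP6_STATIC branch above assembles an address-range string from the form inputs; a tiny sketch of the result (values are placeholders):

ar_array = []
ar_array << "TYPE=IP6_STATIC"
ar_array << "IP6=2001:db8::10"       # from the IPv6 address input
ar_array << "PREFIX_LENGTH=64"       # from the prefix length input
ar_array << "MAC=02:00:11:22:33:44"  # optional, from the MAC input

puts "AR=[" + ar_array.join(", ") + "]"
# => AR=[TYPE=IP6_STATIC, IP6=2001:db8::10, PREFIX_LENGTH=64, MAC=02:00:11:22:33:44]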

View File

@ -45,6 +45,7 @@
<option value="ETHER">eth</option>
<option value="IP4">ipv4</option>
<option value="IP6">ipv6</option>
<option value="IP6_STATIC">ipv6 static</option>
</select>
</label>
</div>

View File

@ -45,7 +45,6 @@ define(function(require) {
/*
Retrieve the list of templates from vCenter and fill the container with them
opts = {
datacenter: "Datacenter Name",
cluster: "Cluster Name",
@ -56,6 +55,7 @@ define(function(require) {
}
*/
function _fillVCenterTemplates(opts) {
this.opts = opts;
var path = '/vcenter/templates';
var context = $(".vcenter_import", opts.container);
@ -102,18 +102,25 @@ define(function(require) {
$.each(elements, function(id, element) {
var opts = {};
if (element.ds && element.ds !== '') {
opts.datastore = UserInputs.unmarshall(element.ds);
}
if (element.rp && element.rp !== '') {
opts.resourcePool = UserInputs.unmarshall(element.rp);
}
opts.data = element;
opts.resourcePool.params = opts.resourcePool.params.split(",");
opts.id = UniqueId.id();
var trow = $(RowTemplate(opts)).appendTo(tbody);
$.each(opts.resourcePool.params, function(){
$("#available_rps_" + opts.id + " [value ='" + this + "']").mousedown(function(e) {
e.preventDefault();
$(this).prop('selected', !$(this).prop('selected'));
return false;
});
});
$('.check_item', trow).data("import_data", element);
});
@ -160,6 +167,7 @@ define(function(require) {
}
function _import(context) {
var that = this;
$.each($(".vcenter_import_table", context), function() {
$.each($(this).DataTable().$(".check_item:checked"), function() {
var row_context = $(this).closest("tr");
@ -169,45 +177,28 @@ define(function(require) {
var attrs = [];
var userInputs = [];
// Retrieve Resource Pool Attribute
var rpInput = $(".vcenter_rp_input", row_context);
if (rpInput.length > 0) {
var rpModify = $('.modify_rp', rpInput).val();
var rpInitial = $('.initial_rp', rpInput).val();
var rpParams = "";
$.each($('.available_rps option:selected', rpInput), function(){
rpParams += $(this).val() + ",";
});
rpParams = rpParams.slice(0,-1);
if (rpModify === 'fixed' && rpInitial !== '') {
attrs.push('VCENTER_RESOURCE_POOL="' + rpInitial + '"');
} else if (rpModify === 'list' && rpParams !== '') {
var rpUserInputs = UserInputs.marshall({
type: 'list',
description: Locale.tr("Which resource pool you want this VM to run in?"),
initial: rpInitial,
params: $('.available_rps', rpInput).val()
params: rpParams
});
userInputs.push('RESOURCE_POOL="' + rpUserInputs + '"');
userInputs.push('VCENTER_RESOURCE_POOL="' + rpUserInputs + '"');
}
}
@ -222,29 +213,80 @@ define(function(require) {
template += "\nUSER_INPUTS=[\n" + userInputs.join(",\n") + "]";
}
if($(this).data("import_data").import_disks_and_nics){
VCenterCommon.importLoading({
context : row_context,
message : Locale.tr("Importing images and vnets associated to template disks and nics...")
});
var path = '/vcenter/template/' + $(this).data("import_data").vcenter_ref;
$.ajax({
url: path,
type: "GET",
data: {timeout: false},
headers: {
"X-VCENTER-USER": that.opts.vcenter_user,
"X-VCENTER-PASSWORD": that.opts.vcenter_password,
"X-VCENTER-HOST": that.opts.vcenter_host
},
dataType: "json",
success: function(response){
template += "\n" + response.one;
var template_json = {
"vmtemplate": {
"template_raw": template
}
};
OpenNebulaTemplate.create({
timeout: true,
data: template_json,
success: function(request, response) {
VCenterCommon.importSuccess({
context : row_context,
message : Locale.tr("Template created successfully. ID: %1$s", response.VMTEMPLATE.ID)
});
},
error: function (request, error_json) {
VCenterCommon.importFailure({
context : row_context,
message : (error_json.error.message || Locale.tr("Cannot contact server: is it running and reachable?"))
});
}
});
},
error: function(response){
VCenterCommon.importFailure({
context : row_context,
message : OpenNebulaError(response).error.message
});
Notifier.onError({}, OpenNebulaError(response));
}
});
}
else {
var template_json = {
"vmtemplate": {
"template_raw": template
}
};
OpenNebulaTemplate.create({
timeout: true,
data: template_json,
success: function(request, response) {
VCenterCommon.importSuccess({
context : row_context,
message : Locale.tr("Template created successfully. ID: %1$s", response.VMTEMPLATE.ID)
});
},
error: function (request, error_json) {
VCenterCommon.importFailure({
context : row_context,
message : (error_json.error.message || Locale.tr("Cannot contact server: is it running and reachable?"))
});
}
});
}
});
});
}
});
});

View File

@ -15,15 +15,15 @@
{{! -------------------------------------------------------------------------- }}
<tr>
<td style="vertical-align: top">
<input type="checkbox" class="check_item" style="margin-top: 5px"/>
</td>
<td>
<div class="accordion_advanced vcenter_row">
<div class="row">
<div class="large-12 columns">
<label class="inline">
<a href="#{{data.uuid}}Advanced" class="accordion_advanced_toggle"> <i class="fa fa-fw fa-chevron-down"/><i class="fa fa-fw fa-chevron-up"/>
<a href="#{{data.vcenter_ref}}Advanced" class="accordion_advanced_toggle"> <i class="fa fa-fw fa-chevron-down"/><i class="fa fa-fw fa-chevron-up"/>
</a>
{{data.name}}
</label>
@ -31,34 +31,9 @@
</div>
<div class="row">
<div class="large-11 large-centered columns">
<div id="{{data.uuid}}Advanced" class="content" hidden>
{{#if datastore}}
<div class="vcenter_datastore_input row">
<div class="medium-3 columns">
<label>
{{tr "Default Datastore"}}
<input type="text" class="initial_datastore" value="{{datastore.initial}}"/>
</label>
</div>
<div class="medium-3 columns">
<label>
{{tr "Type"}}
<select class="modify_datastore">
<option value="fixed">{{tr "Fixed"}}</option>
<option value="list">{{tr "Provide on instantiation"}}</option>
</select>
</label>
</div>
<div class="medium-4 columns end">
<label>
{{tr "Available Datastores"}}
<input type="text" class="available_datastores" value="{{datastore.params}}"/>
</label>
</div>
</div>
{{/if}}
<div id="{{data.vcenter_ref}}Advanced" class="content" hidden>
{{#if resourcePool}}
<div class="vcenter_rp_input row">
<div class="vcenter_rp_input row" style="margin-left: 11px">
<div class="medium-3 columns">
<label>
{{tr "Default Resource Pool"}}
@ -77,7 +52,11 @@
<div class="medium-4 columns end">
<label>
{{tr "Available Resource Pools"}}
<input type="text" class="available_rps" value="{{resourcePool.params}}"/>
<select multiple="true" size="3" class="available_rps" id="available_rps_{{id}}">
{{#each resourcePool.params}}
<option value="{{this}}" selected>{{this}}</option>
{{/each}}
</select>
</label>
</div>
</div>

View File

@ -101,6 +101,9 @@ define(function(require) {
$(".vcenter_import_result_row", opts.context).show();
if(opts.message){
$(".vcenter_import_response", opts.context).html(opts.message);
}
}
function _importSuccess(opts) {

View File

@ -56,11 +56,11 @@ helpers do
error 404, error.to_json
end
return VCenterDriver::VIClient.new({
:user => vuser,
:password => vpass,
:host => vhost})
end
# def af_format_response(resp)
@ -77,7 +77,18 @@ end
get '/vcenter' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
msg = "Could not get OpenNebula HostPool: #{hpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
rs = dc_folder.get_unimported_hosts(hpool,vcenter_client.vim.host)
[200, rs.to_json]
rescue Exception => e
logger.error("[vCenter] " + e.message)
@ -88,8 +99,19 @@ end
get '/vcenter/templates' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
tpool = VCenterDriver::VIHelper.one_pool(OpenNebula::TemplatePool, false)
if tpool.respond_to?(:message)
msg = "Could not get OpenNebula TemplatePool: #{tpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
templates = dc_folder.get_unimported_templates(vcenter_client, tpool)
if templates.nil?
msg = "No datacenter found"
logger.error("[vCenter] " + msg)
@ -97,7 +119,6 @@ get '/vcenter/templates' do
error 404, error.to_json
end
[200, templates.to_json]
rescue Exception => e
logger.error("[vCenter] " + e.message)
@ -106,10 +127,139 @@ get '/vcenter/templates' do
end
end
get '/vcenter/template/:vcenter_ref' do
begin
t = {}
t[:one] = ""
template_copy_ref = nil
template = nil
append = true
lc_error = nil
ref = params[:vcenter_ref]
if !ref || ref.empty?
msg = "No template ref specified"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
template = VCenterDriver::Template.new_from_ref(ref, vcenter_client)
vc_uuid = vcenter_client.vim.serviceContent.about.instanceUuid
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool)
ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool)
# POST params
if @request_body && !@request_body.empty?
body_hash = JSON.parse(@request_body)
use_linked_clones = body_hash['use_linked_clones'] || false
create_copy = body_hash['create_copy'] || false
template_name = body_hash['template_name'] || ""
if !use_linked_clones && (create_copy || !template_name.empty?)
msg = "Should not set create template copy or template copy name if not using linked clones"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 403, error.to_json
end
if use_linked_clones && !create_copy && !template_name.empty?
msg = "Should not set template copy name if create template copy hasn't been selected"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 403, error.to_json
end
if create_copy
lc_error, template_copy_ref = template.create_template_copy(template_name)
if template_copy_ref
template = VCenterDriver::Template.new_from_ref(template_copy_ref, vcenter_client)
one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, vcenter_client, vcenter_client.vim.host)
if one_template
lc_error, use_lc = template.create_delta_disks
if !lc_error
one_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n"
t = one_template
append = false # t[:one] replaces the current template
end
else
lc_error = "Could not obtain the info from the template's copy"
template.delete_template if template_copy_ref
end
end
else
lc_error, use_lc = template.create_delta_disks
if !lc_error
append = true
t[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc
end
end
end
# Create images or get disks information for template
error, template_disks = template.import_vcenter_disks(vc_uuid, dpool, ipool)
if !error.empty?
append = false
template.delete_template if template_copy_ref
msg = error
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
t[:one] << template_disks
# Create images or get nics information for template
error, template_nics = template.import_vcenter_nics(vc_uuid, npool, vcenter_client.vim.host)
if !error.empty?
append = false
template.delete_template if template_copy_ref
msg = error
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
t[:one] << template_nics
t[:lc_error] = lc_error
t[:append] = append
[200, t.to_json]
rescue Exception => e
template.delete_template if template_copy_ref
logger.error("[vCenter] " + e.message)
error = Error.new(e.message)
error 403, error.to_json
end
end
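For orientation, the route above can be exercised directly. A minimal client sketch, assuming Sunstone's default port and purely hypothetical ref and credentials:
require 'net/http'
require 'json'
# All values below are placeholders for illustration.
uri = URI("http://localhost:9869/vcenter/template/vm-1234")
req = Net::HTTP::Get.new(uri)
req['X-VCENTER-USER']     = 'administrator@vsphere.local'
req['X-VCENTER-PASSWORD'] = 'secret'
req['X-VCENTER-HOST']     = 'vcenter.example.com'
res  = Net::HTTP.start(uri.host, uri.port) { |http| http.request(req) }
body = JSON.parse(res.body)
puts body['one']      # DISK/NIC attributes to append to the template
puts body['lc_error'] # linked clones error, if any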
get '/vcenter/networks' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
if npool.respond_to?(:message)
msg = "Could not get OpenNebula VirtualNetworkPool: #{npool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
networks = dc_folder.get_unimported_networks(npool,vcenter_client.vim.host)
if networks.nil?
msg = "No datacenter found"
logger.error("[vCenter] " + msg)
@ -127,8 +277,14 @@ end
get '/vcenter/images/:ds_name' do
begin
one_ds = VCenterDriver::VIHelper.find_by_name(OpenNebula::DatastorePool,
params[:ds_name])
one_ds_ref = one_ds['TEMPLATE/VCENTER_DS_REF']
ds = VCenterDriver::Datastore.new_from_ref(one_ds_ref, vcenter_client)
ds.one_item = one_ds
images = ds.get_images
if images.nil?
msg = "No datastore found"
@ -147,8 +303,29 @@ end
get '/vcenter/datastores' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
if dpool.respond_to?(:message)
msg = "Could not get OpenNebula DatastorePool: #{dpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
msg = "Could not get OpenNebula HostPool: #{hpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
datastores = dc_folder.get_unimported_datastores(dpool, vcenter_client.vim.host, hpool)
if datastores.nil?
msg = "No datacenter found"
logger.error("[vCenter] " + msg)

View File

@ -1 +0,0 @@
../common/dummy.sh

125
src/tm_mad/vcenter/clone Executable file
View File

@ -0,0 +1,125 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
# clone fe:SOURCE host:remote_system_ds/disk.i vmid dsid
# - fe is the front-end hostname
# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk
# - host is the target host to deploy the VM
# - remote_system_ds is the path for the system datastore in the host
# - vmid is the id of the VM
# - dsid is the target datastore (0 is the system datastore)
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
if !ONE_LOCATION
RUBY_LIB_LOCATION="/usr/lib/one/ruby" if !defined?(RUBY_LIB_LOCATION)
else
RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" if !defined?(RUBY_LIB_LOCATION)
end
$: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
src = ARGV[0]
dst = ARGV[1]
vm_id = ARGV[2]
source_ds_id = ARGV[3]
check_valid src, "src"
check_valid dst, "dst"
check_valid vm_id, "vm_id"
check_valid source_ds_id, "source_ds_id"
target_ds_id = dst.split("/")[-3]
disk_id = dst.split(".")[-1]
src_path = src.split(":")[-1]
hostname = dst.split(":").first
# Get host ID
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, hostname)
host_id = host['ID']
# Get datastores refs
source_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, source_ds_id)
source_ds_ref = source_ds['TEMPLATE/VCENTER_DS_REF']
target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, target_ds_id)
target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
check_valid source_ds_ref, "source_ds"
check_valid target_ds_ref, "target_ds"
# Get VM info
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
# calculate target path
target_path = VCenterDriver::FileHelper.get_img_name_from_path(src_path,
vm_id,
disk_id)
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
# Find disk info
disk = one_vm.retrieve_xmlelements("TEMPLATE/DISK[SOURCE=\"#{src_path}\"]").first rescue nil
raise "Cannot find disk element in vm template" if !disk
new_size = nil
# Check if resize is needed
if disk["ORIGINAL_SIZE"]
original_size = disk["ORIGINAL_SIZE"].to_i
new_size = disk["SIZE"].to_i
# Shrink not supported (nil). Size is in KB
new_size = new_size > original_size ? new_size * 1024 : nil
end
# Unmanaged disks are those with OPENNEBULA_MANAGED=NO or volatile disks
# that are created in StorageDRS clusters
unmanaged_disk = (!!disk['OPENNEBULA_MANAGED'] && disk['OPENNEBULA_MANAGED'].downcase == "no") ||
(target_ds_ref.start_with?('group-') && !!disk["TYPE"] && disk["TYPE"].downcase == "fs")
if !unmanaged_disk
source_ds_vc = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)
if source_ds_ref == target_ds_ref
target_ds_vc = source_ds_vc
else
target_ds_vc = VCenterDriver::Storage.new_from_ref(target_ds_ref, vi_client)
end
target_ds_name_vc = target_ds_vc['name']
if target_ds_ref.start_with?('group-')
raise "Non persistent images management is not supported for StorageDRS datastores"
end
source_ds_vc.copy_virtual_disk(src_path, target_ds_vc, target_path, new_size)
end
rescue Exception => e
message = "Error clone virtual disk #{src_path} in "\
"datastore #{target_ds_name_vc}. "\
"Reason: #{e.message}\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end
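For illustration, this is how the argument parsing above decomposes a destination path (values are hypothetical):
dst = "esx1:/var/lib/one//datastores/100/55/disk.0"
dst.split("/")[-3]   # => "100"  (target datastore ID)
dst.split(".")[-1]   # => "0"    (disk ID)
dst.split(":").first # => "esx1" (host name)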

View File

@ -1 +0,0 @@
../common/dummy.sh

110
src/tm_mad/vcenter/cpds Executable file
View File

@ -0,0 +1,110 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
# cpds host:remote_system_ds/disk.i fe:SOURCE snap_id vmid ds_id
# - fe is the front-end hostname
# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk
# - host is the target host to deploy the VM
# - remote_system_ds is the path for the system datastore in the host
# - snap_id is the snapshot id. "-1" for none
ONE_LOCATION = ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
if !ONE_LOCATION
RUBY_LIB_LOCATION = "/usr/lib/one/ruby" if !defined?(RUBY_LIB_LOCATION)
else
RUBY_LIB_LOCATION = ONE_LOCATION + "/lib/ruby" if !defined?(RUBY_LIB_LOCATION)
end
$: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
src = ARGV[0]
target_path = ARGV[1]
snap_id = ARGV[2] #TODO snapshots?
vmid = ARGV[3]
target_ds_id = ARGV[4]
check_valid src,"src"
check_valid target_path,"target_path"
check_valid vmid,"vmid"
check_valid target_ds_id,"target_ds_id"
disk_id = src.split(".")[-1]
hostname, src_path = src.split ":"
# Get host ID
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, hostname)
host_id = host['ID']
# Get OpenNebula VM (state and deploy_id)
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vmid)
vm_ref = one_vm['DEPLOY_ID']
if one_vm['LCM_STATE'].to_i == 26 #ACTIVE / HOTPLUG_SAVEAS
STDERR.puts "'disk-saveas' operation is not supported for running VMs."
exit 1
end
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
if vm.has_snapshots?
STDERR.puts "'disk-saveas' operation is not supported for VMs with system snapshots."
exit 1
end
# Get source and target ds ref
disk = one_vm.retrieve_xmlelements("TEMPLATE/DISK[DISK_ID=#{disk_id}]").first
source_ds_ref = ""
# If disk is unmanaged get the reference from template
if disk["OPENNEBULA_MANAGED"] && disk["OPENNEBULA_MANAGED"].downcase == "no"
unmanaged_keys = vm.get_unmanaged_keys
device_key = unmanaged_keys["opennebula.disk.#{disk["DISK_ID"]}"].to_i
image_path, source_ds_ref = vm.get_device_filename_and_ds_from_key(device_key)
src_path = image_path.sub(/^\[(.*?)\] /, "")
else
# Get image source path
src_path = VCenterDriver::FileHelper.get_img_name(disk, vmid, vm['name'])
source_ds_ref = disk["VCENTER_DS_REF"]
end
source_ds_vc = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)
# Get target ds ref
target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, target_ds_id)
target_ds_ref = target_ds['TEMPLATE/VCENTER_DS_REF']
target_ds_vc = VCenterDriver::Datastore.new_from_ref(target_ds_ref, vi_client)
target_ds_name_vc = target_ds_vc['name']
source_ds_vc.copy_virtual_disk(src_path, target_ds_vc, target_path)
rescue Exception => e
message = "Error copying img #{src_path} to #{target_ds_name_vc} "\
"Reason: #{e.message}\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -21,6 +21,7 @@
# - remote_system_ds is the path for the system datastore in the host
# - vmid is the id of the VM
# - dsid is the target datastore (0 is the system datastore)
# ---------------------------------------------------------------------------- #
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
@ -35,18 +36,229 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
hostname, img_path = ARGV[0].split(":")
vmid = ARGV[1]
dsid = ARGV[2]
VM_PREFIX_DEFAULT = "one-$i-"
path = ARGV[0]
vmid = ARGV[1]
dsid = ARGV[2]
check_valid path, "path"
check_valid vmid, "vmid"
check_valid dsid, "dsid"
hostname, img_path = path.split(":")
# Get host ID
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, hostname)
host_id = host['ID']
# Get VM
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vmid)
vm_ref = one_vm['DEPLOY_ID']
vm = nil
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
if !!vm_ref && !vm_ref.empty?
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
else
vcenter_vm = VCenterDriver::VIHelper.find_vcenter_vm_by_name(one_vm, host, vi_client)
# If no VM object retrieved, raise an exception
raise "Could not find the undeployed VM in vCenter's inventory using it's name" if !vcenter_vm
vm_ref = vcenter_vm._ref
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
end
rescue Exception => e
STDERR.puts "Error delete virtual disk #{img_path} in datastore #{dsid}."\
" Reason: #{e.message}"
vi_client.close_connection if vi_client
STDERR.puts "Error obtaining the vCenter client and VM object."\
" Reason: #{e.message}\n#{e.backtrace}"
exit -1
end
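The path now decides which branch runs: a trailing disk.N component means a single disk is detached and deleted, anything else undeploys the whole VM. A quick illustration with hypothetical paths:
"esx1:/var/lib/one//datastores/100/55/disk.1".match(/disk\.\d+$/) # truthy: per-disk branch
"esx1:/var/lib/one//datastores/100/55".match(/disk\.\d+$/)        # nil: whole-VM branch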
if path.match(/disk\.\d+$/)
# Detach and remove the disk (if it is not a CDROM)
# Get DS ref
dsid = img_path.split("/")[-3] # get dsid from path
one_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, dsid)
ds_ref = one_ds['TEMPLATE/VCENTER_DS_REF']
# Get disk info
disk_id = img_path.split(".")[-1]
disk = one_vm.retrieve_xmlelements("TEMPLATE/DISK[DISK_ID=#{disk_id}]").first
begin
if !vm.has_snapshots?
# TODO: if the deploy has failed, the disks may exist, but the vm may
# not exist...
vm.one_item = one_vm
# detach the disk or cdrom
ds_ref, img_path = vm.detach_disk(disk)
# If disk was already detached we have no way to remove it
if ds_ref && img_path && !img_path.empty?
ds = VCenterDriver::Datastore.new_from_ref(ds_ref, vi_client)
# delete the disk if it's not a CDROM (CLONE=NO)
if disk["CLONE"].nil? || disk["CLONE"] == "YES"
ds.delete_virtual_disk(img_path)
img_dir = File.dirname(img_path)
ds.rm_directory(img_dir) if ds.dir_empty?(img_dir)
end
end
end
rescue Exception => e
message = "Error delete virtual disk #{img_path} in datastore #{dsid}."\
" Reason: #{e.message}\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end
else
# Remove the VM
begin
# All OpenNebula managed disks have been detached. The VM may still have
# disks that belong to the template (OPENNEBULA_MANAGED disks). These disks
# will be deleted with the destroy operation. If the user wants to
# save them to a VM, it can be done using the disk-saveas operation.
# If we have NICs associated to VN_MAD=vcenter we must check if pgs and
# switches must be deleted
# track pg or dpg in case they must be removed
vcenter_uuid = vm.get_vcenter_instance_uuid
networks = {}
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
# Check nics in VM
vm.item["config.hardware.device"].each do |dv|
if vm.is_nic?(dv)
if dv.backing.respond_to?(:network)
vnet_ref = dv.backing.network._ref
end
if dv.backing.respond_to?(:port) &&
dv.backing.port.respond_to?(:portgroupKey)
vnet_ref = dv.backing.port.portgroupKey
end
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
vnet_ref,
vcenter_uuid,
npool)
next if !one_network
if one_network["VN_MAD"] == "vcenter" && !networks.key?(one_network["BRIDGE"])
networks[one_network["BRIDGE"]] = one_network
end
end
end
#Remove pgs and switches if not needed
if !networks.empty?
esx_host = VCenterDriver::ESXHost.new_from_ref(vm.item.runtime.host._ref, vi_client)
dc = vm.cluster.get_dc # Get datacenter
networks.each do |pg_name, one|
if one["TEMPLATE/VCENTER_PORTGROUP_TYPE"] == "Port Group"
begin
esx_host.lock # Exclusive lock for ESX host operation
next if !esx_host.pg_exists(pg_name)
swname = esx_host.remove_pg(pg_name)
next if !swname
# We must update XML so the VCENTER_NET_REF is unset
VCenterDriver::Network.remove_net_ref(one["ID"])
next if !esx_host.vss_exists(swname)
swname = esx_host.remove_vss(swname)
rescue Exception => e
raise e
ensure
esx_host.unlock if esx_host # Remove lock
end
end
if one["TEMPLATE/VCENTER_PORTGROUP_TYPE"] == "Distributed Port Group"
begin
dc.lock
# Explore network folder in search of dpg and dvs
net_folder = dc.network_folder
net_folder.fetch!
# Get distributed port group if it exists
dpg = dc.dpg_exists(pg_name, net_folder)
dc.remove_dpg(dpg) if dpg
# We must update XML so the VCENTER_NET_REF is unset
VCenterDriver::Network.remove_net_ref(one["ID"])
# Get distributed virtual switch and try to remove it
switch_name = one["TEMPLATE/VCENTER_SWITCH_NAME"]
dvs = dc.dvs_exists(switch_name, net_folder)
dc.remove_dvs(dvs) if dvs
rescue Exception => e
#TODO rollback
raise e
ensure
dc.unlock if dc
end
end
end
end
vm.poweroff_hard if vm.is_powered_on?
# If the VM has snapshots the TM could not detach its disks, so we try to
# detach persistent disks once all snapshots have been removed; that way
# they won't be deleted along with the VM. If the VM has been marked as a
# template, persistent disks shouldn't be detached.
if vm.has_snapshots? && !vm.instantiated_as_persistent?
vm.remove_all_snapshots
disks = one_vm.retrieve_xmlelements("TEMPLATE/DISK[PERSISTENT=\"YES\"]")
disks.each do |d|
vm.detach_disk(d)
end
end
# If the VM was instantiated as persistent, keep the VM
if vm.instantiated_as_persistent?
#Convert VM to template in vCenter
vm.mark_as_template
# Create a new OpenNebula template and set VCENTER_TEMPLATE_REF
one_client = OpenNebula::Client.new
template_id = vm.one_item['TEMPLATE/TEMPLATE_ID']
new_template = OpenNebula::Template.new_with_id(template_id, one_client)
new_template.info
new_template.update("VCENTER_TEMPLATE_REF= #{vm.item._ref}", true)
end
# Destroy the VM unless instantiate-as-persistent was used
vm.destroy if !vm.instantiated_as_persistent?
rescue Exception => e
message = "Error unregistering vm #{vmid} (#{vm_ref})."\
" Reason: #{e.message}\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -38,40 +38,58 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
size = ARGV[0]
fstype = ARGV[1]
hostname, img_path = ARGV[2].split(":")
path = ARGV[2]
vmid = ARGV[3]
dsid = ARGV[4]
check_valid size, "size"
check_valid path, "path"
check_valid vmid, "vmid"
check_valid dsid, "dsid"
hostname, img_name = path.split(":")
disk_id = img_name.split(".")[-1]
# Get host ID
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, hostname)
host_id = host['ID']
# Get datastore ref
one_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, dsid)
ds_ref = one_ds['TEMPLATE/VCENTER_DS_REF']
# Get one_vm
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vmid)
# Adapter and disk type from one_vm
adapter_type = one_vm["/VM/TEMPLATE/DISK[DISK_ID=#{disk_id}]/VCENTER_ADAPTER_TYPE"] ||
VCenterDriver::VIHelper.get_default("IMAGE/TEMPLATE/VCENTER_ADAPTER_TYPE")
disk_type = one_vm["/VM/TEMPLATE/DISK[DISK_ID=#{disk_id}]/VCENTER_DISK_TYPE"] ||
VCenterDriver::VIHelper.get_default("IMAGE/TEMPLATE/VCENTER_DISK_TYPE")
check_valid adapter_type, "adapter_type"
check_valid disk_type, "disk_type"
# Volatile images dir from one_vm
ds_volatile_dir = one_vm["/VM/TEMPLATE/DISK[DISK_ID=#{disk_id}]/VCENTER_DS_VOLATILE_DIR"] ||
"one-volatile"
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
img_name = "#{ds_volatile_dir}/#{vmid}/one-#{vmid}-#{disk_id}"
ds_vc = VCenterDriver::Storage.new_from_ref(ds_ref, vi_client)
if ds_vc.class == VCenterDriver::Datastore
ds_vc.create_virtual_disk(img_name, size, adapter_type, disk_type)
end
rescue Exception => e
STDERR.puts "Error creating virtual disk in #{ds_name}."\
" Reason: #{e.message}"
message = "Error creating virtual disk in #{ds_vc['name']}."\
" Reason: #{e.message}\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -1 +1 @@
../common/dummy.sh
../common/not_supported.sh

1
src/tm_mad/vcenter/monitor Symbolic link
View File

@ -0,0 +1 @@
../../datastore_mad/remotes/vcenter/monitor

View File

@ -1 +0,0 @@
../common/dummy.sh

78
src/tm_mad/vcenter/mvds Executable file
View File

@ -0,0 +1,78 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
# mvds host:remote_system_ds/disk.i fe:SOURCE vmid dsid
# - fe is the front-end hostname
# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk
# - host is the target host to deploy the VM
# - remote_system_ds is the path for the system datastore in the host
# - vmid is the id of the VM
# - dsid is the target datastore (0 is the system datastore)
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
if !ONE_LOCATION
RUBY_LIB_LOCATION="/usr/lib/one/ruby" if !defined?(RUBY_LIB_LOCATION)
else
RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" if !defined?(RUBY_LIB_LOCATION)
end
$: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
path = ARGV[0]
vmid = ARGV[2]
check_valid path, "path"
check_valid vmid, "vmid"
hostname, img_path = path.split(":")
# Get host ID
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, hostname)
host_id = host['ID']
# Get VM
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vmid)
vm_ref = one_vm['DEPLOY_ID']
# Get image path
disk_id = img_path.split(".")[-1]
disk = one_vm.retrieve_xmlelements("TEMPLATE/DISK[DISK_ID=#{disk_id}]").first
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
vm.one_item = one_vm
# Don't detach persistent disks if the VM has snapshots,
# or the instantiate to persistent is used
vm.detach_disk(disk) if !vm.has_snapshots? && !vm.instantiated_as_persistent?
rescue Exception => e
message = "Error detaching virtual disk #{disk_id} from vm #{vmid}."\
" Reason: #{e.message}\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -739,7 +739,8 @@ class ExecDriver < VirtualMachineDriver
id,
host,
ACTION[:snapshot_create],
:script_name => "snapshot_create")
:script_name => "snapshot_create",
:stdin => xml_data)
end
#

View File

@ -16,5 +16,4 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
SCRIPT_NAME=$(basename $0)
echo "Action $SCRIPT_NAME not supported" 1>&2
exit 0

View File

@ -0,0 +1,761 @@
module VCenterDriver
require 'set'
class DatacenterFolder
attr_accessor :items
def initialize(vi_client)
@vi_client = vi_client
@items = {}
end
########################################################################
# Builds a hash with Datacenter-Ref / Datacenter to be used as a cache
# @return [Hash] in the form
# { dc_ref [Symbol] => Datacenter object }
########################################################################
def fetch!
VIClient.get_entities(@vi_client.vim.root, "Datacenter").each do |item|
item_name = item._ref
@items[item_name.to_sym] = Datacenter.new(item)
end
end
########################################################################
# Returns a Datacenter. Uses the cache if available.
# @param ref [Symbol] the vcenter ref
# @return Datacenter
########################################################################
def get(ref)
if !@items[ref.to_sym]
rbvmomi_dc = RbVmomi::VIM::Datacenter.new(@vi_client.vim, ref)
@items[ref.to_sym] = Datacenter.new(rbvmomi_dc)
end
@items[ref.to_sym]
end
def get_vcenter_instance_uuid
@vi_client.vim.serviceContent.about.instanceUuid
end
def get_vcenter_api_version
@vi_client.vim.serviceContent.about.apiVersion
end
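A minimal usage sketch of the cache above (vi_client stands for an already authenticated VCenterDriver::VIClient; the ref is hypothetical):
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
dc_folder.fetch!                   # cache every Datacenter under its vCenter ref
dc = dc_folder.get("datacenter-2") # served from the cache when available
puts dc_folder.get_vcenter_instance_uuid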
def get_unimported_hosts(hpool, vcenter_instance_name)
host_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
vcenter_version = get_vcenter_api_version
fetch! if @items.empty? #Get datacenters
@items.values.each do |dc|
dc_name = dc.item.name
host_objects[dc_name] = []
host_folder = dc.host_folder
host_folder.fetch_clusters!
host_folder.items.values.each do |ccr|
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
ccr['_ref'],
vcenter_uuid,
hpool)
next if one_host #If the host has been already imported
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr['_ref'], @vi_client)
rpools = cluster.get_resource_pool_list.select {|rp| !rp[:name].empty?}
host_info = {}
cluster_name = "[#{vcenter_instance_name}-#{dc_name}]_#{ccr['name']}"
cluster_name = cluster_name.tr(" ", "_")
cluster_name = cluster_name.tr("\u007F", "") # Remove \u007F character that comes from vcenter
host_info[:cluster_name] = cluster_name
host_info[:cluster_ref] = ccr['_ref']
host_info[:vcenter_uuid] = vcenter_uuid
host_info[:vcenter_version] = vcenter_version
host_info[:rp_list] = rpools
host_objects[dc_name] << host_info
end
end
return host_objects
end
def get_unimported_datastores(dpool, vcenter_instance_name, hpool)
ds_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
fetch! if @items.empty? #Get datacenters
one_clusters = {}
@items.values.each do |dc|
dc_name = dc.item.name
one_clusters[dc_name] = []
host_folder = dc.host_folder
host_folder.fetch_clusters!
host_folder.items.values.each do |ccr|
cluster = {}
cluster[:ref] = ccr['_ref']
cluster[:name] = ccr['name']
attribute = "TEMPLATE/VCENTER_CCR_REF"
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
attribute,
ccr['_ref'],
vcenter_uuid,
hpool)
if !!one_host
cluster[:host_id] = one_host['ID']
one_clusters[dc_name] << cluster
end
end
next if one_clusters[dc_name].empty? #No clusters imported, continue
ds_objects[dc_name] = []
datastore_folder = dc.datastore_folder
datastore_folder.fetch!
datastore_folder.items.values.each do |ds|
name, capacity, freeSpace = ds.item.collect("name","summary.capacity","summary.freeSpace")
ds_name = "[#{vcenter_instance_name} - #{dc_name}] #{name}"
ds_total_mb = ((capacity.to_i / 1024) / 1024)
ds_free_mb = ((freeSpace.to_i / 1024) / 1024)
if ds.instance_of? VCenterDriver::Datastore
hosts_in_ds = ds['host']
clusters_in_ds = {}
hosts_in_ds.each do |host|
cluster_ref = host.key.parent._ref
if !clusters_in_ds[cluster_ref]
clusters_in_ds[cluster_ref] = host.key.parent.name
end
end
clusters_in_ds.each do |ccr_ref, ccr_name|
ds_hash = {}
ds_hash[:name] = "#{ds_name} - #{ccr_name.tr(" ", "_")}"
ds_hash[:total_mb] = ds_total_mb
ds_hash[:free_mb] = ds_free_mb
ds_hash[:cluster] = ccr_name
ds_hash[:ds] = []
already_image_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "IMAGE_DS", dpool)
if !already_image_ds
object = ds.to_one_template(one_clusters[dc_name], ds_hash[:name], ccr_ref, "IMAGE_DS", vcenter_uuid)
ds_hash[:ds] << object if !object.nil?
end
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool)
if !already_system_ds
object = ds.to_one_template(one_clusters[dc_name], ds_hash[:name], ccr_ref, "SYSTEM_DS", vcenter_uuid)
ds_hash[:ds] << object if !object.nil?
end
ds_objects[dc_name] << ds_hash if !ds_hash[:ds].empty?
end
end
if ds.instance_of? VCenterDriver::StoragePod
clusters_in_spod = {}
ds_in_spod = ds['children']
ds_in_spod.each do |sp_ds|
hosts_in_ds = sp_ds.host
hosts_in_ds.each do |host|
cluster_ref = host.key.parent._ref
if !clusters_in_spod[cluster_ref]
clusters_in_spod[cluster_ref] = host.key.parent.name
end
end
end
clusters_in_spod.each do |ccr_ref, ccr_name|
ds_hash = {}
ds_hash[:name] = "#{ds_name} - #{ccr_name.tr(" ", "_")}"
ds_hash[:total_mb] = ds_total_mb
ds_hash[:free_mb] = ds_free_mb
ds_hash[:cluster] = ccr_name
ds_hash[:ds] = []
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool)
if !already_system_ds
object = ds.to_one_template(one_clusters[dc_name], ds_hash[:name], ccr_ref, "SYSTEM_DS", vcenter_uuid)
ds_hash[:ds] << object if !object.nil?
end
ds_objects[dc_name] << ds_hash if !ds_hash[:ds].empty?
end
end
end
end
ds_objects
end
def get_unimported_templates(vi_client, tpool)
template_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
vcenter_instance_name = vi_client.vim.host
fetch! if @items.empty? #Get datacenters
@items.values.each do |dc|
rp_cache = {}
dc_name = dc.item.name
template_objects[dc_name] = []
view = vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: dc.item.vmFolder,
type: ['VirtualMachine'],
recursive: true
})
pc = vi_client.vim.serviceContent.propertyCollector
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'VirtualMachine', :pathSet => ['config.template'] }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
vms = {}
result.each do |r|
vms[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
end
templates = []
vms.each do |ref,value|
if value["config.template"]
templates << VCenterDriver::Template.new_from_ref(ref, vi_client)
end
end
view.DestroyView # Destroy the view
templates.each do |template|
one_template = VCenterDriver::VIHelper.find_by_ref(OpenNebula::TemplatePool,
"TEMPLATE/VCENTER_TEMPLATE_REF",
template['_ref'],
vcenter_uuid,
tpool)
next if one_template #If the template has been already imported
one_template = VCenterDriver::Template.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name, dc_name, rp_cache)
template_objects[dc_name] << one_template if !!one_template
end
end
template_objects
end
def get_unimported_networks(npool,vcenter_instance_name)
network_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
pc = @vi_client.vim.serviceContent.propertyCollector
#Get all port groups and distributed port groups in vcenter instance
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @vi_client.vim.rootFolder,
type: ['Network','DistributedVirtualPortgroup'],
recursive: true
})
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'Network', :pathSet => ['name'] },
{ :type => 'DistributedVirtualPortgroup', :pathSet => ['name'] }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
networks = {}
result.each do |r|
networks[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::DistributedVirtualPortgroup) || r.obj.is_a?(RbVmomi::VIM::Network)
networks[r.obj._ref][:network_type] = r.obj.is_a?(RbVmomi::VIM::DistributedVirtualPortgroup) ? "Distributed Port Group" : "Port Group"
end
view.DestroyView # Destroy the view
fetch! if @items.empty? #Get datacenters
@items.values.each do |dc|
dc_name = dc.item.name
network_objects[dc_name] = []
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: dc.item,
type: ['ClusterComputeResource'],
recursive: true
})
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'ClusterComputeResource', :pathSet => ['name','network'] }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
clusters = {}
result.each do |r|
clusters[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::ClusterComputeResource)
end
view.DestroyView # Destroy the view
clusters.each do |ref, info|
network_obj = info['network']
network_obj.each do |n|
network_ref = n._ref
network_name = networks[network_ref]['name']
network_type = networks[network_ref][:network_type]
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
network_ref,
vcenter_uuid,
npool)
next if one_network #If the network has been already imported
one_vnet = VCenterDriver::Network.to_one_template(network_name,
network_ref,
network_type,
ref,
info['name'],
vcenter_uuid,
vcenter_instance_name,
dc_name)
network_objects[dc_name] << one_vnet
end
end # network loop
end #datacenters loop
return network_objects
end
end # class DatacenterFolder
class Datacenter
attr_accessor :item
DPG_CREATE_TIMEOUT = 240
def initialize(item, vi_client=nil)
if !item.instance_of? RbVmomi::VIM::Datacenter
raise "Expecting type 'RbVmomi::VIM::Datacenter'. " <<
"Got '#{item.class} instead."
end
@vi_client = vi_client
@item = item
@net_rollback = []
@locking = true
end
def datastore_folder
DatastoreFolder.new(@item.datastoreFolder)
end
def host_folder
HostFolder.new(@item.hostFolder)
end
def vm_folder
VirtualMachineFolder.new(@item.vmFolder)
end
def network_folder
NetworkFolder.new(@item.networkFolder)
end
# Locking function. Similar to flock
def lock
hostlockname = @item['name'].downcase.tr(" ", "_")
if @locking
@locking_file = File.open("/tmp/vcenter-dc-#{hostlockname}-lock","w")
@locking_file.flock(File::LOCK_EX)
end
end
# Unlock driver execution mutex
def unlock
if @locking
@locking_file.close
end
end
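The lock serializes network operations on this datacenter across driver processes; a typical pattern, sketched under that assumption:
dc.lock
begin
# exclusive section: create or remove switches and port groups in this datacenter
ensure
dc.unlock
end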
########################################################################
# Check if distributed virtual switch exists in host
########################################################################
def dvs_exists(switch_name, net_folder)
return net_folder.items.values.select{ |dvs|
dvs.instance_of?(VCenterDriver::DistributedVirtualSwitch) &&
dvs['name'] == switch_name
}.first rescue nil
end
########################################################################
# Is the distributed switch for the distributed pg different?
########################################################################
def pg_changes_sw?(dpg, switch_name)
return dpg['config.distributedVirtualSwitch.name'] != switch_name
end
########################################################################
# Create a distributed vcenter switch in a datacenter
########################################################################
def create_dvs(switch_name, pnics, mtu=1500)
# Prepare spec for DVS creation
spec = RbVmomi::VIM::DVSCreateSpec.new
spec.configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
spec.configSpec.name = switch_name
# Specify number of uplinks port for dpg
if pnics
pnics = pnics.split(",")
if !pnics.empty?
spec.configSpec.uplinkPortPolicy = RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
spec.configSpec.uplinkPortPolicy.uplinkPortName = []
(0..pnics.size-1).each { |index|
spec.configSpec.uplinkPortPolicy.uplinkPortName[index]="dvUplink#{index+1}"
}
end
end
#Set maximum MTU
spec.configSpec.maxMtu = mtu
# The DVS must be created in the networkFolder of the datacenter
begin
dvs_creation_task = @item.networkFolder.CreateDVS_Task(:spec => spec)
dvs_creation_task.wait_for_completion
# If the task finished successfully we rename the uplink portgroup
dvs = nil
if dvs_creation_task.info.state == 'success'
dvs = dvs_creation_task.info.result
dvs.config.uplinkPortgroup[0].Rename_Task(:newName => "#{switch_name}-uplink-pg").wait_for_completion
else
raise "The Distributed vSwitch #{switch_name} could not be created. "
end
rescue Exception => e
raise e
end
@net_rollback << {:action => :delete_dvs, :dvs => dvs, :name => switch_name}
return VCenterDriver::DistributedVirtualSwitch.new(dvs, @vi_client)
end
########################################################################
# Update a distributed vcenter switch
########################################################################
def update_dvs(dvs, pnics, mtu)
# Prepare spec for DVS creation
spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
changed = false
orig_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
orig_spec.maxMtu = dvs['config.maxMtu']
orig_spec.uplinkPortPolicy = RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
orig_spec.uplinkPortPolicy.uplinkPortName = []
(0..dvs['config.uplinkPortgroup'].length-1).each { |index|
orig_spec.uplinkPortPolicy.uplinkPortName[index]="dvUplink#{index+1}"
}
# Add more uplinks to default uplink port group according to number of pnics
if pnics
pnics = pnics.split(",")
if !pnics.empty? && dvs['config.uplinkPortgroup'].length != pnics.size
spec.uplinkPortPolicy = RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
spec.uplinkPortPolicy.uplinkPortName = []
(dvs['config.uplinkPortgroup'].length..pnics.size-1).each { |index|
spec.uplinkPortPolicy.uplinkPortName[index]="dvUplink#{index+1}"
}
changed = true
end
end
#Set maximum MTU
if mtu != dvs['config.maxMtu']
spec.maxMtu = mtu
changed = true
end
# The DVS must be created in the networkFolder of the datacenter
if changed
spec.configVersion = dvs['config.configVersion']
begin
dvs.item.ReconfigureDvs_Task(:spec => spec).wait_for_completion
rescue Exception => e
raise "The Distributed switch #{dvs['name']} could not be updated. "\
"Reason: #{e.message}"
end
@net_rollback << {:action => :update_dvs, :dvs => dvs.item, :name => dvs['name'], :spec => orig_spec}
end
end
########################################################################
# Remove a distributed vcenter switch in a datacenter
########################################################################
def remove_dvs(dvs)
begin
dvs.item.Destroy_Task.wait_for_completion
rescue
#Ignore destroy task exception
end
end
########################################################################
# Check if distributed port group exists in datacenter
########################################################################
def dpg_exists(pg_name, net_folder)
return net_folder.items.values.select{ |dpg|
dpg.instance_of?(VCenterDriver::DistributedPortGroup) &&
dpg['name'] == pg_name
}.first rescue nil
end
########################################################################
# Create a distributed vcenter port group
########################################################################
def create_dpg(dvs, pg_name, vlan_id, num_ports)
spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
# OpenNebula uses DVS static port binding with autoexpand
if num_ports
spec.autoExpand = true
spec.numPorts = num_ports
end
# Distributed port group name
spec.name = pg_name
# Set VLAN information
spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new
spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
spec.defaultPortConfig.vlan.vlanId = vlan_id
spec.defaultPortConfig.vlan.inherited = false
# earlyBinding. A free DistributedVirtualPort will be selected and
# assigned to a VirtualMachine when the virtual machine is reconfigured
# to connect to the portgroup.
spec.type = "earlyBinding"
begin
dvs.item.AddDVPortgroup_Task(spec: [spec]).wait_for_completion
rescue Exception => e
raise "The Distributed port group #{pg_name} could not be created. "\
"Reason: #{e.message}"
end
# wait until the network is ready and we have a reference
portgroups = dvs['portgroup'].select{ |dpg|
dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) &&
dpg['name'] == pg_name
}
(0..DPG_CREATE_TIMEOUT).each do
break if !portgroups.empty?
portgroups = dvs['portgroup'].select{ |dpg|
dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) &&
dpg['name'] == pg_name
}
sleep 1
end
raise "Cannot get VCENTER_NET_REF for new distributed port group" if portgroups.empty?
@net_rollback << {:action => :delete_dpg, :dpg => portgroups.first, :name => pg_name}
return portgroups.first._ref
end
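Putting create_dvs, create_dpg and network_rollback together, a hedged usage sketch (switch name, pnics, VLAN ID and port count are hypothetical):
begin
dvs    = dc.create_dvs("one-sw0", "vmnic0,vmnic1", 9000)
pg_ref = dc.create_dpg(dvs, "one-pg0", 101, 8) # returns the ref used as VCENTER_NET_REF
rescue Exception
dc.network_rollback # undo the recorded steps in reverse order
raise
end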
########################################################################
# Update a distributed vcenter port group
########################################################################
def update_dpg(dpg, vlan_id, num_ports)
spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
changed = false
orig_spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
orig_spec.numPorts = dpg['config.numPorts']
orig_spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new
orig_spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
orig_spec.defaultPortConfig.vlan.vlanId = dpg['config.defaultPortConfig.vlan.vlanId']
orig_spec.defaultPortConfig.vlan.inherited = false
if num_ports && num_ports != orig_spec.numPorts
spec.numPorts = num_ports
changed = true
end
# earlyBinding. A free DistributedVirtualPort will be selected and
# assigned to a VirtualMachine when the virtual machine is reconfigured
# to connect to the portgroup.
spec.type = "earlyBinding"
if vlan_id != orig_spec.defaultPortConfig.vlan.vlanId
spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new
spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
spec.defaultPortConfig.vlan.vlanId = vlan_id
spec.defaultPortConfig.vlan.inherited = false
changed = true
end
if changed
spec.configVersion = dpg['config.configVersion']
begin
dpg.item.ReconfigureDVPortgroup_Task(:spec => spec).wait_for_completion
rescue Exception => e
raise "The Distributed port group #{dpg['name']} could not be created. "\
"Reason: #{e.message}"
end
@net_rollback << {:action => :update_dpg, :dpg => dpg.item, :name => dpg['name'], :spec => orig_spec}
end
end
########################################################################
# Remove distributed port group from datacenter
########################################################################
def remove_dpg(dpg)
begin
dpg.item.Destroy_Task.wait_for_completion
rescue RbVmomi::VIM::ResourceInUse => e
STDERR.puts "The distributed portgroup #{dpg["name"]} is in use so it cannot be deleted"
return nil
rescue Exception => e
raise "The Distributed portgroup #{dpg["name"]} could not be deleted. Reason: #{e.message} "
end
end
########################################################################
# Perform vcenter network rollback operations
########################################################################
def network_rollback
@net_rollback.reverse_each do |nr|
case nr[:action]
when :update_dpg
begin
nr[:dpg].ReconfigureDVPortgroupConfigSpec_Task(:spec => nr[:spec])
rescue Exception => e
raise "A rollback operation for distributed port group #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :update_dvs
begin
nr[:dvs].ReconfigureDvs_Task(:spec => nr[:spec])
rescue Exception => e
raise "A rollback operation for distributed standard switch #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :delete_dvs
begin
nr[:dvs].Destroy_Task.wait_for_completion
rescue RbVmomi::VIM::ResourceInUse
return #Ignore if switch in use
rescue RbVmomi::VIM::NotFound
return #Ignore if switch not found
rescue Exception => e
raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :delete_dpg
begin
nr[:dpg].Destroy_Task.wait_for_completion
rescue RbVmomi::VIM::ResourceInUse
return #Ignore if pg in use
rescue RbVmomi::VIM::NotFound
return #Ignore if pg not found
rescue Exception => e
raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}"
end
end
end
end
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::Datacenter.new(vi_client.vim, ref), vi_client)
end
end
end # module VCenterDriver

View File

@ -0,0 +1,624 @@
module VCenterDriver
class DatastoreFolder
attr_accessor :item, :items
def initialize(item)
@item = item
@items = {}
end
########################################################################
# Builds a hash with Datastore-Ref / Datastore to be used as a cache
# @return [Hash] in the form
# { ds_ref [Symbol] => Datastore object }
########################################################################
def fetch!
VIClient.get_entities(@item, "Datastore").each do |item|
item_name = item._ref
@items[item_name.to_sym] = Datastore.new(item)
end
VIClient.get_entities(@item, "StoragePod").each do |sp|
@items[sp._ref.to_sym] = StoragePod.new(sp)
VIClient.get_entities(sp, "Datastore").each do |item|
item_name = item._ref
@items[item_name.to_sym] = Datastore.new(item)
end
end
end
def monitor
monitor = ""
@items.values.each do |ds|
monitor << "VCENTER_DS_REF=\"#{ds['_ref']}\"\n"
end
monitor
end
########################################################################
# Returns a Datastore or StoragePod. Uses the cache if available.
# @param ref [Symbol] the vcenter ref
# @return Datastore
########################################################################
def get(ref)
if !@items[ref.to_sym]
if ref.start_with?("group-")
rbvmomi_spod = RbVmomi::VIM::StoragePod.new(@item._connection, ref) rescue nil
@items[ref.to_sym] = StoragePod.new(rbvmomi_spod)
else
rbvmomi_ds = RbVmomi::VIM::Datastore.new(@item._connection, ref) rescue nil
@items[ref.to_sym] = Datastore.new(rbvmomi_ds)
end
end
@items[ref.to_sym]
end
end # class DatastoreFolder
class Storage
attr_accessor :item
include Memoize
def self.new_from_ref(ref, vi_client)
if ref.start_with?('group-')
return VCenterDriver::StoragePod.new_from_ref(ref, vi_client)
else
return VCenterDriver::Datastore.new_from_ref(ref, vi_client)
end
end
def self.get_image_import_template(ds_name, image_path, image_type, image_prefix, ipool)
one_image = {}
one_image[:template] = ""
# Remove ds info from path
image_path.sub!(/^\[#{ds_name}\] /, "")
# Get image name
file_name = File.basename(image_path).gsub(/\.vmdk$/,"")
image_name = "#{file_name} - #{ds_name}"
# Check if the image has already been imported
image = VCenterDriver::VIHelper.find_by_name(OpenNebula::ImagePool,
image_name,
ipool,
false)
if image.nil?
#Set template
one_image[:template] << "NAME=\"#{image_name}\"\n"
one_image[:template] << "PATH=\"vcenter://#{image_path}\"\n"
one_image[:template] << "TYPE=\"#{image_type}\"\n"
one_image[:template] << "PERSISTENT=\"NO\"\n"
one_image[:template] << "VCENTER_IMPORTED=\"YES\"\n"
one_image[:template] << "DEV_PREFIX=\"#{image_prefix}\"\n"
else
# Return the image XML if it already exists
one_image[:one] = image
end
return one_image
end
def self.get_one_image_ds_by_ref_and_ccr(ref, ccr_ref, vcenter_uuid, pool = nil)
pool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false) if pool.nil?
element = pool.select do |e|
e["TEMPLATE/TYPE"] == "IMAGE_DS" &&
e["TEMPLATE/VCENTER_DS_REF"] == ref &&
e["TEMPLATE/VCENTER_CCR_REF"] == ccr_ref &&
e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid
end.first rescue nil
return element
end
# Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
def self.is_disk_or_iso?(device)
is_disk = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
is_iso = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
is_disk || is_iso
end
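########################################################################
# Monitor storage capacity. vCenter reports summary.capacity and
# summary.freeSpace in bytes; both are converted to MB here.
########################################################################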
def monitor
summary = self['summary']
total_mb = (summary.capacity.to_i / 1024) / 1024
free_mb = (summary.freeSpace.to_i / 1024) / 1024
used_mb = total_mb - free_mb
"USED_MB=#{used_mb}\nFREE_MB=#{free_mb} \nTOTAL_MB=#{total_mb}"
end
def self.exists_one_by_ref_ccr_and_type?(ref, ccr_ref, vcenter_uuid, type, pool = nil)
pool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false) if pool.nil?
elements = pool.select do |e|
e["TEMPLATE/TYPE"] == type &&
e["TEMPLATE/VCENTER_DS_REF"] == ref &&
e["TEMPLATE/VCENTER_CCR_REF"] == ccr_ref &&
e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid
end
return elements.size == 1
end
def to_one(ds_name, vcenter_uuid, ccr_ref, host_id)
one = ""
one << "NAME=\"#{ds_name}\"\n"
one << "TM_MAD=vcenter\n"
one << "VCENTER_INSTANCE_ID=\"#{vcenter_uuid}\"\n"
one << "VCENTER_CCR_REF=\"#{ccr_ref}\"\n"
one << "VCENTER_DS_REF=\"#{self['_ref']}\"\n"
one << "VCENTER_ONE_HOST_ID=\"#{host_id}\"\n"
return one
end
def to_one_template(one_clusters, name, ccr_ref, type, vcenter_uuid)
one_cluster = one_clusters.select { |ccr| ccr[:ref] == ccr_ref }.first rescue nil
return nil if one_cluster.nil?
ds_name = ""
if type == "IMAGE_DS"
ds_name << "#{name} (IMG)"
else
ds_name << "#{name} (SYS)"
ds_name << " [StorDRS]" if self.class == VCenterDriver::StoragePod
end
one_tmp = {
:one => to_one(ds_name, vcenter_uuid, ccr_ref, one_cluster[:host_id])
}
if type == "SYSTEM_DS"
one_tmp[:one] << "TYPE=SYSTEM_DS\n"
else
one_tmp[:one] << "DS_MAD=vcenter\n"
one_tmp[:one] << "TYPE=IMAGE_DS\n"
end
return one_tmp
end
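########################################################################
# Create a .vmdk through the VirtualDiskManager. The size argument is
# assumed to be in MB, since it is multiplied by 1024 to fill in the
# capacityKb field of the FileBackedVirtualDiskSpec.
########################################################################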
def create_virtual_disk(img_name, size, adapter_type, disk_type)
leading_dirs = img_name.split('/')[0..-2]
if !leading_dirs.empty?
create_directory(leading_dirs.join('/'))
end
ds_name = self['name']
vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
:adapterType => adapter_type,
:capacityKb => size.to_i*1024,
:diskType => disk_type
)
get_vdm.CreateVirtualDisk_Task(
:datacenter => get_dc.item,
:name => "[#{ds_name}] #{img_name}.vmdk",
:spec => vmdk_spec
).wait_for_completion
"#{img_name}.vmdk"
end
def create_directory(directory)
ds_name = self['name']
return if self.class == VCenterDriver::StoragePod
directory_name = "[#{ds_name}] #{directory}"
create_directory_params = {
:name => directory_name,
:datacenter => get_dc.item,
:createParentDirectories => true
}
begin
get_fm.MakeDirectory(create_directory_params)
rescue RbVmomi::VIM::FileAlreadyExists => e
# Do nothing if directory already exists
end
end
def get_fm
self['_connection.serviceContent.fileManager']
end
def get_vdm
self['_connection.serviceContent.virtualDiskManager']
end
def get_dc
item = @item
while !item.instance_of? RbVmomi::VIM::Datacenter
item = item.parent
if item.nil?
raise "Could not find the parent Datacenter"
end
end
Datacenter.new(item)
end
end # class Storage
class StoragePod < Storage
def initialize(item, vi_client=nil)
if !item.instance_of? RbVmomi::VIM::StoragePod
raise "Expecting type 'RbVmomi::VIM::StoragePod'. " <<
"Got '#{item.class} instead."
end
@item = item
end
# This is never cached
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::StoragePod.new(vi_client.vim, ref), vi_client)
end
end # class StoragePod
class Datastore < Storage
attr_accessor :one_item
def initialize(item, vi_client=nil)
if !item.instance_of? RbVmomi::VIM::Datastore
raise "Expecting type 'RbVmomi::VIM::Datastore'. " <<
"Got '#{item.class} instead."
end
@item = item
@one_item = {}
end
def delete_virtual_disk(img_name)
ds_name = self['name']
begin
get_vdm.DeleteVirtualDisk_Task(
:name => "[#{ds_name}] #{img_name}",
:datacenter => get_dc.item
).wait_for_completion
rescue Exception => e
# Ignore if file not found
if !e.message.start_with?('ManagedObjectNotFound')
raise e
end
end
end
def delete_file(img_name)
ds_name = self['name']
begin
get_fm.DeleteDatastoreFile_Task(
:name => "[#{ds_name}] #{img_name}",
:datacenter => get_dc.item
).wait_for_completion
rescue Exception => e
# Ignore if file not found
if !e.message.start_with?('ManagedObjectNotFound')
raise e
end
end
end
# Copy a VirtualDisk
def copy_virtual_disk(src_path, target_ds, target_path, new_size=nil)
source_ds_name = self['name']
target_ds_name = target_ds['name']
leading_dirs = target_path.split('/')[0..-2]
if !leading_dirs.empty?
if source_ds_name == target_ds_name
create_directory(leading_dirs.join('/'))
else
target_ds.create_directory(leading_dirs.join('/'))
end
end
copy_params = {
:sourceName => "[#{source_ds_name}] #{src_path}",
:sourceDatacenter => get_dc.item,
:destName => "[#{target_ds_name}] #{target_path}"
}
get_vdm.CopyVirtualDisk_Task(copy_params).wait_for_completion
if new_size
resize_spec = {
:name => "[#{target_ds_name}] #{target_path}",
:datacenter => target_ds.get_dc.item,
:newCapacityKb => new_size,
:eagerZero => false
}
get_vdm.ExtendVirtualDisk_Task(resize_spec).wait_for_completion
end
target_path
end
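# A usage sketch for the copy above (paths and size are illustrative):
#   src_ds.copy_virtual_disk("one/42/disk.0.vmdk", target_ds,
#                            "one/43/disk.0.vmdk", 2097152)
# new_size, when given, is passed to ExtendVirtualDisk_Task in KB.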
def rm_directory(directory)
ds_name = self['name']
rm_directory_params = {
:name => "[#{ds_name}] #{directory}",
:datacenter => get_dc.item
}
get_fm.DeleteDatastoreFile_Task(rm_directory_params)
end
def dir_empty?(path)
ds_name = self['name']
spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new
search_params = {
'datastorePath' => "[#{ds_name}] #{path}",
'searchSpec' => spec
}
ls = self['browser'].SearchDatastoreSubFolders_Task(search_params)
ls.wait_for_completion
ls.info.result && ls.info.result.length == 1 && \
ls.info.result.first.file.length == 0
end
def upload_file(source_path, target_path)
@item.upload(target_path, source_path)
end
def download_file(source, target)
@item.download(source, target)
end
# Get file size for image handling
def stat(img_str)
ds_name = self['name']
img_path = File.dirname img_str
img_name = File.basename img_str
# Create Search Spec
search_params = get_search_params(ds_name, img_path, img_name)
# Perform search task and return results
begin
search_task = self['browser'].
SearchDatastoreSubFolders_Task(search_params)
search_task.wait_for_completion
size = 0
# Try to get vmdk capacity as seen by VM
size = search_task.info.result[0].file[0].capacityKb / 1024 rescue nil
# Try to get file size
size = search_task.info.result[0].file[0].fileSize / 1024 / 1024 rescue nil if !size
raise "Could not get file size or capacity" if size.nil?
size
rescue
raise "Could not find file."
end
end
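########################################################################
# Build a HostDatastoreBrowserSearchSpec that matches vmdk and iso
# files. When img_name is nil the spec matches every file found under
# datastore_path, which is how get_images scans a whole datastore.
########################################################################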
def get_search_params(ds_name, img_path=nil, img_name=nil)
spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new
vmdisk_query = RbVmomi::VIM::VmDiskFileQuery.new
vmdisk_query.details = RbVmomi::VIM::VmDiskFileQueryFlags(:diskType => true,
:capacityKb => true,
:hardwareVersion => true,
:controllerType => true)
spec.query = [vmdisk_query,
RbVmomi::VIM::IsoImageFileQuery.new]
spec.details = RbVmomi::VIM::FileQueryFlags(:fileOwner => true,
:fileSize => true,
:fileType => true,
:modification => true)
spec.matchPattern = img_name.nil? ? [] : [img_name]
datastore_path = "[#{ds_name}]"
datastore_path << " #{img_path}" if !img_path.nil?
search_params = {'datastorePath' => datastore_path,
'searchSpec' => spec}
return search_params
end
def get_dc_path
dc = get_dc
p = dc.item.parent
path = [dc.item.name]
while p.instance_of? RbVmomi::VIM::Folder
path.unshift(p.name)
p = p.parent
end
path.delete_at(0) # The first folder is the root "Datacenters"
path.join('/')
end
def generate_file_url(path)
protocol = self['_connection.http.use_ssl?'] ? 'https://' : 'http://'
hostname = self['_connection.http.address']
port = self['_connection.http.port']
dcpath = get_dc_path
# This creates the vcenter file URL for uploading or downloading files
# e.g. https://vcenter.example:443/folder/my/file.vmdk?dcPath=DC&dsName=DS
url = "#{protocol}#{hostname}:#{port}/folder/#{path}?dcPath=#{dcpath}&dsName=#{self['name']}"
return url
end
def download_to_stdout(remote_path)
url = generate_file_url(remote_path)
pid = spawn(CURLBIN,
"-k", '--noproxy', '*', '-f',
"-b", self[_connection.cookie],
url)
Process.waitpid(pid, 0)
fail "download failed" unless $?.success?
end
def is_descriptor?(remote_path)
url = generate_file_url(remote_path)
rout, wout = IO.pipe
pid = spawn(CURLBIN,
"-I", "-k", '--noproxy', '*', '-f',
"-b", _connection.cookie,
url,
:out => wout,
:err => '/dev/null')
Process.waitpid(pid, 0)
fail "read image header failed" unless $?.success?
wout.close
size = rout.readlines.select{|l|
l.start_with?("Content-Length")
}[0].sub("Content-Length: ","")
rout.close
size.chomp.to_i < 4096 # Files smaller than 4 KB are treated as descriptors
end
def get_text_file remote_path
url = generate_file_url(remote_path)
rout, wout = IO.pipe
pid = spawn CURLBIN, "-k", '--noproxy', '*', '-f',
"-b", _connection.cookie,
url,
:out => wout,
:err => '/dev/null'
Process.waitpid(pid, 0)
fail "get text file failed" unless $?.success?
wout.close
output = rout.readlines
rout.close
return output
end
def get_images
img_templates = []
ds_id = nil
ds_name = self['name']
img_types = ["FloppyImageFileInfo",
"IsoImageFileInfo",
"VmDiskFileInfo"]
ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool, false)
if ipool.respond_to?(:message)
raise "Could not get OpenNebula ImagePool: #{pool.message}"
end
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
if dpool.respond_to?(:message)
raise "Could not get OpenNebula DatastorePool: #{pool.message}"
end
ds_id = @one_item["ID"]
begin
# Create Search Spec
search_params = get_search_params(ds_name)
# Perform search task and return results
search_task = self['browser'].
SearchDatastoreSubFolders_Task(search_params)
search_task.wait_for_completion
search_task.info.result.each do |result|
folderpath = ""
if result.folderPath[-1] != "]"
folderpath = result.folderPath.sub(/^\[#{ds_name}\] /, "")
end
result.file.each do |image|
image_path = ""
# Skip not relevant files
next if !img_types.include? image.class.to_s
# Get image path and name
image_path << folderpath << image.path
image_name = File.basename(image.path).reverse.sub("kdmv.","").reverse
# Get image's disk and type
image_type = image.class.to_s == "VmDiskFileInfo" ? "OS" : "CDROM"
disk_type = image.class.to_s == "VmDiskFileInfo" ? image.diskType : nil
# Assign image prefix if known or assign default prefix
controller = image.controllerType rescue nil
if controller
disk_prefix = controller == "VirtualIDEController" ? "hd" : "sd"
else
# Get default value for disks that are not attached to any controller
disk_prefix = VCenterDriver::VIHelper.get_default("IMAGE/TEMPLATE/DEV_PREFIX")
end
#Set template
one_image = "NAME=\"#{image_name} - #{ds_name}\"\n"
one_image << "PATH=\"vcenter://#{image_path}\"\n"
one_image << "PERSISTENT=\"NO\"\n"
one_image << "TYPE=\"#{image_type}\"\n"
one_image << "VCENTER_DISK_TYPE=\"#{disk_type}\"\n" if disk_type
one_image << "VCENTER_IMPORTED=\"YES\"\n"
one_image << "DEV_PREFIX=\"#{disk_prefix}\"\n"
if VCenterDriver::VIHelper.find_by_name(OpenNebula::ImagePool,
"#{image_name} - #{ds_name}",
ipool,
false).nil?
img_templates << {
:name => "#{image_name} - #{ds_name}",
:path => image_path,
:size => (image.fileSize / 1024).to_s,
:type => image.class.to_s,
:dsid => ds_id,
:one => one_image
}
end
end
end
rescue Exception => e
raise "Could not find images. Reason: #{e.message}/#{e.backtrace}"
end
return img_templates
end
# This is never cached
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::Datastore.new(vi_client.vim, ref), vi_client)
end
end # class Datastore
end # module VCenterDriver

View File

@ -0,0 +1,132 @@
require 'fileutils'
require 'tempfile'
module VCenterDriver
class FileHelper
def self.get_img_name(disk, vm_id, vm_name)
if disk["PERSISTENT"] == "YES" || disk["TYPE"] == "CDROM"
return disk["SOURCE"]
else
disk_id = disk["DISK_ID"]
if disk["SOURCE"]
image_name = disk["SOURCE"].split(".").first
return "#{image_name}-#{vm_id}-#{disk_id}.vmdk"
else
ds_volatile_dir = disk["VCENTER_DS_VOLATILE_DIR"] || "one-volatile"
return "#{ds_volatile_dir}/#{vm_id}/one-#{vm_id}-#{disk_id}.vmdk"
end
end
end
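# A minimal sketch of the naming rule above (the disk hash is
# hypothetical, mimicking what the OpenNebula XML would provide):
#   disk = {"PERSISTENT" => "NO", "DISK_ID" => "0",
#           "SOURCE" => "one/img.vmdk"}
#   FileHelper.get_img_name(disk, 42, "vm") # => "one/img-42-0.vmdk"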
def self.get_img_name_from_path(path, vm_id, disk_id)
return "#{path.split(".").first}-#{vm_id}-#{disk_id}.vmdk"
end
def self.is_remote_or_needs_unpack?(file)
return is_remote?(file) || needs_unpack?(file)
end
def self.is_remote?(file)
file.match(%r{^https?://})
end
def self.is_vmdk?(file)
type = %x{file #{file}}
type.include? "VMware"
end
def self.is_iso?(file)
type = %x{file #{file}}
type.include? "ISO"
end
def self.get_type(file)
type = %x{file -b --mime-type #{file}}
if $?.exitstatus != 0
STDERR.puts "Can not read file #{file}"
exit(-1)
end
type.strip
end
def self.needs_unpack?(file_path)
type = get_type(file_path)
type.gsub!(%r{^application/(x-)?}, '')
return %w{bzip2 gzip tar}.include?(type)
end
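# vcenter_file_info inspects a local path and returns a hash describing
# how to upload it: :standalone for monolithic vmdk or iso files, :flat
# for a text descriptor plus the RW extents listed inside it.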
def self.vcenter_file_info(file_path)
if File.directory?(file_path)
files = Dir["#{file_path}/*.vmdk"]
found = false
count = 0
last = nil
files.each do |f|
if get_type(f).strip == "text/plain"
file_path = f
found = true
break
else
count += 1
last = f
end
end
if !found
if count == 1
file_path = last
found = true
else
STDERR.puts "Could not find vmdk"
exit(-1)
end
end
end
case get_type(file_path).strip
when "application/octet-stream"
return {
:type => :standalone,
:file => file_path,
:dir => File.dirname(file_path)
}
when "application/x-iso9660-image"
return {
:type => :standalone,
:file => file_path,
:dir => File.dirname(file_path),
:extension => '.iso'
}
when "text/plain"
info = {
:type => :flat,
:file => file_path,
:dir => File.dirname(file_path)
}
files_list = []
descriptor = File.read(file_path).split("\n")
flat_files = descriptor.select {|l| l.start_with?("RW")}
flat_files.each do |f|
files_list << info[:dir] + "/" +
f.split(" ")[3].chomp.chomp('"').reverse.chomp('"').reverse
end
info[:flat_files] = files_list
return info
else
STDERR.puts "Unrecognized file type"
exit(-1)
end
end
end # class FileHelper
end # module VCenterDriver

View File

@ -0,0 +1,965 @@
module VCenterDriver
class HostFolder
attr_accessor :item, :items
def initialize(item)
@item = item
@items = {}
end
def fetch_clusters!
VIClient.get_entities(@item, 'ClusterComputeResource').each do |item|
item_name = item._ref
@items[item_name.to_sym] = ClusterComputeResource.new(item)
end
end
def get_cluster(ref)
if !@items[ref.to_sym]
rbvmomi_dc = RbVmomi::VIM::ClusterComputeResource.new(@item._connection, ref)
@items[ref.to_sym] = ClusterComputeResource.new(rbvmomi_dc)
end
@items[ref.to_sym]
end
end # class HostFolder
class ClusterComputeResource
attr_accessor :item
attr_accessor :rp_list
include Memoize
def initialize(item, vi_client=nil)
@item = item
@vi_client = vi_client
@rp_list = nil
end
def fetch_resource_pools(rp, rp_array = [])
rp_array << rp
rp.resourcePool.each do |child_rp|
fetch_resource_pools(child_rp, rp_array)
end
rp_array
end
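# Memoized accessor over the recursive fetch above; note that the
# cluster root resource pool itself is included in the returned array.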
def resource_pools
if @resource_pools.nil?
@resource_pools = fetch_resource_pools(@item.resourcePool)
end
@resource_pools
end
def get_resource_pool_list(rp = @item.resourcePool, parent_prefix = "", rp_array = [])
current_rp = ""
if !parent_prefix.empty?
current_rp << parent_prefix
current_rp << "/"
end
resource_pool, name = rp.collect("resourcePool","name")
current_rp << name if name != "Resources"
resource_pool.each do |child_rp|
get_resource_pool_list(child_rp, current_rp, rp_array)
end
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info if !current_rp.empty?
rp_array
end
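########################################################################
# Cluster monitoring in OpenNebula probe format. CPU figures are
# expressed in percentage points (100 per core). Memory is normalized
# to KB: summary.totalMemory arrives in bytes and
# summary.effectiveMemory in MB.
########################################################################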
def monitor
total_cpu,
num_cpu_cores,
effective_cpu,
total_memory,
effective_mem,
num_hosts,
num_eff_hosts = @item.collect("summary.totalCpu",
"summary.numCpuCores",
"summary.effectiveCpu",
"summary.totalMemory",
"summary.effectiveMemory",
"summary.numHosts",
"summary.numEffectiveHosts"
)
mhz_core = total_cpu.to_f / num_cpu_cores.to_f
eff_core = effective_cpu.to_f / mhz_core
free_cpu = sprintf('%.2f', eff_core * 100).to_f
total_cpu = num_cpu_cores.to_f * 100
used_cpu = sprintf('%.2f', total_cpu - free_cpu).to_f
total_mem = total_memory.to_i / 1024
free_mem = effective_mem.to_i * 1024
str_info = ""
# Get cluster name for informative purposes (replace space with _ if any)
str_info << "VCENTER_NAME=" << self['name'].tr(" ", "_") << "\n"
# System
str_info << "HYPERVISOR=vcenter\n"
str_info << "TOTALHOST=" << num_hosts.to_s << "\n"
str_info << "AVAILHOST=" << num_eff_hosts.to_s << "\n"
# CPU
str_info << "CPUSPEED=" << mhz_core.to_s << "\n"
str_info << "TOTALCPU=" << total_cpu.to_s << "\n"
str_info << "USEDCPU=" << used_cpu.to_s << "\n"
str_info << "FREECPU=" << free_cpu.to_s << "\n"
# Memory
str_info << "TOTALMEMORY=" << total_mem.to_s << "\n"
str_info << "FREEMEMORY=" << free_mem.to_s << "\n"
str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s << "\n"
str_info << "VCENTER_LAST_PERF_POLL=" << Time.now.to_i.to_s << "\n"
str_info << monitor_resource_pools(mhz_core)
end
def monitor_resource_pools(mhz_core)
@rp_list = get_resource_pool_list
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for RPs inside this cluster
type: ['ResourcePool'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"config.cpuAllocation.expandableReservation",
"config.cpuAllocation.limit",
"config.cpuAllocation.reservation",
"config.cpuAllocation.shares.level",
"config.cpuAllocation.shares.shares",
"config.memoryAllocation.expandableReservation",
"config.memoryAllocation.limit",
"config.memoryAllocation.reservation",
"config.memoryAllocation.shares.level",
"config.memoryAllocation.shares.shares"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'ResourcePool', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
rps = {}
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
rps[r.obj._ref] = hashed_properties
end
end
return "" if rps.empty?
rp_info = ""
rps.each{|ref, info|
# CPU
cpu_expandable = info["config.cpuAllocation.expandableReservation"] ? "YES" : "NO"
cpu_limit = info["config.cpuAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.cpuAllocation.limit"]
cpu_reservation = info["config.cpuAllocation.reservation"]
cpu_num = cpu_reservation.to_f / mhz_core
cpu_shares_level = info["config.cpuAllocation.shares.level"]
cpu_shares = info["config.cpuAllocation.shares.shares"]
# MEMORY
mem_expandable = info["config.memoryAllocation.expandableReservation"] ? "YES" : "NO"
mem_limit = info["config.memoryAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.memoryAllocation.limit"]
mem_reservation = info["config.memoryAllocation.reservation"].to_f
mem_shares_level = info["config.memoryAllocation.shares.level"]
mem_shares = info["config.memoryAllocation.shares.shares"]
rp_name = rp_list.select { |item| item[:ref] == ref}.first[:name] rescue ""
rp_name = "Resources" if rp_name.empty?
rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
rp_info << "NAME=\"#{rp_name}\","
rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
rp_info << "CPU_LIMIT=#{cpu_limit},"
rp_info << "CPU_RESERVATION=#{cpu_reservation},"
rp_info << "CPU_RESERVATION_NUM_CORES=#{cpu_num},"
rp_info << "CPU_SHARES=#{cpu_shares},"
rp_info << "CPU_SHARES_LEVEL=#{cpu_shares_level},"
rp_info << "MEM_EXPANDABLE=#{mem_expandable},"
rp_info << "MEM_LIMIT=#{mem_limit},"
rp_info << "MEM_RESERVATION=#{mem_reservation},"
rp_info << "MEM_SHARES=#{mem_shares},"
rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
rp_info << "]"
}
view.DestroyView
return rp_info
end
def monitor_host_systems
host_info = ""
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for Hosts inside this cluster
type: ['HostSystem'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"name",
"runtime.connectionState",
"summary.hardware.numCpuCores",
"summary.hardware.memorySize",
"summary.hardware.cpuModel",
"summary.hardware.cpuMhz",
"summary.quickStats.overallCpuUsage",
"summary.quickStats.overallMemoryUsage"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'HostSystem', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
hosts = {}
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::HostSystem)
hosts[r.obj._ref] = hashed_properties
end
end
hosts.each do |ref, info|
next if info["runtime.connectionState"] != "connected"
total_cpu = info["summary.hardware.numCpuCores"] * 100
used_cpu = (info["summary.quickStats.overallCpuUsage"].to_f / info["summary.hardware.cpuMhz"].to_f) * 100
used_cpu = sprintf('%.2f', used_cpu).to_f # Trim precision
free_cpu = total_cpu - used_cpu
total_memory = info["summary.hardware.memorySize"]/1024
used_memory = info["summary.quickStats.overallMemoryUsage"]*1024
free_memory = total_memory - used_memory
host_info << "\nHOST=["
host_info << "STATE=on,"
host_info << "HOSTNAME=\"" << info["name"].to_s << "\","
host_info << "MODELNAME=\"" << info["summary.hardware.cpuModel"].to_s << "\","
host_info << "CPUSPEED=" << info["summary.hardware.cpuMhz"].to_s << ","
host_info << "MAX_CPU=" << total_cpu.to_s << ","
host_info << "USED_CPU=" << used_cpu.to_s << ","
host_info << "FREE_CPU=" << free_cpu.to_s << ","
host_info << "MAX_MEM=" << total_memory.to_s << ","
host_info << "USED_MEM=" << used_memory.to_s << ","
host_info << "FREE_MEM=" << free_memory.to_s
host_info << "]"
end
view.DestroyView # Destroy the view
return host_info
end
def monitor_vms
vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
cluster_name = self["name"]
cluster_ref = self["_ref"]
# Get info of the host where the VM/template is located
host_id = nil
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
cluster_ref,
vc_uuid)
host_id = one_host["ID"] if one_host
# Extract CPU info and name for each esx host in cluster
esx_hosts = {}
@item.host.each do |esx_host|
info = {}
info[:name] = esx_host.name
info[:cpu] = esx_host.summary.hardware.cpuMhz.to_f
esx_hosts[esx_host._ref] = info
end
@monitored_vms = Set.new
str_info = ""
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for VMs inside this cluster
type: ['VirtualMachine'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"name", #VM name
"config.template", #To filter out templates
"summary.runtime.powerState", #VM power state
"summary.quickStats.hostMemoryUsage", #Memory usage
"summary.quickStats.overallCpuUsage", #CPU used by VM
"runtime.host", #ESX host
"resourcePool", #RP
"guest.guestFullName",
"guest.net", #IP addresses as seen by guest tools,
"guest.guestState",
"guest.toolsVersion",
"guest.toolsRunningStatus",
"guest.toolsVersionStatus2", #IP addresses as seen by guest tools,
"config.extraConfig", #VM extraconfig info e.g opennebula.vm.running
"config.hardware.numCPU",
"config.hardware.memoryMB",
"config.annotation"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'VirtualMachine', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
vms = {}
vm_objects = []
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
#Only take care of VMs, not templates
if !hashed_properties["config.template"]
vms[r.obj._ref] = hashed_properties
vm_objects << r.obj
end
end
end
pm = @vi_client.vim.serviceContent.perfManager
stats = []
max_samples = 9
refresh_rate = 20 #Real time stats takes samples every 20 seconds
last_mon_time = one_host["TEMPLATE/VCENTER_LAST_PERF_POLL"]
if last_mon_time
interval = (Time.now.to_i - last_mon_time.to_i)
interval = 3601 if interval < 0
samples = (interval / refresh_rate)
samples = 1 if samples == 0
max_samples = interval > 3600 ? 9 : samples
end
if !vm_objects.empty?
stats = pm.retrieve_stats(
vm_objects,
['net.transmitted','net.bytesRx','net.bytesTx','net.received',
'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
'virtualDisk.read','virtualDisk.write'],
{max_samples: max_samples}
)
end
@rp_list = get_resource_pool_list if !@rp_list
vms.each do |vm_ref,info|
begin
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, @vi_client)
esx_host = esx_hosts[info["runtime.host"]._ref]
info[:esx_host_name] = esx_host[:name]
info[:esx_host_cpu] = esx_host[:cpu]
info[:cluster_name] = cluster_name
info[:cluster_ref] = cluster_ref
info[:vc_uuid] = vc_uuid
info[:host_id] = host_id
info[:rp_list] = @rp_list
vm.vm_info = info
number = -1
# Check the running flag
running_flag = info["config.extraConfig"].select do |val|
val[:key] == "opennebula.vm.running"
end
if !running_flag.empty? && running_flag.first
running_flag = running_flag[0][:value]
end
next if running_flag == "no"
# Extract vmid if possible
matches = info["name"].match(/^one-(\d*)(-(.*))?$/)
number = matches[1] if matches
# Extract vmid from ref and vcenter instance uuid if possible
if number == -1
one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
"DEPLOY_ID",
vm_ref,
vc_uuid)
number = one_vm["ID"] if one_vm
end
if number != -1
next if @monitored_vms.include? number
@monitored_vms << number
vm.one_item if vm.get_vm_id
end
vm.monitor(stats)
vm_name = "#{info["name"]} - #{cluster_name}"
str_info << %Q{
VM = [
ID="#{number}",
VM_NAME="#{vm_name}",
DEPLOY_ID="#{vm_ref}",
}
if number == -1
vm_template_64 = Base64.encode64(vm.vm_to_one(vm_name)).gsub("\n","")
str_info << "VCENTER_TEMPLATE=\"YES\","
str_info << "IMPORT_TEMPLATE=\"#{vm_template_64}\","
end
str_info << "POLL=\"#{vm.info.gsub('"', "\\\"")}\"]"
rescue Exception => e
STDERR.puts e.inspect
STDERR.puts e.backtrace
end
end
view.DestroyView # Destroy the view
return str_info
end
def monitor_customizations
customizations = self['_connection'].serviceContent.customizationSpecManager.info
text = ''
customizations.each do |c|
t = "CUSTOMIZATION = [ "
t << %Q<NAME = "#{c.name}", >
t << %Q<TYPE = "#{c.type}" ]\n>
text << t
end
text
end
def get_dc
item = @item
while !item.instance_of? RbVmomi::VIM::Datacenter
item = item.parent
if item.nil?
raise "Could not find the parent Datacenter"
end
end
Datacenter.new(item)
end
def self.to_one(cluster, con_ops, rp, one_cluster_id)
one_host = VCenterDriver::VIHelper.new_one_item(OpenNebula::Host)
if OpenNebula.is_error?(one_host)
raise "Could not create host: #{one_host.message}"
end
one_cluster_id = -1 if !one_cluster_id
rc = one_host.allocate(cluster[:cluster_name], 'vcenter', 'vcenter', one_cluster_id.to_i)
if OpenNebula.is_error?(rc)
raise "Could not allocate host: #{rc.message}"
end
template = "VCENTER_HOST=\"#{con_ops[:host]}\"\n"\
"VCENTER_PASSWORD=\"#{con_ops[:password]}\"\n"\
"VCENTER_USER=\"#{con_ops[:user]}\"\n"\
"VCENTER_CCR_REF=\"#{cluster[:cluster_ref]}\"\n"\
"VCENTER_INSTANCE_ID=\"#{cluster[:vcenter_uuid]}\"\n"\
"VCENTER_VERSION=\"#{cluster[:vcenter_version]}\"\n"\
template << "VCENTER_RESOURCE_POOL=\"#{rp}\"" if rp
rc = one_host.update(template, false)
if OpenNebula.is_error?(rc)
update_error = rc.message
rc = one_host.delete
if OpenNebula.is_error?(rc)
raise "Could not update host: #{update_error} "\
"and could not delete host: #{rc.message}"
else
raise "Could not update host: #{rc.message}"
end
end
return one_host
end
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::ClusterComputeResource.new(vi_client.vim, ref), vi_client)
end
end # class ClusterComputeResource
class ESXHost
attr_accessor :item
include Memoize
PG_CREATE_TIMEOUT = 240 # We will wait for 4 minutes for the pg creation
def initialize(item, vi_client=nil)
@net_rollback = []
@locking = true
@item = item
@vi_client = vi_client
end
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::HostSystem.new(vi_client.vim, ref), vi_client)
end
# Locking function. Similar to flock
def lock
hostlockname = @item['name'].downcase.tr(" ", "_")
if @locking
@locking_file = File.open("/tmp/vcenter-#{hostlockname}-lock","w")
@locking_file.flock(File::LOCK_EX)
end
end
# Unlock driver execution mutex
def unlock
if @locking
@locking_file.close
end
end
########################################################################
# Check if standard switch exists in host
########################################################################
def vss_exists(vswitch_name)
vswitches = @item.configManager.networkSystem.networkInfo.vswitch
return vswitches.select{|vs| vs.name == vswitch_name }.first rescue nil
end
########################################################################
# Create a standard vcenter switch in an ESX host
########################################################################
def create_vss(name, pnics=nil, num_ports=128, mtu=1500, pnics_available=nil)
# Get NetworkSystem
nws = self['configManager.networkSystem']
vswitchspec = nil
hostbridge = nil
nics = []
if pnics
pnics = pnics.split(",")
pnics.each do |pnic|
#Add nics if not in use
nics << pnic if pnics_available.include?(pnic)
end
if !nics.empty?
hostbridge = RbVmomi::VIM::HostVirtualSwitchBondBridge(:nicDevice => nics)
end
end
#Create spec
vswitchspec = RbVmomi::VIM::HostVirtualSwitchSpec(:bridge => hostbridge, :mtu => mtu, :numPorts => num_ports)
#add vSwitch to the host
begin
nws.AddVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
rescue Exception => e
raise "The standard vSwitch #{name} could not be created. AddVirtualSwitch failed Reason: #{e.message}."
end
@net_rollback << {:action => :delete_sw, :name => name}
return name
end
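# A usage sketch (nic names are hypothetical):
#   free_nics = esx_host.get_available_pnics
#   esx_host.create_vss("one-vswitch", "vmnic1,vmnic2", 128, 1500,
#                       free_nics)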
########################################################################
# Update a standard vcenter switch in an ESX host
########################################################################
def update_vss(switch, name, pnics, num_ports, mtu)
pnics = pnics.split(",") rescue []
#Backup switch spec for rollback
orig_spec = switch.spec
#Compare current configuration and return if switch hasn't changed
same_switch = false
same_switch = switch.spec.respond_to?(:mtu) && switch.spec.mtu == mtu &&
switch.spec.respond_to?(:numPorts) && switch.spec.numPorts == num_ports &&
(!pnics || (pnics && switch.spec.respond_to?(:bridge) &&
switch.spec.bridge.respond_to?(:nicDevice) &&
switch.spec.bridge.nicDevice.uniq.sort == pnics.uniq.sort))
return if same_switch
# Let's create a new spec and update the switch
vswitchspec = nil
hostbridge = nil
nws = self['configManager.networkSystem']
hostbridge = RbVmomi::VIM::HostVirtualSwitchBondBridge(:nicDevice => pnics) if !pnics.empty?
vswitchspec = RbVmomi::VIM::HostVirtualSwitchSpec(:bridge => hostbridge, :mtu => mtu, :numPorts => num_ports)
begin
nws.UpdateVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
rescue Exception => e
raise "The standard switch with name #{name} could not be updated. Reason: #{e.message}"
end
@net_rollback << {:action => :update_sw, :name => name, :spec => orig_spec}
end
########################################################################
# Remove a standard vswitch from the host
########################################################################
def remove_vss(vswitch_name)
nws = self['configManager.networkSystem']
begin
nws.RemoveVirtualSwitch(:vswitchName => vswitch_name)
rescue RbVmomi::VIM::ResourceInUse
STDERR.puts "The standard switch #{vswitch_name} is in use so it cannot be deleted"
return nil
rescue RbVmomi::VIM::NotFound
STDERR.puts "The standard switch #{vswitch_name} was not found in vCenter"
return nil
rescue Exception => e
raise "There was a failure while deleting a vcenter standard switch #{vswitch_name}. Reason: #{e.message}"
end
return vswitch_name
end
########################################################################
# Get physical nics that are available in a host
########################################################################
def get_available_pnics
pnics_in_use = []
pnics_available = []
# Get pnics in use in standard switches
@item.config.network.vswitch.each do |vs|
vs.pnic.each do |pnic|
pnic.slice!("key-vim.host.PhysicalNic-")
pnics_in_use << pnic
end
end
# Get pnics in host
self['config.network'].pnic.each do |pnic|
pnics_available << pnic.device if !pnics_in_use.include?(pnic.device)
end
return pnics_available
end
########################################################################
# Check if proxy switch exists in host for distributed virtual switch
########################################################################
def proxy_switch_exists(switch_name)
nws = self['configManager.networkSystem']
proxy_switches = nws.networkInfo.proxySwitch
return proxy_switches.select{|ps| ps.dvsName == switch_name }.first rescue nil
end
########################################################################
# Assign a host to a distributed vcenter switch (proxy switch)
########################################################################
def assign_proxy_switch(dvs, switch_name, pnics, pnics_available)
dvs = dvs.item
# Prepare spec for DVS reconfiguration
configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
configSpec.name = switch_name
configSpec.configVersion = dvs['config.configVersion']
# Check if host is already assigned to distributed switch
operation = "add"
operation = "edit" if !dvs['config.host'].select { |host| host.config.host._ref == self['_ref'] }.empty?
# Add host members to the distributed virtual switch
host_member_spec = RbVmomi::VIM::DistributedVirtualSwitchHostMemberConfigSpec.new
host_member_spec.host = @item
host_member_spec.operation = operation
host_member_spec.backing = RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicBacking.new
host_member_spec.backing.pnicSpec = []
# If pnics are needed assign pnics for uplinks
if pnics
pnics = pnics.split(",")
# Get uplink portgroup from dvswitch
uplink_key = dvs['config.uplinkPortgroup'].select{
|ul| ul.name == "#{switch_name}-uplink-pg"}.first.key rescue nil
raise "Cannot find the uplink portgroup for #{switch_name}" if !uplink_key
pnics.each {|pnic|
pnicSpec = RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicSpec.new
pnicSpec.pnicDevice = pnic
pnicSpec.uplinkPortgroupKey = uplink_key
host_member_spec.backing.pnicSpec << pnicSpec
}
end
configSpec.host = [host_member_spec]
# The DVS must be reconfigured
dvs_reconfigure_task = dvs.ReconfigureDvs_Task(:spec => configSpec)
dvs_reconfigure_task.wait_for_completion
if dvs_reconfigure_task.info.state != 'success'
raise "It wasn't possible to assign host #{self["name"]} as a member of #{switch_name}'"
end
return dvs
end
########################################################################
# Create a standard port group
########################################################################
def create_pg(pgname, vswitch, vlan=0)
spec = RbVmomi::VIM.HostPortGroupSpec(
:name => pgname,
:vlanId => vlan,
:vswitchName => vswitch,
:policy => RbVmomi::VIM.HostNetworkPolicy
)
nws = self['configManager.networkSystem']
begin
nws.AddPortGroup(:portgrp => spec)
rescue Exception => e
raise "A port group with name #{pgname} could not be created. Reason: #{e.message}"
end
@net_rollback << {:action => :delete_pg, :name => pgname}
# wait until the network is ready and we have a reference
networks = @item['network'].select{ |net| net.name == pgname }
(0..PG_CREATE_TIMEOUT).each do
break if !networks.empty?
networks = @item['network'].select{ |net| net.name == pgname }
sleep 1
end
raise "Cannot get VCENTER_NET_REF for new port group" if networks.empty?
return networks.first._ref
end
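# A usage sketch (port group, switch and vlan are illustrative):
#   ref = esx_host.create_pg("one-pg-105", "vSwitch0", 105)
# The returned _ref is what OpenNebula stores as VCENTER_NET_REF.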
########################################################################
# Check if standard port group exists in host
########################################################################
def pg_exists(pg_name)
nws = self['configManager.networkSystem']
portgroups = nws.networkInfo.portgroup
return portgroups.select{|pg| pg.spec.name == pg_name }.first rescue nil
end
########################################################################
# Is the switch for the pg different?
########################################################################
def pg_changes_sw?(pg, switch_name)
return pg.spec.respond_to?(:vswitchName) && pg.spec.vswitchName != switch_name
end
########################################################################
# Update a standard port group
########################################################################
def update_pg(pg, switch_name, vlan_id)
if pg.spec.respond_to?(:vlanId) && pg.spec.vlanId != vlan_id
# Backup original spec
orig_spec = pg.spec
# Create new spec
pg_name = pg.spec.name
spec = RbVmomi::VIM.HostPortGroupSpec(
:name => pg_name,
:vlanId => vlan_id,
:vswitchName => switch_name,
:policy => RbVmomi::VIM.HostNetworkPolicy
)
nws = self['configManager.networkSystem']
begin
nws.UpdatePortGroup(:pgName => pg_name, :portgrp => spec)
rescue Exception => e
raise "A port group with name #{pg_name} could not be updated. Reason: #{e.message}"
end
# Set rollback operation
@net_rollback << {:action => :update_pg, :name => pg_name, :spec => orig_spec}
end
end
########################################################################
# Remove a standard port group from the host
########################################################################
def remove_pg(pgname)
nws = self['configManager.networkSystem']
swname = nil
begin
portgroups = nws.networkConfig.portgroup
portgroups.each {|pg|
if pg.spec.name == pgname
swname = pg.spec.vswitchName
break
end
}
nws.RemovePortGroup(:pgName => pgname)
rescue RbVmomi::VIM::ResourceInUse
STDERR.puts "The standard portgroup #{pgname} is in use so it cannot be deleted"
return nil
rescue RbVmomi::VIM::NotFound
STDERR.puts "The standard portgroup #{pgname} was not found in vCenter"
return nil
rescue Exception => e
raise "There was a failure while deleting a standard portgroup #{pgname} in vCenter. Reason: #{e.message}"
end
return swname
end
def network_rollback
nws = self['configManager.networkSystem']
@net_rollback.reverse_each do |nr|
case nr[:action]
when :update_pg
begin
nws.UpdatePortGroup(:pgName => nr[:name], :portgrp => nr[:spec])
rescue Exception => e
raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :update_sw
begin
nws.UpdateVirtualSwitch(:vswitchName => nr[:name], :spec => nr[:spec])
rescue Exception => e
raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :delete_sw
begin
nws.RemoveVirtualSwitch(:vswitchName=> nr[:name])
rescue RbVmomi::VIM::ResourceInUse
return #Ignore if switch in use
rescue RbVmomi::VIM::NotFound
return #Ignore if switch not found
rescue Exception => e
raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :delete_pg
begin
nws.RemovePortGroup(:pgName => nr[:name])
rescue RbVmomi::VIM::ResourceInUse
return #Ignore if pg in use
rescue RbVmomi::VIM::NotFound
return #Ignore if pg not found
rescue Exception => e
raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}"
end
end
end
end
end # class ESXHost
end # module VCenterDriver

View File

@ -0,0 +1,778 @@
module VCenterDriver
class Importer
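# Importer wraps the interactive onevcenter import flows. import_wild
# registers a VM that already runs in vCenter: disks and nics are
# imported first so the allocated template references existing images
# and networks, then the VM is deployed and its VNC settings pushed
# back through ReconfigVM_Task.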
def self.import_wild(host_id, vm_ref, one_vm, template)
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vc_uuid = vi_client.vim.serviceContent.about.instanceUuid
dc_name = vi_client.vim.host
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool)
ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool)
vcenter_vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
error, template_disks = vcenter_vm.import_vcenter_disks(vc_uuid, dpool, ipool)
return OpenNebula::Error.new(error) if !error.empty?
template << template_disks
# Create images or get nics information for template
error, template_nics = vcenter_vm.import_vcenter_nics(vc_uuid, npool, dc_name)
return OpenNebula::Error.new(error) if !error.empty?
template << template_nics
rc = one_vm.allocate(template)
return rc if OpenNebula.is_error?(rc)
one_vm.deploy(host_id, false)
# Set reference to template disks and nics in VM template
vcenter_vm.one_item = one_vm
vcenter_vm.reference_imported_disks(vm_ref)
vcenter_vm.reference_imported_nics
# Set vnc configuration F#5074
extraconfig = []
extraconfig += vcenter_vm.extraconfig_vnc
spec_hash = { :extraConfig => extraconfig }
spec = RbVmomi::VIM.VirtualMachineConfigSpec(spec_hash)
vcenter_vm.item.ReconfigVM_Task(:spec => spec).wait_for_completion
return one_vm.id
rescue Exception => e
vi_client.close_connection if vi_client
return OpenNebula::Error.new(e.message)
end
end
def self.import_clusters(con_ops, options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
use_defaults = options.key?(:defaults)
vi_client = VCenterDriver::VIClient.new(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Exploring vCenter resources..."
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
vcenter_instance_name = vi_client.vim.host
# OpenNebula's ClusterPool
cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false)
if cpool.respond_to?(:message)
raise "Could not get OpenNebula ClusterPool: #{cpool.message}"
end
cluster_list = {}
cpool.each do |c|
cluster_list[c["ID"]] = c["NAME"]
end
# Get OpenNebula's host pool
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
raise "Could not get OpenNebula HostPool: #{hpool.message}"
end
rs = dc_folder.get_unimported_hosts(hpool,vcenter_instance_name)
STDOUT.print "done!\n\n"
rs.each {|dc, clusters|
if !use_defaults
STDOUT.print "Do you want to process datacenter #{dc} (y/[n])? "
next if STDIN.gets.strip.downcase != 'y'
end
if clusters.empty?
STDOUT.puts "\n No new clusters found in #{dc}..."
next
end
clusters.each{ |cluster|
one_cluster_id = nil
rpool = nil
if !use_defaults
STDOUT.print "\n * Import cluster #{cluster[:cluster_name]} (y/[n])? "
next if STDIN.gets.strip.downcase != 'y'
if cluster_list.size > 1
STDOUT.print "\n In which OpenNebula cluster do you want the vCenter cluster to be included?\n "
cluster_list_str = "\n"
cluster_list.each do |key, value|
cluster_list_str << " - ID: " << key << " - NAME: " << value << "\n"
end
STDOUT.print "\n #{cluster_list_str}"
STDOUT.print "\n Specify the ID of the cluster or Enter to use the default cluster: "
answer = STDIN.gets.strip
one_cluster_id = answer if !answer.empty?
end
end
one_host = VCenterDriver::ClusterComputeResource.to_one(cluster,
con_ops,
rpool,
one_cluster_id)
STDOUT.puts "\n OpenNebula host #{cluster[:cluster_name]} with"\
" id #{one_host.id} successfully created."
STDOUT.puts
}
}
rescue Interrupt => e
puts "\n"
exit 0 #Ctrl+C
rescue Exception => e
STDOUT.puts " Error: #{e.message}/\n#{e.backtrace}"
ensure
vi_client.close_connection if vi_client
end
end
def self.import_templates(con_ops, options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
use_defaults = options.key?(:defaults)
vi_client = VCenterDriver::VIClient.new(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Looking for VM Templates..."
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
# Get OpenNebula's templates pool
tpool = VCenterDriver::VIHelper.one_pool(OpenNebula::TemplatePool, false)
if tpool.respond_to?(:message)
raise "Could not get OpenNebula TemplatePool: #{tpool.message}"
end
rs = dc_folder.get_unimported_templates(vi_client, tpool)
STDOUT.print "done!\n"
# Create OpenNebula pools
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool)
ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool)
# Get the vCenter instance uuid, since a moref is only unique within a single vCenter
vc_uuid = vi_client.vim.serviceContent.about.instanceUuid
rs.each {|dc, tmps|
if !use_defaults
STDOUT.print "\nDo you want to process datacenter #{dc} (y/[n])? "
next if STDIN.gets.strip.downcase != 'y'
end
if tmps.empty?
STDOUT.print " No new VM Templates found in #{dc}...\n\n"
next
end
tmps.each{ |t|
template = nil
template_copy_ref = nil
if !use_defaults
STDOUT.print "\n * VM Template found:\n"\
" - Name : #{t[:name]}\n"\
" - Moref : #{t[:vcenter_ref]}\n"\
" - Cluster: #{t[:cluster_name]}\n"\
" Import this VM template (y/[n])? "
next if STDIN.gets.strip.downcase != 'y'
end
# Linked Clones
if !use_defaults
template = VCenterDriver::Template.new_from_ref(t[:vcenter_ref], vi_client)
STDOUT.print "\n For faster deployment operations"\
" and lower disk usage, OpenNebula"\
" can create new VMs as linked clones."\
"\n Would you like to use Linked Clones with VMs based on this template (y/[n])? "
if STDIN.gets.strip.downcase == 'y'
STDOUT.print "\n Linked clones requires that delta"\
" disks must be created for each disk in the template."\
" This operation may change the template contents."\
" \n Do you want OpenNebula to create a copy of the template,"\
" so the original template remains untouched ([y]/n)? "
template = t[:template]
if STDIN.gets.strip.downcase != 'n'
STDOUT.print "\n The new template will be named"\
" adding a one- prefix to the name"\
" of the original template. \n"\
" If you prefer a different name"\
" please specify or press Enter"\
" to use defaults: "
template_name = STDIN.gets.strip.downcase
STDOUT.print "\n WARNING!!! The cloning operation can take some time"\
" depending on the size of disks. Please wait...\n"
error, template_copy_ref = template.create_template_copy(template_name)
if template_copy_ref
template = VCenterDriver::Template.new_from_ref(template_copy_ref, vi_client)
one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, vi_client, options[:vcenter], dc)
if one_template
#Now create delta disks
STDOUT.print "\n Delta disks are being created, please be patient..."
lc_error, use_lc = template.create_delta_disks
if lc_error
STDOUT.print "\n ERROR. Something was wrong with the create delta disks on the template operation: #{lc_error}.\n"\
"\n Linked Clones will not be used with this template.\n"
else
one_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n"
t = one_template
end
else
STDOUT.print "\n ERROR. Something was wrong obtaining the info from the template's copy.\n"\
"\n Linked Clones will not be used with this template.\n"
template.delete_template if template_copy_ref
end
else
STDOUT.print "\n ERROR. #{error}\n"
end
else
# Create linked clones on top of the existing template
# Create a VirtualMachine object from the template_copy_ref
STDOUT.print "\n Delta disks are being created, please be patient..."
lc_error, use_lc = template.create_delta_disks
if lc_error
STDOUT.print "\n ERROR. Something was wrong with the create delta disks on the template operation: #{lc_error}.\n"\
"\n Linked Clones will not be used with this template.\n"
end
t[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc
end
end
end
vcenter_vm_folder = ""
if !use_defaults
STDOUT.print "\n\n Do you want to specify a folder where"\
" the deployed VMs based on this template will appear"\
" in vSphere's VM and Templates section?"\
"\n If no path is set, VMs will be placed in the same"\
" location where the template lives."\
"\n Please specify a path using slashes to separate folders"\
" e.g /Management/VMs or press Enter to use defaults: "\
vcenter_vm_folder = STDIN.gets.strip
t[:one] << "VCENTER_VM_FOLDER=\"#{vcenter_vm_folder}\"\n" if !vcenter_vm_folder.empty?
end
## Add existing disks to template (OPENNEBULA_MANAGED)
STDOUT.print "\n The existing disks and networks in the template"\
" are being imported, please be patient..."
template = t[:template] if !template
error, template_disks = template.import_vcenter_disks(vc_uuid,
dpool,
ipool)
if error.empty?
t[:one] << template_disks
else
STDOUT.puts error
template.delete_template if template_copy_ref
next
end
error, template_nics = template.import_vcenter_nics(vc_uuid,
npool,
options[:vcenter],
dc)
if error.empty?
t[:one] << template_nics
else
STDOUT.puts error
template.delete_template if template_copy_ref
next
end
# Resource Pools
rp_input = ""
rp_split = t[:rp].split("|")
if !use_defaults
if rp_split.size > 3
STDOUT.print "\n\n This template is currently set to "\
"launch VMs in the default resource pool."\
"\n Press y to keep this behaviour, n to select"\
" a new resource pool or d to delegate the choice"\
" to the user ([y]/n/d)? "
answer = STDIN.gets.strip.downcase
case answer
when 'd'
list_of_rp = rp_split[-2]
default_rp = rp_split[-1]
rp_input = rp_split[0] + "|" + rp_split[1] + "|" +
rp_split[2] + "|"
# Available list of resource pools
input_str = " The list of available resource pools "\
"to be presented to the user are "\
"\"#{list_of_rp}\""
input_str+= "\n Press y to agree, or input a comma"\
" separated list of resource pools to edit "\
"([y]/comma separated list) "
STDOUT.print input_str
answer = STDIN.gets.strip
if !answer.empty? && answer.downcase != 'y'
rp_input += answer + "|"
else
rp_input += rp_split[3] + "|"
end
# Default
input_str = " The default resource pool presented "\
"to the end user is set to"\
" \"#{default_rp}\"."
input_str+= "\n Press y to agree, or input a new "\
"resource pool ([y]/resource pool name) "
STDOUT.print input_str
answer = STDIN.gets.strip
if !answer.empty? && answer.downcase != 'y'
rp_input += answer
else
rp_input += rp_split[4]
end
when 'n'
list_of_rp = rp_split[-2]
STDOUT.print " The list of available resource pools is:\n\n"
index = 1
t[:rp_list].each do |r|
list_str = " - #{r[:name]}\n"
index += 1
STDOUT.print list_str
end
input_str = "\n Please input the new default"\
" resource pool name: "
STDOUT.print input_str
answer = STDIN.gets.strip
t[:one] << "VCENTER_RESOURCE_POOL=\"#{answer}\"\n"
end
end
end
if !rp_input.empty?
t[:one] << "USER_INPUTS=["
t[:one] << "VCENTER_RESOURCE_POOL=\"#{rp_input}\"," if !rp_input.empty?
t[:one] = t[:one][0..-2]
t[:one] << "]"
end
one_t = VCenterDriver::VIHelper.new_one_item(OpenNebula::Template)
rc = one_t.allocate(t[:one])
if ::OpenNebula.is_error?(rc)
STDOUT.puts " Error creating template: #{rc.message}\n"
template.delete_template if template_copy_ref
else
STDOUT.puts " OpenNebula template #{one_t.id} created!\n"
end
}
}
rescue Interrupt => e
puts "\n"
exit 0 #Ctrl+C
rescue Exception => e
STDOUT.puts " Error: #{e.message}/\n#{e.backtrace}"
ensure
vi_client.close_connection if vi_client
end
end
def self.import_networks(con_ops, options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
use_defaults = options.key?(:defaults)
vi_client = VCenterDriver::VIClient.new(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Looking for vCenter networks..."
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
# OpenNebula's VirtualNetworkPool
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
if npool.respond_to?(:message)
raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}"
end
rs = dc_folder.get_unimported_networks(npool,options[:vcenter])
STDOUT.print "done!\n"
rs.each {|dc, tmps|
if !use_defaults
STDOUT.print "\nDo you want to process datacenter #{dc} [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
end
if tmps.empty?
STDOUT.print " No new Networks found in #{dc}...\n\n"
next
end
tmps.each do |n|
one_cluster_id = nil
if !use_defaults
print_str = "\n * Network found:\n"\
" - Name : #{n[:name]}\n"\
" - Type : #{n[:type]}\n"
print_str << " - Cluster : #{n[:cluster]}\n"
print_str << " Import this Network (y/[n])? "
STDOUT.print print_str
next if STDIN.gets.strip.downcase != 'y'
end
size="255"
ar_type="e"
first_ip=nil
first_mac=nil
slaac=nil
global_prefix=nil
ula_prefix=nil
ip6_address = nil
prefix_length = nil
# Size
if !use_defaults
STDOUT.print " How many VMs are you planning"\
" to fit into this network [255]? "
size_answer = STDIN.gets.strip
if !size_answer.empty?
size = size_answer.to_i.to_s rescue "255"
end
end
# Type
if !use_defaults
STDOUT.print " What type of Virtual Network"\
" do you want to create (IPv[4],IPv[6]"\
",[E]thernet) ?"
type_answer = STDIN.gets.strip
if ["4","6","e"].include?(type_answer.downcase)
ar_type = type_answer.downcase
else
ar_type = "e"
STDOUT.puts " Type [#{type_answer}] not supported,"\
" defaulting to Ethernet."
end
case ar_type.downcase
when "4"
STDOUT.print " Please input the first IP "\
"in the range: "
first_ip = STDIN.gets.strip
STDOUT.print " Please input the first MAC "\
"in the range [Enter for default]: "
mac_answer = STDIN.gets.strip
first_mac = mac_answer if !mac_answer.empty?
when "6"
STDOUT.print " Please input the first MAC "\
"in the range [Enter for default]: "
mac_answer = STDIN.gets.strip
first_mac = mac_answer if !mac_answer.empty?
STDOUT.print " Do you want to use SLAAC "\
"Stateless Address Autoconfiguration? ([y]/n): "
slaac_answer = STDIN.gets.strip.downcase
if slaac_answer == 'n'
slaac = false
STDOUT.print " Please input the IPv6 address (cannot be empty): "
ip6_address = STDIN.gets.strip
ip6_address = ip6_address if !ip6_address.empty?
STDOUT.print " Please input the Prefix length (cannot be empty): "
prefix_length = STDIN.gets.strip
prefix_length = prefix_length if !prefix_length.empty?
else
slaac = true
STDOUT.print " Please input the GLOBAL PREFIX "\
"[Enter for default]: "
gp_answer = STDIN.gets.strip
global_prefix = gp_answer if !gp_answer.empty?
STDOUT.print " Please input the ULA PREFIX "\
"[Enter for default]: "
ula_answer = STDIN.gets.strip
ula_prefix = ula_answer if !ula_answer.empty?
end
when "e"
STDOUT.print " Please input the first MAC "\
"in the range [Enter for default]: "
mac_answer = STDIN.gets.strip
first_mac = mac_answer if !mac_answer.empty?
end
end
ar_str = "\nAR=[TYPE=\""
case ar_type
when "4"
ar_str << "IP4\""
ar_str << ",IP=" + first_ip if first_ip
ar_str << ",MAC=" + first_mac if first_mac
when "6"
if slaac
ar_str << "IP6\""
ar_str << ",MAC=" + first_mac if first_mac
ar_str << ",GLOBAL_PREFIX=" + global_prefix if global_prefix
ar_str << ",ULA_PREFIX=" + ula_prefix if ula_prefix
else
ar_str << "IP6_STATIC\""
ar_str << ",MAC=" + first_mac if first_mac
ar_str << ",IP6=" + ip6_address if ip6_address
ar_str << ",PREFIX_LENGTH=" + prefix_length if prefix_length
end
when "e"
ar_str << "ETHER\""
ar_str << ",MAC=" + first_mac if first_mac
end
ar_str << ",SIZE = \"#{size}\"]"
n[:one] << ar_str
one_vn = VCenterDriver::VIHelper.new_one_item(OpenNebula::VirtualNetwork)
rc = one_vn.allocate(n[:one])
if ::OpenNebula.is_error?(rc)
STDOUT.puts "\n Error creating virtual network: " +
" #{rc.message}\n"
else
STDOUT.puts "\n OpenNebula virtual network " +
"#{one_vn.id} created with size #{size}!\n"
end
end
}
rescue Interrupt => e
puts "\n"
exit 0 #Ctrl+C
rescue Exception => e
STDOUT.puts " Error: #{e.message}/\n#{e.backtrace}"
ensure
vi_client.close_connection if vi_client
end
end
def self.import_datastore(con_ops, options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
use_defaults = options.key?(:defaults)
vi_client = VCenterDriver::VIClient.new(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Looking for Datastores..."
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
if dpool.respond_to?(:message)
raise "Could not get OpenNebula DatastorePool: #{dpool.message}"
end
# Get OpenNebula's host pool
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
raise "Could not get OpenNebula HostPool: #{hpool.message}"
end
rs = dc_folder.get_unimported_datastores(dpool, options[:vcenter], hpool)
STDOUT.print "done!\n"
rs.each {|dc, tmps|
if !use_defaults
STDOUT.print "\nDo you want to process datacenter #{dc} (y/[n])? "
next if STDIN.gets.strip.downcase != 'y'
end
if tmps.empty?
STDOUT.print " No new Datastores or StoragePods found in #{dc}...\n\n"
next
end
tmps.each{ |d|
if !use_defaults
STDOUT.print "\n * Datastore found:\n"\
" - Name : #{d[:name]}\n"\
" - Total MB : #{d[:total_mb]}\n"\
" - Free MB : #{d[:free_mb]}\n"\
" - Cluster : #{d[:cluster]}\n"\
" Import this as Datastore [y/n]? "
next if STDIN.gets.strip.downcase != 'y'
STDOUT.print "\n NOTE: For each vcenter datastore a SYSTEM and IMAGE datastore\n"\
" will be created in OpenNebula except for a StorageDRS which is \n"\
" represented as a SYSTEM datastore only.\n"
end
ds_allocate_error = false
d[:ds].each do |ds|
one_d = VCenterDriver::VIHelper.new_one_item(OpenNebula::Datastore)
rc = one_d.allocate(ds[:one])
if ::OpenNebula.is_error?(rc)
STDOUT.puts " \n Error creating datastore: #{rc.message}"
ds_allocate_error = true
else
STDOUT.puts " \n OpenNebula datastore #{one_d.id} created!\n"
end
end
}
}
rescue Interrupt => e
puts "\n"
exit 0 #Ctrl+C
rescue Exception => e
STDOUT.puts " Error: #{e.message}/\n#{e.backtrace}"
ensure
vi_client.close_connection if vi_client
end
end
def self.import_images(con_ops, ds_name, options)
begin
STDOUT.print "\nConnecting to vCenter: #{options[:vcenter]}..."
use_defaults = options.key?(:defaults)
vi_client = VCenterDriver::VIClient.new(con_ops)
STDOUT.print "done!\n\n"
STDOUT.print "Looking for Images..."
one_ds = VCenterDriver::VIHelper.find_by_name(OpenNebula::DatastorePool,
ds_name)
one_ds_ref = one_ds['TEMPLATE/VCENTER_DS_REF']
ds = VCenterDriver::Datastore.new_from_ref(one_ds_ref, vi_client)
ds.one_item = one_ds #Store opennebula template for datastore
images = ds.get_images
STDOUT.print "done!\n"
images.each{ |i|
if !use_defaults
STDOUT.print "\n * Image found:\n"\
" - Name : #{i[:name]}\n"\
" - Path : #{i[:path]}\n"\
" - Type : #{i[:type]}\n"\
" Import this Image (y/[n])? "
next if STDIN.gets.strip.downcase != 'y'
end
one_i = VCenterDriver::VIHelper.new_one_item(OpenNebula::Image)
rc = one_i.allocate(i[:one], i[:dsid].to_i)
if ::OpenNebula.is_error?(rc)
STDOUT.puts "Error creating image: #{rc.message}\n"
if rc.message == "[ImageAllocate] Not enough space "\
"in datastore"
STDOUT.puts "Please disable DATASTORE_CAPACITY_"\
"CHECK in /etc/one/oned.conf and "\
"restart OpenNebula."
end
else
STDOUT.puts " OpenNebula image #{one_i.id} created!\n"
end
}
rescue Interrupt => e
puts "\n"
exit 0 #Ctrl+C
rescue Exception => e
STDOUT.puts " Error: #{e.message}/\n#{e.backtrace}"
ensure
vi_client.close_connection if vi_client
end
end
end # Importer
end # module VCenterDriver
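
Note: with hypothetical wizard answers (IPv4 type, first IP 192.168.0.1, default MAC, size 255), the AR snippet that the import code above appends to the network template before calling allocate would look like this (values are illustrative, not taken from the changeset):

AR=[TYPE="IP4",IP=192.168.0.1,SIZE="255"]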

View File

@ -0,0 +1,44 @@
module Memoize
def [](property)
@memoize = {} if !defined?(@memoize)
if (value = @memoize[property])
return value
end
current_item = @item
property_path = ""
property.split(".").each do |elem|
if property_path.empty?
property_path << elem
else
property_path << "." << elem
end
if (val = @memoize[property_path])
current_item = val
else
begin
current_item = current_item.send(elem)
rescue Exception => e
current_item = nil
end
end
break if current_item.nil?
@memoize[property_path] = current_item
end
@memoize[property] = current_item
end
def []=(property, value)
@memoize = {} if !defined?(@memoize)
@memoize[property] = value
end
end # module Memoize
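
A minimal usage sketch of the module above (the Wrapper class and OpenStruct values are hypothetical, not part of this changeset): any object holding an @item that responds to chained property calls can include Memoize to cache dotted lookups.

# Illustrative only: Memoize resolves each segment of a dotted path
# with send() and caches every intermediate result.
require 'ostruct'

class Wrapper
  include Memoize
  def initialize(item)
    @item = item
  end
end

hw   = OpenStruct.new(:numCPU => 2)
conf = OpenStruct.new(:hardware => hw)
vm   = OpenStruct.new(:config => conf)

w = Wrapper.new(vm)
puts w['config.hardware.numCPU'] # => 2, resolved step by step and cached
puts w['config.hardware.numCPU'] # => 2, served from the @memoize hash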

View File

@ -0,0 +1,245 @@
module VCenterDriver
class NetworkFolder
attr_accessor :item, :items
def initialize(item)
@item = item
@items = {}
end
########################################################################
# Builds a hash with Network-Ref / Network to be used as a cache
# @return [Hash] in the form
# { net_ref [Symbol] => Network object }
########################################################################
def fetch!
VIClient.get_entities(@item, "Network").each do |item|
item_name = item._ref
@items[item_name.to_sym] = PortGroup.new(item)
end
VIClient.get_entities(@item, "DistributedVirtualPortgroup").each do |item|
item_name = item._ref
@items[item_name.to_sym] = DistributedPortGroup.new(item)
end
VIClient.get_entities(@item, "VmwareDistributedVirtualSwitch").each do |item|
item_name = item._ref
@items[item_name.to_sym] = DistributedVirtualSwitch.new(item)
end
end
########################################################################
# Returns a Network. Uses the cache if available.
# @param ref [Symbol] the vcenter ref
# @return Network
########################################################################
def get(ref)
if !@items[ref.to_sym]
rbvmomi_net = RbVmomi::VIM::Network.new(@item._connection, ref)
@items[ref.to_sym] = Network.new(rbvmomi_net)
end
@items[ref.to_sym]
end
end # class NetworkFolder
class Network
attr_accessor :item
include Memoize
def initialize(item, vi_client=nil)
if !item.instance_of?(RbVmomi::VIM::Network) &&
!item.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup)
raise "Expecting type 'RbVmomi::VIM::Network'. " <<
"Got '#{item.class}' instead."
end
@vi_client = vi_client
@item = item
end
# Checks if a RbVmomi::VIM::VirtualDevice is a network interface
def self.is_nic?(device)
!device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
end
def self.to_one_template(network_name, network_ref, network_type,
ccr_ref, ccr_name, vcenter_uuid,
vcenter_instance_name, dc_name)
one_tmp = {}
network_import_name = "[#{vcenter_instance_name} - #{dc_name}] #{network_name} - #{ccr_name.tr(" ", "_")}"
one_tmp[:name] = network_import_name
one_tmp[:bridge] = network_name
one_tmp[:type] = network_type
one_tmp[:cluster] = ccr_name
one_tmp[:vcenter_ccr_ref] = ccr_ref
one_tmp[:one] = to_one(network_import_name, network_name, network_ref, network_type,
ccr_ref, vcenter_uuid)
return one_tmp
end
def self.to_one(network_import_name, network_name, network_ref, network_type,
ccr_ref, vcenter_uuid)
template = "NAME=\"#{network_import_name}\"\n"\
"BRIDGE=\"#{network_name}\"\n"\
"VN_MAD=\"dummy\"\n"\
"VCENTER_PORTGROUP_TYPE=\"#{network_type}\"\n"\
"VCENTER_NET_REF=\"#{network_ref}\"\n"\
"VCENTER_CCR_REF=\"#{ccr_ref}\"\n"\
"VCENTER_INSTANCE_ID=\"#{vcenter_uuid}\"\n"
return template
end
def self.get_network_type(device)
if device.backing.network.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup)
return "Distributed Port Group"
else
return "Port Group"
end
end
def self.get_one_vnet_ds_by_ref_and_ccr(ref, ccr_ref, vcenter_uuid, pool = nil)
pool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false) if pool.nil?
element = pool.select do |e|
e["TEMPLATE/VCENTER_NET_REF"] == ref &&
e["TEMPLATE/VCENTER_CCR_REF"] == ccr_ref &&
e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid
end.first rescue nil
return element
end
def self.remove_net_ref(network_id)
one_vnet = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualNetwork, network_id)
one_vnet.info
one_vnet.delete_element("TEMPLATE/VCENTER_NET_REF")
one_vnet.delete_element("TEMPLATE/VCENTER_INSTANCE_ID")
tmp_str = one_vnet.template_str
one_vnet.update(tmp_str)
one_vnet.info
end
def self.vcenter_networks_to_be_removed(device_change_nics, vcenter_uuid)
networks = {}
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
device_change_nics.each do |nic|
if nic[:operation] == :remove
vnet_ref = nil
# Port group
if nic[:device].backing.respond_to?(:network)
vnet_ref = nic[:device].backing.network._ref
end
# Distributed port group
if nic[:device].backing.respond_to?(:port) &&
nic[:device].backing.port.respond_to?(:portgroupKey)
vnet_ref = nic[:device].backing.port.portgroupKey
end
# Find vnet_ref in OpenNebula's pool of networks
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
vnet_ref,
vcenter_uuid,
npool)
next if !one_network
# Add pg or dpg name that are in vcenter but not in
# OpenNebula's VM to a hash for later removal
if one_network["VN_MAD"] == "vcenter" && !networks.key?(one_network["BRIDGE"])
networks[one_network["BRIDGE"]] = one_network
end
end
end
networks
end
# This is never cached
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::Network.new(vi_client.vim, ref), vi_client)
end
end # class Network
class PortGroup < Network
def initialize(item, vi_client=nil)
if !item.instance_of?(RbVmomi::VIM::Network)
raise "Expecting type 'RbVmomi::VIM::Network'. " <<
"Got '#{item.class} instead."
end
@vi_client = vi_client
@item = item
end
def clusters
net_clusters = {}
host_members = @item['host']
host_members.each do |h|
if !net_clusters.key?(h.parent._ref.to_s)
net_clusters[h.parent._ref.to_s] = h.parent.name.to_s
end
end
net_clusters
end
def network_type
"Port Group"
end
end # class PortGroup
class DistributedPortGroup < Network
def initialize(item, vi_client=nil)
if !item.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup)
raise "Expecting type 'RbVmomi::VIM::DistributedVirtualPortgroup'. " <<
"Got '#{item.class}' instead."
end
@vi_client = vi_client
@item = item
end
def clusters
net_clusters = {}
host_members = self['config.distributedVirtualSwitch.summary.hostMember']
host_members.each do |h|
if !net_clusters.key?(h.parent._ref.to_s)
net_clusters[h.parent._ref.to_s] = h.parent.name.to_s
end
end
net_clusters
end
def network_type
"Distributed Port Group"
end
end # class DistributedPortGroup
class DistributedVirtualSwitch < Network
def initialize(item, vi_client=nil)
if !item.instance_of?(RbVmomi::VIM::VmwareDistributedVirtualSwitch)
raise "Expecting type 'RbVmomi::VIM::VmwareDistributedVirtualSwitch'. " <<
"Got '#{item.class}' instead."
end
@vi_client = vi_client
@item = item
end
end # class DistributedVirtualSwitch
end # module VCenterDriver
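
For reference, Network.to_one above renders an OpenNebula template such as the following (all values hypothetical, shown only to illustrate the attribute layout):

NAME="[vcenter - DC1] VM Network - Cluster1"
BRIDGE="VM Network"
VN_MAD="dummy"
VCENTER_PORTGROUP_TYPE="Port Group"
VCENTER_NET_REF="network-12"
VCENTER_CCR_REF="domain-c7"
VCENTER_INSTANCE_ID="11111111-2222-3333-4444-555555555555"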

View File

@ -0,0 +1,16 @@
<VCENTER>
<VM>
<TEMPLATE>
<NIC>
<MODEL>e1000</MODEL>
</NIC>
</TEMPLATE>
</VM>
<IMAGE>
<TEMPLATE>
<DEV_PREFIX>sd</DEV_PREFIX>
<VCENTER_DISK_TYPE>thin</VCENTER_DISK_TYPE>
<VCENTER_ADAPTER_TYPE>lsiLogic</VCENTER_ADAPTER_TYPE>
</TEMPLATE>
</IMAGE>
</VCENTER>

View File

@ -0,0 +1,173 @@
require 'openssl'
module VCenterDriver
class VIClient
attr_accessor :vim
attr_accessor :rp
def initialize(opts)
opts = {:insecure => true}.merge(opts)
@vim = RbVmomi::VIM.connect(opts)
# Get ccr and get rp
ccr_ref = opts.delete(:ccr)
if ccr_ref
ccr = RbVmomi::VIM::ClusterComputeResource.new(@vim, ccr_ref)
#Get ref for rp
if ccr
rp = opts.delete(:rp)
if rp
rp_list = get_resource_pools(ccr)
rp_ref = rp_list.select { |r| r[:name] == rp }.first[:ref] rescue nil
@rp = RbVmomi::VIM::ResourcePool.new(@vim, rp_ref) if rp_ref
end
end
end
end
def rp_confined?
!!@rp
end
def get_resource_pools(ccr, rp = nil, parent_prefix = "", rp_array = [])
current_rp = ""
if !rp
rp = ccr.resourcePool
else
if !parent_prefix.empty?
current_rp << parent_prefix
current_rp << "/"
end
current_rp << rp.name
end
if rp.resourcePool.size == 0
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info
else
rp.resourcePool.each do |child_rp|
get_resource_pools(ccr, child_rp, current_rp, rp_array)
end
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info if !current_rp.empty?
end
rp_array
end
def close_connection
@vim.close
end
# @return RbVmomi::VIM::<type> objects
def self.get_entities(folder, type, entities=[])
if folder == []
return nil
end
folder.childEntity.each do |child|
the_name, junk = child.to_s.split('(')
case the_name
when "Folder"
get_entities(child, type, entities)
when type
entities.push(child)
end
end
return entities
end
def self.new_from_host(host_id)
client = OpenNebula::Client.new
host = OpenNebula::Host.new_with_id(host_id, client)
rc = host.info
if OpenNebula.is_error?(rc)
puts rc.message
exit -1
end
password = host["TEMPLATE/VCENTER_PASSWORD"]
system = OpenNebula::System.new(client)
config = system.get_configuration
if OpenNebula.is_error?(config)
puts "Error getting oned configuration : #{config.message}"
exit -1
end
token = config["ONE_KEY"]
password = VIClient::decrypt(password, token)
connection = {
:host => host["TEMPLATE/VCENTER_HOST"],
:user => host["TEMPLATE/VCENTER_USER"],
:rp => host["TEMPLATE/VCENTER_RESOURCE_POOL"],
:ccr => host["TEMPLATE/VCENTER_CCR_REF"],
:password => password
}
self.new(connection)
end
def self.decrypt(msg, token)
begin
cipher = OpenSSL::Cipher.new("aes-256-cbc")
cipher.decrypt
# Truncate for Ruby 2.4 (in previous versions this was being
# automatically truncated)
cipher.key = token[0..31]
msg = cipher.update(Base64::decode64(msg))
msg << cipher.final
rescue
puts "Error decrypting secret."
exit -1
end
end
def self.in_silence
begin
orig_stderr = $stderr.clone
orig_stdout = $stdout.clone
$stderr.reopen File.new('/dev/null', 'w')
$stdout.reopen File.new('/dev/null', 'w')
retval = yield
rescue Exception => e
$stdout.reopen orig_stdout
$stderr.reopen orig_stderr
raise e
ensure
$stdout.reopen orig_stdout
$stderr.reopen orig_stderr
end
retval
end
def self.in_stderr_silence
begin
orig_stderr = $stderr.clone
$stderr.reopen File.new('/dev/null', 'w')
retval = yield
rescue Exception => e
$stderr.reopen orig_stderr
raise e
ensure
$stderr.reopen orig_stderr
end
retval
end
end
end # module VCenterDriver
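
A minimal connection sketch for the class above, assuming reachable vCenter credentials (host, user, password, cluster ref and resource pool path are all placeholders; in the driver these come from the OpenNebula host template via VIClient.new_from_host):

# Illustrative only.
opts = {
  :host     => 'vcenter.example.com',
  :user     => 'administrator@vsphere.local',
  :password => 'secret',
  :ccr      => 'domain-c7',       # optional cluster ref, used to resolve :rp
  :rp       => 'Production/Web'   # optional resource pool path
}

vi_client = VCenterDriver::VIClient.new(opts)
puts vi_client.rp_confined?       # true only if :ccr and :rp resolved to a pool
vi_client.close_connection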

View File

@ -0,0 +1,104 @@
module VCenterDriver
class VIHelper
ETC_LOCATION = "/etc/one/" if !defined?(ETC_LOCATION)
VCENTER_DRIVER_DEFAULT = "#{ETC_LOCATION}/vcenter_driver.default"
def self.client
@@client ||= OpenNebula::Client.new
end
def self.return_if_error(rc, item, exit_if_fail)
if OpenNebula::is_error?(rc)
raise rc.message if !exit_if_fail
STDERR.puts rc.message
exit 1
else
item
end
end
def self.one_item(the_class, id, exit_if_fail = true)
item = the_class.new_with_id(id, client)
rc = item.info
return_if_error(rc, item, exit_if_fail)
end
def self.new_one_item(the_class)
item = the_class.new(the_class.build_xml, client)
return item
end
def self.one_pool(the_class, exit_if_fail = true)
item = the_class.new(client)
rc = item.info
return_if_error(rc, item, exit_if_fail)
end
def self.find_by_name(the_class, name, pool = nil, raise_if_fail = true)
pool = one_pool(the_class, raise_if_fail) if pool.nil?
element = pool.select{|e| e['NAME'] == "#{name}" }.first rescue nil
if element.nil? && raise_if_fail
raise "Could not find element '#{name}' in pool '#{the_class}'"
else
element
end
end
def self.find_by_ref(the_class, attribute, ref, vcenter_uuid, pool = nil)
pool = one_pool(the_class, false) if pool.nil?
element = pool.select{|e|
e["#{attribute}"] == ref &&
(e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid ||
e["USER_TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid)}.first rescue nil
return element
end
def self.find_persistent_image_by_source(source, pool)
element = pool.select{|e|
e["SOURCE"] == source &&
e["PERSISTENT"] == "1"
}.first rescue nil
return element
end
def self.find_vcenter_vm_by_name(one_vm, host, vi_client)
# Let's try to find the VM object only by its name
# Let's build the VM name
vm_prefix = host['TEMPLATE/VM_PREFIX']
vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
vm_prefix.gsub!("$i", one_vm['ID'])
vm_name = vm_prefix + one_vm['NAME']
# We have no DEPLOY_ID, the VM has never been deployed
# let's use a view to try to find the VM from the root folder
view = vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: vi_client.vim.rootFolder,
type: ['VirtualMachine'],
recursive: true
})
vcenter_vm = view.view.find{ |v| v.name == vm_name } if view.view && !view.view.empty?
view.DestroyView # Destroy the view
return vcenter_vm
end
def self.get_default(xpath)
begin
xml = OpenNebula::XMLElement.new
xml.initialize_xml(File.read(VCENTER_DRIVER_DEFAULT), 'VCENTER')
return xml[xpath]
rescue
return nil
end
end
end # class VIHelper
end # module VCenterDriver
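
VIHelper.get_default reads driver-wide fallbacks from the vcenter_driver.default XML file shown earlier in this diff. A small sketch, assuming that file is installed at /etc/one/vcenter_driver.default:

# Illustrative only.
model = VCenterDriver::VIHelper.get_default('VM/TEMPLATE/NIC/MODEL')
# => "e1000"; returns nil if the file or the XPath is missing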

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -16,10 +16,6 @@
# limitations under the License. #
# ---------------------------------------------------------------------------- #
###############################################################################
# This script is used retrieve the file size of a disk
###############################################################################
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
if !ONE_LOCATION
@ -32,38 +28,31 @@ $: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
require 'opennebula'
deploy_id = ARGV[0]
source = ARGV[1]
target = ARGV[2]
target_index = ARGV[3]
vm_ref = ARGV[0]
drv_action_enc = ARGV[4]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'VMM_DRIVER_ACTION_DATA')
hostname = drv_action["/VMM_DRIVER_ACTION_DATA/HOST"]
vm_id = drv_action["/VMM_DRIVER_ACTION_DATA/VM/ID"]
ds_name = drv_action["/VMM_DRIVER_ACTION_DATA/VM/TEMPLATE/DISK[ATTACH='YES']/DATASTORE"]
img_path = drv_action["/VMM_DRIVER_ACTION_DATA/VM/TEMPLATE/DISK[ATTACH='YES']/SOURCE"]
type = drv_action["/VMM_DRIVER_ACTION_DATA/VM/TEMPLATE/DISK[ATTACH='YES']/TYPE"]
size_kb = 0 # No volatile disk support at the moment
if ds_name.nil? || hostname.nil? || img_path.nil?
STDERR.puts "Not enough information to attach disk, missing datastore"\
" name or vcenter cluster name or image path."
exit -1
end
vc_cluster_name = drv_action["/VMM_DRIVER_ACTION_DATA/HOST"]
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
begin
VCenterDriver::VCenterVm.attach_disk(hostname,
deploy_id,
ds_name,
img_path,
type,
size_kb)
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
vm.one_item = drv_action.retrieve_xmlelements('VM').first
vm.attach_disk
rescue Exception => e
STDERR.puts "Error attach image #{img_path}. Reason: #{e.message}"
message = "Attach image for VM #{vm_ref} on vCenter cluster #{vc_cluster_name} "\
"failed due to \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
end
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -29,20 +29,30 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
deploy_id = ARGV[0]
mac = ARGV[1]
bridge = ARGV[2]
model = ARGV[3]
host = ARGV[-1]
vm_ref = ARGV[0]
vc_cluster_name = ARGV[-1]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(STDIN.read), 'VMM_DRIVER_ACTION_DATA')
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
begin
VCenterDriver::VCenterVm.attach_nic(deploy_id,
mac,
bridge,
model,
host)
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
# Set one_item with the VM XML that includes the NIC to be attached
vm.one_item = drv_action.retrieve_xmlelements("VM").first
vm.attach_nic
rescue Exception => e
STDERR.puts "Attach NIC for VM #{deploy_id} on host #{host} failed " +
"due to \"#{e.message}\""
message = "Attach NIC for VM #{vm_ref} on vCenter cluster #{vc_cluster_name} "\
"failed due to \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end
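
These action scripts share one calling convention: positional arguments from the VMM driver plus a Base64-encoded VMM_DRIVER_ACTION_DATA document on STDIN. A hedged sketch of decoding that payload outside the driver (assumes the opennebula gem is on the load path, as the scripts arrange above):

# Illustrative only: mimics what the scripts above do with their input.
require 'base64'
require 'opennebula'

raw = Base64.decode64(STDIN.read)
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(raw, 'VMM_DRIVER_ACTION_DATA')

vm_xml = drv_action.retrieve_xmlelements('VM').first
puts vm_xml['ID'] if vm_xml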

View File

@ -28,33 +28,31 @@ $: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
require 'opennebula'
deploy_id = ARGV[0]
vm_ref = ARGV[0]
host = ARGV[1]
vm_id = ARGV[-2]
drv_action_enc = STDIN.read.gsub("\n","")
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc),
'VMM_DRIVER_ACTION_DATA')
drv_action.initialize_xml(Base64.decode64(STDIN.read), 'VMM_DRIVER_ACTION_DATA')
lcm_state_num = drv_action["/VMM_DRIVER_ACTION_DATA/VM/LCM_STATE"].to_i
lcm_state = OpenNebula::VirtualMachine::LCM_STATE[lcm_state_num]
keep_disks = !drv_action['/VMM_DRIVER_ACTION_DATA/VM/USER_TEMPLATE/KEEP_DISKS_ON_DONE'].nil? &&
drv_action['/VMM_DRIVER_ACTION_DATA/VM/USER_TEMPLATE/KEEP_DISKS_ON_DONE'].downcase=="yes"
disks = [drv_action.to_hash["VMM_DRIVER_ACTION_DATA"]["VM"]["TEMPLATE"]["DISK"]].flatten.compact
cloned_tmplt = nil
if !drv_action['/VMM_DRIVER_ACTION_DATA/VM/TEMPLATE/CLONING_TEMPLATE_ID'].nil?
cloned_tmplt = drv_action['/VMM_DRIVER_ACTION_DATA/VM/TEMPLATE/TEMPLATE_ID']
end
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, host)
host_id = host['ID']
begin
VCenterDriver::VCenterVm.cancel(deploy_id, host, lcm_state, keep_disks, disks, cloned_tmplt)
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
vm.one_item = drv_action.retrieve_xmlelements('VM').first
vm.poweroff_hard
rescue Exception => e
STDERR.puts "Cancel of VM #{deploy_id} on host #{host} failed " +
"due to \"#{e.message}\""
message = "Cancel VM #{vm_ref} failed due to "\
"\"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -16,7 +16,7 @@
# limitations under the License. #
# ---------------------------------------------------------------------------- #
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
ONE_LOCATION = ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
if !ONE_LOCATION
RUBY_LIB_LOCATION="/usr/lib/one/ruby" if !defined?(RUBY_LIB_LOCATION)
@ -28,38 +28,57 @@ $: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
require 'opennebula'
dfile = ARGV[0]
host = ARGV[1]
vm_id = ARGV[2]
drv_action_enc = STDIN.read.gsub("\n","")
dfile = ARGV[0]
cluster_name = ARGV[1]
vm_id = ARGV[2]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc),
'VM')
drv_action.initialize_xml(Base64.decode64(STDIN.read), 'VM')
deploy_id = drv_action["/VM/DEPLOY_ID"]
lcm_state_num = drv_action["/VM/LCM_STATE"].to_i
datastore = drv_action["/VM/USER_TEMPLATE/VCENTER_DATASTORE"]
lcm_state = OpenNebula::VirtualMachine::LCM_STATE[lcm_state_num]
ops = {
:ref => drv_action["/VM/USER_TEMPLATE/PUBLIC_CLOUD/VCENTER_REF"],
:name => drv_action["/VM/USER_TEMPLATE/PUBLIC_CLOUD/VCENTER_NAME"]
}
deploy_id = drv_action["DEPLOY_ID"]
host_id = drv_action["HISTORY_RECORDS/HISTORY/HID"]
begin
puts VCenterDriver::VCenterVm.deploy(File.read(dfile),
lcm_state,
deploy_id,
host,
datastore,
ops)
rescue Exception => e
STDERR.puts "Deploy of VM #{vm_id} on host #{host} with #{dfile} failed " +
"due to \"#{e.message}\""
exit -1
end
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
if deploy_id && !deploy_id.empty?
# VM is not new, we just need to reconfigure it and to power it on
vm = VCenterDriver::VirtualMachine.new_from_ref(deploy_id, vi_client)
# Setting one_item is optional, but it saves a couple of API calls if
# we already have it
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
vm.one_item = one_vm
else
# VM is new
vm = VCenterDriver::VirtualMachine.new
# Clone the VM from template and provide XML info
vc_template_ref = drv_action['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
vm.clone_vm(drv_action, vi_client)
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
vm.one_item = one_vm
# Set reference to template disks and nics in VM template for detach ops
vm.reference_unmanaged_devices(vc_template_ref)
end
# Resize unmanaged disks
vm.resize_unmanaged_disks
vm.reconfigure
vm.poweron
vm.set_running(true)
puts vm['_ref']
rescue Exception => e
message = "Deploy of VM #{vm_id} on vCenter cluster #{cluster_name} " +
"with #{dfile} failed due to \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,24 +1,20 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
###############################################################################
# This script is used retrieve the file size of a disk
###############################################################################
# -------------------------------------------------------------------------- #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
@ -32,36 +28,29 @@ $: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
require 'opennebula'
deploy_id = ARGV[0]
source = ARGV[1]
target = ARGV[2]
target_index = ARGV[3]
drv_action_enc = STDIN.read.gsub("\n","")
vm_ref = ARGV[0]
vc_cluster_name = ARGV[-1]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc),
'VMM_DRIVER_ACTION_DATA')
drv_action.initialize_xml(Base64.decode64(STDIN.read), 'VMM_DRIVER_ACTION_DATA')
hostname = drv_action["/VMM_DRIVER_ACTION_DATA/HOST"]
vm_id = drv_action["/VMM_DRIVER_ACTION_DATA/VM/ID"]
img_path = drv_action["/VMM_DRIVER_ACTION_DATA/VM/TEMPLATE/DISK[ATTACH='YES']/SOURCE"]
ds_name = drv_action["/VMM_DRIVER_ACTION_DATA/VM/TEMPLATE/DISK[ATTACH='YES']/DATASTORE"]
if ds_name.nil? || hostname.nil? || vm_id.nil? || img_path.nil?
STDERR.puts "Not enough information to perform detach_disk."
exit -1
end
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
begin
VCenterDriver::VCenterVm.detach_disk(hostname,
deploy_id,
ds_name,
img_path)
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
raise "vCenter doesn't allow to remove a virtual disk if it's part of a "\
"snapshot of the virtual machine." if vm.has_snapshots?
rescue Exception => e
STDERR.puts "Error detaching image #{img_path}. Reason: #{e.message}"
message = "Detach DISK for VM #{vm_ref} on vCenter cluster #{vc_cluster_name} "\
"failed due to \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
end
ensure
vi_client.close_connection if vi_client
end

View File

@ -29,17 +29,30 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
deploy_id = ARGV[0]
mac = ARGV[1]
host = ARGV[3]
vm_ref = ARGV[0]
vc_cluster_name = ARGV[3]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(STDIN.read), 'VMM_DRIVER_ACTION_DATA')
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
begin
VCenterDriver::VCenterVm.detach_nic(deploy_id,
mac,
host)
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
# Set one_item with the VM XML that includes the NIC to be detached
vm.one_item = drv_action.retrieve_xmlelements("VM").first
vm.detach_nic
rescue Exception => e
STDERR.puts "Detach NIC for VM #{deploy_id} on host #{host} failed " +
"due to \"#{e.message}\""
message = "Detach NIC for VM #{vm_ref} on vCenter cluster #{vc_cluster_name} "\
"failed due to \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
end
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,20 +0,0 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
SCRIPT_NAME=$(basename $0)
echo "Action $SCRIPT_NAME not supported" 1>&2

View File

@ -0,0 +1 @@
../common/not_supported.sh

View File

@ -30,15 +30,27 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
deploy_id = ARGV[0]
host = ARGV[1]
vm_ref = ARGV[0]
vc_cluster_name = ARGV[1]
host_id = VCenterDriver::VIClient.translate_hostname(host)
vi_client = VCenterDriver::VIClient.new host_id
host = VCenterDriver::VCenterHost.new vi_client
vm = vi_client.find_vm_template(deploy_id)
vm = VCenterDriver::VCenterVm.new(vi_client, vm)
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
vm.monitor(host)
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
puts vm.info
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
vm.monitor_poll_vm
puts vm.info
rescue Exception => e
message = "Cannot poll info for VM #{vm_ref} on vCenter cluster "\
"#{vc_cluster_name} failed due to "\
"\"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

View File

@ -0,0 +1 @@
../common/dummy.sh

View File

@ -1,20 +0,0 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
exit(0)

View File

@ -0,0 +1 @@
../common/dummy.sh

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -29,13 +29,25 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
deploy_id = ARGV[0]
host = ARGV[1]
vm_ref = ARGV[0]
vc_cluster_name = ARGV[1]
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
begin
VCenterDriver::VCenterVm.reboot(deploy_id, host)
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
vm.reboot
rescue Exception => e
STDERR.puts "Guest reboot of VM #{deploy_id} on host #{host} failed " +
"due to \"#{e.message}\""
message = "Guest reboot of VM #{vm_ref} on vCenter cluster "\
"#{vc_cluster_name} failed due to "\
"\"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
end
ensure
vi_client.close_connection if vi_client
end

View File

@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
@ -29,20 +29,25 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
deploy_id = ARGV[0]
host = ARGV[-1]
vm_id = ARGV[-2]
vm_ref = ARGV[0]
vc_cluster_name = ARGV[-1]
vm = OpenNebula::VirtualMachine.new_with_id(vm_id, OpenNebula::Client.new)
vm.info
vm_xml = vm.to_xml
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
begin
VCenterDriver::VCenterVm.reconfigure(deploy_id, host, vm_xml)
rescue Exception => e
STDERR.puts "Reconfiguration of VM #{deploy_id} on host #{host} failed " +
"due to \"#{e.message}\""
exit -1
end
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
vm.regenerate_context
rescue Exception => e
message = "Reconfiguration of VM #{vm_ref} on vCenter cluster "\
"#{vc_cluster_name} failed due to "\
"\"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
exit -1
ensure
vi_client.close_connection if vi_client
end

Some files were not shown because too many files have changed in this diff