mirror of https://github.com/OpenNebula/one.git synced 2025-03-21 14:50:08 +03:00

Merge branch 'feature-4913' into feature-4913

Miguel Cabrerizo 2017-04-10 19:22:56 +02:00 committed by GitHub
commit e23b7bf6b6
85 changed files with 4562 additions and 1335 deletions


@ -181,13 +181,32 @@ public:
quota_del(DATASTORE, uid, gid, tmpl);
}
/**
* Delete Datastore related usage from quota counters
* for the given user and group. NOTE: The templates *ARE FREED*
* by this function
* @param uid of the user
* @param gid of the group
* @param tmpls templates for the images, with usage
*/
static void ds_del(int uid, int gid, vector<Template *> tmpls)
{
vector<Template *>::iterator it;
for ( it = tmpls.begin(); it != tmpls.end() ; ++it )
{
quota_del(DATASTORE, uid, gid, *it);
delete *it;
}
}
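/* Usage sketch (illustrative, not part of the patch): a caller collects one
 * usage Template per freed disk and releases the whole batch at once. The
 * ds_id, size_mb, uid and gid values are assumed caller state. */
void ds_del_sketch(int uid, int gid, int ds_id, long long size_mb)
{
    vector<Template *> ds_quotas;

    Template * usage = new Template();

    usage->add("DATASTORE", ds_id);
    usage->add("SIZE", size_mb);
    usage->add("IMAGES", 0);

    ds_quotas.push_back(usage);

    Quotas::ds_del(uid, gid, ds_quotas); // each Template is freed inside
}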
/**
* Delete a set of Datastore usage attributes from quota counters. Each
* datastore quota is associated with a given image. NOTE: The templates
* *ARE FREED* by this function
* @param ds_quotas a map with image_id and a tmpl with usage attributes
*/
static void ds_del(map<int, Template *>& ds_quotas);
static void ds_del_recreate(int uid, int gid, vector<Template *>& ds_quotas);
/**
* Delete usage from the given quota counters.


@ -243,6 +243,18 @@ public:
set(new SingleAttribute(name, value));
}
void add(const string& name, bool value)
{
if ( value )
{
set(new SingleAttribute(name, "YES"));
}
else
{
set(new SingleAttribute(name, "NO"));
}
}
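/* Illustration (not in the patch; attribute names are hypothetical): */
Template tmpl;

tmpl.add("PERSISTENT", true);  // stored as PERSISTENT = "YES"
tmpl.add("READONLY", false);   // stored as READONLY  = "NO"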
/**
* Removes an attribute from the template. The attributes are returned. The
* attributes MUST be freed by the calling function


@ -1041,7 +1041,7 @@ public:
* Releases all disk images taken by this Virtual Machine
* @param quotas disk space to free from image datastores
*/
void release_disk_images(map<int, Template *>& quotas);
void release_disk_images(vector<Template *>& quotas);
/**
* @return reference to the VirtualMachine disks
@ -1374,7 +1374,7 @@ public:
* @param ds_quotas The DS SIZE freed from image datastores.
*/
void delete_non_persistent_disk_resizes(Template **vm_quotas,
map<int, Template *>& ds_quotas)
vector<Template *>& ds_quotas)
{
disks.delete_non_persistent_resizes(vm_quotas, ds_quotas);
}
@ -1471,11 +1471,13 @@ public:
* @param snap_id of the snapshot
* @param ds_quotas template with snapshot usage for the DS quotas
* @param vm_quotas template with snapshot usage for the VM quotas
* @param io delete ds quotas from image owners
* @param vo delete ds quotas from vm owners
*/
void delete_disk_snapshot(int disk_id, int snap_id, Template **ds_quotas,
Template **vm_quotas)
Template **vm_quotas, bool& io, bool& vo)
{
disks.delete_snapshot(disk_id, snap_id, ds_quotas, vm_quotas);
disks.delete_snapshot(disk_id, snap_id, ds_quotas, vm_quotas, io, vo);
}
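/* Caller sketch, condensed from the LifeCycleManager changes in this patch:
 * io/vo tell the caller whose datastore quotas to decrement. disk_id,
 * snap_id and the uid/gid pairs are assumed caller state. */
bool io, vo;
Template *ds_quotas = 0, *vm_quotas = 0;

vm->delete_disk_snapshot(disk_id, snap_id, &ds_quotas, &vm_quotas, io, vo);

if ( ds_quotas != 0 )
{
    if ( io ) // return the freed usage to the image owner
    {
        Quotas::ds_del(img_uid, img_gid, ds_quotas);
    }

    if ( vo ) // return the freed usage to the VM owner
    {
        Quotas::ds_del(vm_uid, vm_gid, ds_quotas);
    }

    delete ds_quotas;
}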
/**
@ -1485,7 +1487,7 @@ public:
* @param ds_quotas The DS SIZE freed from image datastores.
*/
void delete_non_persistent_disk_snapshots(Template **vm_quotas,
map<int, Template *>& ds_quotas)
vector<Template *>& ds_quotas)
{
disks.delete_non_persistent_snapshots(vm_quotas, ds_quotas);
}


@ -244,8 +244,11 @@ public:
* @param snap_id of the snapshot
* @param ds_quotas template with snapshot usage for the DS quotas
* @param vm_quotas template with snapshot usage for the VM quotas
* @param io delete ds quotas from image owners
* @param vo delete ds quotas from vm owners
*/
void delete_snapshot(int snap_id, Template **ds_quota, Template **vm_quota);
void delete_snapshot(int snap_id, Template **ds_quota, Template **vm_quota,
bool& io, bool& vo);
/* ---------------------------------------------------------------------- */
/* Disk resize functions */
@ -261,8 +264,12 @@ public:
* @param new_size of disk
* @param dsdeltas increment in datastore usage
* @param vmdelta increment in system datastore usage
* @param do_img_owner quotas counter allocated for image uid/gid
* @param do_vm_owner quotas counter allocated for vm uid/gid
*
*/
void resize_quotas(long long new_size, Template& dsdelta, Template& vmdelta);
void resize_quotas(long long new_size, Template& dsdelta, Template& vmdelta,
bool& do_img_owner, bool& do_vm_owner);
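/* Rollback sketch modeled on disk_resize_failure in this patch; everything
 * except resize_quotas is assumed caller state. */
bool img_quota, vm_quota;
Template ds_deltas, vm_deltas;

disk->resize_quotas(size_prev, ds_deltas, vm_deltas, img_quota, vm_quota);

if ( img_quota && img_id != -1 )
{
    Quotas::ds_del(img_uid, img_gid, &ds_deltas); // image owner was charged
}

if ( vm_quota )
{
    Quotas::ds_del(vm_uid, vm_gid, &ds_deltas);   // VM owner was charged
}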
/* ---------------------------------------------------------------------- */
/* Disk space usage functions */
@ -273,6 +280,11 @@ public:
*/
long long system_ds_size();
/**
* @return the space required by this disk in the image datastore
*/
long long image_ds_size();
/**
* Compute the storage needed by the disk in the system and/or image
* datastore
@ -408,6 +420,14 @@ public:
static void extended_info(int uid, Template * tmpl);
/**
* Computes the storage in the image DS needed for the disks in a VM
* template
* @param tmpl with DISK descriptions
* @param ds_quotas templates for quota updates
*/
static void image_ds_quotas(Template * tmpl, vector<Template *>& ds_quotas);
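/* Condensed from the VirtualMachineAllocate hook in this patch: authorize
 * the image DS usage of every disk and free the returned templates.
 * quota_authorization and att are Request-level helpers/state. */
vector<Template *> ds_quotas;
vector<Template *>::iterator it;

VirtualMachineDisks::image_ds_quotas(&aux_tmpl, ds_quotas);

for ( it = ds_quotas.begin() ; it != ds_quotas.end() ; ++it )
{
    quota_authorization(*it, Quotas::DATASTORE, att); // per-datastore check

    delete *it; // the caller owns the returned templates
}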
/**
* Sets Datastore information on volatile disks
*/
@ -446,7 +466,7 @@ public:
* @param img_error true if the image has to be set in error state
* @param quotas disk space usage to free from image datastores
*/
void release_images(int vmid, bool img_error, map<int, Template *>& quotas);
void release_images(int vmid, bool img_error, vector<Template *>& quotas);
/* ---------------------------------------------------------------------- */
/* DISK cloning functions */
@ -666,9 +686,11 @@ public:
* @param snap_id of the snapshot
* @param ds_quotas template with snapshot usage for the DS quotas
* @param vm_quotas template with snapshot usage for the VM quotas
* @param io delete ds quotas from image owners
* @param vo delete ds quotas from vm owners
*/
void delete_snapshot(int disk_id, int snap_id, Template **ds_quota,
Template **vm_quota);
Template **vm_quota, bool& io, bool& vo);
/**
* Deletes all the disk snapshots for non-persistent disks and for persistent
@ -677,7 +699,7 @@ public:
* @param ds_quotas The DS SIZE freed from image datastores.
*/
void delete_non_persistent_snapshots(Template **vm_quotas,
map<int, Template *>& ds_quotas);
vector<Template *> &ds_quotas);
/**
* Restores the disk original size for non-persistent and for persistent
@ -686,7 +708,7 @@ public:
* @param ds_quotas The DS SIZE freed from image datastores.
*/
void delete_non_persistent_resizes(Template **vm_quotas,
map<int, Template *>& ds_quotas);
vector<Template *> &ds_quotas);
protected:


@ -259,6 +259,7 @@ VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/vnm/ebtables \
$VAR_LOCATION/remotes/vnm/fw \
$VAR_LOCATION/remotes/vnm/ovswitch \
$VAR_LOCATION/remotes/vnm/vcenter \
$VAR_LOCATION/remotes/tm/ \
$VAR_LOCATION/remotes/tm/dummy \
$VAR_LOCATION/remotes/tm/shared \
@ -415,6 +416,7 @@ INSTALL_FILES=(
NETWORK_EBTABLES_FILES:$VAR_LOCATION/remotes/vnm/ebtables
NETWORK_FW_FILES:$VAR_LOCATION/remotes/vnm/fw
NETWORK_OVSWITCH_FILES:$VAR_LOCATION/remotes/vnm/ovswitch
NETWORK_VCENTER_FILES:$VAR_LOCATION/remotes/vnm/vcenter
EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples
WEBSOCKIFY_SHARE_FILES:$SHARE_LOCATION/websockify
INSTALL_GEMS_SHARE_FILES:$SHARE_LOCATION
@ -857,6 +859,10 @@ NETWORK_OVSWITCH_FILES="src/vnm_mad/remotes/ovswitch/clean \
src/vnm_mad/remotes/ovswitch/update_sg \
src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb"
NETWORK_VCENTER_FILES="src/vnm_mad/remotes/vcenter/pre \
src/vnm_mad/remotes/vcenter/post \
src/vnm_mad/remotes/vcenter/clean"
#-------------------------------------------------------------------------------
# IPAM drivers to be installed under $REMOTES_LOCATION/ipam
#-------------------------------------------------------------------------------


@ -770,7 +770,7 @@ VM_RESTRICTED_ATTR = "MEMORY_COST"
VM_RESTRICTED_ATTR = "DISK_COST"
VM_RESTRICTED_ATTR = "PCI"
VM_RESTRICTED_ATTR = "USER_INPUTS"
VM_RESTRICTED_ATTR = "DEPLOY_FOLDER"
VM_RESTRICTED_ATTR = "VCENTER_VM_FOLDER"
#VM_RESTRICTED_ATTR = "RANK"
#VM_RESTRICTED_ATTR = "SCHED_RANK"


@ -248,10 +248,10 @@ VXLAN_IDS = [
# DEFAULT_CDROM_DEVICE_PREFIX: Same as above but for CDROM devices.
#
# DEFAULT_IMAGE_PERSISTENT: Control the default value for the PERSISTENT
# attribute on image creation (oneimage clone, onevm disk-saveas). If blank
# images will inherit the persistent attribute from the base image.
#
# DEFAULT_IMAGE_PERSISTENT_NEW: Control the default value for the PERSISTENT
# attribute on image creation (oneimage create). By default images are not
# persistent if not set.
#*******************************************************************************
@ -814,8 +814,8 @@ DEFAULT_UMASK = 177
VM_ADMIN_OPERATIONS = "migrate, delete, recover, retry, deploy, resched"
VM_MANAGE_OPERATIONS = "undeploy, hold, release, stop, suspend, resume, reboot,
poweroff, disk-attach, nic-attach, disk-snapshot, terminate, disk-resize,
snapshot, updateconf, rename, resize, update, disk-saveas"
VM_USE_OPERATIONS = ""
@ -856,7 +856,7 @@ VM_RESTRICTED_ATTR = "EMULATOR"
VM_RESTRICTED_ATTR = "USER_INPUTS/CPU"
VM_RESTRICTED_ATTR = "USER_INPUTS/MEMORY"
VM_RESTRICTED_ATTR = "USER_INPUTS/VCPU"
VM_RESTRICTED_ATTR = "DEPLOY_FOLDER"
VM_RESTRICTED_ATTR = "VCENTER_VM_FOLDER"
#VM_RESTRICTED_ATTR = "RANK"
#VM_RESTRICTED_ATTR = "SCHED_RANK"



@ -354,11 +354,11 @@ EOT
:description => 'Sends READY=YES to OneGate, useful for OneFlow'
},
{
:name => 'deploy_folder',
:large => '--deploy_folder path',
:name => 'vcenter_vm_folder',
:large => '--vcenter_vm_folder path',
:format => String,
:description => "In a vCenter environment sets the VMs and Templates folder where the VM will be placed." \
" The path uses slashes to separate folders. For example: --deploy_folder \"/Management/VMs\""
" The path uses slashes to separate folders. For example: --vcenter_vm_folder \"/Management/VMs\""
}
]
@ -1133,7 +1133,7 @@ EOT
template<<' ]' << "\n"
end
template<<"DEPLOY_FOLDER=#{options[:deploy_folder]}\n" if options[:deploy_folder]
template<<"VCENTER_VM_FOLDER=#{options[:vcenter_vm_folder]}\n" if options[:vcenter_vm_folder]
context=create_context(options)
template<<context if context


@ -212,7 +212,7 @@ error:
void DispatchManager::free_vm_resources(VirtualMachine * vm)
{
Template* tmpl;
map<int, Template *> ds_quotas;
vector<Template *> ds_quotas;
int uid;
int gid;
@ -247,7 +247,7 @@ void DispatchManager::free_vm_resources(VirtualMachine * vm)
if ( !ds_quotas.empty() )
{
Quotas::ds_del(ds_quotas);
Quotas::ds_del(uid, gid, ds_quotas);
}
if (vrid != -1)
@ -1052,8 +1052,8 @@ int DispatchManager::delete_recreate(VirtualMachine * vm,
Template * vm_quotas_snp = 0;
Template * vm_quotas_rsz = 0;
map<int, Template *> ds_quotas_snp;
map<int, Template *> ds_quotas_rsz;
vector<Template *> ds_quotas_snp;
vector<Template *> ds_quotas_rsz;
int vm_uid, vm_gid;
@ -1118,12 +1118,12 @@ int DispatchManager::delete_recreate(VirtualMachine * vm,
if ( !ds_quotas_snp.empty() )
{
Quotas::ds_del(ds_quotas_snp);
Quotas::ds_del_recreate(vm_uid, vm_gid, ds_quotas_snp);
}
if ( !ds_quotas_rsz.empty() )
{
Quotas::ds_del(ds_quotas_rsz);
Quotas::ds_del_recreate(vm_uid, vm_gid, ds_quotas_rsz);
}
if ( vm_quotas_snp != 0 )


@ -204,7 +204,7 @@ void DispatchManager::done_action(int vid)
VirtualMachine * vm;
Template * tmpl;
map<int, Template *> ds_quotas;
vector<Template *> ds_quotas;
int uid;
int gid;
@ -264,7 +264,7 @@ void DispatchManager::done_action(int vid)
if ( !ds_quotas.empty() )
{
Quotas::ds_del(ds_quotas);
Quotas::ds_del(uid, gid, ds_quotas);
}
if (!deploy_id.empty())


@ -820,8 +820,8 @@ void LifeCycleManager::delete_recreate_action(const LCMAction& la)
Template * vm_quotas_snp = 0;
Template * vm_quotas_rsz = 0;
map<int, Template *> ds_quotas_snp;
map<int, Template *> ds_quotas_rsz;
vector<Template *> ds_quotas_snp;
vector<Template *> ds_quotas_rsz;
int vm_uid, vm_gid;
@ -888,12 +888,12 @@ void LifeCycleManager::delete_recreate_action(const LCMAction& la)
if ( !ds_quotas_snp.empty() )
{
Quotas::ds_del(ds_quotas_snp);
Quotas::ds_del_recreate(vm_uid, vm_gid, ds_quotas_snp);
}
if ( !ds_quotas_rsz.empty() )
{
Quotas::ds_del(ds_quotas_rsz);
Quotas::ds_del_recreate(vm_uid, vm_gid, ds_quotas_rsz);
}
if ( vm_quotas_snp != 0 )


@ -1865,6 +1865,8 @@ void LifeCycleManager::disk_snapshot_success(int vid)
Template *ds_quotas = 0;
Template *vm_quotas = 0;
bool img_owner, vm_owner;
const VirtualMachineDisk * disk;
Snapshots snaps(-1);
const Snapshots* tmp_snaps;
@ -1909,7 +1911,8 @@ void LifeCycleManager::disk_snapshot_success(int vid)
case VirtualMachine::DISK_SNAPSHOT_DELETE_POWEROFF:
case VirtualMachine::DISK_SNAPSHOT_DELETE_SUSPENDED:
vm->log("LCM", Log::INFO, "VM disk snapshot deleted.");
vm->delete_disk_snapshot(disk_id, snap_id, &ds_quotas, &vm_quotas);
vm->delete_disk_snapshot(disk_id, snap_id, &ds_quotas, &vm_quotas,
img_owner, vm_owner);
break;
default:
@ -1941,16 +1944,24 @@ void LifeCycleManager::disk_snapshot_success(int vid)
if ( ds_quotas != 0 )
{
Image* img = ipool->get(img_id, true);
if(img != 0)
if ( img_owner )
{
int img_uid = img->get_uid();
int img_gid = img->get_gid();
Image* img = ipool->get(img_id, true);
img->unlock();
if(img != 0)
{
int img_uid = img->get_uid();
int img_gid = img->get_gid();
Quotas::ds_del(img_uid, img_gid, ds_quotas);
img->unlock();
Quotas::ds_del(img_uid, img_gid, ds_quotas);
}
}
if ( vm_owner )
{
Quotas::ds_del(vm_uid, vm_gid, ds_quotas);
}
delete ds_quotas;
@ -2008,6 +2019,8 @@ void LifeCycleManager::disk_snapshot_failure(int vid)
bool has_snaps = false;
string error_str;
bool img_owner, vm_owner;
VirtualMachine * vm = vmpool->get(vid,true);
if ( vm == 0 )
@ -2036,7 +2049,8 @@ void LifeCycleManager::disk_snapshot_failure(int vid)
case VirtualMachine::DISK_SNAPSHOT_POWEROFF:
case VirtualMachine::DISK_SNAPSHOT_SUSPENDED:
vm->log("LCM", Log::ERROR, "Could not take disk snapshot.");
vm->delete_disk_snapshot(disk_id, snap_id, &ds_quotas, &vm_quotas);
vm->delete_disk_snapshot(disk_id, snap_id, &ds_quotas, &vm_quotas,
img_owner, vm_owner);
break;
case VirtualMachine::DISK_SNAPSHOT_DELETE:
@ -2077,16 +2091,24 @@ void LifeCycleManager::disk_snapshot_failure(int vid)
if ( ds_quotas != 0 )
{
Image* img = ipool->get(img_id, true);
if(img != 0)
if ( img_owner )
{
int img_uid = img->get_uid();
int img_gid = img->get_gid();
Image* img = ipool->get(img_id, true);
img->unlock();
if(img != 0)
{
int img_uid = img->get_uid();
int img_gid = img->get_gid();
Quotas::ds_del(img_uid, img_gid, ds_quotas);
img->unlock();
Quotas::ds_del(img_uid, img_gid, ds_quotas);
}
}
if ( vm_owner)
{
Quotas::ds_del(vm_uid, vm_gid, ds_quotas);
}
delete ds_quotas;
@ -2363,9 +2385,11 @@ void LifeCycleManager::disk_resize_failure(int vid)
int vm_uid = vm->get_uid();
int vm_gid = vm->get_gid();
bool img_quota, vm_quota;
disk->vector_value("IMAGE_ID", img_id);
disk->vector_value("SIZE_PREV", size_prev);
disk->resize_quotas(size_prev, ds_deltas, vm_deltas);
disk->resize_quotas(size_prev, ds_deltas, vm_deltas, img_quota, vm_quota);
disk->clear_resize(true);
@ -2374,7 +2398,7 @@ void LifeCycleManager::disk_resize_failure(int vid)
vm->unlock();
// Restore quotas
if ( !ds_deltas.empty() && img_id != -1 )
if ( img_quota && img_id != -1 )
{
Image* img = ipool->get(img_id, true);
@ -2389,6 +2413,11 @@ void LifeCycleManager::disk_resize_failure(int vid)
}
}
if ( vm_quota )
{
Quotas::ds_del(vm_uid, vm_gid, &ds_deltas);
}
if ( !vm_deltas.empty() )
{
Quotas::vm_del(vm_uid, vm_gid, &vm_deltas);


@ -744,13 +744,6 @@ module OpenNebula
self['DEPLOY_ID']
end
# Returns the deploy_id of the VirtualMachine (numeric value)
def keep_disks?
!self['USER_TEMPLATE/KEEP_DISKS_ON_DONE'].nil? &&
self['USER_TEMPLATE/KEEP_DISKS_ON_DONE'].downcase=="yes"
end
# Clones the VM's source Template, replacing the disks with live snapshots
# of the current disks. The VM capacity and NICs are also preserved
#


@ -14,6 +14,18 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #
# This patch changes the type of an AR to IP4_6_STATIC and also lets
# us add or change parameters. The AR must be specified in the extra
# option. For example, to change network 2, ar 1 and add ip6 and
# prefix_length you can use:
#
# onedb patch -s one.db ip4_6_static.rb \
# --extra vn=2;ar=1;ip6=2001::1;prefix_length=48
#
# You can also specify several ARs separated by commas:
#
# vn=3;ar=0;ip6=2001::1;prefix_length=48,vn=3;ar=1;ip6=2001::2;prefix_length=64
if !ONE_LOCATION
LOG_LOCATION = "/var/log/one"
else


@ -121,7 +121,24 @@ bool VirtualMachineAllocate::allocate_authorization(
return false;
}
return true;
vector<Template *> ds_quotas;
vector<Template *>::iterator it;
bool ds_quota_auth = true;
VirtualMachineDisks::image_ds_quotas(&aux_tmpl, ds_quotas);
for ( it = ds_quotas.begin() ; it != ds_quotas.end() ; ++it )
{
if ( quota_authorization(*it, Quotas::DATASTORE, att) == false )
{
ds_quota_auth = false;
}
delete *it;
}
return ds_quota_auth;
}
/* -------------------------------------------------------------------------- */


@ -2564,8 +2564,7 @@ void VirtualMachineDiskSnapshotCreate::request_execute(
PoolObjectAuth vm_perms;
const VirtualMachineDisk * disk;
VectorAttribute * delta_disk = 0;
VirtualMachineDisk * disk;
Template ds_deltas;
Template vm_deltas;
@ -2597,11 +2596,19 @@ void VirtualMachineDiskSnapshotCreate::request_execute(
return;
}
string disk_size = disk->vector_value("SIZE");
string ds_id = disk->vector_value("DATASTORE_ID");
/* ---------------------------------------------------------------------- */
/* Get disk information and quota usage deltas */
/* ---------------------------------------------------------------------- */
bool img_ds_quota, vm_ds_quota;
long long ssize;
disk->vector_value("SIZE", ssize);
ssize = 2 * ssize; //Snapshot accounts as another disk of the same size
disk->resize_quotas(ssize, ds_deltas, vm_deltas, img_ds_quota, vm_ds_quota);
bool is_volatile = disk->is_volatile();
bool is_system = disk->get_tm_target() == "SYSTEM";
bool do_ds_quota = disk->is_persistent() || !is_system;
int img_id = -1;
disk->vector_value("IMAGE_ID", img_id);
@ -2617,11 +2624,12 @@ void VirtualMachineDiskSnapshotCreate::request_execute(
return;
}
RequestAttributes ds_att_quota;
/* ---------- Attributes for quota update requests ---------------------- */
RequestAttributes img_att_quota;
RequestAttributes vm_att_quota;
//--------------------------- Persistent Images ----------------------------
if (do_ds_quota)
if (img_ds_quota)
{
PoolObjectAuth img_perms;
@ -2645,40 +2653,51 @@ void VirtualMachineDiskSnapshotCreate::request_execute(
return;
}
ds_att_quota = RequestAttributes(img_perms.uid, img_perms.gid, att);
ds_deltas.add("DATASTORE", ds_id);
ds_deltas.add("SIZE", disk_size);
ds_deltas.add("IMAGES", 0);
if (!quota_authorization(&ds_deltas, Quotas::DATASTORE, ds_att_quota))
{
return;
}
img_att_quota = RequestAttributes(img_perms.uid, img_perms.gid, att);
}
//--------------------- Account for System DS storage ----------------------
if (is_system)
if ( vm_ds_quota )
{
if ( vm_authorization(id, 0, 0, att, 0, 0, 0, auth_op) == false )
{
return;
}
}
vm_att_quota = RequestAttributes(vm_perms.uid, vm_perms.gid, att);
delta_disk = new VectorAttribute("DISK");
delta_disk->replace("TYPE", "FS");
delta_disk->replace("SIZE", disk_size);
/* ---------------------------------------------------------------------- */
/* Check quotas for the new size in image/system datastores */
/* ---------------------------------------------------------------------- */
if ( img_ds_quota && !quota_authorization(&ds_deltas, Quotas::DATASTORE,
img_att_quota) )
{
return;
}
vm_deltas.add("VMS", 0);
vm_deltas.set(delta_disk);
if ( vm_ds_quota && !quota_authorization(&ds_deltas, Quotas::DATASTORE,
vm_att_quota) )
{
if ( img_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, img_att_quota);
}
return;
}
if ( !vm_deltas.empty() )
{
if (!quota_resize_authorization(id, &vm_deltas, vm_att_quota))
{
if (do_ds_quota)
if ( img_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, ds_att_quota);
quota_rollback(&ds_deltas, Quotas::DATASTORE, img_att_quota);
}
if ( vm_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, vm_att_quota);
}
return;
@ -2692,12 +2711,17 @@ void VirtualMachineDiskSnapshotCreate::request_execute(
if ( rc != 0 )
{
if (do_ds_quota)
if ( img_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, ds_att_quota);
quota_rollback(&ds_deltas, Quotas::DATASTORE, img_att_quota);
}
if (is_system)
if ( vm_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, vm_att_quota);
}
if ( !vm_deltas.empty() )
{
quota_rollback(&vm_deltas, Quotas::VM, vm_att_quota);
}
@ -2982,9 +3006,9 @@ void VirtualMachineDiskResize::request_execute(
}
/* ------------- Get information about the disk and image --------------- */
bool is_persistent = disk->is_persistent();
bool img_ds_quota, vm_ds_quota;
disk->resize_quotas(size, ds_deltas, vm_deltas);
disk->resize_quotas(size, ds_deltas, vm_deltas, img_ds_quota, vm_ds_quota);
int img_id = -1;
disk->vector_value("IMAGE_ID", img_id);
@ -2996,10 +3020,10 @@ void VirtualMachineDiskResize::request_execute(
/* ---------------------------------------------------------------------- */
/* Authorize the request for VM and IMAGE for persistent disks */
/* ---------------------------------------------------------------------- */
RequestAttributes ds_att_quota;
RequestAttributes img_att_quota;
RequestAttributes vm_att_quota;
if ( is_persistent )
if ( img_ds_quota )
{
PoolObjectAuth img_perms;
@ -3026,16 +3050,15 @@ void VirtualMachineDiskResize::request_execute(
return;
}
ds_att_quota = RequestAttributes(img_perms.uid, img_perms.gid, att);
img_att_quota = RequestAttributes(img_perms.uid, img_perms.gid, att);
}
else
if ( vm_ds_quota )
{
if ( vm_authorization(id, 0, 0, att, 0, 0, 0, auth_op) == false )
{
return;
}
ds_att_quota = RequestAttributes(vm_perms.uid, vm_perms.gid, att);
}
vm_att_quota = RequestAttributes(vm_perms.uid, vm_perms.gid, att);
@ -3044,21 +3067,35 @@ void VirtualMachineDiskResize::request_execute(
/* Check quotas for the new size in image/system datastores */
/* ---------------------------------------------------------------------- */
if ( !ds_deltas.empty() )
if ( img_ds_quota && !quota_authorization(&ds_deltas, Quotas::DATASTORE,
img_att_quota))
{
if (!quota_authorization(&ds_deltas, Quotas::DATASTORE, ds_att_quota))
return;
}
if ( vm_ds_quota && !quota_authorization(&ds_deltas, Quotas::DATASTORE,
vm_att_quota))
{
if ( img_ds_quota )
{
return;
quota_rollback(&ds_deltas, Quotas::DATASTORE, img_att_quota);
}
return;
}
if ( !vm_deltas.empty() )
{
if (!quota_resize_authorization(id, &vm_deltas, vm_att_quota))
{
if (!ds_deltas.empty())
if ( img_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, ds_att_quota);
quota_rollback(&ds_deltas, Quotas::DATASTORE, img_att_quota);
}
if ( vm_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, vm_att_quota);
}
return;
@ -3072,9 +3109,14 @@ void VirtualMachineDiskResize::request_execute(
if ( rc != 0 )
{
if ( !ds_deltas.empty() )
if ( img_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, ds_att_quota);
quota_rollback(&ds_deltas, Quotas::DATASTORE, img_att_quota);
}
if ( vm_ds_quota )
{
quota_rollback(&ds_deltas, Quotas::DATASTORE, vm_att_quota);
}
if ( !vm_deltas.empty() )


@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Templates path/folder where a vCenter VM will
# be deployed to
vcenter_deploy_folder: false
vcenter_vm_folder: false
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings


@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Templates path/folder where a vCenter VM will
# be deployed to
vcenter_deploy_folder: true
vcenter_vm_folder: true
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings


@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Templates path/folder where a vCenter VM will
# be deployed to
vcenter_deploy_folder: false
vcenter_vm_folder: false
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings


@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Templates path/folder where a vCenter VM will
# be deployed to
vcenter_deploy_folder: false
vcenter_vm_folder: false
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings


@ -52,7 +52,7 @@ features:
# True to show an input to specify the VMs and Templates path/folder where a vCenter VM will
# be deployed to
vcenter_deploy_folder: false
vcenter_vm_folder: false
tabs:
dashboard-tab:
# The following widgets can be used inside any of the '_per_row' settings


@ -16,6 +16,8 @@
define(function(require) {
require('jquery');
require('jquery-ui');
require('foundation');
Foundation.Dropdown.defaults.positionClass = 'left';


@ -21,6 +21,7 @@ require.config({
/* jQuery */
'jquery': '../bower_components/jquery/dist/jquery',
'jquery-ui': '../bower_components/jquery-ui/jquery-ui',
/* DataTables */
'datatables.net': '../bower_components/datatables/media/js/jquery.dataTables',


@ -31,19 +31,21 @@
<label for="vcenter_password">{{tr "Password"}}</label>
<input type="password" name="vcenter_password" id="vcenter_password" required />
</div>
<div class="large-2 medium-6 columns">
<div class="large-5 medium-6 columns">
<label>{{tr "Datastore"}}</label>
<div id="vcenter_datastore_wrapper"/>
</div>
<div class="large-3 medium-6 columns">
<label>&nbsp;</label>
<button type="submit" class="button radius">
{{tr "Get Images"}}
</button>
</div>
<div class="row">
<div style="float:right">
<label>&nbsp;</label>
<button type="submit" class="button radius">
{{tr "Get Images"}}
</button>
</div>
</div>
</form>
<div class="row collapse">
{{{vCenterImagesHTML}}}
</div>
</div>
</div>


@ -18,7 +18,7 @@ define(function(require) {
/*
DEPENDENCIES
*/
var Notifier = require('utils/notifier');
// require('foundation.tab');
var BaseFormPanel = require('utils/form-panels/form-panel');
var Sunstone = require('sunstone');
@ -229,23 +229,27 @@ define(function(require) {
json_template['ready_status_gate'] = ready_status_gate;
var templateStr = $('textarea#template', $("form#createServiceTemplateFormAdvanced")).val();
var template_final = TemplateUtils.mergeTemplates(templateJSON, templateStr);
if (this.action == "create") {
Sunstone.runAction("ServiceTemplate.create", json_template );
Sunstone.runAction("ServiceTemplate.create", template_final );
return false;
} else if (this.action == "update") {
Sunstone.runAction("ServiceTemplate.update",this.resourceId, JSON.stringify(json_template));
Sunstone.runAction("ServiceTemplate.update",this.resourceId, JSON.stringify(template_final));
return false;
}
}
function _submitAdvanced(context) {
var json_template = $('textarea#template', context).val();
var templateStr = $('textarea#template', context).val();
var templateJSON = this.retrieve($("form#createServiceTemplateFormWizard"));
var template_final = TemplateUtils.mergeTemplates(templateStr, templateJSON, true);
template_final = TemplateUtils.templateToString(template_final);
if (this.action == "create") {
Sunstone.runAction("ServiceTemplate.create", JSON.parse(json_template) );
Sunstone.runAction("ServiceTemplate.create", JSON.parse(template_final) );
return false;
} else if (this.action == "update") {
Sunstone.runAction("ServiceTemplate.update", this.resourceId, json_template);
Sunstone.runAction("ServiceTemplate.update", this.resourceId, template_final);
return false;
}
}
@ -399,7 +403,6 @@ define(function(require) {
$(".networks_role", role_section).show();
}
$(".vm_template_contents", role_section).val("");
$.each(selected_networks, function(){
$(".service_network_checkbox[value='"+this+"']", role_section).attr('checked', true).change();


@ -300,6 +300,10 @@ define(function(require) {
if (Config.provision.dashboard.isEnabled("vms")) {
$("#provision_dashboard").append(TemplateDashboardVms());
if(!Config.isProvisionTabEnabled("provision-tab", "templates")){
$('.provision_create_vm_button').hide();
}
var start_time = Math.floor(new Date().getTime() / 1000);
// ms to s


@ -1,4 +1,4 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2016, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
@ -18,7 +18,7 @@ define(function(require) {
/*
DEPENDENCIES
*/
var Notifier = require('utils/notifier');
var BaseFormPanel = require('utils/form-panels/form-panel');
var Sunstone = require('sunstone');
var Locale = require('utils/locale');
@ -188,24 +188,30 @@ define(function(require) {
function _submitWizard(context) {
var templateJSON = this.retrieve(context);
var templateStr = $('textarea#template', $("form#createVMTemplateFormAdvanced")).val();
var template_final = TemplateUtils.mergeTemplates(templateJSON, templateStr);
if (this.action == "create") {
Sunstone.runAction(this.resource+".create", {'vmtemplate': templateJSON});
Sunstone.runAction(this.resource+".create", {'vmtemplate': template_final});
return false;
} else if (this.action == "update") {
Sunstone.runAction(this.resource+".update", this.resourceId, TemplateUtils.templateToString(templateJSON));
Sunstone.runAction(this.resource+".update", this.resourceId, TemplateUtils.templateToString(template_final));
return false;
}
}
function _submitAdvanced(context) {
var template = $('textarea#template', context).val();
var templateStr = $('textarea#template', context).val();
var templateJSON = this.retrieve($("form#createVMTemplateFormWizard"));
var template_final = TemplateUtils.mergeTemplates(templateStr, templateJSON, true);
template_final = TemplateUtils.templateToString(template_final);
if (this.action == "create") {
Sunstone.runAction(this.resource+".create", {"vmtemplate": {"template_raw": template}});
Sunstone.runAction(this.resource+".create", {"vmtemplate": {"template_raw": template_final}});
return false;
} else if (this.action == "update") {
Sunstone.runAction(this.resource+".update", this.resourceId, template);
Sunstone.runAction(this.resource+".update", this.resourceId, template_final);
return false;
}
}


@ -223,7 +223,7 @@ define(function(require) {
$.each(userInputsJSON, function(key,value){
var name = key.toUpperCase();
contextJSON[name] = "$" + name;
contextJSON[name.split("_")[2]] = "$" + name;
});
var start_script = WizardFields.retrieveInput($(".START_SCRIPT", context));


@ -41,7 +41,7 @@
<li class="tabs-title is-active">
<a href="#netsshTab{{uniqueId}}">{{tr "Configuration"}}</a>
</li>
<li class="tabs-title">
<li class="tabs-title hypervisor only_kvm">
<a href="#filesTab{{uniqueId}}">{{tr "Files"}}</a>
</li>
<li class="tabs-title">
@ -109,7 +109,7 @@
{{{tip (tr "Text of the script executed when the machine starts up. It can contain shebang in case it is not shell script")}}}
<textarea rows="4" type="text" class="START_SCRIPT monospace" placeholder="yum upgrade"/>
</label>
<input type="checkbox" class="ENCODE_START_SCRIPT" id="ENCODE_START_SCRIPT{{uniqueId}}">
<input type="checkbox" class="ENCODE_START_SCRIPT" id="ENCODE_START_SCRIPT{{uniqueId}}" checked>
<label for="ENCODE_START_SCRIPT{{uniqueId}}">{{tr "Encode script in Base64"}}</label>
</div>
</div>


@ -182,15 +182,15 @@ define(function(require) {
templateJSON["DISK_COST"] = templateJSON["DISK_COST"] * 1024;
}
else{
templateJSON["DISK_COST"] = "0";
delete templateJSON["MEMORY_UNIT_COST"];
}
if(templateJSON["MEMORY_UNIT_COST"] == "GB")
templateJSON["MEMORY_COST"] = templateJSON["MEMORY_COST"] * 1024;
if (templateJSON["HYPERVISOR"] == 'vcenter') {
templateJSON["VCENTER_TEMPLATE_REF"] = WizardFields.retrieveInput($("#vcenter_template_ref", context));
if (Config.isFeatureEnabled("vcenter_deploy_folder")) {
templateJSON["DEPLOY_FOLDER"] = WizardFields.retrieveInput($("#vcenter_deploy_folder", context))
if (Config.isFeatureEnabled("vcenter_vm_folder")) {
templateJSON["VCENTER_VM_FOLDER"] = WizardFields.retrieveInput($("#vcenter_vm_folder", context))
}
templateJSON["KEEP_DISKS_ON_DONE"] = $("#KEEP_DISKS", context).is(':checked')?"YES":"NO"
}
@ -207,6 +207,27 @@ define(function(require) {
var userInputs = {};
// Retrieve Resource Pool Attribute
var rpInput = $(".vcenter_rp_input", context);
if (rpInput.length > 0) {
var rpModify = WizardFields.retrieveInput($('.modify_rp', rpInput));
var rpInitial = WizardFields.retrieveInput($('.initial_rp', rpInput));
var rpParams = WizardFields.retrieveInput($('.available_rps', rpInput));
if (rpModify === 'fixed' && rpInitial !== '') {
templateJSON['VCENTER_RESOURCE_POOL'] = rpInitial;
} else if (rpModify === 'list' && rpParams !== '') {
var rpUserInputs = UserInputs.marshall({
type: 'list',
description: Locale.tr("In which resource pool do you want this VM to run?"),
initial: rpInitial,
params: WizardFields.retrieveInput($('.available_rps', rpInput))
});
userInputs['VCENTER_RESOURCE_POOL'] = rpUserInputs;
}
}
// Since the USER_INPUTS section is not enabled for vCenter, we can assume that there are no more user inputs defined
if (!$.isEmptyObject(userInputs)) {
templateJSON['USER_INPUTS'] = userInputs;
@ -230,16 +251,17 @@ define(function(require) {
delete sunstone_template["NETWORK_SELECT"];
}
if (Config.isFeatureEnabled("vcenter_deploy_folder")) {
if (Config.isFeatureEnabled("vcenter_vm_folder")) {
if (templateJSON["HYPERVISOR"] == 'vcenter' &&
templateJSON["DEPLOY_FOLDER"]) {
WizardFields.fillInput($("#vcenter_deploy_folder", context), templateJSON["DEPLOY_FOLDER"]);
templateJSON["VCENTER_VM_FOLDER"]) {
WizardFields.fillInput($("#vcenter_vm_folder", context), templateJSON["VCENTER_VM_FOLDER"]);
}
} else {
$(".vcenter_deploy_folder_input", context).remove();
$(".vcenter_vm_folder_input", context).remove();
}
delete templateJSON["DEPLOY_FOLDER"];
delete templateJSON["VCENTER_VM_FOLDER"];
if (templateJSON["HYPERVISOR"] == 'vcenter') {
var publicClouds = templateJSON["PUBLIC_CLOUD"];
@ -264,30 +286,23 @@ define(function(require) {
}
if (templateJSON["USER_INPUTS"]) {
if (templateJSON["USER_INPUTS"]["VCENTER_DATASTORE"]) {
var ds = UserInputs.unmarshall(templateJSON["USER_INPUTS"]["VCENTER_DATASTORE"]);
$('.modify_datastore', context).val('list');
$('.initial_datastore', context).val(ds.initial);
$('.available_datastores', context).val(ds.params);
delete templateJSON["USER_INPUTS"]["VCENTER_DATASTORE"];
}
if (templateJSON["USER_INPUTS"]["RESOURCE_POOL"]) {
var rp = UserInputs.unmarshall(templateJSON["USER_INPUTS"]["RESOURCE_POOL"]);
if (templateJSON["USER_INPUTS"]["VCENTER_RESOURCE_POOL"]) {
var rp = UserInputs.unmarshall(templateJSON["USER_INPUTS"]["VCENTER_RESOURCE_POOL"]);
$('.modify_rp', context).val('list');
$('.initial_rp', context).val(rp.initial);
$('.available_rps', context).val(rp.params);
delete templateJSON["USER_INPUTS"]["RESOURCE_POOL"];
delete templateJSON["USER_INPUTS"]["VCENTER_RESOURCE_POOL"];
}
}
if (templateJSON["RESOURCE_POOL"]) {
if (templateJSON["VCENTER_RESOURCE_POOL"]) {
$('.modify_rp', context).val('fixed');
WizardFields.fillInput($('.initial_rp', context), templateJSON["RESOURCE_POOL"]);
WizardFields.fillInput($('.initial_rp', context), templateJSON["VCENTER_RESOURCE_POOL"]);
delete templateJSON["RESOURCE_POOL"];
delete templateJSON["VCENTER_RESOURCE_POOL"];
}
if(templateJSON["VCENTER_TEMPLATE_REF"]){


@ -62,13 +62,13 @@
<input type="text" id="vcenter_template_ref"/>
</div>
</div>
<div class="vcenter_deploy_folder_input row">
<div class="vcenter_vm_folder_input row">
<div class="medium-6 columns">
<label for="vcenter_deploy_folder">
{{tr "Deployment Folder"}}
{{{tip (tr "If specified, the VMs and Templates folder path where the VM will be created inside the data center. The path is delimited by slashes e.g /Management/VMs. If no path is set the VM will be deployed in the same folder where the template exists.")}}}
<label for="vcenter_vm_folder">
{{tr "vCenter VM Folder"}}
{{{tip (tr "If specified, the VMs and Templates folder path where the VM will be created inside the data center. The path is delimited by slashes e.g /Management/VMs. If no path is set the VM will be placed in the same folder where the template is located.")}}}
</label>
<input type="text" id="vcenter_deploy_folder"/>
<input type="text" id="vcenter_vm_folder"/>
</div>
</div>
</fieldset>

View File

@ -47,6 +47,14 @@
<input type="text" wizard_field="NETWORK_UNAME" id="NETWORK_UNAME" name="NETWORK_UNAME"/>
</div>
</div>
<div class="row" style="display: none">
<div class="medium-6 columns">
<label>
{{tr "OPEN NEBULA MANAGEMENT"}}
</label>
<input type="text" wizard_field="OPENNEBULA_MANAGED" id="OPENNEBULA_MANAGED" name="OPENNEBULA_MANAGED"/>
</div>
</div>
</fieldset>
<fieldset>
<legend>{{tr "Override Network Values IPv4"}}</legend>
@ -105,10 +113,10 @@
<legend>{{tr "Override Network Values IPv6"}}</legend>
<div class="row">
<div class="medium-6 columns">
<label for="IP6_GLOBAL">
{{tr "Global address"}}
<label for="IP6">
{{tr "IP"}}
</label>
<input type="text" wizard_field="IP6_GLOBAL" id="IP6_GLOBAL" name="IP6_GLOBAL" size="3" />
<input type="text" wizard_field="IP6" id="IP6" name="IP6" size="3" />
</div>
<div class="medium-6 columns">
<label for="GATEWAY6">


@ -74,19 +74,25 @@
<label for="TYPE">
{{tr "Disk type"}}
</label>
<select wizard_field="TYPE" id="TYPE" name="type">
<select class="hypervisor only_kvm" wizard_field="TYPE" id="TYPE" name="type">
<option value="fs">{{tr "FS"}}</option>
<option value="swap">{{tr "Swap"}}</option>
</select>
<select class="hypervisor only_vcenter" style="display: none" wizard_field="TYPE" id="TYPE" name="type">
<option value="fs">{{tr "FS"}}</option>
</select>
</div>
<div class="medium-6 columns">
<label for="FORMAT">
{{tr "Filesystem format"}}
</label>
<select wizard_field="FORMAT" name="format" id="FORMAT">
<select class="hypervisor only_kvm" wizard_field="FORMAT" name="format" id="FORMAT">
<option value="raw" selected="selected">raw</option>
<option value="qcow2">qcow2</option>
</select>
<select class="hypervisor only_vcenter" style="display: none" wizard_field="FORMAT" name="format" id="FORMAT">
<option value="raw" selected="selected">raw</option>
</select>
</div>
</div>
<br>


@ -1,5 +1,5 @@
<div class="row">
<div class="medium-6 columns">
<div class="medium-6 columns hypervisor only_kvm">
<label for="TARGET">
{{tr "Target device"}}
{{{tip (tr "Device to map image disk. If set, it will overwrite the default device mapping.")}}}
@ -20,7 +20,7 @@
</div>
</div>
<div class="row">
<div class="medium-6 columns">
<div class="medium-6 columns hypervisor only_kvm">
<label>
{{tr "BUS"}}
<select id="disk_dev_prefix" name="disk_dev_prefix">
@ -35,7 +35,7 @@
<input type="text" id="custom_disk_dev_prefix" name="custom_disk_dev_prefix" />
</div>
</div>
<div class="medium-6 columns">
<div class="medium-6 columns hypervisor only_kvm">
<label for="READONLY">
{{tr "Read-only"}}
</label>
@ -45,6 +45,30 @@
<option value="no">{{tr "no"}}</option>
</select>
</div>
<div class="medium-6 columns hypervisor only_vcenter">
<label for="vcenter_adapter_type">
{{tr "Bus adapter controller"}}
</label>
<select wizard_field="VCENTER_ADAPTER_TYPE" name="vcenter_adapter_type" id="vcenter_adapter_type">
<option value="" selected="selected"></option>
<option value="lsiLogic">lsiLogic</option>
<option value="ide">ide</option>
<option value="busLogic">busLogic</option>
<option value="custom">custom</option>
</select>
</div>
<div class="medium-6 columns only_vcenter">
<label for="vcenter_disk_type">
{{tr "Disk provisioning type"}}
</label>
<select wizard_field="VCENTER_DISK_TYPE" name="vcenter_disk_type" id="vcenter_disk_type">
<option value="" selected="selected"></option>
<option value="thin">Thin</option>
<option value="thick">Thick</option>
<option value="eagerZeroedThick">Eager Zeroed Thick</option>
<option value="custom">custom</option>
</select>
</div>
</div>
<div class="row vm_param">
<div class="medium-6 columns hypervisor only_kvm">


@ -32,7 +32,7 @@ define(function(require) {
var DisksResize = require('utils/disks-resize');
var NicsSection = require('utils/nics-section');
var VMGroupSection = require('utils/vmgroup-section');
var DeployFolder = require('utils/deploy-folder');
var VcenterVMFolder = require('utils/vcenter-vm-folder');
var CapacityInputs = require('tabs/templates-tab/form-panels/create/wizard-tabs/general/capacity-inputs');
var Config = require('sunstone-config');
@ -214,10 +214,10 @@ define(function(require) {
tmp_json.PCI = pcis;
}
if (Config.isFeatureEnabled("vcenter_deploy_folder")){
if (Config.isFeatureEnabled("vcenter_vm_folder")){
if(!$.isEmptyObject(original_tmpl.TEMPLATE.HYPERVISOR) &&
original_tmpl.TEMPLATE.HYPERVISOR === 'vcenter'){
$.extend(tmp_json, DeployFolder.retrieveChanges($(".deployFolderContext" + template_id)));
$.extend(tmp_json, VcenterVMFolder.retrieveChanges($(".vcenterVMFolderContext" + template_id)));
}
}
@ -279,9 +279,9 @@ define(function(require) {
VMGroupSection.insert(template_json,
$(".vmgroupContext"+ template_json.VMTEMPLATE.ID, context));
deployFolderContext = $(".deployFolderContext" + template_json.VMTEMPLATE.ID, context);
DeployFolder.setup(deployFolderContext);
DeployFolder.fill(deployFolderContext, template_json.VMTEMPLATE);
vcenterVMFolderContext = $(".vcenterVMFolderContext" + template_json.VMTEMPLATE.ID, context);
VcenterVMFolder.setup(vcenterVMFolderContext);
VcenterVMFolder.fill(vcenterVMFolderContext, template_json.VMTEMPLATE);
var inputs_div = $(".template_user_inputs" + template_json.VMTEMPLATE.ID, context);


@ -68,8 +68,8 @@
</div>
</div>
<div class="row">
<div class="medium-6 small-12 columns deployFolderContext{{element.ID}}">
<div class="provision_deploy_folder_selector">
<div class="medium-6 small-12 columns vcenterVMFolderContext{{element.ID}}">
<div class="provision_vcenter_vm_folder_selector">
</div>
</div>
</div>


@ -33,6 +33,7 @@ define(function(require) {
require('./vms-tab/dialogs/disk-resize'),
require('./vms-tab/dialogs/attach-nic'),
require('./vms-tab/dialogs/snapshot'),
require('./vms-tab/dialogs/revert'),
require('./vms-tab/dialogs/vnc'),
require('./vms-tab/dialogs/spice'),
require('./vms-tab/dialogs/saveas-template')


@ -141,9 +141,11 @@ define(function(require) {
custom_classes : "state-dependent"
},
"VM.terminate_hard" : {
type: text,
text: Locale.tr("Terminate") + ' <span class="label secondary radius">' + Locale.tr("hard") + '</span>',
type: "confirm",
icon: "<i class='fa fa-trash fa-3' style='color:#ec5840'/>",
text: Locale.tr(" Terminate") + ' <span class="label secondary radius">' + Locale.tr("hard") + '</span>',
layout: "vmsdelete_buttons",
tip: Locale.tr("This will remove information from non-persistent hard disks"),
custom_classes : "state-dependent"
},
"VM.resched" : {


@ -0,0 +1,92 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2016, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
define(function(require) {
/*
DEPENDENCIES
*/
var BaseDialog = require('utils/dialogs/dialog');
var TemplateHTML = require('hbs!./revert/html');
var Sunstone = require('sunstone');
var Tips = require('utils/tips');
/*
CONSTANTS
*/
var DIALOG_ID = require('./revert/dialogId');
var TAB_ID = require('../tabId')
/*
CONSTRUCTOR
*/
function Dialog() {
this.dialogId = DIALOG_ID;
BaseDialog.call(this);
};
Dialog.DIALOG_ID = DIALOG_ID;
Dialog.prototype = Object.create(BaseDialog.prototype);
Dialog.prototype.constructor = Dialog;
Dialog.prototype.html = _html;
Dialog.prototype.onShow = _onShow;
Dialog.prototype.setup = _setup;
Dialog.prototype.setElement = _setElement;
return Dialog;
/*
FUNCTION DEFINITIONS
*/
function _html() {
return TemplateHTML({
'dialogId': this.dialogId
});
}
function _setup(context) {
var that = this;
Tips.setup(context);
$('#' + DIALOG_ID + 'Form', context).submit(function() {
var snapshot_id = $(this).parents('tr').attr('snapshot_id');
Sunstone.runAction('VM.snapshot_revert', that.element.ID, {"snapshot_id": snapshot_id});
Sunstone.getDialog(DIALOG_ID).hide();
Sunstone.getDialog(DIALOG_ID).reset();
return false;
});
return false;
}
function _onShow(context) {
this.setNames( {tabId: TAB_ID} );
return false;
}
function _setElement(element) {
this.element = element
}
});


@ -0,0 +1,19 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2016, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
define(function(require) {
return 'revertVMDialog';
});


@ -0,0 +1,40 @@
{{! -------------------------------------------------------------------------- }}
{{! Copyright 2002-2016, OpenNebula Project, OpenNebula Systems }}
{{! }}
{{! Licensed under the Apache License, Version 2.0 (the "License"); you may }}
{{! not use this file except in compliance with the License. You may obtain }}
{{! a copy of the License at }}
{{! }}
{{! http://www.apache.org/licenses/LICENSE-2.0 }}
{{! }}
{{! Unless required by applicable law or agreed to in writing, software }}
{{! distributed under the License is distributed on an "AS IS" BASIS, }}
{{! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. }}
{{! See the License for the specific language governing permissions and }}
{{! limitations under the License. }}
{{! -------------------------------------------------------------------------- }}
<div id="{{dialogId}}" class="reveal small" data-reveal>
<div class="row">
<h3 class="subheader">
{{tr "Revert"}}
</h3>
</div>
<div class="confirm-resources-header"></div>
<div class="reveal-body">
<form id="{{dialogId}}Form" action="">
<div class="row">
<div id="confirm_tip">{{tr "You have to confirm this action."}}</div>
<br/>
<div id="question">{{tr "Do you want to proceed?"}}</div>
<br />
</div>
<div class="form_buttons">
<button class="button radius right success" type="submit">{{tr "OK"}}</button>
</div>
<button class="close-button" data-close aria-label="{{tr "Close modal"}}" type="button">
<span aria-hidden="true">&times;</span>
</button>
</form>
</div>
</div>


@ -34,6 +34,7 @@ define(function(require) {
var TAB_ID = require('../tabId');
var PANEL_ID = require('./snapshots/panelId');
var SNAPSHOT_DIALOG_ID = require('../dialogs/snapshot/dialogId');
var REVERT_DIALOG_ID = require('../dialogs/revert/dialogId');
var RESOURCE = "VM"
var XML_ROOT = "VM"
@ -165,8 +166,9 @@ define(function(require) {
if (Config.isTabActionEnabled("vms-tab", "VM.snapshot_revert")) {
context.off('click', '.snapshot_revert');
context.on('click', '.snapshot_revert', function() {
var snapshot_id = $(this).parents('tr').attr('snapshot_id');
Sunstone.runAction('VM.snapshot_revert', that.element.ID, {"snapshot_id": snapshot_id});
var dialog = Sunstone.getDialog(REVERT_DIALOG_ID);
dialog.setElement(that.element);
dialog.show();
return false;
});
}


@ -436,6 +436,10 @@ define(function(require) {
context.on('click', '#attach_disk', function() {
var dialog = Sunstone.getDialog(ATTACH_DISK_DIALOG_ID);
dialog.setElement(that.element);
if(that.element.USER_TEMPLATE.HYPERVISOR && that.element.USER_TEMPLATE.HYPERVISOR == 'vcenter'){
$('.hypervisor.only_kvm').hide();
$('.hypervisor.only_vcenter').show();
}
dialog.show();
return false;
});
@ -445,7 +449,6 @@ define(function(require) {
context.off('click', '.detachdisk');
context.on('click', '.detachdisk', function() {
var disk_id = $(this).parents('tr').attr('disk_id');
Sunstone.getDialog(CONFIRM_DIALOG_ID).setParams({
//header :
headerTabId: TAB_ID,
@ -596,7 +599,7 @@ define(function(require) {
return false;
});
}
Tree.setup(context);
}


@ -126,7 +126,8 @@ define(function(require) {
$("div.mode_param [wizard_field]", context).prop('wizard_field_disabled', true);
$('input#vn_mad', context).removeAttr('required');
$('input#vn_mad', context).removeAttr('value');
$('#vcenter_switch_name', context).removeAttr('required');
switch ($(this).val()) {
case "dummy":
$("div.mode_param.dummy", context).show();
@ -164,6 +165,16 @@ define(function(require) {
$('input#bridge', context).attr('required', '');
break;
case "vcenter":
$("div.mode_param.vcenter", context).show();
$("div.mode_param.vcenter [wizard_field]", context).prop('wizard_field_disabled', false);
$('input#bridge', context).attr('value', $('#name', context).val());
$('#vcenter_switch_name', context).attr('required', '');
$('input#vn_mad', context).attr('required', '');
$('input#vn_mad', context).attr('value', 'vcenter');
$('#div_vn_mad', context).hide();
break;
case "custom":
$("div.mode_param.custom", context).show();
$("div.mode_param.custom [wizard_field]", context).prop('wizard_field_disabled', false);


@ -87,10 +87,11 @@
<option value="802.1Q">{{tr "802.1Q"}}</option>
<option value="vxlan">{{tr "VXLAN"}}</option>
<option value="ovswitch">{{tr "Open vSwitch"}}</option>
<option value="vcenter">{{tr "vCenter"}}</option>
<option value="custom">{{tr "Custom"}}</option>
</select>
</div>
<div class="large-3 medium-6 columns mode_param custom">
<div class="large-3 medium-6 columns mode_param vcenter custom" id="div_vn_mad">
<label for="vn_mad">
{{tr "Network Driver (VN_MAD)"}}
</label>
@ -115,6 +116,9 @@
<div class="network_mode_description" value="ovswitch">
{{tr "Open vSwitch, restrict network access with Open vSwitch Virtual Switch. Security Groups are not applied."}}
</div>
<div class="network_mode_description" value="vcenter">
{{tr "vSphere standard switches or distributed switches with port groups. Security Groups are not applied."}}
</div>
<div class="network_mode_description" value="custom">
{{tr "Custom, use a custom virtual network driver."}}
</div>
@ -139,7 +143,7 @@
</div>
</div>
<div class="row">
<div class="medium-3 columns left mode_param 8021Q vxlan ovswitch custom">
<div class="medium-3 columns left mode_param 8021Q vxlan ovswitch vcenter custom">
<label>
{{tr "VLAN ID"}}
<select wizard_field="AUTOMATIC_VLAN_ID">
@ -159,13 +163,45 @@
</label>
<input type="text" wizard_field="PHYDEV" name="phydev" id="phydev" />
</div>
<div class="medium-3 columns left mode_param 8021Q vxlan custom">
<div class="medium-3 columns left mode_param vcenter">
<label for="phydev">
{{tr "Physical device"}}
<span class="tip">
{{tr "Physical NIC names for uplinks. Use comma to separate values (e.g vmnic1,vmnic2)"}}
</span>
</label>
<input type="text" wizard_field="PHYDEV" name="phydev" id="phydev" />
</div>
<div class="medium-3 columns left mode_param 8021Q vxlan vcenter custom">
<label for="mtu">
{{tr "MTU of the interface"}}
</label>
<input type="text" wizard_field="MTU" name="mtu" id="mtu" />
</div>
</div>
<div class="row">
<div class="medium-3 columns left mode_param vcenter">
<label for="vcenter_switch_name">
{{tr "Switch name"}}
</label>
<input type="text" wizard_field="VCENTER_SWITCH_NAME" name="vcenter_switch_name" id="vcenter_switch_name" maxlength="32" />
</div>
<div class="medium-3 columns left mode_param vcenter">
<label>
{{tr "Number of ports"}}
</label>
<input type="number" wizard_field="VCENTER_SWITCH_NPORTS" name="vcenter_switch_nports" id="vcenter_switch_nports" />
</div>
<div class="medium-3 columns left mode_param vcenter">
<label>
{{tr "Port group type"}}
<select wizard_field="VCENTER_PORTGROUP_TYPE">
<option value="Port Group">{{tr "Port group"}}</option>
<option value="Distributed Port Group">{{tr "Distributed Port Group"}}</option>
</select>
</label>
</div>
</div>
</div>
<div class="tabs-panel" id="vnetCreateARTab">
<div class="row collapse" id="vnetCreateARTabCreate">


@ -44,7 +44,7 @@
<div class="row collapse ar_input type_ip4_6 type_ip6 switch left">
<label for="{{str_ar_tab_id}}_">{{tr "SLAAC"}}
</label>
<input class="switch-input slaac" wizard_field="SLAAC" name="SLAAC" id="{{str_ar_tab_id}}_slaac" type="checkbox">
<input class="switch-input slaac" id="{{str_ar_tab_id}}_slaac" type="checkbox">
<label class="switch-paddle" for="{{str_ar_tab_id}}_slaac">
</label>
</div>


@ -91,6 +91,7 @@ define(function(require) {
NicsSection.insert({},
$(".nicsContext", context),
{ floatingIP: true,
forceIPv6:true,
forceIPv4:true,
management: true,
securityGroups: Config.isFeatureEnabled("secgroups")});


@ -70,6 +70,7 @@ define(function(require) {
NicsSection.insert({},
$(".nicsContext", context),
{ floatingIP: true,
forceIPv6:true,
forceIPv4:true,
management: true,
securityGroups: Config.isFeatureEnabled("secgroups"),


@ -145,6 +145,15 @@ define(function(require) {
}
}
var ip6 = $("input.manual_ip6", $(this)).val();
if (ip6 != undefined){
delete nic["IP6"];
if (ip6 != ""){
nic["IP6"] = ip6;
}
}
delete nic["VROUTER_MANAGEMENT"];
if ($("input.management", $(this)).prop("checked")){


@ -63,6 +63,23 @@
</div>
{{/if}}
</div>
<div class="row">
{{#if options.forceIPv6}}
<div class="medium-6 columns">
<label>
{{tr "Force IPv6:"}}
<span class="tip">
{{tr "Optionally, you can force the IP assigned to the network interface."}}
</span>
{{#if options.nic.IP6}}
<input type="text" class="manual_ip6" value="{{options.nic.IP6}}"/>
{{else}}
<input type="text" class="manual_ip6"/>
{{/if}}
</label>
</div>
{{/if}}
</div>
{{#if options.securityGroups}}
<div class="row collapse">
<h6>


@ -18,6 +18,7 @@ define(function(require) {
var Locale = require('utils/locale');
var Sunstone = require('sunstone');
var Notifier = require('utils/notifier');
//Escape doublequote in a string and return it
function _escapeDoubleQuotes(string) {
@ -92,7 +93,123 @@ define(function(require) {
return template_str;
}
function _merge_templates(template_master, template_slave, advanced){
if(!advanced)
template_slave = _convert_string_to_template(template_slave);
else
template_master = _convert_string_to_template(template_master);
if((advanced && template_master) || (!advanced && template_slave)){
var template_final = {};
$.extend(true, template_final, template_slave, template_master);
return template_final;
}else{
Notifier.notifyError(Locale.tr("Advanced template malformed"));
}
return template_master;
}
// Transforms an object to an opennebula template string
function _convert_string_to_template(string_json, unshown_values) {
string_json = string_json.split("\n").join(" ");
string_json = string_json.split("  ").join(" ");
string_json = string_json.split("  ").join(" ");
var match_symbols = "=[,]"
var template_json = {};
var array_string = string_json.split(" ");
var i = 0;
var array = false;
while(i < array_string.length-1){
if(!array_string[i].match('"') && !array_string[i].match(match_symbols)){ //is key
var key = array_string[i];
if(template_json[key]){ //exists key, generate Array
if(!Array.isArray(template_json[key])){
var obj = template_json[key];
template_json[key] = [];
template_json[key].push(obj);
}
array = true;
}
else{
array = false;
}
template_json[key];
i+=1;
if(array_string[i] == "="){
i+=1;
if(array_string[i] != "["){
var value = "";
if(key == "DESCRIPTION" && array_string[i][0] == '"' && array_string[i][array_string[i].length-1] != '"'){
while (array_string[i][array_string[i].length-1] != '"' && i < array_string.length-1){
value += array_string[i] + " ";
i+=1;
}
if(!value.match("="))
value = value.split('"').join("");
else{
value = value.slice(0,-1);
value = value.slice(1);
}
if(array){
template_json[key].push(value);
}else{
template_json[key] = value;
}
i+=1;
}
else if(array_string[i].match('"')){
value = array_string[i];
if(!value.match("="))
value = value.split('"').join("");
else{
value = value.slice(0,-1);
value = value.slice(1);
}
if(array){
template_json[key].push(value);
}else{
template_json[key] = value;
}
i+=1;
}
else return false;
}else{
var obj = {}
i+=1;
while(array_string[i] != ']' && i < array_string.length-1){
var sub_key;
if(!array_string[i].match('"') && !array_string[i].match(match_symbols)){
sub_key = array_string[i];
i+=1;
if(array_string[i] == "="){
i+=1;
if(array_string[i].match('"')){
if(array_string[i][array_string[i].length-1] == ","){
array_string[i] = array_string[i].slice(0,-1);
}
var value = array_string[i];
obj[sub_key] = value;
obj[sub_key] = obj[sub_key].split('"').join("");
i+=1;
}else return false;
}else return false;
}else return false;
}
if(array){
template_json[key].push(obj);
}else{
template_json[key] = {};
template_json[key] = obj;
}
i+=1;
}
}else return false;
}else return false;
}
return template_json;
}
return {
'mergeTemplates' : _merge_templates,
'stringToTemplate': _convert_string_to_template,
'templateToString': _convert_template_to_string,
'htmlDecode': _htmlDecode,
'htmlEncode': _htmlEncode,

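As a rough illustration of what _convert_string_to_template handles, here is a minimal Ruby sketch of the same idea (names hypothetical; single-line vector values assumed for brevity): scalar KEY = "value" pairs become hash entries, bracketed vectors become nested hashes, and repeated keys collapse into arrays.

    # Minimal sketch of OpenNebula template parsing (illustrative only).
    def parse_template(str)
      tmpl = {}
      str.each_line do |line|
        line = line.strip
        next if line.empty?
        key, _, rest = line.partition("=")
        key  = key.strip
        rest = rest.strip
        value = if rest.start_with?("[")     # vector value -> nested hash
          rest[1..-1].sub(/\]\s*\z/, "").split(",").each_with_object({}) do |pair, h|
            k, _, v = pair.partition("=")
            h[k.strip] = v.strip.delete('"')
          end
        else                                 # scalar value
          rest.delete('"')
        end
        if tmpl.key?(key)                    # repeated key -> array
          tmpl[key] = [tmpl[key]] unless tmpl[key].is_a?(Array)
          tmpl[key] << value
        else
          tmpl[key] = value
        end
      end
      tmpl
    end

    p parse_template(%Q(CPU = "1"\nDISK = [ IMAGE_ID = "7", SIZE = "1024" ]))
    # => {"CPU"=>"1", "DISK"=>{"IMAGE_ID"=>"7", "SIZE"=>"1024"}}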
View File

@ -55,11 +55,13 @@ define(function(require) {
function _setup(context){
context.on("click", ".add_user_input_attr", function() {
$(".user_input_attrs tbody", context).append(RowTemplateHTML());
$(".user_input_attrs tbody", context).append(RowTemplateHTML({'idInput': UniqueId.id()}));
$('tbody label').css('cursor', 'pointer');
$("select.user_input_type", context).change();
});
$('tbody').sortable();
context.on("change", "select.user_input_type", function() {
var row = $(this).closest("tr");
@ -74,12 +76,24 @@ define(function(require) {
function _retrieve(context){
var userInputsJSON = {};
var ids = [];
var index = 0;
$('.user_input_attrs tbody tr').each(function(key, value){
ids[index] = "_" + key + "_" + $(".user_input_name", $(this)).val();
index += 1;
});
index = 0;
$(".user_input_attrs tbody tr", context).each(function() {
if ($(".user_input_name", $(this)).val()) {
var attr = {};
attr.name = $(".user_input_name", $(this)).val();
attr.mandatory = true;
if($('.user_input_mandatory', $(this)).prop('checked')){
attr.mandatory = true;
} else {
attr.mandatory = false;
}
attr.type = $(".user_input_type", $(this)).val();
attr.description = $(".user_input_description", $(this)).val();
@ -89,7 +103,6 @@ define(function(require) {
case "fixed":
attr.initial = $("."+attr.type+" input.user_input_initial", $(this)).val();
break;
case "range":
case "range-float":
var min = $("."+attr.type+" input.user_input_params_min", $(this)).val();
@ -101,10 +114,15 @@ define(function(require) {
attr.params = $("."+attr.type+" input.user_input_params", $(this)).val();
attr.initial = $("."+attr.type+" input.user_input_initial", $(this)).val();
break;
case "boolean":
attr.initial = $('.user_input_initial:checked', $(this)).val();
break;
}
userInputsJSON[attr.name] = _marshall(attr);
userInputsJSON[ids[index]] = _marshall(attr);
index += 1;
}
});
return userInputsJSON;
@ -115,28 +133,51 @@ define(function(require) {
if (userInputsJSON) {
$.each(userInputsJSON, function(key, value) {
$(".add_user_input_attr", context).trigger("click");
var trcontext = $(".user_input_attrs tbody tr", context).last();
$(".user_input_name", trcontext).val(key);
var name = "";
var len = key.split("_");
for (var i = 2; i < len.length; i++){
name += (len[i] + "_");
}
name = name.slice(0,-1);
$(".user_input_name", trcontext).val(name);
var attr = _unmarshall(value);
if (templateJSON[key] != undefined){
attr.initial = templateJSON[key];
}
$(".user_input_type", trcontext).val(attr.type).change();
$(".user_input_description", trcontext).val(attr.description);
if (attr.mandatory){
$('.user_input_mandatory', trcontext).attr("checked", "checked");
} else {
$('.user_input_mandatory', trcontext).removeAttr("checked");
}
switch(attr.type){
case "number":
case "number-float":
case "fixed":
$("."+attr.type+" input.user_input_initial", trcontext).val(attr.initial);
break;
case "boolean":
if(attr.initial == "YES"){
$('input#radio_yes', trcontext).attr("checked", "checked");
$('input#radio_no', trcontext).removeAttr('checked');
}
else {
$('input#radio_yes', trcontext).removeAttr("checked");
$('input#radio_no', trcontext).attr("checked", "checked");
}
break;
case "range":
case "range-float":
var values = attr.params.split(".."); // "2..8"
@ -343,6 +384,7 @@ define(function(require) {
switch (attr.type) {
case "number":
case "number-float":
case "boolean":
case "fixed":
st += ("| |" + (attr.initial != undefined ? attr.initial : "") );
@ -605,7 +647,11 @@ define(function(require) {
}
break;
case "password":
input = '<input type="password" value="'+value+'" '+wizard_field+' '+required+'/>';
input = '<br><input type="password" value="'+value+'" '+wizard_field+' '+required+'/>';
break;
case "boolean":
input = '<br>' + Locale.tr("YES ") + '<input type="radio" name="bool_' + attr.name + '" value="YES"' + wizard_field + ' ' + required + '/>';
input += Locale.tr("NO ") + '<input type="radio" name="bool_' + attr.name + '" value="NO"' + wizard_field + ' ' + required + '/>';
break;
case "number":
case "number-float":

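For reference, the marshalled user input built above is a pipe-separated string of the form mandatory|type|description|params|initial (fields inferred from the code; the "O" marker for optional inputs is an assumption). A minimal Ruby sketch:

    # Sketch: build and split a USER_INPUTS definition string
    # "mandatory|type|description|params|initial" (format inferred from the code above).
    def marshall_user_input(mandatory, type, description, params = " ", initial = "")
      [mandatory ? "M" : "O", type, description, params, initial].join("|")
    end

    ui = marshall_user_input(true, "boolean", "Enable feature?", " ", "YES")
    puts ui              # => M|boolean|Enable feature?| |YES
    p ui.split("|", 5)   # => ["M", "boolean", "Enable feature?", " ", "YES"]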
View File

@ -1,4 +1,4 @@
<tr>
<tr style="border-style: groove; border-width: 0.5px 0">
<td>
<label>{{tr "Name"}}
<input class="user_input_name" type="text" pattern="^\w+$"/>
@ -16,6 +16,7 @@
<option value="range"> {{tr "range"}} </option>
<option value="range-float"> {{tr "range (float)"}} </option>
<option value="list"> {{tr "list"}} </option>
<option value="boolean"> {{tr "boolean"}} </option>
</select>
</label>
</td>
@ -33,6 +34,11 @@
<input type="number" step="any" class="user_input_initial" placeholder="42.5"/>
</label>
</div>
<div class="user_input_type_right boolean">
<label>{{tr "Default value"}}</label>
<input type="radio" step="any" name="bool_{{idInput}}" class="user_input_initial" id="radio_yes" value="YES"> {{tr "YES"}}
<input type="radio" step="any" name="bool_{{idInput}}" class="user_input_initial" id="radio_no" value="NO"> {{tr "NO"}}
</div>
<div class="user_input_type_right range">
<div class="row collapse">
<div class="small-6 columns">
@ -78,6 +84,11 @@
</label>
</div>
</td>
<td>
<label>{{tr "Mandatory"}}</label>
<input checked type="checkbox" name="user_input_mandatory_{{idInput}}" class="switch input user_input_mandatory slaac" id="user_input_mandatory_{{idInput}}" hidden/>
<label class="switch-paddle" for="user_input_mandatory_{{idInput}}"></label>
</td>
<td>
<br/>
<a href="#"><i class="fa fa-times-circle remove-tab"></i></a>

View File

@ -1,4 +1,4 @@
<table class="user_input_attrs policies_table dataTable">
<table class="user_input_attrs policies_table dataTable" style="cursor: pointer;">
<thead>
<tr>
<th style="width:30%"></th>

View File

@ -21,7 +21,7 @@ define(function(require){
var OpenNebulaImage = require('opennebula/image');
var UserInputs = require('utils/user-inputs');
var WizardFields = require('utils/wizard-fields');
var DeployFolderTemplate = require('hbs!./deploy-folder/html');
var VcenterVMFolderTemplate = require('hbs!./vcenter-vm-folder/html');
return {
'setup': _setup,
@ -30,27 +30,27 @@ define(function(require){
};
function _setup(context) {
if (!Config.isFeatureEnabled("vcenter_deploy_folder")){
if (!Config.isFeatureEnabled("vcenter_vm_folder")){
$(context).remove();
}
}
function _fill(context, element) {
if (Config.isFeatureEnabled("vcenter_deploy_folder")){
var deployFolderContext = context;
if (Config.isFeatureEnabled("vcenter_vm_folder")){
var vcenterVMFolderContext = context;
var template_public_cloud_type = element.TEMPLATE.HYPERVISOR
if ($.isEmptyObject(template_public_cloud_type)) {
deployFolderContext.html("");
vcenterVMFolderContext.html("");
} else {
if (template_public_cloud_type === 'vcenter') {
var deploy_folder = element.TEMPLATE.DEPLOY_FOLDER
deployFolderContext.html(DeployFolderTemplate());
$("#deploy_folder_input", deployFolderContext).val(deploy_folder);
$("#deploy_folder_input", deployFolderContext).data("original_value",deploy_folder);
var vcenter_vm_folder = element.TEMPLATE.VCENTER_VM_FOLDER
vcenterVMFolderContext.html(VcenterVMFolderTemplate());
$("#vcenter_vm_folder_input", vcenterVMFolderContext).val(vcenter_vm_folder);
$("#vcenter_vm_folder_input", vcenterVMFolderContext).data("original_value",vcenter_vm_folder);
} else {
deployFolderContext.html("");
vcenterVMFolderContext.html("");
}
}
}

View File

@ -19,11 +19,11 @@
<i class="fa fa-folder"></i> {{tr "vCenter Deployment"}}
</legend>
<div class="deployContainer">
<label for="deploy_folder_input">
{{tr "Deployment Folder"}}
{{{tip (tr "If specified, the VMs and Template folder path where the VM will be created inside the data center. The path is delimited by slashes e.g /Management/VMs. If no path is set the VM will be deployed in the same folder where the template exists.")}}}
<label for="vcenter_vm_folder_input">
{{tr "vCenter VM Folder"}}
{{{tip (tr "If specified, the VMs and Template folder path where the VM will be created inside the data center. The path is delimited by slashes e.g /Management/VMs. If no path is set the VM will be placed in the same folder where the template is located.")}}}
</label>
<input type="text" name="deploy_folder_input" id="deploy_folder_input" wizard_field="DEPLOY_FOLDER"/>
<input type="text" name="vcenter_vm_folder_input" id="vcenter_vm_folder_input" wizard_field="VCENTER_VM_FOLDER"/>
</div>
</fieldset>

View File

@ -93,6 +93,7 @@ define(function(require) {
columns : [
'<input type="checkbox" class="check_all"/>',
Locale.tr("Name"),
Locale.tr("Resource Pool"),
""
]
});
@ -102,7 +103,14 @@ define(function(require) {
$.each(elements, function(id, cluster) {
var cluster_name = cluster.cluster_name;
var opts = { name: cluster_name };
var rp_list = '<select class="select_rp"><option></option>';
if(cluster.rp_list.length > 0){
for(var i = 0; i < cluster.rp_list.length; i++){
rp_list += '<option>' + cluster.rp_list[i].name + '</option>';
}
}
rp_list += '</select>';
var opts = { name: cluster_name , rp: rp_list};
var trow = $(RowTemplate(opts)).appendTo(tbody);
$(".check_item", trow).data("cluster", cluster);
@ -164,6 +172,8 @@ define(function(require) {
VCenterCommon.importLoading({context : row_context});
var rp = $('.select_rp option:selected', row_context).val();
var host_json = {
"host": {
"name": $(this).data("cluster").cluster_name,
@ -173,7 +183,7 @@ define(function(require) {
"cluster_id": cluster_id
}
};
var cluster_ref = $(this).data("cluster").cluster_ref;
var vcenter_uuid = $(this).data("cluster").vcenter_uuid;
var vcenter_version = $(this).data("cluster").vcenter_version;
@ -187,13 +197,24 @@ define(function(require) {
message : Locale.tr("Host created successfully. ID: %1$s", response.HOST.ID)
});
var template_raw =
"VCENTER_USER=\"" + that.opts.vcenter_user + "\"\n" +
"VCENTER_PASSWORD=\"" + that.opts.vcenter_password + "\"\n" +
"VCENTER_HOST=\"" + that.opts.vcenter_host + "\"\n" +
"VCENTER_INSTANCE_ID=\"" + vcenter_uuid + "\"\n" +
"VCENTER_CCR_REF=\"" + cluster_ref + "\"\n" +
"VCENTER_VERSION=\"" + vcenter_version + "\"\n";
if(rp != ""){
var template_raw =
"VCENTER_USER=\"" + that.opts.vcenter_user + "\"\n" +
"VCENTER_PASSWORD=\"" + that.opts.vcenter_password + "\"\n" +
"VCENTER_HOST=\"" + that.opts.vcenter_host + "\"\n" +
"VCENTER_INSTANCE_ID=\"" + vcenter_uuid + "\"\n" +
"VCENTER_CCR_REF=\"" + cluster_ref + "\"\n" +
"VCENTER_VERSION=\"" + vcenter_version + "\"\n" +
"VCENTER_RESOURCE_POOL=\"" + rp + "\"\n";
} else {
var template_raw =
"VCENTER_USER=\"" + that.opts.vcenter_user + "\"\n" +
"VCENTER_PASSWORD=\"" + that.opts.vcenter_password + "\"\n" +
"VCENTER_HOST=\"" + that.opts.vcenter_host + "\"\n" +
"VCENTER_INSTANCE_ID=\"" + vcenter_uuid + "\"\n" +
"VCENTER_CCR_REF=\"" + cluster_ref + "\"\n" +
"VCENTER_VERSION=\"" + vcenter_version + "\"\n";
}
Sunstone.runAction("Host.update_template", response.HOST.ID, template_raw);
},

View File

@ -19,6 +19,7 @@
<input type="checkbox" class="check_item"/>
</td>
<td>{{name}}</td>
<td>{{{rp}}}</td>
<td>
<span class="vcenter_import_result">
</span>&nbsp;

View File

@ -45,7 +45,6 @@ define(function(require) {
/*
Retrieve the list of templates from vCenter and fill the container with them
opts = {
datacenter: "Datacenter Name",
cluster: "Cluster Name",
@ -56,6 +55,7 @@ define(function(require) {
}
*/
function _fillVCenterTemplates(opts) {
this.opts = opts;
var path = '/vcenter/templates';
var context = $(".vcenter_import", opts.container);
@ -102,9 +102,6 @@ define(function(require) {
$.each(elements, function(id, element) {
var opts = {};
if (element.ds && element.ds !== '') {
opts.datastore = UserInputs.unmarshall(element.ds);
}
if (element.rp && element.rp !== '') {
opts.resourcePool = UserInputs.unmarshall(element.rp);
@ -160,6 +157,7 @@ define(function(require) {
}
function _import(context) {
that = this;
$.each($(".vcenter_import_table", context), function() {
$.each($(this).DataTable().$(".check_item:checked"), function() {
var row_context = $(this).closest("tr");
@ -169,27 +167,6 @@ define(function(require) {
var attrs = [];
var userInputs = [];
// Retrieve Datastore Attribute
var dsInput = $(".vcenter_datastore_input", row_context);
if (dsInput.length > 0) {
var dsModify = $('.modify_datastore', dsInput).val();
var dsInitial = $('.initial_datastore', dsInput).val();
var dsParams = $('.available_datastores', dsInput).val();
if (dsModify === 'fixed' && dsInitial !== '') {
attrs.push('VCENTER_DATASTORE="' + dsInitial + '"')
} else if (dsModify === 'list' && dsParams !== '') {
var dsUserInputsStr = UserInputs.marshall({
type: 'list',
description: Locale.tr("Which datastore you want this VM to run on?"),
initial: dsInitial,
params: dsParams
});
userInputs.push('VCENTER_DATASTORE="' + dsUserInputsStr + '"');
}
}
// Retrieve Resource Pool Attribute
var rpInput = $(".vcenter_rp_input", row_context);
if (rpInput.length > 0) {
@ -198,7 +175,7 @@ define(function(require) {
var rpParams = $('.available_rps', rpInput).val();
if (rpModify === 'fixed' && rpInitial !== '') {
attrs.push('RESOURCE_POOL="' + rpInitial + '"');
attrs.push('VCENTER_RESOURCE_POOL="' + rpInitial + '"');
} else if (rpModify === 'list' && rpParams !== '') {
var rpUserInputs = UserInputs.marshall({
type: 'list',
@ -207,7 +184,7 @@ define(function(require) {
params: $('.available_rps', rpInput).val()
});
userInputs.push('RESOURCE_POOL="' + rpUserInputs + '"');
userInputs.push('VCENTER_RESOURCE_POOL="' + rpUserInputs + '"');
}
}
@ -222,29 +199,72 @@ define(function(require) {
template += "\nUSER_INPUTS=[\n" + userInputs.join(",\n") + "]";
}
var template_json = {
"vmtemplate": {
"template_raw": template
}
};
if($(this).data("import_data").import_disks_and_nics){
var path = '/vcenter/template/' + $(this).data("import_data").vcenter_ref;
$.ajax({
url: path,
type: "GET",
data: {timeout: false},
headers: {
"X-VCENTER-USER": that.opts.vcenter_user,
"X-VCENTER-PASSWORD": that.opts.vcenter_password,
"X-VCENTER-HOST": that.opts.vcenter_host
},
dataType: "json",
success: function(response){
template += "\n" + response.one;
var template_json = {
"vmtemplate": {
"template_raw": template
}
};
OpenNebulaTemplate.create({
timeout: true,
data: template_json,
success: function(request, response) {
VCenterCommon.importSuccess({
context : row_context,
message : Locale.tr("Template created successfully. ID: %1$s", response.VMTEMPLATE.ID)
});
},
error: function (request, error_json) {
VCenterCommon.importFailure({
context : row_context,
message : (error_json.error.message || Locale.tr("Cannot contact server: is it running and reachable?"))
});
}
});
},
error: function(response){
Notifier.onError({}, OpenNebulaError(response));
}
});
}
else {
var template_json = {
"vmtemplate": {
"template_raw": template
}
};
OpenNebulaTemplate.create({
timeout: true,
data: template_json,
success: function(request, response) {
VCenterCommon.importSuccess({
context : row_context,
message : Locale.tr("Template created successfully. ID: %1$s", response.VMTEMPLATE.ID)
});
},
error: function (request, error_json) {
VCenterCommon.importFailure({
context : row_context,
message : (error_json.error.message || Locale.tr("Cannot contact server: is it running and reachable?"))
});
}
});
OpenNebulaTemplate.create({
timeout: true,
data: template_json,
success: function(request, response) {
VCenterCommon.importSuccess({
context : row_context,
message : Locale.tr("Template created successfully. ID: %1$s", response.VMTEMPLATE.ID)
});
},
error: function (request, error_json) {
VCenterCommon.importFailure({
context : row_context,
message : (error_json.error.message || Locale.tr("Cannot contact server: is it running and reachable?"))
});
}
});
}
});
});
}
});
});

View File

@ -32,31 +32,6 @@
<div class="row">
<div class="large-11 large-centered columns">
<div id="{{data.vcenter_ref}}Advanced" class="content" hidden>
{{#if datastore}}
<div class="vcenter_datastore_input row">
<div class="medium-3 columns">
<label>
{{tr "Default Datastore"}}
<input type="text" class="initial_datastore" value="{{datastore.initial}}"/>
</label>
</div>
<div class="medium-3 columns">
<label>
{{tr "Type"}}
<select class="modify_datastore">
<option value="fixed">{{tr "Fixed"}}</option>
<option value="list">{{tr "Provide on instantiation"}}</option>
</select>
</label>
</div>
<div class="medium-4 columns end">
<label>
{{tr "Available Datastores"}}
<input type="text" class="available_datastores" value="{{datastore.params}}"/>
</label>
</div>
</div>
{{/if}}
{{#if resourcePool}}
<div class="vcenter_rp_input row">
<div class="medium-3 columns">

View File

@ -16,7 +16,8 @@
"jquery": "2.2.3",
"datatables": "1.10.12",
"navigo": "2.1.1",
"sprintf": "1.0.3"
"sprintf": "1.0.3",
"jquery-ui": "^1.12.1"
},
"authors": [
"Daniel Molina <dmolina@opennebula.org>",

View File

@ -78,7 +78,17 @@ end
get '/vcenter' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
rs = dc_folder.get_unimported_hosts
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
msg = "Could not get OpenNebula HostPool: #{hpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
rs = dc_folder.get_unimported_hosts(hpool,vcenter_client.vim.host)
[200, rs.to_json]
rescue Exception => e
logger.error("[vCenter] " + e.message)
@ -90,7 +100,17 @@ end
get '/vcenter/templates' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
templates = dc_folder.get_unimported_templates(vcenter_client)
tpool = VCenterDriver::VIHelper.one_pool(OpenNebula::TemplatePool, false)
if tpool.respond_to?(:message)
msg = "Could not get OpenNebula TemplatePool: #{tpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
templates = dc_folder.get_unimported_templates(vcenter_client, tpool)
if templates.nil?
msg = "No datacenter found"
@ -99,7 +119,6 @@ get '/vcenter/templates' do
error 404, error.to_json
end
#ctemplates = templates.select{|t| t[:host] == params[:name]}
[200, templates.to_json]
rescue Exception => e
logger.error("[vCenter] " + e.message)
@ -108,10 +127,138 @@ get '/vcenter/templates' do
end
end
get '/vcenter/template/:vcenter_ref' do
begin
t = {}
t[:one] = ""
template_copy_ref = nil
template = nil
append = true
lc_error = nil
ref = params[:vcenter_ref]
if !ref || ref.empty?
msg = "No template ref specified"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
template = VCenterDriver::Template.new_from_ref(ref, vcenter_client)
vc_uuid = vcenter_client.vim.serviceContent.about.instanceUuid
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool)
ipool = VCenterDriver::VIHelper.one_pool(OpenNebula::ImagePool)
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool)
# Optional parameters in the request body
if @request_body && !@request_body.empty?
body_hash = JSON.parse(@request_body)
use_linked_clones = body_hash['use_linked_clones'] || false
create_copy = body_hash['create_copy'] || false
template_name = body_hash['template_name'] || ""
if !use_linked_clones && (create_copy || !template_name.empty?)
msg = "Should not set create template copy or template copy name if not using linked clones"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 403, error.to_json
end
if use_linked_clones && !create_copy && !template_name.empty?
msg = "Should not set template copy name if create template copy hasn't been selected"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 403, error.to_json
end
if create_copy
lc_error, template_copy_ref = template.create_template_copy(template_name)
if template_copy_ref
template = VCenterDriver::Template.new_from_ref(template_copy_ref, vcenter_client)
one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, vcenter_client, vcenter_client.vim.host)
if one_template
lc_error, use_lc = template.create_delta_disks
if !lc_error
one_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n"
t = one_template
append = false # t[:one] replaces the current template
end
else
lc_error = "Could not obtain the info from the template's copy"
template.delete_template if template_copy_ref
end
end
else
lc_error, use_lc = template.create_delta_disks
if !lc_error
append = true
t[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc
end
end
end
# Create images or get disk information for the template
error, template_disks = template.import_vcenter_disks(vc_uuid, dpool, ipool)
if !error.empty?
append = false
template.delete_template if template_copy_ref
msg = error
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
t[:one] << template_disks
# Create images or get NIC information for the template
error, template_nics = template.import_vcenter_nics(vc_uuid, npool, vcenter_client.vim.host)
if !error.empty?
append = false
template.delete_template if template_copy_ref
msg = error
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
t[:one] << template_nics
t[:lc_error] = lc_error
t[:append] = append
[200, t.to_json]
rescue Exception => e
template.delete_template if template_copy_ref
logger.error("[vCenter] " + e.message)
error = Error.new(e.message)
error 403, error.to_json
end
end
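A sketch of how a client could exercise this endpoint; the path and the X-VCENTER-* headers come from the code above, while the host, port, credentials and template ref are illustrative:

    require 'net/http'
    require 'json'

    uri = URI("http://localhost:9869/vcenter/template/vm-1234")  # ref illustrative
    req = Net::HTTP::Get.new(uri)
    req['X-VCENTER-USER']     = 'administrator@vsphere.local'
    req['X-VCENTER-PASSWORD'] = 'changeme'
    req['X-VCENTER-HOST']     = 'vcenter.example.com'

    res = Net::HTTP.start(uri.hostname, uri.port) { |http| http.request(req) }
    t   = JSON.parse(res.body)
    puts t['one']   # DISK/NIC template sections to append to the VM template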
get '/vcenter/networks' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
networks = dc_folder.get_unimported_networks
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
if npool.respond_to?(:message)
msg = "Could not get OpenNebula VirtualNetworkPool: #{npool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
networks = dc_folder.get_unimported_networks(npool,vcenter_client.vim.host)
if networks.nil?
msg = "No datacenter found"
@ -157,7 +304,28 @@ end
get '/vcenter/datastores' do
begin
dc_folder = VCenterDriver::DatacenterFolder.new(vcenter_client)
datastores = dc_folder.get_unimported_datastores
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
if dpool.respond_to?(:message)
msg = "Could not get OpenNebula DatastorePool: #{dpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
msg = "Could not get OpenNebula HostPool: #{hpool.message}"
logger.error("[vCenter] " + msg)
error = Error.new(msg)
error 404, error.to_json
end
datastores = dc_folder.get_unimported_datastores(dpool, vcenter_client.vim.host, hpool)
if datastores.nil?
msg = "No datacenter found"
logger.error("[vCenter] " + msg)

View File

@ -75,11 +75,24 @@ begin
# Get source and target ds ref
disk = one_vm.retrieve_xmlelements("TEMPLATE/DISK[DISK_ID=#{disk_id}]").first
source_ds_ref = disk["VCENTER_DS_REF"]
source_ds_ref = ""
# If disk is unmanaged get the reference from template
if disk["OPENNEBULA_MANAGED"] && disk["OPENNEBULA_MANAGED"] == "NO"
image_path = one_vm["USER_TEMPLATE/VCENTER_TEMPLATE_DISK_#{disk["DISK_ID"]}"]
src_path = image_path.sub(/^\[(.*?)\] /, "")
source_ds_ref = one_vm["USER_TEMPLATE/VCENTER_TEMPLATE_DS_DISK_#{disk["DISK_ID"]}"]
else
# Get image source path
src_path = VCenterDriver::FileHelper.get_img_name(disk, vmid, vm['name'])
source_ds_ref = disk["VCENTER_DS_REF"]
end
source_ds_vc = VCenterDriver::Datastore.new_from_ref(source_ds_ref, vi_client)
# Get image source path
src_path = VCenterDriver::FileHelper.get_img_name(disk, vmid, vm['name'])
# Get target ds ref
target_ds = VCenterDriver::VIHelper.one_item(OpenNebula::Datastore, target_ds_id)

View File

@ -36,6 +36,8 @@ $: << File.dirname(__FILE__)
require 'vcenter_driver'
VM_PREFIX_DEFAULT = "one-$i-"
path = ARGV[0]
vmid = ARGV[1]
dsid = ARGV[2]
@ -54,9 +56,22 @@ host_id = host['ID']
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vmid)
vm_ref = one_vm['DEPLOY_ID']
vm = nil
begin
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
if !!vm_ref && !vm_ref.empty?
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
else
vcenter_vm = VCenterDriver::VIHelper.find_vcenter_vm_by_name(one_vm, host, vi_client)
# If no VM object retrieved, raise an exception
raise "Could not find the undeployed VM in vCenter's inventory using it's name" if !vcenter_vm
vm_ref = vcenter_vm._ref
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
end
rescue Exception => e
vi_client.close_connection if vi_client
@ -101,10 +116,106 @@ if path.match(/disk\.\d+$/)
else
# Remove the VM
begin
# All OpenNebula managed disks have been detached. The VM may still have
# disks that belong to the template (OPENNEBULA_MANAGED disks). These disks
# will be deleted with the destroy operation. If the user wants to
# save them, it can be done with the disk-saveas operation.
# If we have NICs associated with VN_MAD=vcenter we must check if pgs and
# switches must be deleted
# track pg or dpg in case they must be removed
vcenter_uuid = vm.get_vcenter_instance_uuid
networks = {}
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
# Check nics in VM
vm.item["config.hardware.device"].each do |dv|
if vm.is_nic?(dv)
if dv.backing.respond_to?(:network)
vnet_ref = dv.backing.network._ref
end
if dv.backing.respond_to?(:port) &&
dv.backing.port.respond_to?(:portgroupKey)
vnet_ref = dv.backing.port.portgroupKey
end
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
vnet_ref,
vcenter_uuid,
npool)
next if !one_network
if one_network["VN_MAD"] == "vcenter" && !networks.key?(one_network["BRIDGE"])
networks[one_network["BRIDGE"]] = one_network
end
end
end
#Remove pgs and switches if they are no longer needed
if !networks.empty?
esx_host = VCenterDriver::ESXHost.new_from_ref(vm.item.runtime.host._ref, vi_client)
dc = vm.cluster.get_dc # Get datacenter
networks.each do |pg_name, one|
if one["TEMPLATE/VCENTER_PORTGROUP_TYPE"] == "Port Group"
begin
esx_host.lock # Exclusive lock for ESX host operation
next if !esx_host.pg_exists(pg_name)
swname = esx_host.remove_pg(pg_name)
next if !swname
# We must update XML so the VCENTER_NET_REF is unset
VCenterDriver::Network.remove_net_ref(one["ID"])
next if !esx_host.vss_exists(swname)
swname = esx_host.remove_vss(swname)
rescue Exception => e
raise e
ensure
esx_host.unlock if esx_host # Remove lock
end
end
if one["TEMPLATE/VCENTER_PORTGROUP_TYPE"] == "Distributed Port Group"
begin
dc.lock
# Explore network folder in search of dpg and dvs
net_folder = dc.network_folder
net_folder.fetch!
# Get distributed port group if it exists
dpg = dc.dpg_exists(pg_name, net_folder)
dc.remove_dpg(dpg) if dpg
# We must update XML so the VCENTER_NET_REF is unset
VCenterDriver::Network.remove_net_ref(one["ID"])
# Get distributed virtual switch and try to remove it
switch_name = one["TEMPLATE/VCENTER_SWITCH_NAME"]
dvs = dc.dvs_exists(switch_name, net_folder)
dc.remove_dvs(dvs) if dvs
rescue Exception => e
#TODO rollback
raise e
ensure
dc.unlock if dc
end
end
end
end
vm.poweroff_hard if vm.is_powered_on?
vm.destroy
rescue Exception => e
STDERR.puts "Error unregistering vm #{vmid} (#{vm_ref})."\

View File

@ -295,33 +295,42 @@ void Quotas::quota_del(QuotaType type, int uid, int gid, Template * tmpl)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void Quotas::ds_del(map<int, Template *>& ds_quotas)
void Quotas::ds_del_recreate(int uid, int gid, vector<Template *>& ds_quotas)
{
Nebula& nd = Nebula::instance();
ImagePool * ipool = nd.get_ipool();
map<int, Template *>::iterator it;
vector<Template *>::iterator it;
for (it = ds_quotas.begin(); it != ds_quotas.end(); it++)
{
int image_id = it->first;
Template * tmpl = it->second;
int image_id = -1;
Template * tmpl = *it;
if ( tmpl == 0 )
bool vm_owner, img_owner;
tmpl->get("IMAGE_ID", image_id);
tmpl->get("VM_QUOTA", vm_owner);
tmpl->get("IMG_QUOTA", img_owner);
if ( img_owner )
{
continue;
Image* img = ipool->get(image_id, true);
if(img != 0)
{
int img_uid = img->get_uid();
int img_gid = img->get_gid();
img->unlock();
quota_del(DATASTORE, img_uid, img_gid, tmpl);
}
}
Image* img = ipool->get(image_id, true);
if(img != 0)
if ( vm_owner )
{
int img_uid = img->get_uid();
int img_gid = img->get_gid();
img->unlock();
quota_del(DATASTORE, img_uid, img_gid, tmpl);
quota_del(DATASTORE, uid, gid, tmpl);
}
delete tmpl;

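Each Template consumed here is expected to carry the usage and ownership attributes added by the disk code (see the VirtualMachineDisks changes below); rendered in OpenNebula template syntax it looks roughly like this, with all values illustrative:

    ds_quota = <<~EOT
      DATASTORE = "103"
      SIZE      = "2048"
      IMAGES    = "0"
      IMAGE_ID  = "42"
      VM_QUOTA  = "NO"
      IMG_QUOTA = "YES"
    EOT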
View File

@ -734,7 +734,7 @@ int VirtualMachine::insert(SqlDB * db, string& error_str)
int ivalue;
float fvalue;
set<int> cluster_ids;
map<int, Template *> quotas;
vector<Template *> quotas;
ostringstream oss;
@ -2653,7 +2653,7 @@ int VirtualMachine::get_disk_images(string& error_str)
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VirtualMachine::release_disk_images(map<int, Template *>& quotas)
void VirtualMachine::release_disk_images(vector<Template *>& quotas)
{
bool image_error = (state == ACTIVE && lcm_state != EPILOG) &&
state != PENDING && state != HOLD &&

View File

@ -270,8 +270,11 @@ int VirtualMachineDisk::revert_snapshot(int snap_id)
/* -------------------------------------------------------------------------- */
void VirtualMachineDisk::delete_snapshot(int snap_id, Template **ds_quotas,
Template **vm_quotas)
Template **vm_quotas, bool& img_owner, bool& vm_owner)
{
vm_owner = false;
img_owner = false;
if ( snapshots == 0 )
{
return;
@ -294,7 +297,10 @@ void VirtualMachineDisk::delete_snapshot(int snap_id, Template **ds_quotas,
string tm_target = get_tm_target();
if (is_persistent() || tm_target != "SYSTEM")
vm_owner = tm_target == "SELF";
img_owner = is_persistent() || tm_target == "NONE";
if ( img_owner || vm_owner )
{
*ds_quotas = new Template();
@ -345,11 +351,54 @@ long long VirtualMachineDisk::system_ds_size()
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
long long VirtualMachineDisk::image_ds_size()
{
long long disk_sz, snapshot_sz = 0;
string tm_target = get_tm_target();
if ( get_tm_target() != "SELF" )
{
return 0;
}
if ( vector_value("SIZE", disk_sz) != 0 )
{
return 0;
}
if ( vector_value("DISK_SNAPSHOT_TOTAL_SIZE", snapshot_sz) == 0 )
{
disk_sz += snapshot_sz;
}
return disk_sz;
}
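In isolation the rule is: only disks whose TM target is SELF consume image datastore space, and disk snapshots add to that footprint. A minimal Ruby sketch, with field names assumed:

    # Image-datastore footprint of a disk (sketch).
    def image_ds_size(disk)
      return 0 unless disk[:tm_target] == "SELF"
      (disk[:size] || 0) + (disk[:snapshot_total_size] || 0)
    end

    puts image_ds_size(tm_target: "SELF", size: 1024, snapshot_total_size: 512)  # => 1536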
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
// Owner to update ds usage quotas
//
//  +--------+------------------+------------------+
//  |LN/CLONE|    PERSISTENT    |  NON PERSISTENT  |
//  |        +--------+---------+--------+---------+
//  | TARGET | created|  quota  | created|  quota  |
//  +--------+--------+---------+--------+---------+
//  | SYSTEM | system |   IMG   | system |    -    |
//  | SELF   | image  | IMG+VM  | image  |   VM    |
//  | NONE   | image  |   IMG   | image  |   IMG   |
//  +--------+--------+---------+--------+---------+
/* -------------------------------------------------------------------------- */
void VirtualMachineDisk::resize_quotas(long long new_size, Template& ds_deltas,
Template& vm_deltas)
Template& vm_deltas, bool& do_img_owner, bool& do_vm_owner)
{
long long current_size, delta_size;
do_vm_owner = false;
do_img_owner = false;
if ( vector_value("SIZE", current_size) != 0 )
{
return;
@ -363,10 +412,13 @@ void VirtualMachineDisk::resize_quotas(long long new_size, Template& ds_deltas,
delta_size = - delta_size;
}
bool is_system = get_tm_target() == "SYSTEM";
string ds_id = vector_value("DATASTORE_ID");
string tm = get_tm_target();
do_vm_owner = !is_volatile() && tm == "SELF";
do_img_owner = !is_volatile() && (is_persistent() || tm == "NONE");
bool is_system = tm == "SYSTEM";
string ds_id = vector_value("DATASTORE_ID");
if ( !is_volatile() && ( is_persistent() || !is_system ) )
if ( do_vm_owner || do_img_owner )
{
ds_deltas.add("DATASTORE", ds_id);
ds_deltas.add("SIZE", delta_size);
@ -482,33 +534,32 @@ long long VirtualMachineDisks::system_ds_size(Template * ds_tmpl)
return disks.system_ds_size();
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/*
void VirtualMachineDisks::image_ds_size(bool resize_snapshot, long long system,
std::map<int, long long>& ds_size) const
void VirtualMachineDisks::image_ds_quotas(Template * tmpl,
vector<Template *>& ds_quotas)
{
int ds_id;
long long system_sz, image_sz;
VirtualMachineDisks disks(tmpl, false);
for ( disk_iterator disk = begin() ; disk != end() ; ++disk )
for (disk_iterator it = disks.begin(); it != disks.end() ; ++it)
{
(*disk)->ds_size(resize_snapshot, ds_id, image_sz, system_sz);
long long ds_size = (*it)->image_ds_size();
system += system_sz;
if ( ds_size != 0 )
{
Template * d_ds = new Template();
if ( ds_id != -1 && image_sz > 0 )
{
if (ds_size.count(ds_id) == 0)
{
ds_size[ds_id] = 0;
}
d_ds->add("DATASTORE", (*it)->vector_value("DATASTORE_ID"));
d_ds->add("SIZE", ds_size);
d_ds->add("IMAGES", 0);
ds_size[ds_id] += image_sz;
}
ds_quotas.push_back(d_ds);
}
}
}
*/
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
@ -772,7 +823,7 @@ error_common:
/* -------------------------------------------------------------------------- */
void VirtualMachineDisks::release_images(int vmid, bool image_error,
map<int, Template *>& ds_quotas)
vector<Template *>& ds_quotas)
{
Nebula& nd = Nebula::instance();
ImageManager * imagem = nd.get_imagem();
@ -796,23 +847,16 @@ void VirtualMachineDisks::release_images(int vmid, bool image_error,
imagem->set_image_size(iid, size);
}
/* ------- Update snapshots on source image if needed ---------- */
/* ------- Update snapshots on source image if needed ----------- */
if ( (*it)->has_snapshots() )
{
imagem->set_image_snapshots(iid, *(*it)->get_snapshots());
}
/* --------- Compute space to free on image datastore ----------- */
if ( !(*it)->is_persistent() && (*it)->get_tm_target() != "SYSTEM" )
if ( (*it)->get_tm_target() == "SELF" )
{
long long delta_size = 0;
if ( size > original_size )
{
delta_size = size - original_size;
}
delta_size += (*it)->get_total_snapshot_size();
long long delta_size = size + (*it)->get_total_snapshot_size();
Template * d_ds = new Template();
@ -820,7 +864,7 @@ void VirtualMachineDisks::release_images(int vmid, bool image_error,
d_ds->add("SIZE", delta_size);
d_ds->add("IMAGES", 0);
ds_quotas.insert(pair<int, Template *>(iid, d_ds));
ds_quotas.push_back(d_ds);
}
imagem->release_image(vmid, iid, image_error);
@ -1233,7 +1277,7 @@ int VirtualMachineDisks::revert_snapshot(int id, int snap_id)
/* -------------------------------------------------------------------------- */
void VirtualMachineDisks::delete_snapshot(int disk_id, int snap_id,
Template **ds_quota, Template **vm_quota)
Template **ds_quota, Template **vm_quota,bool& img_owner, bool& vm_owner)
{
VirtualMachineDisk * disk =
static_cast<VirtualMachineDisk *>(get_attribute(disk_id));
@ -1246,14 +1290,14 @@ void VirtualMachineDisks::delete_snapshot(int disk_id, int snap_id,
return;
}
disk->delete_snapshot(snap_id, ds_quota, vm_quota);
disk->delete_snapshot(snap_id, ds_quota, vm_quota, img_owner, vm_owner);
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VirtualMachineDisks::delete_non_persistent_snapshots(Template **vm_quotas,
map<int, Template *>& ds_quotas)
vector<Template *> &ds_quotas)
{
long long system_disk = 0;
@ -1266,7 +1310,11 @@ void VirtualMachineDisks::delete_non_persistent_snapshots(Template **vm_quotas,
continue;
}
if ((*disk)->is_persistent() || tm_target != "SYSTEM" )
bool vm_owner = tm_target == "SELF";
bool img_owner = (*disk)->is_persistent();
// Decrement DS quota on disks that do not modify the original image
if ( vm_owner || img_owner )
{
int image_id;
@ -1280,8 +1328,11 @@ void VirtualMachineDisks::delete_non_persistent_snapshots(Template **vm_quotas,
d_ds->add("DATASTORE", (*disk)->vector_value("DATASTORE_ID"));
d_ds->add("SIZE", (*disk)->get_total_snapshot_size());
d_ds->add("IMAGES", 0);
d_ds->add("IMAGE_ID", image_id);
d_ds->add("VM_QUOTA", vm_owner);
d_ds->add("IMG_QUOTA", img_owner);
ds_quotas.insert(pair<int, Template *>(image_id, d_ds));
ds_quotas.push_back(d_ds);
}
if ( tm_target == "SYSTEM" )
@ -1426,7 +1477,7 @@ int VirtualMachineDisks::get_saveas_info(int& disk_id, string& source,
/* -------------------------------------------------------------------------- */
void VirtualMachineDisks::delete_non_persistent_resizes(Template **vm_quotas,
map<int, Template *>& ds_quotas)
vector<Template *>& ds_quotas)
{
long long original_size, size, delta_size, system_disk = 0;
@ -1447,7 +1498,17 @@ void VirtualMachineDisks::delete_non_persistent_resizes(Template **vm_quotas,
delta_size = original_size - size;
if ((*disk)->is_persistent() || tm_target != "SYSTEM" )
// Quota del operations subtract counters, so delta needs to be > 0
if ( delta_size < 0 )
{
delta_size = - delta_size;
}
bool vm_owner = tm_target == "SELF";
bool img_owner = (*disk)->is_persistent();
// Decrement DS quota on disks that do not modify the original image
if ( vm_owner || img_owner )
{
int image_id;
@ -1461,8 +1522,11 @@ void VirtualMachineDisks::delete_non_persistent_resizes(Template **vm_quotas,
d_ds->add("DATASTORE", (*disk)->vector_value("DATASTORE_ID"));
d_ds->add("SIZE", delta_size);
d_ds->add("IMAGES", 0);
d_ds->add("IMAGE_ID", image_id);
d_ds->add("VM_QUOTA", vm_owner);
d_ds->add("IMG_QUOTA", img_owner);
ds_quotas.insert(pair<int, Template *>(image_id, d_ds));
ds_quotas.push_back(d_ds);
}
if ( tm_target == "SYSTEM" )

View File

@ -1,5 +1,5 @@
module VCenterDriver
require 'set'
class DatacenterFolder
attr_accessor :items
@ -42,23 +42,61 @@ class DatacenterFolder
@vi_client.vim.serviceContent.about.apiVersion
end
def get_clusters
clusters = {}
def get_unimported_hosts(hpool, vcenter_instance_name)
host_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
pool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if pool.respond_to?(:message)
raise "Could not get OpenNebula Pool: #{pool.message}"
end
vcenter_version = get_vcenter_api_version
fetch! if @items.empty? #Get datacenters
# Add datacenter to hash and store in an array all clusters
@items.values.each do |dc|
dc_name = dc.item.name
clusters[dc_name] = []
host_objects[dc_name] = []
host_folder = dc.host_folder
host_folder.fetch_clusters!
host_folder.items.values.each do |ccr|
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
ccr['_ref'],
vcenter_uuid,
hpool)
next if one_host #If the host has been already imported
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(ccr['_ref'], @vi_client)
rpools = cluster.get_resource_pool_list.select {|rp| !rp[:name].empty?}
host_info = {}
host_info[:cluster_name] = "[#{vcenter_instance_name}-#{dc_name}]_#{ccr['name']}".tr(" ", "_")
host_info[:cluster_ref] = ccr['_ref']
host_info[:vcenter_uuid] = vcenter_uuid
host_info[:vcenter_version] = vcenter_version
host_info[:rp_list] = rpools
host_objects[dc_name] << host_info
end
end
return host_objects
end
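Each candidate cluster ends up summarised in a plain hash consumed by the Sunstone import dialog; roughly, with all values illustrative:

    host_info = {
      :cluster_name    => "[vcenter.example.com-Datacenter1]_Cluster01",
      :cluster_ref     => "domain-c7",
      :vcenter_uuid    => "4f1c...",            # instanceUuid, illustrative
      :vcenter_version => "6.5",
      :rp_list         => [{:name => "RP01"}, {:name => "RP02"}]
    }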
def get_unimported_datastores(dpool, vcenter_instance_name, hpool)
ds_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
fetch! if @items.empty? #Get datacenters
one_clusters = {}
@items.values.each do |dc|
dc_name = dc.item.name
one_clusters[dc_name] = []
host_folder = dc.host_folder
host_folder.fetch_clusters!
@ -72,82 +110,20 @@ class DatacenterFolder
attribute,
ccr['_ref'],
vcenter_uuid,
pool)
next if one_host.nil? # Cluster hasn't been imported
cluster[:host_id] = one_host['ID']
clusters[dc_name] << cluster
end
end
clusters
end
def get_unimported_hosts
host_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
vcenter_version = get_vcenter_api_version
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
raise "Could not get OpenNebula HostPool: #{hpool.message}"
end
fetch! if @items.empty? #Get datacenters
@items.values.each do |dc|
dc_name = dc.item.name
host_objects[dc_name] = []
host_folder = dc.host_folder
host_folder.fetch_clusters!
host_folder.items.values.each do |host|
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
host['_ref'],
vcenter_uuid,
hpool)
next if one_host #If the host has been already imported
host_info = {}
host_info[:cluster_name] = host['name']
host_info[:cluster_ref] = host['_ref']
host_info[:vcenter_uuid] = vcenter_uuid
host_info[:vcenter_version] = vcenter_version
host_objects[dc_name] << host_info
if !!one_host
cluster[:host_id] = one_host['ID']
one_clusters[dc_name] << cluster
end
end
end
return host_objects
end
def get_unimported_datastores
ds_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
pool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
if pool.respond_to?(:message)
raise "Could not get OpenNebula DatastorePool: #{pool.message}"
end
fetch! if @items.empty? #Get datacenters
one_clusters = get_clusters
@items.values.each do |dc|
dc_name = dc.item.name
next if one_clusters[dc_name].empty? #No clusters imported, continue
ds_objects[dc_name] = []
datastore_folder = dc.datastore_folder
datastore_folder.fetch!
datastore_folder.items.values.each do |ds|
if ds.instance_of? VCenterDriver::Datastore
@ -155,23 +131,24 @@ class DatacenterFolder
clusters_in_ds = {}
hosts_in_ds.each do |host|
if !clusters_in_ds[host.key.parent._ref.to_s]
clusters_in_ds[host.key.parent._ref.to_s] = host.key.parent.name
cluster_ref = host.key.parent._ref
if !clusters_in_ds[cluster_ref]
clusters_in_ds[cluster_ref] = host.key.parent.name
end
end
clusters_in_ds.each do |ccr_ref, ccr_name|
already_image_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "IMAGE_DS", pool)
already_image_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "IMAGE_DS", dpool)
if !already_image_ds
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "IMAGE_DS", vcenter_uuid)
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "IMAGE_DS", vcenter_uuid, vcenter_instance_name, dc_name)
ds_objects[dc_name] << object if !object.nil?
end
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", pool)
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool)
if !already_system_ds
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "SYSTEM_DS", vcenter_uuid)
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "SYSTEM_DS", vcenter_uuid, vcenter_instance_name, dc_name)
ds_objects[dc_name] << object if !object.nil?
end
end
@ -184,127 +161,145 @@ class DatacenterFolder
ds_in_spod.each do |sp_ds|
hosts_in_ds = sp_ds.host
hosts_in_ds.each do |host|
if !clusters_in_spod[host.key.parent._ref.to_s]
clusters_in_spod[host.key.parent._ref.to_s] = host.key.parent.name
cluster_ref = host.key.parent._ref
if !clusters_in_spod[cluster_ref]
clusters_in_spod[cluster_ref] = host.key.parent.name
end
end
end
clusters_in_spod.each do |ccr_ref, ccr_name|
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", pool)
already_system_ds = VCenterDriver::Storage.exists_one_by_ref_ccr_and_type?(ds["_ref"], ccr_ref, vcenter_uuid, "SYSTEM_DS", dpool)
if !already_system_ds
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "SYSTEM_DS", vcenter_uuid)
object = ds.to_one_template(one_clusters[dc_name], ccr_ref, ccr_name, "SYSTEM_DS", vcenter_uuid, vcenter_instance_name, dc_name)
ds_objects[dc_name] << object if !object.nil?
end
end
end
end
end
ds_objects
end
def get_unimported_templates(vi_client)
def get_unimported_templates(vi_client, tpool)
template_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
tpool = VCenterDriver::VIHelper.one_pool(OpenNebula::TemplatePool, false)
if tpool.respond_to?(:message)
raise "Could not get OpenNebula TemplatePool: #{tpool.message}"
end
vcenter_instance_name = vi_client.vim.host
fetch! if @items.empty? #Get datacenters
@items.values.each do |dc|
rp_cache = {}
dc_name = dc.item.name
template_objects[dc_name] = []
#Get datastores available in a datacenter
ds_list = []
datastore_folder = dc.datastore_folder
datastore_folder.fetch!
datastore_folder.items.values.each do |ds|
ds_hash = {}
ds_hash[:name] = ds["name"]
ds_hash[:ref] = ds["_ref"]
ds_list << ds_hash
view = vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: dc.item.vmFolder,
type: ['VirtualMachine'],
recursive: true
})
pc = vi_client.vim.serviceContent.propertyCollector
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'VirtualMachine', :pathSet => ['config.template'] }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
vms = {}
result.each do |r|
vms[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
end
templates = []
vms.each do |ref,value|
if value["config.template"]
templates << VCenterDriver::Template.new_from_ref(ref, vi_client)
end
end
#Get templates defined in a datacenter
vm_folder = dc.vm_folder
vm_folder.fetch_templates!
vm_folder.items.values.each do |template|
view.DestroyView # Destroy the view
templates.each do |template|
one_template = VCenterDriver::VIHelper.find_by_ref(OpenNebula::TemplatePool,
"TEMPLATE/VCENTER_TEMPLATE_REF",
template['_ref'],
vcenter_uuid,
tpool)
next if one_template #If the template has been already imported
template_name = template['name']
template_ref = template['_ref']
template_ccr = template['runtime.host.parent']
cluster_name = template['runtime.host.parent.name']
one_template = VCenterDriver::Template.get_xml_template(template, vcenter_uuid, vi_client, vcenter_instance_name, dc_name, rp_cache)
#Get DS list
ds = ""
default_ds = nil
if !ds_list.empty?
ds_name_list = []
ds_list.each do |ds_hash|
ds_name_list << ds_hash[:name]
end
ds = "M|list|Which datastore you want this VM to run in? "
ds << "|#{ds_name_list.join(",")}" #List of DS
ds << "|#{ds_name_list.first}" #Default DS
default_ds = ds_name_list.first
end
template_objects[dc_name] << one_template if !!one_template
end
end
#Get resource pools
rp_cache = {}
if !rp_cache[template_ccr.name.to_s]
tmp_cluster = VCenterDriver::ClusterComputeResource.new_from_ref(template_ccr._ref, vi_client)
rp_list = tmp_cluster.get_resource_pool_list
rp = ""
if !rp_list.empty?
rp_name_list = []
rp_list.each do |rp_hash|
rp_name_list << rp_hash[:name]
end
rp = "M|list|Which resource pool you want this VM to run in? "
rp << "|#{rp_name_list.join(",")}" #List of RP
rp << "|#{rp_name_list.first}" #Default RP
end
rp_cache[template_ccr.name.to_s] = rp
end
rp = rp_cache[template_ccr.name.to_s]
object = template.to_one_template(template,
ds,
ds_list,
default_ds,
rp,
rp_list,
vcenter_uuid)
template_objects[dc_name] << object if !object.nil?
end #template loop
end #datacenter loop
return template_objects
template_objects
end
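Both this method and get_unimported_networks below follow the same vSphere retrieval pattern: create a ContainerView over the inventory, fetch the needed properties in a single RetrieveProperties round trip, then destroy the view. A stand-alone sketch with rbvmomi (connection details illustrative):

    require 'rbvmomi'

    vim  = RbVmomi::VIM.connect(host: 'vcenter.example.com',
                                user: 'user', password: 'secret', insecure: true)
    view = vim.serviceContent.viewManager.CreateContainerView(
             container: vim.rootFolder, type: ['VirtualMachine'], recursive: true)

    filter = RbVmomi::VIM.PropertyFilterSpec(
      :objectSet => [{ :obj => view, :skip => true,
                       :selectSet => [RbVmomi::VIM.TraversalSpec(
                         :name => 'traverseEntities', :type => 'ContainerView',
                         :path => 'view', :skip => false)] }],
      :propSet   => [{ :type => 'VirtualMachine', :pathSet => ['config.template'] }])

    pc     = vim.serviceContent.propertyCollector
    result = pc.RetrieveProperties(:specSet => [filter])
    refs   = result.select { |r| r.to_hash['config.template'] }.map { |r| r.obj._ref }

    view.DestroyView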
def get_unimported_networks
def get_unimported_networks(npool,vcenter_instance_name)
network_objects = {}
vcenter_uuid = get_vcenter_instance_uuid
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
if npool.respond_to?(:message)
raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}"
pc = @vi_client.vim.serviceContent.propertyCollector
#Get all port groups and distributed port groups in vcenter instance
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @vi_client.vim.rootFolder,
type: ['Network','DistributedVirtualPortgroup'],
recursive: true
})
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'Network', :pathSet => ['name'] },
{ :type => 'DistributedVirtualPortgroup', :pathSet => ['name'] }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
networks = {}
result.each do |r|
networks[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::DistributedVirtualPortgroup) || r.obj.is_a?(RbVmomi::VIM::Network)
networks[r.obj._ref][:network_type] = r.obj.is_a?(RbVmomi::VIM::DistributedVirtualPortgroup) ? "Distributed Port Group" : "Port Group"
end
view.DestroyView # Destroy the view
fetch! if @items.empty? #Get datacenters
@items.values.each do |dc|
@ -312,37 +307,66 @@ class DatacenterFolder
dc_name = dc.item.name
network_objects[dc_name] = []
#Get networks defined in a datacenter
network_folder = dc.network_folder
network_folder.fetch!
network_folder.items.values.each do |network|
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: dc.item,
type: ['ClusterComputeResource'],
recursive: true
})
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
network['_ref'],
vcenter_uuid,
npool)
next if one_network #If the network has been already imported
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'ClusterComputeResource', :pathSet => ['name','network'] }
]
)
network_name = network['name']
network_ref = network['_ref']
result = pc.RetrieveProperties(:specSet => [filterSpec])
# TODO slow VLAN_ID retrieve for portgroups! set to nil
vlan_id = ""
if network.class == VCenterDriver::DistributedPortGroup
vlan_id = network.vlan_id
clusters = {}
result.each do |r|
clusters[r.obj._ref] = r.to_hash if r.obj.is_a?(RbVmomi::VIM::ClusterComputeResource)
end
view.DestroyView # Destroy the view
clusters.each do |ref, info|
network_obj = info['network']
network_obj.each do |n|
network_ref = n._ref
network_name = networks[network_ref]['name']
network_type = networks[network_ref][:network_type]
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
network_ref,
vcenter_uuid,
npool)
next if one_network #If the network has been already imported
one_vnet = VCenterDriver::Network.to_one_template(network_name,
network_ref,
network_type,
ref,
info['name'],
vcenter_uuid,
vcenter_instance_name,
dc_name)
network_objects[dc_name] << one_vnet
end
network.clusters.each do |ccr_ref, ccr_name|
one_vnet = VCenterDriver::Network.to_one_template(network_name,
network_ref,
network.network_type,
vlan_id,
ccr_ref,
ccr_name,
vcenter_uuid)
network_objects[dc_name] << one_vnet
end #network clusters loop
end # network loop
end #datacenters loop
@ -355,6 +379,8 @@ end # class DatacenterFolder
class Datacenter
attr_accessor :item
DPG_CREATE_TIMEOUT = 240
def initialize(item, vi_client=nil)
if !item.instance_of? RbVmomi::VIM::Datacenter
raise "Expecting type 'RbVmomi::VIM::Datacenter'. " <<
@ -363,6 +389,8 @@ class Datacenter
@vi_client = vi_client
@item = item
@net_rollback = []
@locking = true
end
def datastore_folder
@ -381,6 +409,319 @@ class Datacenter
NetworkFolder.new(@item.networkFolder)
end
# Locking function. Similar to flock
def lock
hostlockname = @item['name'].downcase.tr(" ", "_")
if @locking
@locking_file = File.open("/tmp/vcenter-dc-#{hostlockname}-lock","w")
@locking_file.flock(File::LOCK_EX)
end
end
# Unlock driver execution mutex
def unlock
if @locking
@locking_file.close
end
end
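lock and unlock above implement a simple flock-based mutex keyed on the datacenter name; the same pattern in isolation:

    # Exclusive advisory lock around a critical section.
    lockfile = File.open("/tmp/vcenter-dc-demo-lock", "w")
    begin
      lockfile.flock(File::LOCK_EX)   # blocks until acquired
      # ... modify datacenter networking here ...
    ensure
      lockfile.flock(File::LOCK_UN)
      lockfile.close                  # closing also drops the lock
    end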
########################################################################
# Check if distributed virtual switch exists in host
########################################################################
def dvs_exists(switch_name, net_folder)
return net_folder.items.values.select{ |dvs|
dvs.instance_of?(VCenterDriver::DistributedVirtualSwitch) &&
dvs['name'] == switch_name
}.first rescue nil
end
########################################################################
# Is the distributed switch for the distributed pg different?
########################################################################
def pg_changes_sw?(dpg, switch_name)
return dpg['config.distributedVirtualSwitch.name'] != switch_name
end
########################################################################
# Create a distributed vcenter switch in a datacenter
########################################################################
def create_dvs(switch_name, pnics, mtu=1500)
# Prepare spec for DVS creation
spec = RbVmomi::VIM::DVSCreateSpec.new
spec.configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
spec.configSpec.name = switch_name
# Specify the number of uplink ports for the DVS
if pnics
pnics = pnics.split(",")
if !pnics.empty?
spec.configSpec.uplinkPortPolicy = RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
spec.configSpec.uplinkPortPolicy.uplinkPortName = []
(0..pnics.size-1).each { |index|
spec.configSpec.uplinkPortPolicy.uplinkPortName[index]="dvUplink#{index+1}"
}
end
end
#Set maximum MTU
spec.configSpec.maxMtu = mtu
# The DVS must be created in the networkFolder of the datacenter
begin
dvs_creation_task = @item.networkFolder.CreateDVS_Task(:spec => spec)
dvs_creation_task.wait_for_completion
# If the task finished successfully we rename the uplink portgroup
dvs = nil
if dvs_creation_task.info.state == 'success'
dvs = dvs_creation_task.info.result
dvs.config.uplinkPortgroup[0].Rename_Task(:newName => "#{switch_name}-uplink-pg").wait_for_completion
else
raise "The Distributed vSwitch #{switch_name} could not be created. "
end
rescue Exception => e
raise e
end
@net_rollback << {:action => :delete_dvs, :dvs => dvs, :name => switch_name}
return VCenterDriver::DistributedVirtualSwitch.new(dvs, @vi_client)
end
########################################################################
# Update a distributed vcenter switch
########################################################################
def update_dvs(dvs, pnics, mtu)
# Prepare spec for DVS update
spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
changed = false
orig_spec = RbVmomi::VIM::VMwareDVSConfigSpec.new
orig_spec.maxMtu = dvs['config.maxMtu']
orig_spec.uplinkPortPolicy = RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
orig_spec.uplinkPortPolicy.uplinkPortName = []
(0..dvs['config.uplinkPortgroup'].length-1).each { |index|
orig_spec.uplinkPortPolicy.uplinkPortName[index]="dvUplink#{index+1}"
}
# Add more uplinks to default uplink port group according to number of pnics
if pnics
pnics = pnics.split(",")
if !pnics.empty? && dvs['config.uplinkPortgroup'].length != pnics.size
spec.uplinkPortPolicy = RbVmomi::VIM::DVSNameArrayUplinkPortPolicy.new
spec.uplinkPortPolicy.uplinkPortName = []
(dvs['config.uplinkPortgroup'].length..pnics.size-1).each { |index|
spec.uplinkPortPolicy.uplinkPortName[index]="dvUplink#{index+1}"
}
changed = true
end
end
#Set maximum MTU
if mtu != dvs['config.maxMtu']
spec.maxMtu = mtu
changed = true
end
# Reconfigure the DVS only if something actually changed
if changed
spec.configVersion = dvs['config.configVersion']
begin
dvs.item.ReconfigureDvs_Task(:spec => spec).wait_for_completion
rescue Exception => e
raise "The Distributed switch #{dvs['name']} could not be updated. "\
"Reason: #{e.message}"
end
@net_rollback << {:action => :update_dvs, :dvs => dvs.item, :name => dvs['name'], :spec => orig_spec}
end
end
########################################################################
# Remove a distributed vcenter switch in a datacenter
########################################################################
def remove_dvs(dvs)
begin
dvs.item.Destroy_Task.wait_for_completion
rescue
#Ignore destroy task exception
end
end
########################################################################
# Check if distributed port group exists in datacenter
########################################################################
def dpg_exists(pg_name, net_folder)
return net_folder.items.values.select{ |dpg|
dpg.instance_of?(VCenterDriver::DistributedPortGroup) &&
dpg['name'] == pg_name
}.first rescue nil
end
########################################################################
# Create a distributed vcenter port group
########################################################################
def create_dpg(dvs, pg_name, vlan_id, num_ports)
spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
# OpenNebula uses DVS static port binding with autoexpand
if num_ports
spec.autoExpand = true
spec.numPorts = num_ports
end
# Distributed port group name
spec.name = pg_name
# Set VLAN information
spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new
spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
spec.defaultPortConfig.vlan.vlanId = vlan_id
spec.defaultPortConfig.vlan.inherited = false
# earlyBinding. A free DistributedVirtualPort will be selected and
# assigned to a VirtualMachine when the virtual machine is reconfigured
# to connect to the portgroup.
spec.type = "earlyBinding"
begin
dvs.item.AddDVPortgroup_Task(spec: [spec]).wait_for_completion
rescue Exception => e
raise "The Distributed port group #{pg_name} could not be created. "\
"Reason: #{e.message}"
end
# wait until the network is ready and we have a reference
portgroups = dvs['portgroup'].select{ |dpg|
dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) &&
dpg['name'] == pg_name
}
(0..DPG_CREATE_TIMEOUT).each do
break if !portgroups.empty?
portgroups = dvs['portgroup'].select{ |dpg|
dpg.instance_of?(RbVmomi::VIM::DistributedVirtualPortgroup) &&
dpg['name'] == pg_name
}
sleep 1
end
raise "Cannot get VCENTER_NET_REF for new distributed port group" if portgroups.empty?
@net_rollback << {:action => :delete_dpg, :dpg => portgroups.first, :name => pg_name}
return portgroups.first._ref
end
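Taken together with create_dvs, a hypothetical caller would first obtain the switch and then hang the port group off it (names, VLAN and port count are illustrative):

dvs = dc.create_dvs("one-dvs-0", "vmnic1,vmnic2")   # or reuse an existing DVS
net_ref = dc.create_dpg(dvs, "one-pg-100", 100, 8)  # VLAN 100, 8 static ports
# net_ref is the vCenter moref later stored as VCENTER_NET_REF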
########################################################################
# Update a distributed vcenter port group
########################################################################
def update_dpg(dpg, vlan_id, num_ports)
spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
changed = false
orig_spec = RbVmomi::VIM::DVPortgroupConfigSpec.new
orig_spec.numPorts = dpg['config.numPorts']
orig_spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new
orig_spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
orig_spec.defaultPortConfig.vlan.vlanId = dpg['config.defaultPortConfig.vlan.vlanId']
orig_spec.defaultPortConfig.vlan.inherited = false
if num_ports && num_ports != orig_spec.numPorts
spec.numPorts = num_ports
changed = true
end
# earlyBinding. A free DistributedVirtualPort will be selected and
# assigned to a VirtualMachine when the virtual machine is reconfigured
# to connect to the portgroup.
spec.type = "earlyBinding"
if vlan_id != orig_spec.defaultPortConfig.vlan.vlanId
spec.defaultPortConfig = RbVmomi::VIM::VMwareDVSPortSetting.new
spec.defaultPortConfig.vlan = RbVmomi::VIM::VmwareDistributedVirtualSwitchVlanIdSpec.new
spec.defaultPortConfig.vlan.vlanId = vlan_id
spec.defaultPortConfig.vlan.inherited = false
changed = true
end
if changed
spec.configVersion = dpg['config.configVersion']
begin
dpg.item.ReconfigureDVPortgroup_Task(:spec => spec).wait_for_completion
rescue Exception => e
raise "The Distributed port group #{dpg['name']} could not be created. "\
"Reason: #{e.message}"
end
@net_rollback << {:action => :update_dpg, :dpg => dpg.item, :name => dpg['name'], :spec => orig_spec}
end
end
########################################################################
# Remove distributed port group from datacenter
########################################################################
def remove_dpg(dpg)
begin
dpg.item.Destroy_Task.wait_for_completion
rescue RbVmomi::VIM::ResourceInUse => e
STDERR.puts "The distributed portgroup #{dpg["name"]} is in use so it cannot be deleted"
return nil
rescue Exception => e
raise "The Distributed portgroup #{dpg["name"]} could not be deleted. Reason: #{e.message} "
end
end
########################################################################
# Perform vcenter network rollback operations
########################################################################
def network_rollback
@net_rollback.reverse_each do |nr|
case nr[:action]
when :update_dpg
begin
nr[:dpg].ReconfigureDVPortgroupConfigSpec_Task(:spec => nr[:spec])
rescue Exception => e
raise "A rollback operation for distributed port group #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :update_dvs
begin
nr[:dvs].ReconfigureDvs_Task(:spec => nr[:spec])
rescue Exception => e
raise "A rollback operation for distributed standard switch #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :delete_dvs
begin
nr[:dvs].Destroy_Task.wait_for_completion
rescue RbVmomi::VIM::ResourceInUse
return #Ignore if switch in use
rescue RbVmomi::VIM::NotFound
return #Ignore if switch not found
rescue Exception => e
raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :delete_dpg
begin
nr[:dpg].Destroy_Task.wait_for_completion
rescue RbVmomi::VIM::ResourceInUse
return #Ignore if pg in use
rescue RbVmomi::VIM::NotFound
return #Ignore if pg not found
rescue Exception => e
raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}"
end
end
end
end
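The @net_rollback array is an undo log: each create/update pushes its inverse, and reverse_each replays those inverses LIFO so dependent objects (port groups) are removed before the switch that hosts them. A self-contained sketch of the pattern, with invented names:

# Sketch: undo-log pattern used by network_rollback above.
undo = []
undo << { :action => :delete_dvs, :name => "one-dvs-0" }  # pushed first
undo << { :action => :delete_dpg, :name => "one-pg-100" } # pushed last
undo.reverse_each { |op| puts "undoing #{op[:action]} for #{op[:name]}" }
# prints the port group entry first, then the switch entry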
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::Datacenter.new(vi_client.vim, ref), vi_client)
end
View File
@ -90,7 +90,7 @@ class Storage
one_image[:template] << "PATH=\"vcenter://#{image_path}\"\n"
one_image[:template] << "TYPE=\"#{image_type}\"\n"
one_image[:template] << "PERSISTENT=\"NO\"\n"
one_image[:template] << "OPENNEBULA_MANAGED=\"NO\"\n"
one_image[:template] << "VCENTER_IMPORTED=\"YES\"\n"
one_image[:template] << "DEV_PREFIX=\"#{VCenterDriver::VIHelper.get_default("IMAGE/TEMPLATE/DEV_PREFIX")}\"\n" #TODO get device prefix from vcenter info
else
# Return the image XML if it already exists
@ -112,6 +112,13 @@ class Storage
return element
end
# Checks if a RbVmomi::VIM::VirtualDevice is a disk or an iso file
def self.is_disk_or_iso?(device)
is_disk = !(device.class.ancestors.index(RbVmomi::VIM::VirtualDisk)).nil?
is_iso = device.backing.is_a? RbVmomi::VIM::VirtualCdromIsoBackingInfo
is_disk || is_iso
end
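As a usage sketch (vm_item standing in for any RbVmomi::VIM::VirtualMachine object), the helper makes it easy to keep only disks and ISO-backed CD-ROM drives from a device list:

disks_and_isos = vm_item.config.hardware.device.select do |device|
    VCenterDriver::Storage.is_disk_or_iso?(device)
end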
def monitor
summary = self['summary']
@ -147,24 +154,27 @@ class Storage
return one
end
def to_one_template(one_clusters, ccr_ref, ccr_name, type, vcenter_uuid, vcenter_instance_name, dc_name)
one_cluster = one_clusters.select { |ccr| ccr[:ref] == ccr_ref }.first rescue nil
return nil if one_cluster.nil?
name, capacity, freeSpace = @item.collect("name","summary.capacity","summary.freeSpace")
ds_name = ""
if type == "IMAGE_DS"
ds_name = "#{self['name']} - #{ccr_name} (IMG)"
ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{name} - #{ccr_name.tr(" ", "_")} (IMG)"
else
ds_name = "#{self['name']} - #{ccr_name} (SYS)"
ds_name << "[#{vcenter_instance_name} - #{dc_name}] #{name} - #{ccr_name.tr(" ", "_")} (SYS)"
ds_name << " [StorDRS]" if self.class == VCenterDriver::StoragePod
end
one_tmp = {
:name => ds_name,
:total_mb => ((capacity.to_i / 1024) / 1024),
:free_mb => ((freeSpace.to_i / 1024) / 1024),
:cluster => ccr_name,
:one => to_one(ds_name, vcenter_uuid, ccr_ref, one_cluster[:host_id])
}
@ -239,10 +249,17 @@ class Datastore < Storage
def delete_virtual_disk(img_name)
ds_name = self['name']
begin
get_vdm.DeleteVirtualDisk_Task(
:name => "[#{ds_name}] #{img_name}",
:datacenter => get_dc.item
).wait_for_completion
rescue Exception => e
# Ignore if file not found
if !e.message.start_with?('ManagedObjectNotFound')
raise e
end
end
end
# Copy a VirtualDisk
@ -348,12 +365,17 @@ class Datastore < Storage
search_task.wait_for_completion
size = 0
raise "Could not get file size" if file_size.nil?
# Try to get vmdk capacity as seen by VM
size = search_task.info.result[0].file[0].capacityKb / 1024 rescue nil
# Try to get file size
size = search_task.info.result[0].file[0].fileSize / 1024 / 1024 rescue nil if !size
raise "Could not get file size or capacity" if size.nil?
size
rescue
raise "Could not find file."
end
@ -361,7 +383,14 @@ class Datastore < Storage
def get_search_params(ds_name, img_path=nil, img_name=nil)
spec = RbVmomi::VIM::HostDatastoreBrowserSearchSpec.new
vmdisk_query = RbVmomi::VIM::VmDiskFileQuery.new
vmdisk_query.details = RbVmomi::VIM::VmDiskFileQueryFlags(:diskType => true,
:capacityKb => true,
:hardwareVersion => true,
:controllerType => true)
spec.query = [vmdisk_query,
RbVmomi::VIM::IsoImageFileQuery.new]
spec.details = RbVmomi::VIM::FileQueryFlags(:fileOwner => true,
:fileSize => true,
View File
@ -27,12 +27,14 @@ end # class HostFolder
class ClusterComputeResource
attr_accessor :item
attr_accessor :rp_list
include Memoize
def initialize(item, vi_client=nil)
@item = item
@vi_client = vi_client
@rp_list
end
def fetch_resource_pools(rp, rp_array = [])
@ -53,61 +55,64 @@ class ClusterComputeResource
@resource_pools
end
def get_resource_pool_list(rp = @item.resourcePool, parent_prefix = "", rp_array = [])
current_rp = ""
if !parent_prefix.empty?
current_rp << parent_prefix
current_rp << "/"
end
resource_pool, name = rp.collect("resourcePool","name")
current_rp << name if name != "Resources"
resource_pool.each do |child_rp|
get_resource_pool_list(child_rp, current_rp, rp_array)
end
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info if !current_rp.empty?
rp_array
end
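The method flattens the pool tree into name/ref hashes, children first, with nested names joined by slashes and the implicit root Resources pool left out. For a hypothetical cluster holding a pool RP1 with a child RP2 (the refs are invented):

rp_list = cluster.get_resource_pool_list
# => [ { :name => "RP1/RP2", :ref => "resgroup-101" },
#      { :name => "RP1",     :ref => "resgroup-100" } ]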
def monitor
#Load the host systems
total_cpu,
num_cpu_cores,
effective_cpu,
total_memory,
effective_mem,
num_hosts,
num_eff_hosts = @item.collect("summary.totalCpu",
"summary.numCpuCores",
"summary.effectiveCpu",
"summary.totalMemory",
"summary.effectiveMemory",
"summary.numHosts",
"summary.numEffectiveHosts"
)
mhz_core = total_cpu.to_f / num_cpu_cores.to_f
eff_core = effective_cpu.to_f / mhz_core
free_cpu = sprintf('%.2f', eff_core * 100).to_f
total_cpu = num_cpu_cores.to_f * 100
used_cpu = sprintf('%.2f', total_cpu - free_cpu).to_f
total_mem = total_memory.to_i / 1024
free_mem = effective_mem.to_i * 1024
str_info = ""
# Get cluster name for informative purposes (replace space with _ if any)
str_info << "VCENTER_NAME=" << self['name'].tr(" ", "_") << "\n"
# System
str_info << "HYPERVISOR=vcenter\n"
str_info << "TOTALHOST=" << summary.numHosts.to_s << "\n"
str_info << "AVAILHOST=" << summary.numEffectiveHosts.to_s << "\n"
str_info << "TOTALHOST=" << num_hosts.to_s << "\n"
str_info << "AVAILHOST=" << num_eff_hosts.to_s << "\n"
# CPU
str_info << "CPUSPEED=" << mhz_core.to_s << "\n"
@ -118,39 +123,92 @@ class ClusterComputeResource
# Memory
str_info << "TOTALMEMORY=" << total_mem.to_s << "\n"
str_info << "FREEMEMORY=" << free_mem.to_s << "\n"
str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s
str_info << "USEDMEMORY=" << (total_mem - free_mem).to_s << "\n"
str_info << "VCENTER_LAST_PERF_POLL=" << Time.now.to_i.to_s << "\n"
str_info << monitor_resource_pools(@item.resourcePool, "", mhz_core)
str_info << monitor_resource_pools(mhz_core)
end
def monitor_resource_pools(mhz_core)
@rp_list = get_resource_pool_list
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for RPs inside this cluster
type: ['ResourcePool'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"config.cpuAllocation.expandableReservation",
"config.cpuAllocation.limit",
"config.cpuAllocation.reservation",
"config.cpuAllocation.shares.level",
"config.cpuAllocation.shares.shares",
"config.memoryAllocation.expandableReservation",
"config.memoryAllocation.limit",
"config.memoryAllocation.reservation",
"config.memoryAllocation.shares.level",
"config.memoryAllocation.shares.shares"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'ResourcePool', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
rps = {}
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::ResourcePool)
rps[r.obj._ref] = hashed_properties
end
end
return "" if rps.empty?
rp_info = ""
rps.each{|ref, info|
# CPU
cpu_expandable = info["config.cpuAllocation.expandableReservation"] ? "YES" : "NO"
cpu_limit = info["config.cpuAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.cpuAllocation.limit"]
cpu_reservation = info["config.cpuAllocation.reservation"]
cpu_num = cpu_reservation.to_f / mhz_core
cpu_shares_level = info["config.cpuAllocation.shares.level"]
cpu_shares = info["config.cpuAllocation.shares.shares"]
# MEMORY
mem_expandable = info["config.memoryAllocation.expandableReservation"] ? "YES" : "NO"
mem_limit = info["config.memoryAllocation.limit"] == "-1" ? "UNLIMITED" : info["config.memoryAllocation.limit"]
mem_reservation = info["config.memoryAllocation.reservation"].to_f
mem_shares_level = info["config.memoryAllocation.shares.level"]
mem_shares = info["config.memoryAllocation.shares.shares"]
rp_name = rp_list.select { |item| item[:ref] == ref}.first[:name] rescue ""
rp_info << "\nRESOURCE_POOL = ["
rp_name = "Resources" if rp_name.empty?
rp_info << "\nVCENTER_RESOURCE_POOL_INFO = ["
rp_info << "NAME=\"#{rp_name}\","
rp_info << "CPU_EXPANDABLE=#{cpu_expandable},"
rp_info << "CPU_LIMIT=#{cpu_limit},"
@ -164,39 +222,80 @@ class ClusterComputeResource
rp_info << "MEM_SHARES=#{mem_shares},"
rp_info << "MEM_SHARES_LEVEL=#{mem_shares_level}"
rp_info << "]"
}
view.DestroyView
return rp_info
end
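The ContainerView plus PropertyCollector combination above fetches every requested property for all objects of one type in a single round trip, instead of one RPC per object; the same shape is reused below for hosts and VMs. A generic sketch of the pattern (bulk_collect is a hypothetical helper, not part of the driver):

def bulk_collect(vim, root, type, paths)
    view = vim.serviceContent.viewManager.CreateContainerView(
        container: root, type: [type], recursive: true)
    pc = vim.serviceContent.propertyCollector
    filter = RbVmomi::VIM.PropertyFilterSpec(
        :objectSet => [{ :obj => view, :skip => true,
                         :selectSet => [ RbVmomi::VIM.TraversalSpec(
                             :name => 'traverseEntities', :type => 'ContainerView',
                             :path => 'view', :skip => false) ] }],
        :propSet => [{ :type => type, :pathSet => paths }])
    result = pc.RetrieveProperties(:specSet => [filter])
    view.DestroyView
    # Map each managed object ref to its property hash
    result.map { |r| [r.obj._ref, r.to_hash] }.to_h
end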
def monitor_host_systems
host_info = ""
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for Hosts inside this cluster
type: ['HostSystem'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"name",
"runtime.connectionState",
"summary.hardware.numCpuCores",
"summary.hardware.memorySize",
"summary.hardware.cpuModel",
"summary.hardware.cpuMhz",
"summary.quickStats.overallCpuUsage",
"summary.quickStats.overallMemoryUsage"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'HostSystem', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
hosts = {}
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::HostSystem)
hosts[r.obj._ref] = hashed_properties
end
end
hosts.each do |ref, info|
next if info["runtime.connectionState"] != "connected"
total_cpu = info["summary.hardware.numCpuCores"] * 100
used_cpu = (info["summary.quickStats.overallCpuUsage"].to_f / info["summary.hardware.cpuMhz"].to_f) * 100
used_cpu = sprintf('%.2f', used_cpu).to_f # Trim precision
free_cpu = total_cpu - used_cpu
total_memory = info["summary.hardware.memorySize"]/1024
used_memory = info["summary.quickStats.overallMemoryUsage"]*1024
free_memory = total_memory - used_memory
host_info << "\nHOST=["
host_info << "STATE=on,"
host_info << "HOSTNAME=\"" << h.name.to_s << "\","
host_info << "MODELNAME=\"" << hw.cpuModel.to_s << "\","
host_info << "CPUSPEED=" << hw.cpuMhz.to_s << ","
host_info << "HOSTNAME=\"" << info["name"].to_s << "\","
host_info << "MODELNAME=\"" << info["summary.hardware.cpuModel"].to_s << "\","
host_info << "CPUSPEED=" << info["summary.hardware.cpuMhz"].to_s << ","
host_info << "MAX_CPU=" << total_cpu.to_s << ","
host_info << "USED_CPU=" << used_cpu.to_s << ","
host_info << "FREE_CPU=" << free_cpu.to_s << ","
@ -206,86 +305,201 @@ class ClusterComputeResource
host_info << "]"
end
view.DestroyView # Destroy the view
return host_info
end
def monitor_vms
str_info = ""
resource_pools.each do |rp|
str_info << monitor_vms_in_rp(rp)
vc_uuid = @vi_client.vim.serviceContent.about.instanceUuid
cluster_name = self["name"]
cluster_ref = self["_ref"]
# Get info of the host where the VM/template is located
host_id = nil
one_host = VCenterDriver::VIHelper.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
cluster_ref,
vc_uuid)
host_id = one_host["ID"] if one_host
# Extract CPU info and name for each esx host in cluster
esx_hosts = {}
@item.host.each do |esx_host|
info = {}
info[:name] = esx_host.name
info[:cpu] = esx_host.summary.hardware.cpuMhz.to_f
esx_hosts[esx_host._ref] = info
end
@monitored_vms = Set.new
str_info = ""
view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: @item, #View for VMs inside this cluster
type: ['VirtualMachine'],
recursive: true
})
pc = @vi_client.vim.serviceContent.propertyCollector
monitored_properties = [
"name", #VM name
"config.template", #To filter out templates
"summary.runtime.powerState", #VM power state
"summary.quickStats.hostMemoryUsage", #Memory usage
"summary.quickStats.overallCpuUsage", #CPU used by VM
"runtime.host", #ESX host
"resourcePool", #RP
"guest.guestFullName",
"guest.net", #IP addresses as seen by guest tools,
"guest.guestState",
"guest.toolsVersion",
"guest.toolsRunningStatus",
"guest.toolsVersionStatus2", #IP addresses as seen by guest tools,
"config.extraConfig", #VM extraconfig info e.g opennebula.vm.running
"config.hardware.numCPU",
"config.hardware.memoryMB",
"config.annotation"
]
filterSpec = RbVmomi::VIM.PropertyFilterSpec(
:objectSet => [
:obj => view,
:skip => true,
:selectSet => [
RbVmomi::VIM.TraversalSpec(
:name => 'traverseEntities',
:type => 'ContainerView',
:path => 'view',
:skip => false
)
]
],
:propSet => [
{ :type => 'VirtualMachine', :pathSet => monitored_properties }
]
)
result = pc.RetrieveProperties(:specSet => [filterSpec])
vms = {}
vm_objects = []
result.each do |r|
hashed_properties = r.to_hash
if r.obj.is_a?(RbVmomi::VIM::VirtualMachine)
#Only take care of VMs, not templates
if !hashed_properties["config.template"]
vms[r.obj._ref] = hashed_properties
vm_objects << r.obj
end
end
end
pm = @vi_client.vim.serviceContent.perfManager
stats = []
max_samples = 9
refresh_rate = 20 #Real time stats takes samples every 20 seconds
last_mon_time = one_host["TEMPLATE/VCENTER_LAST_PERF_POLL"]
if last_mon_time
interval = (Time.now.to_i - last_mon_time.to_i)
interval = 3601 if interval < 0
samples = (interval / refresh_rate)
samples = 1 if samples == 0
max_samples = interval > 3600 ? 9 : samples
end
stats = pm.retrieve_stats(
vm_objects,
['net.transmitted','net.bytesRx','net.bytesTx','net.received',
'virtualDisk.numberReadAveraged','virtualDisk.numberWriteAveraged',
'virtualDisk.read','virtualDisk.write'],
{max_samples: max_samples}
)
get_resource_pool_list if !@rp_list
vms.each do |vm_ref,info|
begin
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, @vi_client)
esx_host = esx_hosts[info["runtime.host"]._ref]
info[:esx_host_name] = esx_host[:name]
info[:esx_host_cpu] = esx_host[:cpu]
info[:cluster_name] = cluster_name
info[:cluster_ref] = cluster_ref
info[:vc_uuid] = vc_uuid
info[:host_id] = host_id
info[:rp_list] = @rp_list
vm.vm_info = info
number = -1
# Check the running flag
running_flag = vm["config.extraConfig"].select do |val|
running_flag = info["config.extraConfig"].select do |val|
val[:key] == "opennebula.vm.running"
end
if !running_flag.empty? && running_flag.first
running_flag = running_flag[0][:value]
end
next if running_flag == "no"
# Extract vmid if possible
matches = vm["name"].match(/^one-(\d*)(-(.*))?$/)
matches = info["name"].match(/^one-(\d*)(-(.*))?$/)
number = matches[1] if matches
extraconfig_vmid = vm["config.extraConfig"].select do |val|
val[:key] == "opennebula.vm.id"
# Extract vmid from ref and vcenter instance uuid if possible
if number == -1
one_vm = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualMachinePool,
"DEPLOY_ID",
vm_ref,
vc_uuid)
number = one_vm["ID"] if one_vm
end
if number != -1
next if @monitored_vms.include? number
@monitored_vms << number
vm.one_item if vm.get_vm_id
end
vm.monitor(stats)
vm_name = "#{info["name"]} - #{cluster_name}"
str_info << %Q{
VM = [
ID="#{number}",
VM_NAME="#{vm["name"]} - #{vm["runtime.host.parent.name"]}",
DEPLOY_ID="#{vm["_ref"]}",
VM_NAME="#{vm_name}",
DEPLOY_ID="#{vm_ref}",
}
if number == -1
vm_template_64 = Base64.encode64(vm.vm_to_one(vm_name)).gsub("\n","")
str_info << "IMPORT_TEMPLATE=\"#{vm_template_64}\","
end
str_info << "POLL=\"#{vm.info.gsub('"', "\\\"")}\"]"
rescue Exception => e
STDERR.puts e.inspect
STDERR.puts e.backtrace
end
end
view.DestroyView # Destroy the view
return str_info
end
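One detail above deserves a note: vCenter real-time counters are sampled every 20 seconds, so the driver requests only as many samples as cover the gap since the last poll, falling back to 9 samples when the gap exceeds an hour. The same arithmetic in isolation (the poll timestamp is invented):

refresh_rate = 20                      # seconds between real-time samples
last_mon_time = Time.now.to_i - 120    # pretend we polled 2 minutes ago
interval = Time.now.to_i - last_mon_time
interval = 3601 if interval < 0
samples = interval / refresh_rate      # 120 / 20 => 6
samples = 1 if samples == 0
max_samples = interval > 3600 ? 9 : samples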
def monitor_customizations
customizations = self['_connection'].serviceContent.customizationSpecManager.info
@ -316,7 +530,7 @@ class ClusterComputeResource
Datacenter.new(item)
end
def self.to_one(cluster, con_ops, rp, one_cluster_id)
one_host = VCenterDriver::VIHelper.new_one_item(OpenNebula::Host)
@ -324,8 +538,9 @@ class ClusterComputeResource
raise "Could not create host: #{one_host.message}"
end
one_cluster_id = -1 if !one_cluster_id
rc = one_host.allocate(cluster[:cluster_name], 'vcenter', 'vcenter', one_cluster_id.to_i)
if OpenNebula.is_error?(rc)
raise "Could not allocate host: #{rc.message}"
@ -338,6 +553,8 @@ class ClusterComputeResource
"VCENTER_INSTANCE_ID=\"#{cluster[:vcenter_uuid]}\"\n"\
"VCENTER_VERSION=\"#{cluster[:vcenter_version]}\"\n"\
template << "VCENTER_RESOURCE_POOL=\"#{rp}\"" if rp
rc = one_host.update(template, false)
if OpenNebula.is_error?(rc)
@ -359,4 +576,388 @@ class ClusterComputeResource
end
end # class ClusterComputeResource
class ESXHost
attr_accessor :item
include Memoize
PG_CREATE_TIMEOUT = 240 # We will wait for 4 minutes for the pg creation
def initialize(item, vi_client=nil)
@net_rollback = []
@locking = true
@item = item
@vi_client = vi_client
end
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::HostSystem.new(vi_client.vim, ref), vi_client)
end
# Locking function. Similar to flock
def lock
hostlockname = @item['name'].downcase.tr(" ", "_")
if @locking
@locking_file = File.open("/tmp/vcenter-#{hostlockname}-lock","w")
@locking_file.flock(File::LOCK_EX)
end
end
# Unlock driver execution mutex
def unlock
if @locking
@locking_file.close
end
end
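The lock is a plain flock(2) on a per-host file under /tmp, so concurrent driver actions against the same ESX host serialize. A sketch of the intended call pattern (host_ref and vi_client are assumed to exist already):

host = VCenterDriver::ESXHost.new_from_ref(host_ref, vi_client)
begin
    host.lock      # blocks until no other driver action holds the file
    # ... network create/update operations on this host ...
ensure
    host.unlock    # closes the file and releases the flock
end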
########################################################################
# Check if standard switch exists in host
########################################################################
def vss_exists(vswitch_name)
vswitches = @item.configManager.networkSystem.networkInfo.vswitch
return vswitches.select{|vs| vs.name == vswitch_name }.first rescue nil
end
########################################################################
# Create a standard vcenter switch in an ESX host
########################################################################
def create_vss(name, pnics=nil, num_ports=128, mtu=1500, pnics_available=nil)
# Get NetworkSystem
nws = self['configManager.networkSystem']
vswitchspec = nil
hostbridge = nil
nics = []
if pnics
pnics = pnics.split(",")
pnics.each do |pnic|
#Add nics if not in use
nics << pnic if pnics_available.include?(pnic)
end
if !nics.empty?
hostbridge = RbVmomi::VIM::HostVirtualSwitchBondBridge(:nicDevice => nics)
end
end
#Create spec
vswitchspec = RbVmomi::VIM::HostVirtualSwitchSpec(:bridge => hostbridge, :mtu => mtu, :numPorts => num_ports)
#add vSwitch to the host
begin
nws.AddVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
rescue Exception => e
raise "The standard vSwitch #{name} could not be created. AddVirtualSwitch failed Reason: #{e.message}."
end
@net_rollback << {:action => :delete_sw, :name => name}
return name
end
########################################################################
# Update a standard vcenter switch in an ESX host
########################################################################
def update_vss(switch, name, pnics, num_ports, mtu)
pnics = pnics.split(",") rescue []
#Backup switch spec for rollback
orig_spec = switch.spec
#Compare current configuration and return if switch hasn't changed
same_switch = false
same_switch = switch.spec.respond_to?(:mtu) && switch.spec.mtu == mtu &&
switch.spec.respond_to?(:numPorts) && switch.spec.mtu == num_ports &&
(!pnics || (pnics && switch.spec.respond_to?(:bridge) &&
switch.spec.bridge.respond_to?(:nicDevice) &&
switch.spec.bridge.nicDevice.uniq.sort == pnics.uniq.sort))
return if same_switch
# Let's create a new spec and update the switch
vswitchspec = nil
hostbridge = nil
nws = self['configManager.networkSystem']
hostbridge = RbVmomi::VIM::HostVirtualSwitchBondBridge(:nicDevice => pnics) if !pnics.empty?
vswitchspec = RbVmomi::VIM::HostVirtualSwitchSpec(:bridge => hostbridge, :mtu => mtu, :numPorts => num_ports)
begin
nws.UpdateVirtualSwitch(:vswitchName => name, :spec => vswitchspec)
rescue Exception => e
raise "The standard switch with name #{name} could not be updated. Reason: #{e.message}"
end
@net_rollback << {:action => :update_sw, :name => name, :spec => orig_spec}
end
########################################################################
# Remove a standard vswitch from the host
########################################################################
def remove_vss(vswitch_name)
nws = self['configManager.networkSystem']
begin
nws.RemoveVirtualSwitch(:vswitchName => vswitch_name)
rescue RbVmomi::VIM::ResourceInUse
STDERR.puts "The standard switch #{vswitch_name} is in use so it cannot be deleted"
return nil
rescue RbVmomi::VIM::NotFound
STDERR.puts "The standard switch #{vswitch_name} was not found in vCenter"
return nil
rescue Exception => e
raise "There was a failure while deleting a vcenter standard switch #{vswitch_name}. Reason: #{e.message}"
end
return vswitch_name
end
########################################################################
# Get physical nics that are available in a host
########################################################################
def get_available_pnics
pnics_in_use = []
pnics_available = []
# Get pnics in use in standard switches
@item.config.network.vswitch.each do |vs|
vs.pnic.each do |pnic|
pnic.slice!("key-vim.host.PhysicalNic-")
pnics_in_use << pnic
end
end
# Get pnics in host
self['config.network'].pnic.each do |pnic|
pnics_available << pnic.device if !pnics_in_use.include?(pnic.device)
end
return pnics_available
end
########################################################################
# Check if proxy switch exists in host for distributed virtual switch
########################################################################
def proxy_switch_exists(switch_name)
nws = self['configManager.networkSystem']
proxy_switches = nws.networkInfo.proxySwitch
return proxy_switches.select{|ps| ps.dvsName == switch_name }.first rescue nil
end
########################################################################
# Assign a host to a a distributed vcenter switch (proxy switch)
########################################################################
def assign_proxy_switch(dvs, switch_name, pnics, pnics_available)
dvs = dvs.item
# Prepare spec for DVS reconfiguration
configSpec = RbVmomi::VIM::VMwareDVSConfigSpec.new
configSpec.name = switch_name
configSpec.configVersion = dvs['config.configVersion']
# Check if host is already assigned to distributed switch
operation = "add"
operation = "edit" if !dvs['config.host'].select { |host| host.config.host._ref == self['_ref'] }.empty?
# Add host members to the distributed virtual switch
host_member_spec = RbVmomi::VIM::DistributedVirtualSwitchHostMemberConfigSpec.new
host_member_spec.host = @item
host_member_spec.operation = operation
host_member_spec.backing = RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicBacking.new
host_member_spec.backing.pnicSpec = []
# If pnics are needed assign pnics for uplinks
if pnics
pnics = pnics.split(",")
# Get uplink portgroup from dvswitch
uplink_key = dvs['config.uplinkPortgroup'].select{
|ul| ul.name == "#{switch_name}-uplink-pg"}.first.key rescue nil
raise "Cannot find the uplink portgroup for #{switch_name}" if !uplink_key
pnics.each {|pnic|
pnicSpec = RbVmomi::VIM::DistributedVirtualSwitchHostMemberPnicSpec.new
pnicSpec.pnicDevice = pnic
pnicSpec.uplinkPortgroupKey = uplink_key
host_member_spec.backing.pnicSpec << pnicSpec
}
end
configSpec.host = [host_member_spec]
# The DVS must be reconfigured
dvs_reconfigure_task = dvs.ReconfigureDvs_Task(:spec => configSpec)
dvs_reconfigure_task.wait_for_completion
if dvs_reconfigure_task.info.state != 'success'
raise "It wasn't possible to assign host #{self["name"]} as a member of #{switch_name}'"
end
return dvs
end
########################################################################
# Create a standard port group
########################################################################
def create_pg(pgname, vswitch, vlan=0)
spec = RbVmomi::VIM.HostPortGroupSpec(
:name => pgname,
:vlanId => vlan,
:vswitchName => vswitch,
:policy => RbVmomi::VIM.HostNetworkPolicy
)
nws = self['configManager.networkSystem']
begin
nws.AddPortGroup(:portgrp => spec)
rescue Exception => e
raise "A port group with name #{pgname} could not be created. Reason: #{e.message}"
end
@net_rollback << {:action => :delete_pg, :name => pgname}
# wait until the network is ready and we have a reference
networks = @item['network'].select{ |net| net.name == pgname }
(0..PG_CREATE_TIMEOUT).each do
break if !networks.empty?
networks = @item['network'].select{ |net| net.name == pgname }
sleep 1
end
raise "Cannot get VCENTER_NET_REF for new port group" if networks.empty?
return networks.first._ref
end
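A sketch of how the standard-switch helpers compose on one host (switch, port group and VLAN values are invented):

unless host.pg_exists("one-pg-100")
    host.create_vss("one-vss-0", "vmnic1", 128, 1500, host.get_available_pnics)
    net_ref = host.create_pg("one-pg-100", "one-vss-0", 100)
end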
########################################################################
# Check if standard port group exists in host
########################################################################
def pg_exists(pg_name)
nws = self['configManager.networkSystem']
portgroups = nws.networkInfo.portgroup
return portgroups.select{|pg| pg.spec.name == pg_name }.first rescue nil
end
########################################################################
# Is the switch for the pg different?
########################################################################
def pg_changes_sw?(pg, switch_name)
return pg.spec.respond_to?(:vswitchName) && pg.spec.vswitchName != switch_name
end
########################################################################
# Update a standard port group
########################################################################
def update_pg(pg, switch_name, vlan_id)
if pg.spec.respond_to?(:vlanId) && pg.spec.vlanId != vlan_id
# Backup original spec
orig_spec = pg.spec
# Create new spec
pg_name = pg.spec.name
spec = RbVmomi::VIM.HostPortGroupSpec(
:name => pg_name,
:vlanId => vlan_id,
:vswitchName => switch_name,
:policy => RbVmomi::VIM.HostNetworkPolicy
)
nws = self['configManager.networkSystem']
begin
nws.UpdatePortGroup(:pgName => pg_name, :portgrp => spec)
rescue Exception => e
raise "A port group with name #{pg_name} could not be updated. Reason: #{e.message}"
end
# Set rollback operation
@net_rollback << {:action => :update_pg, :name => pg_name, :spec => orig_spec}
end
end
########################################################################
# Remove a standard port group from the host
########################################################################
def remove_pg(pgname)
nws = self['configManager.networkSystem']
swname = nil
begin
portgroups = nws.networkConfig.portgroup
portgroups.each {|pg|
if pg.spec.name == pgname
swname = pg.spec.vswitchName
break
end
}
nws.RemovePortGroup(:pgName => pgname)
rescue RbVmomi::VIM::ResourceInUse
STDERR.puts "The standard portgroup #{pgname} is in use so it cannot be deleted"
return nil
rescue RbVmomi::VIM::NotFound
STDERR.puts "The standard portgroup #{pgname} was not found in vCenter"
return nil
rescue Exception => e
raise "There was a failure while deleting a standard portgroup #{pgname} in vCenter. Reason: #{e.message}"
end
return swname
end
def network_rollback
nws = self['configManager.networkSystem']
@net_rollback.reverse_each do |nr|
case nr[:action]
when :update_pg
begin
nws.UpdatePortGroup(:pgName => nr[:name], :portgrp => nr[:spec])
rescue Exception => e
raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :update_sw
begin
nws.UpdateVirtualSwitch(:vswitchName => nr[:name], :spec => nr[:spec])
rescue Exception => e
raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :delete_sw
begin
nws.RemoveVirtualSwitch(:vswitchName=> nr[:name])
rescue RbVmomi::VIM::ResourceInUse
return #Ignore if switch in use
rescue RbVmomi::VIM::NotFound
return #Ignore if switch not found
rescue Exception => e
raise "A rollback operation for standard switch #{nr[:name]} could not be performed. Reason: #{e.message}"
end
when :delete_pg
begin
nws.RemovePortGroup(:pgName => nr[:name])
rescue RbVmomi::VIM::ResourceInUse
return #Ignore if pg in use
rescue RbVmomi::VIM::NotFound
return #Ignore if pg not found
rescue Exception => e
raise "A rollback operation for standard port group #{nr[:name]} could not be performed. Reason: #{e.message}"
end
end
end
end
end # class ESXHost
end # module VCenterDriver
View File
@ -16,13 +16,28 @@ def self.import_clusters(con_ops, options)
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
# Get vcenter instance uuid as moref is unique for each vcenter
vc_uuid = vi_client.vim.serviceContent.about.instanceUuid
vcenter_instance_name = vi_client.vim.host
# Get vcenter API version
vc_version = vi_client.vim.serviceContent.about.apiVersion
# OpenNebula's ClusterPool
cpool = VCenterDriver::VIHelper.one_pool(OpenNebula::ClusterPool, false)
if cpool.respond_to?(:message)
raise "Could not get OpenNebula ClusterPool: #{cpool.message}"
end
cluster_list = {}
cpool.each do |c|
cluster_list[c["ID"]] = c["NAME"]
end
# Get OpenNebula's host pool
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
raise "Could not get OpenNebula HostPool: #{hpool.message}"
end
rs = dc_folder.get_unimported_hosts(hpool,vcenter_instance_name)
STDOUT.print "done!\n\n"
@ -34,20 +49,63 @@ def self.import_clusters(con_ops, options)
end
if clusters.empty?
STDOUT.puts " No new clusters found in #{dc}..."
STDOUT.puts "\n No new clusters found in #{dc}..."
next
end
clusters.each{ |cluster|
one_cluster_id = nil
rpool = nil
if !use_defaults
STDOUT.print " * Import cluster #{cluster[:cluster_name]} (y/[n])? "
STDOUT.print "\n * Import cluster #{cluster[:cluster_name]} (y/[n])? "
next if STDIN.gets.strip.downcase != 'y'
if cluster_list.size > 1
STDOUT.print "\n In which OpenNebula cluster do you want the vCenter cluster to be included?\n "
cluster_list_str = "\n"
cluster_list.each do |key, value|
cluster_list_str << " - ID: " << key << " - NAME: " << value << "\n"
end
STDOUT.print "\n #{cluster_list_str}"
STDOUT.print "\n Specify the ID of the cluster or Enter to use the default cluster: "
answer = STDIN.gets.strip
one_cluster_id = answer if !answer.empty?
end
rpools = cluster[:rp_list]
if !rpools.empty?
STDOUT.print "\n Do you want to confine this cluster in "\
"a resource pool (y/[n])? "
if STDIN.gets.strip.downcase == 'y'
rpool_list = ""
rpools.each do |rp|
rpool_list << " - " << rp[:name] << "\n"
end
STDOUT.print "\n Please specify one resource pool from "\
"the following list:\n\n#{rpool_list}"
STDOUT.print "\n Your resource pool choice: "
answer = STDIN.gets.strip
rpool = answer if !answer.empty?
end
end
end
one_host = VCenterDriver::ClusterComputeResource.to_one(cluster,
con_ops,
rpool,
one_cluster_id)
STDOUT.puts " OpenNebula host #{cluster[:cluster_name]} with"\
STDOUT.puts "\n OpenNebula host #{cluster[:cluster_name]} with"\
" id #{one_host.id} successfully created."
STDOUT.puts
}
@ -77,7 +135,14 @@ def self.import_templates(con_ops, options)
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
# Get OpenNebula's templates pool
tpool = VCenterDriver::VIHelper.one_pool(OpenNebula::TemplatePool, false)
if tpool.respond_to?(:message)
raise "Could not get OpenNebula TemplatePool: #{tpool.message}"
end
rs = dc_folder.get_unimported_templates(vi_client, tpool)
STDOUT.print "done!\n"
@ -104,6 +169,9 @@ def self.import_templates(con_ops, options)
end
tmps.each{ |t|
template = nil
template_copy_ref = nil
template_xml = nil
if !use_defaults
STDOUT.print "\n * VM Template found:\n"\
@ -115,120 +183,131 @@ def self.import_templates(con_ops, options)
next if STDIN.gets.strip.downcase != 'y'
end
# Linked Clones
if !use_defaults
template = VCenterDriver::Template.new_from_ref(t[:vcenter_ref], vi_client)
STDOUT.print "\n For faster deployment operations"\
" and lower disk usage, OpenNebula"\
" can create new VMs as linked clones."\
"\n Would you like to use Linked Clones with VMs based on this template (y/[n])? "
if STDIN.gets.strip.downcase == 'y'
STDOUT.print "\n Linked clones requires that delta"\
" disks must be created for each disk in the template."\
" This operation may change the template contents."\
" \n Do you want OpenNebula to create a copy of the template,"\
" so the original template remains untouched ([y]/n)? "
if STDIN.gets.strip.downcase != 'n'
STDOUT.print "\n The new template will be named"\
" adding a one- prefix to the name"\
" of the original template. \n"\
" If you prefer a different name"\
" please specify or press Enter"\
" to use defaults: "
template_name = STDIN.gets.strip.downcase
STDOUT.print "\n WARNING!!! The cloning operation can take some time"\
" depending on the size of disks. Please wait...\n"
error, template_copy_ref = template.create_template_copy(template_name)
if template_copy_ref
template = VCenterDriver::Template.new_from_ref(template_copy_ref, vi_client)
one_template = VCenterDriver::Template.get_xml_template(template, vc_uuid, vi_client, options[:vcenter], dc)
if one_template
#Now create delta disks
STDOUT.print "\n Delta disks are being created, please be patient..."
lc_error, use_lc = template.create_delta_disks
if lc_error
STDOUT.print "\n ERROR. Something was wrong with the create delta disks on the template operation: #{lc_error}.\n"\
"\n Linked Clones will not be used with this template.\n"
else
one_template[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n"
t = one_template
end
else
STDOUT.print "\n ERROR. Something was wrong obtaining the info from the template's copy.\n"\
"\n Linked Clones will not be used with this template.\n"
template.delete_template if template_copy_ref
end
else
STDOUT.print "\n ERROR. #{error}\n"
end
else
# Create linked clones on top of the existing template
# Create a VirtualMachine object from the template_copy_ref
STDOUT.print "\n Delta disks are being created, please be patient..."
lc_error, use_lc = template.create_delta_disks
if lc_error
STDOUT.print "\n ERROR. Something was wrong with the create delta disks on the template operation: #{lc_error}.\n"\
"\n Linked Clones will not be used with this template.\n"
end
t[:one] << "\nVCENTER_LINKED_CLONES=\"YES\"\n" if use_lc
end
end
end
vcenter_vm_folder = ""
if !use_defaults
STDOUT.print "\n\n Do you want to specify a folder where"\
" the deployed VMs based on this template will appear"\
" in vSphere's VM and Templates section?"\
"\n If no path is set, VMs will be placed in the same"\
" location where the template lives."\
"\n Please specify a path using slashes to separate folders"\
" e.g /Management/VMs or press Enter to use defaults: "\
vcenter_vm_folder = STDIN.gets.strip
t[:one] << "VCENTER_VM_FOLDER=\"#{vcenter_vm_folder}\"\n" if !vcenter_vm_folder.empty?
end
## Add existing disks to template (OPENNEBULA_MANAGED)
STDOUT.print "\n The existing disks and networks in the template"\
" are being imported, please be patient..."
template = t[:template] if !template
error, template_disks = template.import_vcenter_disks(vc_uuid,
dpool,
ipool)
if error.empty?
t[:one] << template_disks_and_nics
t[:one] << template_disks
else
STDOUT.puts error
template.delete_template if template_copy_ref
next
end
error, template_nics = template.import_vcenter_nics(vc_uuid,
npool,
options[:vcenter],
dc)
if error.empty?
t[:one] << template_nics
else
STDOUT.puts error
template.delete_template if template_copy_ref
next
end
# Datastore placement
ds_input = ""
if !use_defaults
STDOUT.print "\n This template is currently set to be "\
"deployed in datastore #{t[:default_ds]}."\
"\n Press y to keep the default, n to select"\
" a new default datastore or d to delegate "\
" the choice to the user ([y]/n/d)? "
answer = STDIN.gets.strip.downcase
case answer
when 'd'
ds_split = t[:ds].split("|")
list_of_ds = ds_split[-2]
default_ds = ds_split[-1]
ds_input = ds_split[0] + "|" + ds_split[1] + "|" +
ds_split[2] + "|"
# Available list of datastores
input_str = " The list of available datastores to be"\
" presented to the user are \"#{list_of_ds}\""
input_str+= "\n Press y to agree, or input a comma"\
" separated list of datastores to edit "\
"[y/comma separated list] "
STDOUT.print input_str
answer = STDIN.gets.strip
if answer.downcase == 'y'
ds_input += ds_split[3] + "|"
else
ds_input += answer + "|"
end
# Default
input_str = " The default datastore presented to "\
"the end user is set to \"#{default_ds}\"."
input_str+= "\n Press y to agree, or input a new "\
"datastore [y/datastore name] "
STDOUT.print input_str
answer = STDIN.gets.strip
if answer.downcase == 'y'
ds_input += ds_split[4]
else
ds_input += answer
end
when 'n'
ds_split = t[:ds].split("|")
list_of_ds = ds_split[-2]
input_str = " The list of available datastores is:\n"
STDOUT.print input_str
dashes = ""
100.times do
dashes << "-"
end
list_str = "\n [Index] Datastore :"\
"\n #{dashes}\n"
STDOUT.print list_str
index = 1
t[:ds_list].each do |ds|
list_str = " [#{index}] #{ds[:name]}\n"
index += 1
STDOUT.print list_str
end
input_str = "\n Please input the new default"\
" datastore index in the list (e.g 1): "
STDOUT.print input_str
answer = STDIN.gets.strip
t[:one] += "VCENTER_DS_REF=\"#{t[:ds_list][answer.to_i - 1][:ref]}\"\n"
end
end
# Resource Pools
rp_input = ""
rp_split = t[:rp].split("|")
@ -236,7 +315,7 @@ def self.import_templates(con_ops, options)
if !use_defaults
if rp_split.size > 3
STDOUT.print "\n This template is currently set to "\
STDOUT.print "\n\n This template is currently set to "\
"launch VMs in the default resource pool."\
"\n Press y to keep this behaviour, n to select"\
" a new resource pool or d to delegate the choice"\
@ -257,15 +336,15 @@ def self.import_templates(con_ops, options)
"\"#{list_of_rp}\""
input_str+= "\n Press y to agree, or input a comma"\
" separated list of resource pools to edit "\
"[y/comma separated list] "
"([y]/comma separated list) "
STDOUT.print input_str
answer = STDIN.gets.strip
if !answer.empty? && answer.downcase != 'y'
rp_input += answer + "|"
else
rp_input += rp_split[3] + "|"
end
# Default
@ -273,57 +352,44 @@ def self.import_templates(con_ops, options)
"to the end user is set to"\
" \"#{default_rp}\"."
input_str+= "\n Press y to agree, or input a new "\
"resource pool [y/resource pool name] "
"resource pool ([y]/resource pool name) "
STDOUT.print input_str
answer = STDIN.gets.strip
if !answer.empty? && answer.downcase != 'y'
rp_input += answer
else
rp_input += rp_split[4]
end
when 'n'
STDOUT.print " The list of available resource pools is:\n\n"
index = 1
t[:rp_list].each do |r|
list_str = " - #{r[:name]}\n"
index += 1
STDOUT.print list_str
end
input_str = "\n Please input the new default"\
" resource pool index in the list (e.g 1): "
" resource pool name: "
STDOUT.print input_str
answer = STDIN.gets.strip
t[:one] << "VCENTER_RP_REF=\"#{t[:rp_list][answer.to_i - 1][:ref]}\"\n"
t[:one] << "VCENTER_RESOURCE_POOL=\"#{answer}\"\n"
end
end
end
if !rp_input.empty?
t[:one] << "USER_INPUTS=["
t[:one] << "VCENTER_DS_LIST=\"#{ds_input}\"," if !ds_input.empty?
t[:one] << "VCENTER_RP_LIST=\"#{rp_input}\"," if !rp_input.empty?
t[:one] << "VCENTER_RESOURCE_POOL=\"#{rp_input}\"," if !rp_input.empty?
t[:one] = t[:one][0..-2]
t[:one] << "]"
end
@ -334,6 +400,7 @@ def self.import_templates(con_ops, options)
if ::OpenNebula.is_error?(rc)
STDOUT.puts " Error creating template: #{rc.message}\n"
template.delete_template if template_copy_ref
else
STDOUT.puts " OpenNebula template #{one_t.id} created!\n"
end
@ -363,7 +430,14 @@ def self.import_networks(con_ops, options)
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
# OpenNebula's VirtualNetworkPool
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
if npool.respond_to?(:message)
raise "Could not get OpenNebula VirtualNetworkPool: #{npool.message}"
end
rs = dc_folder.get_unimported_networks(npool,options[:vcenter])
STDOUT.print "done!\n"
@ -381,12 +455,11 @@ def self.import_networks(con_ops, options)
end
tmps.each do |n|
one_cluster_id = nil
if !use_defaults
print_str = "\n * Network found:\n"\
" - Name : #{n[:name]}\n"\
" - Type : #{n[:type]}\n"
print_str << " - VLAN ID : #{n[:vlan_id]}\n" if !n[:vlan_id].empty?
" - Name : #{n[:name]}\n"\
" - Type : #{n[:type]}\n"
print_str << " - Cluster : #{n[:cluster]}\n"
print_str << " Import this Network (y/[n])? "
@ -486,10 +559,10 @@ def self.import_networks(con_ops, options)
rc = one_vn.allocate(n[:one])
if ::OpenNebula.is_error?(rc)
STDOUT.puts " Error creating virtual network: " +
STDOUT.puts "\n Error creating virtual network: " +
" #{rc.message}\n"
else
STDOUT.puts " OpenNebula virtual network " +
STDOUT.puts "\n OpenNebula virtual network " +
"#{one_vn.id} created with size #{size}!\n"
end
end
@ -518,7 +591,20 @@ def self.import_datastore(con_ops, options)
dc_folder = VCenterDriver::DatacenterFolder.new(vi_client)
dpool = VCenterDriver::VIHelper.one_pool(OpenNebula::DatastorePool, false)
if dpool.respond_to?(:message)
raise "Could not get OpenNebula DatastorePool: #{dpool.message}"
end
# Get OpenNebula's host pool
hpool = VCenterDriver::VIHelper.one_pool(OpenNebula::HostPool, false)
if hpool.respond_to?(:message)
raise "Could not get OpenNebula HostPool: #{hpool.message}"
end
rs = dc_folder.get_unimported_datastores(dpool, options[:vcenter], hpool)
STDOUT.print "done!\n"
@ -535,6 +621,7 @@ def self.import_datastore(con_ops, options)
end
tmps.each{ |d|
one_cluster_id = nil
if !use_defaults
STDOUT.print "\n * Datastore found:\n"\
" - Name : #{d[:name]}\n"\
@ -548,17 +635,21 @@ def self.import_datastore(con_ops, options)
one_d = VCenterDriver::VIHelper.new_one_item(OpenNebula::Datastore)
if one_cluster_id
rc = one_d.allocate(d[:one], one_cluster_id.to_i)
else
rc = one_d.allocate(d[:one])
end
if ::OpenNebula.is_error?(rc)
STDOUT.puts " Error creating datastore: #{rc.message}\n"\
STDOUT.puts " \nError creating datastore: #{rc.message}\n"\
" One datastore can exist only once, and "\
"can be used in any vCenter Cluster that "\
"has access to it. Also, no spaces allowed "\
"in datastore name (rename it in vCenter "\
"and try again)"
else
STDOUT.puts " OpenNebula datastore #{one_d.id} created!\n"
STDOUT.puts " \nOpenNebula datastore #{one_d.id} created!\n"
end
}
}
View File
@ -23,6 +23,11 @@ class NetworkFolder
item_name = item._ref
@items[item_name.to_sym] = DistributedPortGroup.new(item)
end
VIClient.get_entities(@item, "VmwareDistributedVirtualSwitch").each do |item|
item_name = item._ref
@items[item_name.to_sym] = DistributedVirtualSwitch.new(item)
end
end
########################################################################
@ -56,24 +61,32 @@ class Network
@item = item
end
# Checks if a RbVmomi::VIM::VirtualDevice is a network interface
def self.is_nic?(device)
!device.class.ancestors.index(RbVmomi::VIM::VirtualEthernetCard).nil?
end
def self.to_one_template(network_name, network_ref, network_type,
ccr_ref, ccr_name, vcenter_uuid,
vcenter_instance_name, dc_name)
one_tmp = {}
one_tmp[:name] = "#{network_name} - #{ccr_name}"
network_import_name = "[#{vcenter_instance_name} - #{dc_name}] #{network_name} - #{ccr_name.tr(" ", "_")}"
one_tmp[:name] = network_import_name
one_tmp[:bridge] = network_name
one_tmp[:type] = network_type
one_tmp[:cluster] = ccr_name
one_tmp[:vcenter_ccr_ref] = ccr_ref
one_tmp[:one] = to_one(network_import_name, network_name, network_ref, network_type,
ccr_ref, vcenter_uuid)
return one_tmp
end
def self.to_one(network_import_name, network_name, network_ref, network_type,
ccr_ref, vcenter_uuid)
template = "NAME=\"#{network_import_name}\"\n"\
"BRIDGE=\"#{network_name}\"\n"\
"VN_MAD=\"dummy\"\n"\
"VCENTER_PORTGROUP_TYPE=\"#{network_type}\"\n"\
@ -81,8 +94,6 @@ class Network
"VCENTER_CCR_REF=\"#{ccr_ref}\"\n"\
"VCENTER_INSTANCE_ID=\"#{vcenter_uuid}\"\n"
template << "VLAN_TAGGED_ID=#{vlan_id}\n" if !vlan_id.empty?
return template
end
@ -105,6 +116,55 @@ class Network
return element
end
def self.remove_net_ref(network_id)
one_vnet = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualNetwork, network_id)
one_vnet.info
one_vnet.delete_element("TEMPLATE/VCENTER_NET_REF")
one_vnet.delete_element("TEMPLATE/VCENTER_INSTANCE_ID")
tmp_str = one_vnet.template_str
one_vnet.update(tmp_str)
one_vnet.info
end
def self.vcenter_networks_to_be_removed(device_change_nics, vcenter_uuid)
networks = {}
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
device_change_nics.each do |nic|
if nic[:operation] == :remove
vnet_ref = nil
# Port group
if nic[:device].backing.respond_to?(:network)
vnet_ref = nic[:device].backing.network._ref
end
# Distributed port group
if nic[:device].backing.respond_to?(:port) &&
nic[:device].backing.port.respond_to?(:portgroupKey)
vnet_ref = nic[:device].backing.port.portgroupKey
end
# Find vnet_ref in OpenNebula's pool of networks
one_network = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
"TEMPLATE/VCENTER_NET_REF",
vnet_ref,
vcenter_uuid,
npool)
next if !one_network
# Add pg or dpg name that are in vcenter but not in
# OpenNebula's VM to a hash for later removal
if one_network["VN_MAD"] == "vcenter" && !networks.key?(one_network["BRIDGE"])
networks[one_network["BRIDGE"]] = one_network
end
end
end
networks
end
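The expected argument is the device_change array of a reconfigure spec; only :remove entries whose backing resolves to an imported network (VN_MAD vcenter) come back. A sketch of the input shape (nic_device stands for a VirtualEthernetCard taken from the VM, and vc_uuid for the vCenter instance uuid):

device_change_nics = [
    { :operation => :remove, :device => nic_device }
]
nets = VCenterDriver::Network.vcenter_networks_to_be_removed(device_change_nics,
                                                             vc_uuid)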
# This is never cached
def self.new_from_ref(ref, vi_client)
self.new(RbVmomi::VIM::Network.new(vi_client.vim, ref), vi_client)
@ -135,24 +195,6 @@ class PortGroup < Network
net_clusters
end
def network_type
"Port Group"
end
@ -181,32 +223,23 @@ class DistributedPortGroup < Network
net_clusters
end
def network_type
"Distributed Port Group"
end
end # class DistributedPortGroup
class DistributedVirtualSwitch < Network
def initialize(item, vi_client=nil)
if !item.instance_of?(RbVmomi::VIM::VmwareDistributedVirtualSwitch)
raise "Expecting type 'RbVmomi::VIM::VmwareDistributedVirtualSwitch'. " <<
"Got '#{item.class}' instead."
end
@vi_client = vi_client
@item = item
end
end # class DistributedVirtualSwitch
end # module VCenterDriver
View File
@ -7,17 +7,63 @@ class VIClient
attr_accessor :rp
def initialize(opts)
opts = {:insecure => true}.merge(opts)
@vim = RbVmomi::VIM.connect(opts)
# Get ccr and get rp
ccr_ref = opts.delete(:ccr)
if ccr_ref
ccr = RbVmomi::VIM::ClusterComputeResource.new(@vim, ccr_ref)
#Get ref for rp
if ccr
rp = opts.delete(:rp)
if rp
rp_list = get_resource_pools(ccr)
rp_ref = rp_list.select { |r| r[:name] == rp }.first[:ref] rescue nil
@rp = RbVmomi::VIM::ResourcePool(@vim, rp_ref) if rp_ref
end
end
end
end
def rp_confined?
!!@rp
end
def get_resource_pools(ccr, rp = nil, parent_prefix = "", rp_array = [])
current_rp = ""
if !rp
rp = ccr.resourcePool
else
if !parent_prefix.empty?
current_rp << parent_prefix
current_rp << "/"
end
current_rp << rp.name
end
if rp.resourcePool.size == 0
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info
else
rp.resourcePool.each do |child_rp|
get_resource_pools(ccr, child_rp, current_rp, rp_array)
end
rp_info = {}
rp_info[:name] = current_rp
rp_info[:ref] = rp._ref
rp_array << rp_info if !current_rp.empty?
end
rp_array
end
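A usage sketch for the recursive enumeration above; host_id and ccr_ref are placeholders for an imported vCenter cluster host and its ClusterComputeResource reference:
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
ccr = RbVmomi::VIM::ClusterComputeResource.new(vi_client.vim, ccr_ref)
vi_client.get_resource_pools(ccr).each do |rp|
    puts "#{rp[:name]} => #{rp[:ref]}"   # e.g. "Parent/Child => resgroup-42"
end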
def close_connection
@vim.close
end
@ -66,7 +112,8 @@ class VIClient
connection = {
:host => host["TEMPLATE/VCENTER_HOST"],
:user => host["TEMPLATE/VCENTER_USER"],
:rp => host["TEMPLATE/VCENTER_RP_REF"],
:rp => host["TEMPLATE/VCENTER_RESOURCE_POOL"],
:ccr => host["TEMPLATE/VCENTER_CCR_REF"],
:password => password
}

View File

@ -51,11 +51,35 @@ class VIHelper
pool = one_pool(the_class, false) if pool.nil?
element = pool.select{|e|
e["#{attribute}"] == ref &&
e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid}.first rescue nil
(e["TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid ||
e["USER_TEMPLATE/VCENTER_INSTANCE_ID"] == vcenter_uuid)}.first rescue nil
return element
end
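A lookup sketch for the extended matcher, which now accepts the instance ID in either TEMPLATE or USER_TEMPLATE; the reference and UUID are placeholders:
one_net = VCenterDriver::VIHelper.find_by_ref(OpenNebula::VirtualNetworkPool,
                                              "TEMPLATE/VCENTER_NET_REF",
                                              "dvportgroup-123",
                                              "a1b2c3d4-vcenter-uuid")
puts "network not imported yet" if one_net.nil?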
def self.find_vcenter_vm_by_name(one_vm, host, vi_client)
# Let's try to find the VM object only by its name
# Let's build the VM name
vm_prefix = host['TEMPLATE/VM_PREFIX']
vm_prefix = VM_PREFIX_DEFAULT if vm_prefix.nil? || vm_prefix.empty?
vm_prefix.gsub!("$i", one_vm['ID'])
vm_name = vm_prefix + one_vm['NAME']
# We have no DEPLOY_ID, so the VM has never been deployed;
# use a view to try to find the VM from the root folder
view = vi_client.vim.serviceContent.viewManager.CreateContainerView({
container: vi_client.vim.rootFolder,
type: ['VirtualMachine'],
recursive: true
})
vcenter_vm = view.view.find{ |v| v.name == vm_name } if view.view && !view.view.empty?
view.DestroyView # Destroy the view
return vcenter_vm
end
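A sketch of the fallback lookup in context, assuming one_vm, host and vi_client were initialized as in the driver actions below:
vc_vm = VCenterDriver::VIHelper.find_vcenter_vm_by_name(one_vm, host, vi_client)
raise "VM #{one_vm['NAME']} not found in vCenter" if vc_vm.nil?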
def self.get_default(xpath)
begin
xml = OpenNebula::XMLElement.new

File diff suppressed because it is too large

View File

@ -54,12 +54,14 @@ begin
vm = VCenterDriver::VirtualMachine.new
# Clone the VM from template and provide XML info
vc_template_ref = drv_action['USER_TEMPLATE/VCENTER_TEMPLATE_REF']
vm.clone_vm(drv_action, vi_client)
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
vm.one_item = one_vm
# Set reference to template disks in VM template for detach ops
vm.reference_imported_disks(vc_template_ref)
# Set reference to template nics in VM template for detach ops
vm.reference_imported_nics

View File

@ -41,7 +41,7 @@ begin
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
vm.monitor_poll_vm
puts vm.info

View File

@ -52,10 +52,7 @@ end
# Get disk elements from drv_action
disks = drv_action.retrieve_xmlelements("VM/TEMPLATE/DISK")
## TODO keep_disks and copy_template
keep_disks =
!drv_action['/VMM_DRIVER_ACTION_DATA/VM/USER_TEMPLATE/KEEP_DISKS_ON_DONE'].nil? &&
drv_action['/VMM_DRIVER_ACTION_DATA/VM/USER_TEMPLATE/KEEP_DISKS_ON_DONE'].downcase=="yes"
# Manage instantiate to persistent
instantiate_to_persistent =
@ -73,12 +70,6 @@ begin
# If Terminate action has been called
if lcm_state_str == "SHUTDOWN"
# TODO: KEEP_DISKS_ON_DONE deprecation / invisible disks
if keep_disks
# Detach all disks from VM so they are not deleted if VM is destroyed
vm.detach_all_disks
end
# If the VM was instantiated to persistent keep the VM
if instantiate_to_persistent
vm.mark_as_template # Convert VM to template in vCenter

View File

@ -31,6 +31,7 @@ require 'vcenter_driver'
vm_ref = ARGV[0]
snap_id = ARGV[1]
vm_id = ARGV[2]
vc_cluster_name = ARGV[3]
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
@ -41,7 +42,11 @@ begin
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
# Get snapshot name
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
snap_name = one_vm["TEMPLATE/SNAPSHOT[SNAPSHOT_ID=#{snap_id}]/NAME"]
vm.delete_snapshot(snap_id, snap_name)
rescue Exception => e
STDERR.puts "Snapshot of VM #{vm_ref} on vCenter cluster "\

View File

@ -31,6 +31,7 @@ require 'vcenter_driver'
vm_ref = ARGV[0]
snap_id = ARGV[1]
vm_id = ARGV[2]
vc_cluster_name = ARGV[3]
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
@ -41,7 +42,11 @@ begin
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
# Get snapshot name
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
snap_name = one_vm["TEMPLATE/SNAPSHOT[SNAPSHOT_ID=#{snap_id}]/NAME"]
vm.revert_snapshot(snap_id, snap_name)
rescue Exception => e
STDERR.puts "Snapshot of VM #{vm_ref} on vCenter cluster "\

View File

@ -0,0 +1,19 @@
#!/bin/sh
# -------------------------------------------------------------------------- #
# Copyright 2002-2016, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
exit 0

src/vnm_mad/remotes/vcenter/clean Executable file (181 lines added)
View File

@ -0,0 +1,181 @@
#!/usr/bin/env ruby
# ---------------------------------------------------------------------------- #
# Copyright 2002-2017, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ---------------------------------------------------------------------------- #
ONE_LOCATION=ENV["ONE_LOCATION"] if !defined?(ONE_LOCATION)
if !ONE_LOCATION
RUBY_LIB_LOCATION="/usr/lib/one/ruby" if !defined?(RUBY_LIB_LOCATION)
else
RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" if !defined?(RUBY_LIB_LOCATION)
end
$: << RUBY_LIB_LOCATION
$: << File.dirname(__FILE__)
require 'vcenter_driver'
drv_action_enc = ARGV[0]
drv_action = OpenNebula::XMLElement.new
drv_action.initialize_xml(Base64.decode64(drv_action_enc), 'VM')
# Get more VM info from OpenNebula
vm_id = drv_action["ID"]
one_vm = VCenterDriver::VIHelper.one_item(OpenNebula::VirtualMachine, vm_id)
vc_cluster_name = one_vm["HISTORY_RECORDS/HISTORY[last()]/HOSTNAME"]
# Get host information
host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name)
host_id = host['ID']
# Create VM object
vm_ref = drv_action["DEPLOY_ID"]
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
vm = VCenterDriver::VirtualMachine.new_from_ref(vm_ref, vi_client)
vm.one_item = one_vm
# Check if clean operation is due to a hotplug detach nic
hotplug_nic = drv_action.retrieve_xmlelements("TEMPLATE/NIC[ATTACH=\"YES\"]").first rescue nil
if hotplug_nic # A nic has been hotplug detached
pg_name = hotplug_nic["BRIDGE"]
switch_name = hotplug_nic["VCENTER_SWITCH_NAME"]
vnet_ref = hotplug_nic["VCENTER_NET_REF"]
if hotplug_nic["VCENTER_PORTGROUP_TYPE"] == "Port Group"
esx_host = VCenterDriver::ESXHost.new_from_ref(vm["runtime.host._ref"], vi_client)
begin
esx_host.lock # Exclusive lock for ESX host operation
if esx_host.pg_exists(pg_name)
swname = esx_host.remove_pg(pg_name)
exit if !swname || switch_name != swname
# We must update XML so the VCENTER_NET_REF is unset
VCenterDriver::Network.remove_net_ref(hotplug_nic["NETWORK_ID"])
exit if !esx_host.vss_exists(switch_name)
swname = esx_host.remove_vss(switch_name)
end
rescue Exception => e
raise e
ensure
esx_host.unlock # Remove lock
vi_client.close_connection if vi_client
end
end
if hotplug_nic["VCENTER_PORTGROUP_TYPE"] == "Distributed Port Group"
begin
dc = vm.cluster.get_dc # Get datacenter
dc.lock
# Explore network folder in search of dpg and dvs
net_folder = dc.network_folder
net_folder.fetch!
# Get distributed port group if it exists
dpg = dc.dpg_exists(pg_name, net_folder)
dc.remove_dpg(dpg) if dpg
# We must update XML so the VCENTER_NET_REF is unset
VCenterDriver::Network.remove_net_ref(hotplug_nic["NETWORK_ID"])
# Get distributed virtual switch and try to remove it
dvs = dc.dvs_exists(switch_name, net_folder)
dc.remove_dvs(dvs) if dvs
rescue Exception => e
# TODO rollback
raise e
ensure
dc.unlock if dc
end
end
else # VM is being terminated
# If VM shutdown has been called (LCM_STATE 12, SHUTDOWN)
if one_vm["/VM/LCM_STATE"] == "12"
esx_host = VCenterDriver::ESXHost.new_from_ref(vm["runtime.host._ref"], vi_client)
vm.detach_all_nics # Detach all NICs to prevent Resource in use when deleting
begin
esx_host.lock # Exclusive lock for ESX host operation
nics = one_vm.retrieve_xmlelements("TEMPLATE/NIC[VN_MAD=\"vcenter\"]")
nics.each do |nic|
pg_name = nic["BRIDGE"]
switch_name = nic["VCENTER_SWITCH_NAME"]
vnet_ref = nic["VCENTER_NET_REF"]
if nic["VCENTER_PORTGROUP_TYPE"] == "Port Group"
begin
next if !esx_host.pg_exists(pg_name)
swname = esx_host.remove_pg(pg_name)
next if !swname || switch_name != swname
# We must update XML so the VCENTER_NET_REF is unset
VCenterDriver::Network.remove_net_ref(nic["NETWORK_ID"])
next if !esx_host.vss_exists(switch_name)
swname = esx_host.remove_vss(switch_name)
rescue Exception => e
raise e
end
end
if nic["VCENTER_PORTGROUP_TYPE"] == "Distributed Port Group"
begin
dc = vm.cluster.get_dc # Get datacenter
dc.lock
# Explore network folder in search of dpg and dvs
net_folder = dc.network_folder
net_folder.fetch!
# Try to remove distributed port group if it exists
dpg = dc.dpg_exists(pg_name, net_folder)
next if !dpg
dc.remove_dpg(dpg)
# We must update XML so the VCENTER_NET_REF is unset
VCenterDriver::Network.remove_net_ref(nic["NETWORK_ID"])
# Get distributed virtual switch and try to remove it
dvs = dc.dvs_exists(switch_name, net_folder)
next if !dvs
dc.remove_dvs(dvs)
rescue Exception => e
raise e
ensure
dc.unlock if dc
end
end
end
rescue Exception => e
raise e
ensure
esx_host.unlock if esx_host
vi_client.close_connection if vi_client
end
end
end
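A manual invocation sketch for this clean action; the XML body and the remotes path are assumptions, since the VNM driver framework normally supplies the Base64-encoded VM template:
require 'base64'
vm_xml = "<VM><ID>7</ID><DEPLOY_ID>vm-1234</DEPLOY_ID></VM>"  # placeholder
system("/var/lib/one/remotes/vnm/vcenter/clean",
       Base64.strict_encode64(vm_xml))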

View File

@ -0,0 +1 @@
../common/dummy.sh

View File

@ -0,0 +1 @@
../common/dummy.sh