F #5725: State for Virtual Networks

* VNETs include state to flag their configuration status (READY/ERROR/LOCK)
* Hooks can be programmed on VNET state transitions
* Include Java/Go/Ruby bindings
* New vCenter network drivers
* Place-holders to customize actions (create/delete) for other network drivers (see the sketch below)
* Sunstone integration
* OneFlow syncs flow creation on VNET status
* Remove deprecated vCenter hooks and autoregistration

Co-authored-by: Frederick Ernesto Borges Noronha <fborges@opennebula.io>
Co-authored-by: Alejandro Huertas <ahuertas@opennebula.io>
Co-authored-by: Pavel Czerny <pczerny@opennebula.systems>
This commit is contained in:
parent 72f1e5dee5
commit 4b746348b3
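Most of the weight of this change is in the diff below, but the driver-side contract is small: every network driver now ships vnet_create and vnet_delete actions (see the install.sh hunks), and a driver with nothing special to do can keep them as no-ops. The following Ruby sketch is illustrative only, not the shipped placeholder; it assumes the action receives the Base64-encoded Virtual Network document as its first argument, mirroring the xml64 parameter that appears in the C++ headers further down.

    #!/usr/bin/env ruby
    # Hypothetical minimal vnet_create action for a custom network driver.
    # Assumption: the Base64-encoded VNET XML arrives as the first argument.

    require 'base64'
    require 'rexml/document'

    vnet64 = ARGV[0] || STDIN.read
    vnet   = REXML::Document.new(Base64.decode64(vnet64))

    vnet_id = vnet.elements['VNET/ID'].text
    bridge  = vnet.elements['VNET/BRIDGE'].text rescue nil

    # A real driver would create the backing resource here (bridge, VLAN, SDN call...).
    STDERR.puts "vnet_create: VNET #{vnet_id} (bridge #{bridge})"

    exit 0 # a non-zero exit is expected to move the network to ERROR (driver operation failed)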
include/HookStateVirtualNetwork.h (new file, 80 lines)
@ -0,0 +1,80 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2021, OpenNebula Project, OpenNebula Systems                 */
/*                                                                             */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may     */
/* not use this file except in compliance with the License. You may obtain     */
/* a copy of the License at                                                    */
/*                                                                             */
/* http://www.apache.org/licenses/LICENSE-2.0                                  */
/*                                                                             */
/* Unless required by applicable law or agreed to in writing, software         */
/* distributed under the License is distributed on an "AS IS" BASIS,           */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.    */
/* See the License for the specific language governing permissions and         */
/* limitations under the License.                                              */
/* -------------------------------------------------------------------------- */

#ifndef HOOK_STATE_VIRTUAL_NETWORK_H_
#define HOOK_STATE_VIRTUAL_NETWORK_H_

#include "HookImplementation.h"
#include "VirtualNetwork.h"

class HookStateVirtualNetwork : public HookImplementation
{
public:
    /**
     *  @return true if a state hook needs to be triggered for this Virtual Network
     */
    static bool trigger(VirtualNetwork * vn);

    /**
     *  Function to build an XML message for a state hook
     */
    static std::string format_message(VirtualNetwork * vn);

private:
    friend class Hook;

    // *************************************************************************
    // Constructor/Destructor
    // *************************************************************************

    HookStateVirtualNetwork():state(VirtualNetwork::INIT){};

    virtual ~HookStateVirtualNetwork() = default;

    /**
     *  Check if type dependent attributes are well defined.
     *    @param tmpl pointer to the Hook template
     *    @param error_str string with error information
     *    @return 0 on success
     */
    int parse_template(Template *tmpl, std::string& error_str);

    /**
     *  Rebuilds the object from a template
     *    @param tmpl The template
     *    @return 0 on success, -1 otherwise
     */
    int from_template(const Template * tmpl, std::string& error);

    /* Checks the mandatory template attributes
     *    @param tmpl The hook template
     *    @param error string describing the error if any
     *    @return 0 on success
     */
    int post_update_template(Template * tmpl, std::string& error);

    // -------------------------------------------------------------------------
    // Hook API Attributes
    // -------------------------------------------------------------------------

    /**
     *  State (hook_state) which triggers the hook
     */
    VirtualNetwork::VirtualNetworkState state;
};

#endif
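Once this class is wired into the hook manager, reacting to a transition is a matter of registering a state hook against the Virtual Network. A minimal template sketch is shown below; it reuses the attribute style of the API hook templates removed later in this diff, but the RESOURCE, STATE and ARGUMENTS values are assumptions carried over from the existing VM/host state hooks, and the command path is hypothetical.

    NAME            = vnet_ready_notify
    TYPE            = state
    RESOURCE        = NET                        # assumption: resource keyword for Virtual Networks
    STATE           = READY                      # any VirtualNetworkState: READY, ERROR, LOCK_CREATE...
    COMMAND         = /usr/share/one/hooks/vnet_ready.rb   # hypothetical script
    ARGUMENTS       = "$TEMPLATE"
    ARGUMENTS_STDIN = yes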
@ -57,7 +57,7 @@ public:
     *
     *    @return 0 on success.
     */
    virtual int update(PoolObjectSQL * objsql);
    int update(PoolObjectSQL * objsql) override;

    /**
     *  Gets an object from the pool (if needed the object is loaded from the

@ -85,6 +85,16 @@ public:
     */
    void trigger_free_address(IPAMRequest& ir);

    /**
     *  Call vnet_create action
     */
    void trigger_vnet_create(int vnid, const std::string& xml64);

    /**
     *  Call vnet_delete action
     */
    void trigger_vnet_delete(int vnid, const std::string& xml64);

private:
    /**
     *  Timer action async execution

@ -110,11 +120,21 @@ private:

    /**
     *  This function initializes a request to call the IPAM driver
     *    @param type Message type
     *    @param ir the IPAM request
     *    @return pointer to the IPAM driver to use, 0 on failure
     */
    void send_request(IPAMManagerMessages type, IPAMRequest& ir);

    /**
     *  This function sends an action message to the IPAM driver
     *    @param type Message type
     *    @param oid Object ID
     *    @param xml Object xml data
     */
    void send_message(IPAMManagerMessages type,
                      int oid,
                      const std::string& xml);

    // -------------------------------------------------------------------------
    // Protocol implementation, processing messages from driver
    // -------------------------------------------------------------------------

@ -128,6 +148,10 @@ private:
     */
    void _notify_request(std::unique_ptr<ipam_msg_t> msg);

    void _vnet_create(std::unique_ptr<ipam_msg_t> msg);

    void _vnet_delete(std::unique_ptr<ipam_msg_t> msg);

    /**
     *
     */
@ -142,6 +142,15 @@ public:
        return _payload;
    }

    std::string payload64() const
    {
        std::string buffer;

        ssl_util::base64_decode(_payload, buffer);

        return buffer;
    }

    void payload(const std::string& p)
    {
        _payload = p;
@ -369,7 +369,7 @@ public:
|
||||
* @param values of the attribute
|
||||
* @return the number of values
|
||||
*/
|
||||
template<typename T>
|
||||
template<typename T>
|
||||
int get_template_attribute(const std::string& name,
|
||||
std::vector<const T*>& values) const
|
||||
{
|
||||
@ -378,15 +378,15 @@ public:
|
||||
|
||||
/**
|
||||
* These methods gets the value of a SingleAttribute and converts it to the
|
||||
* target type
|
||||
* target type
|
||||
* @param name of the attribute
|
||||
* @param value of the attribute, will be ""/0/false if not defined or
|
||||
* not a single attribute
|
||||
*
|
||||
*
|
||||
* @return true if the attribute was found and is a valid type for the
|
||||
* target value
|
||||
* target value
|
||||
*/
|
||||
template<typename T>
|
||||
template<typename T>
|
||||
bool get_template_attribute(const std::string& name, T& value) const
|
||||
{
|
||||
return obj_template->get(name, value);
|
||||
@ -396,10 +396,10 @@ public:
|
||||
* These methods get and remove a string based attribute (single)
|
||||
* @param name of the attribute
|
||||
* @param value of the attribute (a string), will be ""/0/false if not
|
||||
* defined or not a single attribute, depending on the target value type
|
||||
* defined or not a single attribute, depending on the target value type
|
||||
* @return the number of attributes erased
|
||||
*/
|
||||
template<typename T>
|
||||
template<typename T>
|
||||
int erase_template_attribute(const std::string& name, T& value)
|
||||
{
|
||||
obj_template->get(name, value);
|
||||
@ -413,7 +413,7 @@ public:
|
||||
* @param value of the new attribute
|
||||
* @return 0 on success
|
||||
*/
|
||||
template<typename T>
|
||||
template<typename T>
|
||||
int replace_template_attribute(const std::string& name, const T& value)
|
||||
{
|
||||
return obj_template->replace(name, value);
|
||||
@ -426,7 +426,7 @@ public:
|
||||
* @param values a vector containing a pointer to the attributes
|
||||
* @return the number of attributes removed
|
||||
*/
|
||||
template<typename T>
|
||||
template<typename T>
|
||||
int remove_template_attribute(const std::string& n, std::vector<T *>& v)
|
||||
{
|
||||
return obj_template->remove(n, v);
|
||||
@ -466,7 +466,7 @@ public:
|
||||
* @param att_name Name for the attribute
|
||||
* @param att_val Message string
|
||||
*/
|
||||
template<typename T>
|
||||
template<typename T>
|
||||
void add_template_attribute(const std::string& name, const T& value)
|
||||
{
|
||||
obj_template->add(name, value);
|
||||
|
@ -96,6 +96,8 @@ enum class IPAMManagerMessages : unsigned short int
    GET_ADDRESS,
    ALLOCATE_ADDRESS,
    FREE_ADDRESS,
    VNET_CREATE,
    VNET_DELETE,
    LOG,
    ENUM_MAX
};
@ -85,8 +85,6 @@ public:
class VirtualNetworkRmAddressRange : public Request
{
public:


    VirtualNetworkRmAddressRange(
        const std::string& name = "one.vn.rm_ar",
        const std::string& sign = "A:sii",

@ -226,6 +224,28 @@ public:
                         RequestAttributes& att) override;
};

/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */

class VirtualNetworkRecover : public Request
{
public:
    VirtualNetworkRecover()
        : Request("one.vn.recover",
                  "A:sii",
                  "Recover Virtual Network from ERROR or LOCKED state")
    {
        Nebula& nd = Nebula::instance();
        pool = nd.get_vnpool();

        auth_object = PoolObjectSQL::NET;
        auth_op     = AuthRequest::MANAGE;
    };

    void request_execute(xmlrpc_c::paramList const& _paramList,
                         RequestAttributes& att) override;
};

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
@ -133,7 +133,7 @@ public:
     *
     *    @return 0 on success.
     */
    virtual int update(PoolObjectSQL * objsql);
    int update(PoolObjectSQL * objsql) override;

    /**
     *  Gets a VM ID by its deploy_id, the deploy_id - VM id mapping is kept
@ -67,6 +67,15 @@ public:
        BRNONE = 5
    };

    enum VirtualNetworkState {
        INIT        = 0, //!< Initialization state
        READY       = 1, //!< Virtual Network ready to use
        LOCK_CREATE = 2, //!< Driver create in progress
        LOCK_DELETE = 3, //!< Driver delete in progress
        DONE        = 4, //!< The Virtual Network is being deleted
        ERROR       = 5  //!< Driver operation failed
    };

    static std::string driver_to_str(VirtualNetworkDriver ob)
    {
        switch (ob)

@ -177,6 +186,51 @@ public:
        }
    };

    /**
     *  Returns the string representation of a VirtualNetworkState
     *    @param state The state
     *    @return the string representation
     */
    static std::string state_to_str(VirtualNetworkState state)
    {
        switch (state)
        {
            case INIT:        return "INIT";        break;
            case READY:       return "READY";       break;
            case LOCK_CREATE: return "LOCK_CREATE"; break;
            case LOCK_DELETE: return "LOCK_DELETE"; break;
            case DONE:        return "DONE";        break;
            case ERROR:       return "ERROR";       break;
            default:          return "";
        }
    }

    static VirtualNetworkState str_to_state(const std::string& str_state)
    {
        if ( str_state == "READY" )
        {
            return READY;
        }
        else if ( str_state == "LOCK_CREATE" )
        {
            return LOCK_CREATE;
        }
        else if ( str_state == "LOCK_DELETE" )
        {
            return LOCK_DELETE;
        }
        else if ( str_state == "DONE" )
        {
            return DONE;
        }
        else if ( str_state == "ERROR" )
        {
            return ERROR;
        }

        return INIT;
    }

    /**
     *  Check consistency of PHYDEV, BRIDGE and VLAN attributes depending on
     *  the network driver

@ -199,6 +253,39 @@ public:

    virtual ~VirtualNetwork() = default;

    /**
     *  Return state of Virtual Network
     */
    VirtualNetworkState get_state()
    {
        return state;
    }

    /**
     *  Set state of Virtual Network
     */
    void set_state(VirtualNetworkState _state)
    {
        state = _state;
    }

    /**
     *  Sets the previous state
     */
    void set_prev_state()
    {
        prev_state = state;
    }

    /**
     *  Test if the Virtual Network has changed state since the last time the
     *  previous state was set
     *    @return true if state changed
     */
    bool has_changed_state() const
    {
        return prev_state != state;
    }

    /**
     *  Factory method for virtual network templates
     */

@ -604,6 +691,10 @@ private:
    // Virtual Network Private Attributes
    // *************************************************************************

    VirtualNetworkState state;

    VirtualNetworkState prev_state;

    // -------------------------------------------------------------------------
    // Binded physical attributes
    // -------------------------------------------------------------------------

@ -71,6 +71,15 @@ public:
            const std::set<int> &cluster_ids,
            std::string& error_str);

    /**
     *  Updates a Virtual Network in the database. It also updates the previous
     *  state after executing the hooks.
     *    @param objsql a pointer to the Virtual Network object
     *
     *    @return 0 on success.
     */
    int update(PoolObjectSQL * objsql) override;

    /**
     *  Drops a Virtual Network and the associated VLAN_ID if needed
     */

@ -277,6 +286,8 @@ public:
    int reserve_addr_by_mac(int pid, int rid, unsigned int rsize,
        unsigned int ar_id, const std::string& mac, std::string& err);

    void delete_success(std::unique_ptr<VirtualNetwork> vn);

private:
    /**
     *  Holds the system-wide MAC prefix
install.sh (66 changed lines)
@ -486,8 +486,6 @@ VAR_DIRS="$VAR_LOCATION/remotes \
|
||||
$VAR_LOCATION/remotes/hooks \
|
||||
$VAR_LOCATION/remotes/hooks/autostart \
|
||||
$VAR_LOCATION/remotes/hooks/ft \
|
||||
$VAR_LOCATION/remotes/hooks/vcenter \
|
||||
$VAR_LOCATION/remotes/hooks/vcenter/templates \
|
||||
$VAR_LOCATION/remotes/hooks/raft \
|
||||
$VAR_LOCATION/remotes/datastore \
|
||||
$VAR_LOCATION/remotes/datastore/dummy \
|
||||
@ -759,8 +757,6 @@ INSTALL_FILES=(
|
||||
BACKUP_VMS_SHARE_FILE:$SHARE_LOCATION
|
||||
HOOK_AUTOSTART_FILES:$VAR_LOCATION/remotes/hooks/autostart
|
||||
HOOK_FT_FILES:$VAR_LOCATION/remotes/hooks/ft
|
||||
HOOK_VCENTER_FILES:$VAR_LOCATION/remotes/hooks/vcenter
|
||||
HOOK_VCENTER_TMPLS:$VAR_LOCATION/remotes/hooks/vcenter/templates
|
||||
HOOK_RAFT_FILES:$VAR_LOCATION/remotes/hooks/raft
|
||||
COMMON_CLOUD_LIB_FILES:$LIB_LOCATION/ruby/cloud
|
||||
CLOUD_AUTH_LIB_FILES:$LIB_LOCATION/ruby/cloud/CloudAuth
|
||||
@ -1738,53 +1734,71 @@ NETWORK_8021Q_FILES="src/vnm_mad/remotes/802.1Q/clean \
|
||||
src/vnm_mad/remotes/802.1Q/post \
|
||||
src/vnm_mad/remotes/802.1Q/pre \
|
||||
src/vnm_mad/remotes/802.1Q/update_sg \
|
||||
src/vnm_mad/remotes/802.1Q/vlan_tag_driver.rb"
|
||||
src/vnm_mad/remotes/802.1Q/vlan_tag_driver.rb \
|
||||
src/vnm_mad/remotes/802.1Q/vnet_create \
|
||||
src/vnm_mad/remotes/802.1Q/vnet_delete"
|
||||
|
||||
NETWORK_VXLAN_FILES="src/vnm_mad/remotes/vxlan/clean \
|
||||
src/vnm_mad/remotes/vxlan/post \
|
||||
src/vnm_mad/remotes/vxlan/pre \
|
||||
src/vnm_mad/remotes/vxlan/update_sg \
|
||||
src/vnm_mad/remotes/vxlan/vxlan.rb \
|
||||
src/vnm_mad/remotes/vxlan/vxlan_driver.rb"
|
||||
|
||||
src/vnm_mad/remotes/vxlan/vxlan_driver.rb \
|
||||
src/vnm_mad/remotes/vxlan/vnet_create \
|
||||
src/vnm_mad/remotes/vxlan/vnet_delete"
|
||||
|
||||
NETWORK_DUMMY_FILES="src/vnm_mad/remotes/dummy/clean \
|
||||
src/vnm_mad/remotes/dummy/post \
|
||||
src/vnm_mad/remotes/dummy/update_sg \
|
||||
src/vnm_mad/remotes/dummy/pre"
|
||||
src/vnm_mad/remotes/dummy/pre \
|
||||
src/vnm_mad/remotes/dummy/vnet_create \
|
||||
src/vnm_mad/remotes/dummy/vnet_delete"
|
||||
|
||||
NETWORK_BRIDGE_FILES="src/vnm_mad/remotes/bridge/clean \
|
||||
src/vnm_mad/remotes/bridge/post \
|
||||
src/vnm_mad/remotes/bridge/update_sg \
|
||||
src/vnm_mad/remotes/bridge/pre"
|
||||
src/vnm_mad/remotes/bridge/pre \
|
||||
src/vnm_mad/remotes/bridge/vnet_create \
|
||||
src/vnm_mad/remotes/bridge/vnet_delete"
|
||||
|
||||
NETWORK_EBTABLES_FILES="src/vnm_mad/remotes/ebtables/clean \
|
||||
src/vnm_mad/remotes/ebtables/post \
|
||||
src/vnm_mad/remotes/ebtables/pre \
|
||||
src/vnm_mad/remotes/ebtables/update_sg \
|
||||
src/vnm_mad/remotes/ebtables/Ebtables.rb"
|
||||
src/vnm_mad/remotes/ebtables/Ebtables.rb \
|
||||
src/vnm_mad/remotes/ebtables/vnet_create \
|
||||
src/vnm_mad/remotes/ebtables/vnet_delete"
|
||||
|
||||
NETWORK_FW_FILES="src/vnm_mad/remotes/fw/post \
|
||||
src/vnm_mad/remotes/fw/pre \
|
||||
src/vnm_mad/remotes/fw/update_sg \
|
||||
src/vnm_mad/remotes/fw/clean"
|
||||
src/vnm_mad/remotes/fw/clean \
|
||||
src/vnm_mad/remotes/fw/vnet_create \
|
||||
src/vnm_mad/remotes/fw/vnet_delete"
|
||||
|
||||
NETWORK_OVSWITCH_FILES="src/vnm_mad/remotes/ovswitch/clean \
|
||||
src/vnm_mad/remotes/ovswitch/post \
|
||||
src/vnm_mad/remotes/ovswitch/pre \
|
||||
src/vnm_mad/remotes/ovswitch/update_sg \
|
||||
src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb"
|
||||
src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb \
|
||||
src/vnm_mad/remotes/ovswitch/vnet_create \
|
||||
src/vnm_mad/remotes/ovswitch/vnet_delete"
|
||||
|
||||
NETWORK_OVSWITCH_VXLAN_FILES="src/vnm_mad/remotes/ovswitch_vxlan/clean \
|
||||
src/vnm_mad/remotes/ovswitch_vxlan/post \
|
||||
src/vnm_mad/remotes/ovswitch_vxlan/pre \
|
||||
src/vnm_mad/remotes/ovswitch_vxlan/update_sg \
|
||||
src/vnm_mad/remotes/ovswitch_vxlan/OpenvSwitchVXLAN.rb"
|
||||
src/vnm_mad/remotes/ovswitch_vxlan/OpenvSwitchVXLAN.rb \
|
||||
src/vnm_mad/remotes/ovswitch_vxlan/vnet_create \
|
||||
src/vnm_mad/remotes/ovswitch_vxlan/vnet_delete"
|
||||
|
||||
NETWORK_VCENTER_FILES="src/vnm_mad/remotes/vcenter/pre \
|
||||
src/vnm_mad/remotes/vcenter/post \
|
||||
src/vnm_mad/remotes/vcenter/clean \
|
||||
src/vnm_mad/remotes/vcenter/update_sg"
|
||||
src/vnm_mad/remotes/vcenter/update_sg \
|
||||
src/vnm_mad/remotes/vcenter/virtual_network_xml.rb \
|
||||
src/vnm_mad/remotes/vcenter/vnet_create \
|
||||
src/vnm_mad/remotes/vcenter/vnet_delete"
|
||||
|
||||
NETWORK_ELASTIC_FILES="src/vnm_mad/remotes/elastic/elastic.rb \
|
||||
src/vnm_mad/remotes/elastic/clean \
|
||||
@ -1792,13 +1806,17 @@ NETWORK_ELASTIC_FILES="src/vnm_mad/remotes/elastic/elastic.rb \
|
||||
src/vnm_mad/remotes/elastic/post \
|
||||
src/vnm_mad/remotes/elastic/remote_post \
|
||||
src/vnm_mad/remotes/elastic/pre \
|
||||
src/vnm_mad/remotes/elastic/update_sg"
|
||||
src/vnm_mad/remotes/elastic/update_sg \
|
||||
src/vnm_mad/remotes/elastic/vnet_create \
|
||||
src/vnm_mad/remotes/elastic/vnet_delete"
|
||||
|
||||
NETWORK_NODEPORT_FILES="src/vnm_mad/remotes/nodeport/nodeport.rb \
|
||||
src/vnm_mad/remotes/nodeport/clean \
|
||||
src/vnm_mad/remotes/nodeport/post \
|
||||
src/vnm_mad/remotes/nodeport/pre \
|
||||
src/vnm_mad/remotes/nodeport/update_sg"
|
||||
src/vnm_mad/remotes/nodeport/update_sg \
|
||||
src/vnm_mad/remotes/nodeport/vnet_create \
|
||||
src/vnm_mad/remotes/nodeport/vnet_delete"
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Virtual Network Manager drivers configuration to be installed under $REMOTES_LOCATION/etc/vnm
|
||||
@ -2309,22 +2327,6 @@ HOOK_AUTOSTART_FILES="share/hooks/autostart/host \
|
||||
HOOK_FT_FILES="share/hooks/ft/host_error.rb \
|
||||
share/hooks/ft/fence_host.sh"
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# HOOK scripts, to be installed under $VAR_LOCATION/remotes/hooks/vcenter
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
HOOK_VCENTER_FILES="share/hooks/vcenter/create_vcenter_net.rb \
|
||||
share/hooks/vcenter/delete_vcenter_net.rb"
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# HOOK templates, to be installed under
|
||||
# $VAR_LOCATION/remotes/hooks/vcenter/templates
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
HOOK_VCENTER_TMPLS="share/hooks/vcenter/templates/create_vcenter_net.tmpl \
|
||||
share/hooks/vcenter/templates/delete_vcenter_net.tmpl \
|
||||
share/hooks/vcenter/templates/instantiate_vcenter_net.tmpl"
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# HOOK RAFT scripts, to be installed under $VAR_LOCATION/remotes/hooks/raft
|
||||
#-------------------------------------------------------------------------------
|
||||
|
@ -44,6 +44,8 @@
      </xs:element>
      <xs:element name="BRIDGE" type="xs:string"/>
      <xs:element name="BRIDGE_TYPE" type="xs:string" minOccurs="0"/>
      <xs:element name="STATE" type="xs:integer"/>
      <xs:element name="PREV_STATE" type="xs:integer"/>
      <xs:element name="PARENT_NETWORK_ID" type="xs:string"/>
      <xs:element name="VN_MAD" type="xs:string"/>
      <xs:element name="PHYDEV" type="xs:string"/>

@ -38,6 +38,8 @@
      </xs:element>
      <xs:element name="BRIDGE" type="xs:string"/>
      <xs:element name="BRIDGE_TYPE" type="xs:string" minOccurs="0"/>
      <xs:element name="STATE" type="xs:integer"/>
      <xs:element name="PREV_STATE" type="xs:integer"/>
      <xs:element name="PARENT_NETWORK_ID" type="xs:string"/>
      <xs:element name="VN_MAD" type="xs:string"/>
      <xs:element name="PHYDEV" type="xs:string"/>
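Both schema hunks above publish the new state as plain integers in the VNET document. The numeric values follow the VirtualNetworkState enum earlier in this diff (0 INIT, 1 READY, 2 LOCK_CREATE, 3 LOCK_DELETE, 4 DONE, 5 ERROR), so a network whose driver create action has just finished would report, for example:

    <STATE>1</STATE>            <!-- READY -->
    <PREV_STATE>2</PREV_STATE>  <!-- LOCK_CREATE -->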
@ -1,572 +0,0 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
# Define libraries location
|
||||
ONE_LOCATION = ENV['ONE_LOCATION']
|
||||
|
||||
if !ONE_LOCATION
|
||||
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
|
||||
GEMS_LOCATION = '/usr/share/one/gems'
|
||||
VMDIR = '/var/lib/one'
|
||||
CONFIG_FILE = '/var/lib/one/config'
|
||||
else
|
||||
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
|
||||
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
|
||||
VMDIR = ONE_LOCATION + '/var'
|
||||
CONFIG_FILE = ONE_LOCATION + '/var/config'
|
||||
end
|
||||
|
||||
# %%RUBYGEMS_SETUP_BEGIN%%
|
||||
if File.directory?(GEMS_LOCATION)
|
||||
real_gems_path = File.realpath(GEMS_LOCATION)
|
||||
if !defined?(Gem) || Gem.path != [real_gems_path]
|
||||
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
|
||||
|
||||
# Suppress warnings from Rubygems
|
||||
# https://github.com/OpenNebula/one/issues/5379
|
||||
begin
|
||||
verb = $VERBOSE
|
||||
$VERBOSE = nil
|
||||
require 'rubygems'
|
||||
Gem.use_paths(real_gems_path)
|
||||
ensure
|
||||
$VERBOSE = verb
|
||||
end
|
||||
end
|
||||
end
|
||||
# %%RUBYGEMS_SETUP_END%%
|
||||
|
||||
$LOAD_PATH << RUBY_LIB_LOCATION
|
||||
|
||||
# Hook dependencies
|
||||
require 'opennebula'
|
||||
require 'vcenter_driver'
|
||||
require 'base64'
|
||||
require 'nsx_driver'
|
||||
|
||||
# Exceptions
|
||||
class AllocateNetworkError < StandardError; end
|
||||
|
||||
class CreateNetworkError < StandardError; end
|
||||
|
||||
class UpdateNetworkError < StandardError; end
|
||||
|
||||
# FUNCTIONS
|
||||
def update_net(vnet, content)
|
||||
vnet.unlock
|
||||
rc = vnet.update(content, true)
|
||||
vnet.lock(1)
|
||||
return unless OpenNebula.is_error?(rc)
|
||||
|
||||
err_msg = "Could not update the virtual network: #{rc.message}"
|
||||
raise UpdateNetworkError, err_msg
|
||||
end
|
||||
|
||||
# waits for a vlan_id attribute to be generated
|
||||
# only if automatic_vlan activated
|
||||
def wait_vlanid(vnet)
|
||||
retries = 5
|
||||
i = 0
|
||||
while vnet['VLAN_ID'].nil?
|
||||
raise CreateNetworkError, 'cannot get vlan_id' if i >= retries
|
||||
|
||||
sleep 1
|
||||
i += 1
|
||||
vnet.info
|
||||
end
|
||||
end
|
||||
|
||||
# Creates a distributed port group in a datacenter
|
||||
def create_dpg(one_vnet, dc, clusters, vi_client)
|
||||
begin
|
||||
# Get parameters needed to create the network
|
||||
pnics = one_vnet['TEMPLATE/PHYDEV']
|
||||
pg_name = one_vnet['BRIDGE']
|
||||
sw_name = one_vnet['TEMPLATE/VCENTER_SWITCH_NAME']
|
||||
mtu = one_vnet['TEMPLATE/MTU']
|
||||
vlan_id = one_vnet['VLAN_ID'] || 0
|
||||
|
||||
if one_vnet['TEMPLATE/VCENTER_SWITCH_NPORTS']
|
||||
nports = one_vnet['TEMPLATE/VCENTER_SWITCH_NPORTS']
|
||||
else
|
||||
nports = 8
|
||||
end
|
||||
|
||||
dc.lock
|
||||
net_folder = dc.network_folder
|
||||
net_folder.fetch!
|
||||
|
||||
# Get distributed port group if it exists
|
||||
dpg = dc.dpg_exists(pg_name, net_folder)
|
||||
|
||||
# Disallow changes of switch name for existing pg
|
||||
if dpg && dc.pg_changes_sw?(dpg, sw_name)
|
||||
err_msg = "The port group's switch name can not be modified"\
|
||||
" for OpenNebula's virtual network."
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
if !dpg
|
||||
# Get distributed virtual switch if it exists
|
||||
dvs = dc.dvs_exists(sw_name, net_folder)
|
||||
|
||||
if !dvs
|
||||
dvs = dc.create_dvs(sw_name, pnics, mtu)
|
||||
end
|
||||
# Creates distributed port group
|
||||
new_dpg = dc.create_dpg(dvs, pg_name, vlan_id, nports)
|
||||
# Attach dpg to esxi hosts
|
||||
|
||||
clusters.each do |cluster|
|
||||
cluster['host'].each do |host|
|
||||
begin
|
||||
esx_host = VCenterDriver::ESXHost.new_from_ref(
|
||||
host._ref,
|
||||
vi_client
|
||||
)
|
||||
esx_host.lock
|
||||
if dvs
|
||||
pnics_available = nil
|
||||
if pnics && !pnics.empty?
|
||||
pnics_available = esx_host.available_pnics
|
||||
end
|
||||
esx_host.assign_proxy_switch(
|
||||
dvs,
|
||||
sw_name,
|
||||
pnics,
|
||||
pnics_available
|
||||
)
|
||||
end
|
||||
rescue StandardError => e
|
||||
message = 'Error adding distributed port group ' \
|
||||
" #{pg_name} to host #{host._ref}." \
|
||||
" Reason: \"#{e.message}\"."
|
||||
if VCenterDriver::CONFIG[:debug_information]
|
||||
message += "#{message} #{e.backtrace}"
|
||||
end
|
||||
raise message
|
||||
ensure
|
||||
esx_host.unlock if esx_host
|
||||
end
|
||||
end
|
||||
end
|
||||
else
|
||||
err_msg = "Port group #{pg_name} already exists"
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
new_dpg
|
||||
ensure
|
||||
dc.unlock if dc
|
||||
end
|
||||
end
|
||||
|
||||
# Creates a standard port group in a host
|
||||
def create_pg(one_vnet, esx_host)
|
||||
begin
|
||||
# Get parameters needed to create the network
|
||||
pnics = one_vnet['TEMPLATE/PHYDEV']
|
||||
pg_name = one_vnet['BRIDGE']
|
||||
sw_name = one_vnet['TEMPLATE/VCENTER_SWITCH_NAME']
|
||||
mtu = one_vnet['TEMPLATE/MTU']
|
||||
vlan_id = one_vnet['VLAN_ID'] || 0
|
||||
|
||||
if one_vnet['TEMPLATE/VCENTER_SWITCH_NPORTS']
|
||||
nports = one_vnet['TEMPLATE/VCENTER_SWITCH_NPORTS']
|
||||
else
|
||||
nports = 128
|
||||
end
|
||||
esx_host.lock # Exclusive lock for ESX host operation
|
||||
|
||||
pnics_available = nil
|
||||
pnics_available = esx_host.available_pnics if pnics
|
||||
|
||||
# Get port group if it exists
|
||||
pg = esx_host.pg_exists(pg_name)
|
||||
|
||||
# Disallow changes of switch name for existing pg
|
||||
if pg && esx_host.pg_changes_sw?(pg, sw_name)
|
||||
err_msg = 'The port group already exists in this host '\
|
||||
'for a different vCenter standard switch and '\
|
||||
'this kind of change is not supported.'
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
# Pg does not exist
|
||||
if !pg
|
||||
# Get standard switch if it exists
|
||||
vs = esx_host.vss_exists(sw_name)
|
||||
|
||||
if !vs
|
||||
sw_name = esx_host.create_vss(sw_name,
|
||||
nports,
|
||||
pnics,
|
||||
mtu,
|
||||
pnics_available)
|
||||
end
|
||||
|
||||
new_pg = esx_host.create_pg(pg_name, sw_name, vlan_id)
|
||||
else
|
||||
err_msg = "Port group #{pg_name} already exists"
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
new_pg
|
||||
ensure
|
||||
esx_host.unlock if esx_host # Remove lock
|
||||
end
|
||||
end
|
||||
|
||||
def create_opaque_network(one_vnet, host_id)
|
||||
# NSX parameters
|
||||
ls_name = one_vnet['NAME']
|
||||
ls_description = one_vnet['TEMPLATE/DESCRIPTION']
|
||||
tz_id = one_vnet['TEMPLATE/NSX_TZ_ID']
|
||||
replication_mode = one_vnet['TEMPLATE/NSX_REP_MODE']
|
||||
admin_state = one_vnet['TEMPLATE/NSX_ADMIN_STATUS']
|
||||
|
||||
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
|
||||
|
||||
opaque_network_spec = %(
|
||||
{
|
||||
"transport_zone_id": "#{tz_id}",
|
||||
"replication_mode": "#{replication_mode}",
|
||||
"admin_state": "#{admin_state}",
|
||||
"display_name": "#{ls_name}",
|
||||
"description": "#{ls_description}"
|
||||
}
|
||||
)
|
||||
|
||||
NSXDriver::OpaqueNetwork.new(nsx_client, nil, tz_id, opaque_network_spec)
|
||||
end
|
||||
|
||||
def create_virtual_wire(one_vnet, host_id)
|
||||
# NSX parameters
|
||||
ls_name = one_vnet['NAME']
|
||||
ls_description = one_vnet['TEMPLATE/DESCRIPTION']
|
||||
tz_id = one_vnet['TEMPLATE/NSX_TZ_ID']
|
||||
replication_mode = one_vnet['TEMPLATE/NSX_REP_MODE']
|
||||
|
||||
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
|
||||
|
||||
virtual_wire_spec =
|
||||
"<virtualWireCreateSpec>\
|
||||
<name>#{ls_name}</name>\
|
||||
<description>#{ls_description}</description>\
|
||||
<tenantId>virtual wire tenant</tenantId>\
|
||||
<controlPlaneMode>#{replication_mode}</controlPlaneMode>\
|
||||
<guestVlanAllowed>false</guestVlanAllowed>\
|
||||
</virtualWireCreateSpec>"
|
||||
|
||||
NSXDriver::VirtualWire.new(nsx_client, nil, tz_id, virtual_wire_spec)
|
||||
end
|
||||
|
||||
def add_vnet_to_cluster(one_vnet, cluster_id)
|
||||
if cluster_id
|
||||
one_cluster = VCenterDriver::VIHelper
|
||||
.one_item(OpenNebula::Cluster, cluster_id, false)
|
||||
if OpenNebula.is_error?(one_cluster)
|
||||
err_msg = "Error retrieving cluster #{cluster_id}: "\
|
||||
"#{rc.message}. You may have to place this vnet "\
|
||||
'in the right cluster by hand'
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
one_vnet.unlock
|
||||
network_id = one_vnet['ID'].to_i
|
||||
rc = one_cluster.addvnet(network_id)
|
||||
if OpenNebula.is_error?(rc)
|
||||
err_msg = "Error adding vnet #{network_id} to OpenNebula "\
|
||||
"cluster #{cluster_id}: #{rc.message}. "\
|
||||
'You may have to place this vnet in the '\
|
||||
'right cluster by hand'
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
default_cluster = VCenterDriver::VIHelper
|
||||
.one_item(OpenNebula::Cluster, '0', false)
|
||||
if OpenNebula.is_error?(default_cluster)
|
||||
err_msg = "Error retrieving default cluster: #{rc.message}."
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
else
|
||||
err_msg = 'Missing cluster ID'
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
end
|
||||
|
||||
# Constants
|
||||
SUCCESS_XPATH = '//PARAMETER[TYPE="OUT" and POSITION="1"]/VALUE'
|
||||
NETWORK_ID_XPATH = '//PARAMETER[TYPE="OUT" and POSITION="2"]/VALUE'
|
||||
ERROR_XPATH = '//PARAMETER[TYPE="OUT" and POSITION="3"]/VALUE'
|
||||
|
||||
# Changes due to new hook subsystem
|
||||
# https://github.com/OpenNebula/one/issues/3380
|
||||
arguments_raw = Base64.decode64(STDIN.read)
|
||||
arguments_xml = Nokogiri::XML(arguments_raw)
|
||||
network_id = arguments_xml.xpath(NETWORK_ID_XPATH).text.to_i
|
||||
success = arguments_xml.xpath(SUCCESS_XPATH).text != 'false'
|
||||
|
||||
net_info = ''
|
||||
esx_rollback = [] # Track hosts that require a rollback
|
||||
|
||||
begin
|
||||
# Check if the API call (one.vn.allocate) has been successful
|
||||
# and exit otherwise
|
||||
unless success
|
||||
err_msg = arguments_xml.xpath(ERROR_XPATH).text
|
||||
raise AllocateNetworkError, err_msg
|
||||
end
|
||||
|
||||
# Create client to communicate with OpenNebula
|
||||
one_client = OpenNebula::Client.new
|
||||
|
||||
# Get the network XML from OpenNebula
|
||||
# This is potentially different from the Network Template
|
||||
# provided as the API call argument
|
||||
one_vnet = OpenNebula::VirtualNetwork.new_with_id(network_id, one_client)
|
||||
rc = one_vnet.info
|
||||
if OpenNebula.is_error?(rc)
|
||||
err_msg = rc.message
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
managed = one_vnet['TEMPLATE/OPENNEBULA_MANAGED'] != 'NO'
|
||||
imported = one_vnet['TEMPLATE/VCENTER_IMPORTED']
|
||||
|
||||
unless one_vnet['VN_MAD'] == 'vcenter' && managed && imported.nil?
|
||||
msg = 'Network is being imported in OpenNebula, as it is already \
|
||||
present in vCenter. No actions needed in the hook, exiting.'
|
||||
STDOUT.puts msg
|
||||
one_vnet.unlock
|
||||
exit(0)
|
||||
end
|
||||
|
||||
# Step 0. Only execute for vcenter network driver && managed by one
|
||||
one_vnet.lock(1)
|
||||
|
||||
if one_vnet['VLAN_ID_AUTOMATIC'] == '1'
|
||||
wait_vlanid(one_vnet)
|
||||
end
|
||||
|
||||
# Step 1. Extract vnet settings
|
||||
pg_type = one_vnet['TEMPLATE/VCENTER_PORTGROUP_TYPE']
|
||||
unless pg_type
|
||||
err_msg = ' Missing port group type'
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
if pg_type == VCenterDriver::Network::NETWORK_TYPE_DPG
|
||||
hosts_id = one_vnet['TEMPLATE/VCENTER_ONE_HOST_ID']
|
||||
unless hosts_id
|
||||
err_msg = 'Missing VCENTER_ONE_HOST_ID'
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
dc = nil
|
||||
clusters = []
|
||||
clusters_ids = []
|
||||
vi_client = nil
|
||||
vc_uuid = nil
|
||||
|
||||
# Step 2. Get vnet, contact cluster and extract cluster's info
|
||||
hosts_id.split(',').each do |host_id|
|
||||
host_id = host_id.to_i
|
||||
|
||||
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
|
||||
vc_uuid = vi_client.vim.serviceContent.about.instanceUuid
|
||||
one_host = OpenNebula::Host.new_with_id(host_id, one_client)
|
||||
|
||||
rc = one_host.info
|
||||
if OpenNebula.is_error?(rc)
|
||||
err_msg = rc.message
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
cluster_id = one_host['CLUSTER_ID']
|
||||
|
||||
clusters_ids.push(cluster_id)
|
||||
|
||||
ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
|
||||
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(
|
||||
ccr_ref,
|
||||
vi_client
|
||||
)
|
||||
clusters.push(cluster)
|
||||
dc = cluster.datacenter
|
||||
end
|
||||
|
||||
# With DVS we have to work at datacenter level and then for each host
|
||||
vnet_ref = create_dpg(one_vnet, dc, clusters, vi_client)
|
||||
net_info << "VCENTER_NET_REF=\"#{vnet_ref}\"\n"
|
||||
net_info << "VCENTER_INSTANCE_ID=\"#{vc_uuid}\"\n"
|
||||
clusters_ids.each do |cluster_id|
|
||||
add_vnet_to_cluster(one_vnet, cluster_id)
|
||||
end
|
||||
net_info << "VCENTER_NET_STATE=\"READY\"\n"
|
||||
update_net(one_vnet, net_info)
|
||||
else
|
||||
host_id = one_vnet['TEMPLATE/VCENTER_ONE_HOST_ID']
|
||||
unless host_id
|
||||
err_msg = 'Missing VCENTER_ONE_HOST_ID'
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
# Step 2. Contact vCenter cluster and extract cluster's info
|
||||
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
|
||||
vc_uuid = vi_client.vim.serviceContent.about.instanceUuid
|
||||
one_host = OpenNebula::Host.new_with_id(host_id, one_client)
|
||||
|
||||
rc = one_host.info
|
||||
if OpenNebula.is_error?(rc)
|
||||
err_msg = rc.message
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
|
||||
cluster_id = one_host['CLUSTER_ID']
|
||||
|
||||
vnet_ref = nil
|
||||
ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
|
||||
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(
|
||||
ccr_ref,
|
||||
vi_client
|
||||
)
|
||||
dc = cluster.datacenter
|
||||
|
||||
# Step 3. Create the port groups based on each type
|
||||
if pg_type == VCenterDriver::Network::NETWORK_TYPE_NSXV
|
||||
begin
|
||||
logical_switch = create_virtual_wire(one_vnet, host_id)
|
||||
vnet_ref = logical_switch.ls_vcenter_ref
|
||||
ls_vni = logical_switch.ls_vni
|
||||
ls_name = logical_switch.ls_name
|
||||
net_info << "VCENTER_NET_REF=\"#{vnet_ref}\"\n"
|
||||
net_info << "VCENTER_INSTANCE_ID=\"#{vc_uuid}\"\n"
|
||||
net_info << "NSX_ID=\"#{logical_switch.ls_id}\"\n"
|
||||
net_info << "NSX_VNI=\"#{ls_vni}\"\n"
|
||||
net_info << "BRIDGE=\"#{ls_name}\"\n"
|
||||
add_vnet_to_cluster(one_vnet, cluster_id)
|
||||
net_info << "VCENTER_NET_STATE=\"READY\"\n"
|
||||
update_net(one_vnet, net_info)
|
||||
rescue StandardError => e
|
||||
err_msg = e.message
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
end
|
||||
|
||||
if pg_type == VCenterDriver::Network::NETWORK_TYPE_NSXT
|
||||
begin
|
||||
logical_switch = create_opaque_network(one_vnet, host_id)
|
||||
vnet_ref = dc.nsx_network(logical_switch.ls_id, pg_type)
|
||||
ls_vni = logical_switch.ls_vni
|
||||
ls_name = logical_switch.ls_name
|
||||
net_info << "VCENTER_NET_REF=\"#{vnet_ref}\"\n"
|
||||
net_info << "VCENTER_INSTANCE_ID=\"#{vc_uuid}\"\n"
|
||||
net_info << "NSX_ID=\"#{logical_switch.ls_id}\"\n"
|
||||
net_info << "NSX_VNI=\"#{ls_vni}\"\n"
|
||||
net_info << "BRIDGE=\"#{ls_name}\"\n"
|
||||
add_vnet_to_cluster(one_vnet, cluster_id)
|
||||
net_info << "VCENTER_NET_STATE=\"READY\"\n"
|
||||
update_net(one_vnet, net_info)
|
||||
rescue StandardError => e
|
||||
err_msg = e.message
|
||||
raise CreateNetworkError, err_msg
|
||||
end
|
||||
end
|
||||
|
||||
if pg_type == VCenterDriver::Network::NETWORK_TYPE_PG
|
||||
# With DVS we have to work at esxi host level
|
||||
cluster['host'].each do |host|
|
||||
esx_host = VCenterDriver::ESXHost.new_from_ref(
|
||||
host._ref,
|
||||
vi_client
|
||||
)
|
||||
esx_rollback << esx_host
|
||||
vnet_ref = create_pg(one_vnet, esx_host)
|
||||
end
|
||||
net_info << "VCENTER_NET_REF=\"#{vnet_ref}\"\n"
|
||||
net_info << "VCENTER_INSTANCE_ID=\"#{vc_uuid}\"\n"
|
||||
add_vnet_to_cluster(one_vnet, cluster_id)
|
||||
net_info << "VCENTER_NET_STATE=\"READY\"\n"
|
||||
update_net(one_vnet, net_info)
|
||||
end
|
||||
end
|
||||
|
||||
one_vnet.unlock
|
||||
|
||||
exit(0)
|
||||
rescue AllocateNetworkError => e
|
||||
# Here there is no one_vnet allocated
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
exit(-1)
|
||||
rescue CreateNetworkError => e
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
net_info << "VCENTER_NET_STATE=\"ERROR\"\n"
|
||||
net_info << "VCENTER_NET_ERROR=\"#{e.message}\"\n"
|
||||
update_net(one_vnet, net_info)
|
||||
one_vnet.lock(1)
|
||||
exit(-1)
|
||||
rescue UpdateNetworkError => e
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
net_info << "VCENTER_NET_STATE=\"ERROR\"\n"
|
||||
net_info << "VCENTER_NET_ERROR=\"#{e.message}\"\n"
|
||||
update_net(one_vnet, net_info)
|
||||
one_vnet.lock(1)
|
||||
exit(-1)
|
||||
rescue StandardError => e
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
net_info << "VCENTER_NET_STATE=\"ERROR\"\n"
|
||||
net_info << "VCENTER_NET_ERROR=\"#{e.message}\"\n"
|
||||
update_net(one_vnet, net_info)
|
||||
|
||||
esx_rollback.each do |esx_host|
|
||||
begin
|
||||
esx_host.lock
|
||||
esx_host.network_rollback
|
||||
rescue StandardError => e
|
||||
err_msg = 'There was an issue performing the rollback in '\
|
||||
"host #{esx_host['name']} you may have to perform "\
|
||||
'some actions by hand'
|
||||
STDERR.puts(err_msg)
|
||||
ensure
|
||||
esx_host.unlock
|
||||
end
|
||||
end
|
||||
|
||||
if dc && pg_type == VCenterDriver::Network::NETWORK_TYPE_DPG
|
||||
begin
|
||||
dc.lock
|
||||
dc.network_rollback
|
||||
rescue StandardError => e
|
||||
err_msg = 'There was an issue performing the rollback in '\
|
||||
"datacenter #{dc['name']} you may have to perform "\
|
||||
'some actions by hand'
|
||||
STDERR.puts(err_msg)
|
||||
ensure
|
||||
dc.unlock
|
||||
end
|
||||
end
|
||||
|
||||
one_vnet.lock(1)
|
||||
exit(-1)
|
||||
ensure
|
||||
vi_client.close_connection if vi_client
|
||||
end
|
@ -1,274 +0,0 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2022, OpenNebula Project, OpenNebula Systems #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
ONE_LOCATION = ENV['ONE_LOCATION']
|
||||
|
||||
if !ONE_LOCATION
|
||||
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
|
||||
GEMS_LOCATION = '/usr/share/one/gems'
|
||||
VMDIR = '/var/lib/one'
|
||||
CONFIG_FILE = '/var/lib/one/config'
|
||||
else
|
||||
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
|
||||
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
|
||||
VMDIR = ONE_LOCATION + '/var'
|
||||
CONFIG_FILE = ONE_LOCATION + '/var/config'
|
||||
end
|
||||
|
||||
# %%RUBYGEMS_SETUP_BEGIN%%
|
||||
if File.directory?(GEMS_LOCATION)
|
||||
real_gems_path = File.realpath(GEMS_LOCATION)
|
||||
if !defined?(Gem) || Gem.path != [real_gems_path]
|
||||
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
|
||||
|
||||
# Suppress warnings from Rubygems
|
||||
# https://github.com/OpenNebula/one/issues/5379
|
||||
begin
|
||||
verb = $VERBOSE
|
||||
$VERBOSE = nil
|
||||
require 'rubygems'
|
||||
Gem.use_paths(real_gems_path)
|
||||
ensure
|
||||
$VERBOSE = verb
|
||||
end
|
||||
end
|
||||
end
|
||||
# %%RUBYGEMS_SETUP_END%%
|
||||
|
||||
$LOAD_PATH << RUBY_LIB_LOCATION
|
||||
|
||||
require 'opennebula'
|
||||
require 'vcenter_driver'
|
||||
require 'base64'
|
||||
require 'nsx_driver'
|
||||
|
||||
# Exceptions
|
||||
class DeleteNetworkError < StandardError; end
|
||||
|
||||
class DeletePortgroupError < StandardError; end
|
||||
|
||||
# FUNCTIONS
|
||||
def update_net(vnet, content)
|
||||
vnet.unlock
|
||||
rc = vnet.update(content, true)
|
||||
vnet.lock(1)
|
||||
return unless OpenNebula.is_error?(rc)
|
||||
|
||||
err_msg = "Could not update the virtual network: #{rc.message}"
|
||||
raise UpdateNetworkError, err_msg
|
||||
end
|
||||
|
||||
SUCCESS_XPATH = '//PARAMETER[TYPE="OUT" and POSITION="1"]/VALUE'
|
||||
ERROR_XPATH = '//PARAMETER[TYPE="OUT" and POSITION="2"]/VALUE'
|
||||
VNET_XPATH = '//EXTRA/VNET'
|
||||
|
||||
# Changes due to new hooks
|
||||
arguments_raw = Base64.decode64(STDIN.read)
|
||||
arguments_xml = Nokogiri::XML(arguments_raw)
|
||||
success = arguments_xml.xpath(SUCCESS_XPATH).text != 'false'
|
||||
|
||||
unless success
|
||||
err_msg = arguments_xml.xpath(ERROR_XPATH).text
|
||||
raise DeleteNetworkError, err_msg
|
||||
end
|
||||
|
||||
vnet_xml = arguments_xml.xpath(VNET_XPATH).to_s
|
||||
|
||||
template = OpenNebula::XMLElement.new
|
||||
template.initialize_xml(vnet_xml, 'VNET')
|
||||
managed = template['TEMPLATE/OPENNEBULA_MANAGED'] != 'NO'
|
||||
imported = template['TEMPLATE/VCENTER_IMPORTED']
|
||||
error = template['TEMPLATE/VCENTER_NET_STATE'] == 'ERROR'
|
||||
|
||||
begin
|
||||
# Step 0. Only execute for vcenter network driver
|
||||
unless template['VN_MAD'] == 'vcenter' && managed && !error && imported.nil?
|
||||
msg = 'Nothing to do. Network is not a vcenter network or is not ' \
|
||||
'managed or is an imported network'
|
||||
STDOUT.puts msg
|
||||
exit(0)
|
||||
end
|
||||
|
||||
# Step 1. Extract vnet settings
|
||||
network_id = template['ID']
|
||||
pg_name = template['BRIDGE']
|
||||
pg_type = template['TEMPLATE/VCENTER_PORTGROUP_TYPE']
|
||||
sw_name = template['TEMPLATE/VCENTER_SWITCH_NAME']
|
||||
|
||||
one_vnet = nil
|
||||
|
||||
# With DVS we have to work at datacenter level and then for each host
|
||||
if pg_type == VCenterDriver::Network::NETWORK_TYPE_DPG
|
||||
begin
|
||||
hosts_id = template['TEMPLATE/VCENTER_ONE_HOST_ID']
|
||||
raise 'Missing VCENTER_ONE_HOST_ID' unless hosts_id
|
||||
|
||||
# Step 2. Get vnet, contact cluster and extract cluster's info
|
||||
hosts_id.split(',').each do |host_id|
|
||||
host_id = host_id.to_i
|
||||
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
|
||||
one_client = OpenNebula::Client.new
|
||||
one_vnet = OpenNebula::VirtualNetwork.new_with_id(
|
||||
network_id,
|
||||
one_client
|
||||
)
|
||||
one_host = OpenNebula::Host.new_with_id(host_id, one_client)
|
||||
rc = one_host.info
|
||||
raise rc.message if OpenNebula.is_error? rc
|
||||
|
||||
ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
|
||||
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(
|
||||
ccr_ref,
|
||||
vi_client
|
||||
)
|
||||
dc = cluster.datacenter
|
||||
|
||||
dc.lock
|
||||
|
||||
# Explore network folder in search of dpg and dvs
|
||||
net_folder = dc.network_folder
|
||||
net_folder.fetch!
|
||||
|
||||
# Get distributed port group and dvs if they exists
|
||||
dvs = dc.dvs_exists(sw_name, net_folder)
|
||||
dpg = dc.dpg_exists(pg_name, net_folder)
|
||||
dc.remove_dpg(dpg) if dpg
|
||||
|
||||
# Only remove switch if the port group being removed is
|
||||
# the last and only port group in the switch
|
||||
|
||||
if dvs && dvs.item.summary.portgroupName.size == 1 &&
|
||||
dvs.item.summary.portgroupName[0] == "#{sw_name}-uplink-pg"
|
||||
dc.remove_dvs(dvs)
|
||||
end
|
||||
end
|
||||
rescue StandardError => e
|
||||
err_msg = e.message
|
||||
raise DeletePortgroupError, err_msg
|
||||
ensure
|
||||
dc.unlock if dc
|
||||
end
|
||||
else
|
||||
host_id = template['TEMPLATE/VCENTER_ONE_HOST_ID']
|
||||
raise 'Missing VCENTER_ONE_HOST_ID' unless host_id
|
||||
|
||||
# Step 2. Get vnet, contact cluster and extract cluster's info
|
||||
vi_client = VCenterDriver::VIClient.new_from_host(host_id)
|
||||
one_client = OpenNebula::Client.new
|
||||
one_vnet = OpenNebula::VirtualNetwork.new_with_id(
|
||||
network_id,
|
||||
one_client
|
||||
)
|
||||
one_host = OpenNebula::Host.new_with_id(host_id, one_client)
|
||||
rc = one_host.info
|
||||
raise rc.message if OpenNebula.is_error? rc
|
||||
|
||||
ccr_ref = one_host['TEMPLATE/VCENTER_CCR_REF']
|
||||
cluster = VCenterDriver::ClusterComputeResource.new_from_ref(
|
||||
ccr_ref,
|
||||
vi_client
|
||||
)
|
||||
|
||||
# NSX
|
||||
ls_id = template['TEMPLATE/NSX_ID']
|
||||
# NSX
|
||||
|
||||
if pg_type == VCenterDriver::Network::NETWORK_TYPE_PG
|
||||
cluster['host'].each do |host|
|
||||
# Step 3. Loop through hosts in clusters
|
||||
esx_host = VCenterDriver::ESXHost.new_from_ref(
|
||||
host._ref,
|
||||
vi_client
|
||||
)
|
||||
|
||||
begin
|
||||
esx_host.lock # Exclusive lock for ESX host operation
|
||||
|
||||
next unless esx_host.pg_exists(pg_name)
|
||||
|
||||
swname = esx_host.remove_pg(pg_name)
|
||||
next if !swname || sw_name != swname
|
||||
|
||||
vswitch = esx_host.vss_exists(sw_name)
|
||||
next unless vswitch
|
||||
|
||||
# Only remove switch if the port group being removed is
|
||||
# the last and only port group in the switch
|
||||
if vswitch.portgroup.empty?
|
||||
esx_host.remove_vss(sw_name)
|
||||
end
|
||||
rescue StandardError => e
|
||||
err_msg = e.message
|
||||
raise DeletePortgroupError, err_msg
|
||||
ensure
|
||||
esx_host.unlock if esx_host # Remove host lock
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if pg_type == VCenterDriver::Network::NETWORK_TYPE_NSXV
|
||||
begin
|
||||
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
|
||||
logical_switch = NSXDriver::VirtualWire.new(
|
||||
nsx_client,
|
||||
ls_id,
|
||||
nil,
|
||||
nil
|
||||
)
|
||||
logical_switch.delete_logical_switch
|
||||
rescue StandardError => e
|
||||
err_msg = e.message
|
||||
raise DeletePortgroupError, err_msg
|
||||
end
|
||||
end
|
||||
|
||||
if pg_type == VCenterDriver::Network::NETWORK_TYPE_NSXT
|
||||
begin
|
||||
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
|
||||
logical_switch = NSXDriver::OpaqueNetwork.new(
|
||||
nsx_client,
|
||||
ls_id,
|
||||
nil,
|
||||
nil
|
||||
)
|
||||
logical_switch.delete_logical_switch
|
||||
rescue StandardError => e
|
||||
err_msg = e.message
|
||||
raise DeletePortgroupError, err_msg
|
||||
end
|
||||
end
|
||||
end
|
||||
rescue DeleteNetworkError => e
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
net_info << "VCENTER_NET_STATE=\"ERROR\"\n"
|
||||
net_info << "VCENTER_NET_ERROR=\"#{e.message}\"\n"
|
||||
update_net(one_vnet, net_info)
|
||||
one_vnet.lock(1)
|
||||
exit(-1)
|
||||
rescue DeletePortgroupError => e
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
exit(-1)
|
||||
rescue StandardError => e
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
exit(-1)
|
||||
ensure
|
||||
vi_client.close_connection if vi_client
|
||||
end
|
@ -1,6 +0,0 @@
|
||||
NAME = vcenter_net_create
|
||||
TYPE = api
|
||||
COMMAND = vcenter/create_vcenter_net.rb
|
||||
CALL = "one.vn.allocate"
|
||||
ARGUMENTS = "$API"
|
||||
ARGUMENTS_STDIN = yes
|
@ -1,6 +0,0 @@
|
||||
NAME = vcenter_net_delete
|
||||
TYPE = api
|
||||
COMMAND = vcenter/delete_vcenter_net.rb
|
||||
CALL = "one.vn.delete"
|
||||
ARGUMENTS = "$API"
|
||||
ARGUMENTS_STDIN = yes
|
@ -1,6 +0,0 @@
|
||||
NAME = vcenter_net_instantiate
|
||||
TYPE = api
|
||||
COMMAND = vcenter/create_vcenter_net.rb
|
||||
CALL = "one.vntemplate.instantiate"
|
||||
ARGUMENTS = "$API"
|
||||
ARGUMENTS_STDIN = yes
|
@ -345,17 +345,21 @@ module OneGate
|
||||
|
||||
module Service
|
||||
STATE = {
|
||||
'PENDING' => 0,
|
||||
'DEPLOYING' => 1,
|
||||
'RUNNING' => 2,
|
||||
'UNDEPLOYING' => 3,
|
||||
'WARNING' => 4,
|
||||
'DONE' => 5,
|
||||
'FAILED_UNDEPLOYING' => 6,
|
||||
'FAILED_DEPLOYING' => 7,
|
||||
'SCALING' => 8,
|
||||
'FAILED_SCALING' => 9,
|
||||
'COOLDOWN' => 10
|
||||
'PENDING' => 0,
|
||||
'DEPLOYING' => 1,
|
||||
'RUNNING' => 2,
|
||||
'UNDEPLOYING' => 3,
|
||||
'WARNING' => 4,
|
||||
'DONE' => 5,
|
||||
'FAILED_UNDEPLOYING' => 6,
|
||||
'FAILED_DEPLOYING' => 7,
|
||||
'SCALING' => 8,
|
||||
'FAILED_SCALING' => 9,
|
||||
'COOLDOWN' => 10,
|
||||
'DEPLOYING_NETS' => 11,
|
||||
'UNDEPLOYING_NETS' => 12,
|
||||
'FAILED_DEPLOYING_NETS' => 13,
|
||||
'FAILED_UNDEPLOYING_NETS' => 14
|
||||
}
|
||||
|
||||
STATE_STR = [
|
||||
@ -369,7 +373,11 @@ module OneGate
|
||||
'FAILED_DEPLOYING',
|
||||
'SCALING',
|
||||
'FAILED_SCALING',
|
||||
'COOLDOWN'
|
||||
'COOLDOWN',
|
||||
'DEPLOYING_NETS',
|
||||
'UNDEPLOYING_NETS',
|
||||
'FAILED_DEPLOYING_NETS',
|
||||
'FAILED_UNDEPLOYING_NETS'
|
||||
]
|
||||
|
||||
# Returns the string representation of the service state
|
||||
|
@ -188,9 +188,17 @@ module CLIHelper
|
||||
error
|
||||
ERROR
|
||||
FAILED_DEPLOYING
|
||||
FAILED_DEPLOYING_NETS
|
||||
FAILED_UNDEPLOYING
|
||||
FAILED_UNDEPLOYING_NETS
|
||||
FAILED_SCALING]
|
||||
REGULAR_STATES = %w[PENDING DEPLOYING CONFIGURING WARNING]
|
||||
REGULAR_STATES = %w[PENDING
|
||||
DEPLOYING
|
||||
DEPLOYING_NETS
|
||||
UNDEPLOYING
|
||||
UNDEPLOYING_NETS
|
||||
CONFIGURING
|
||||
WARNING]
|
||||
|
||||
# Set state color
|
||||
#
|
||||
|
@ -31,6 +31,12 @@
|
||||
:left: true
|
||||
:expand: true
|
||||
|
||||
:STATE:
|
||||
:desc: State of the Virtual Network
|
||||
:size: 6
|
||||
:left: true
|
||||
:expand: true
|
||||
|
||||
:LEASES:
|
||||
:desc: Number of this Virtual Networks given leases
|
||||
:size: 6
|
||||
@ -42,4 +48,5 @@
|
||||
- :NAME
|
||||
- :CLUSTERS
|
||||
- :BRIDGE
|
||||
- :STATE
|
||||
- :LEASES
|
||||
|
@ -138,6 +138,12 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper
|
||||
'onevnet.yaml'
|
||||
end
|
||||
|
||||
def self.state_to_str(id)
|
||||
id = id.to_i
|
||||
state_str = VirtualNetwork::VN_STATES[id]
|
||||
VirtualNetwork::SHORT_VN_STATES[state_str]
|
||||
end
|
||||
|
||||
def format_pool(options)
|
||||
config_file = self.class.table_conf
|
||||
|
||||
@ -170,12 +176,18 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper
|
||||
d['BRIDGE']
|
||||
end
|
||||
|
||||
column :STATE, 'State of the Virtual Network', :left,
|
||||
:size=>6 do |d|
|
||||
OneVNetHelper.state_to_str(d['STATE'])
|
||||
end
|
||||
|
||||
column :LEASES, "Number of this Virtual Network's given leases",
|
||||
:size=>6 do |d|
|
||||
d['USED_LEASES']
|
||||
end
|
||||
|
||||
default :ID, :USER, :GROUP, :NAME, :CLUSTERS, :BRIDGE, :LEASES
|
||||
default :ID, :USER, :GROUP, :NAME, :CLUSTERS, :BRIDGE, :STATE,
|
||||
:LEASES
|
||||
end
|
||||
end
|
||||
|
||||
@ -315,6 +327,7 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper
|
||||
vn.retrieve_elements('CLUSTERS/ID')
|
||||
))
|
||||
puts format(str, 'BRIDGE', vn['BRIDGE'])
|
||||
puts format(str, 'STATE', vn.state_str)
|
||||
puts format(str, 'VN_MAD', vn['VN_MAD']) unless vn['VN_MAD'].empty?
|
||||
puts format(str, 'PHYSICAL DEVICE',
|
||||
vn['PHYDEV']) unless vn['PHYDEV'].empty?
|
||||
|
@ -94,6 +94,26 @@ CommandParser::CmdParser.new(ARGV) do
|
||||
'bypass the consistency checks'
|
||||
}
|
||||
|
||||
SUCCESS = {
|
||||
:name => 'success',
|
||||
:large => '--success',
|
||||
:description => 'Recover a Virtual Network by succeeding ' \
|
||||
'the pending action'
|
||||
}
|
||||
|
||||
FAILURE = {
|
||||
:name => 'failure',
|
||||
:large => '--failure',
|
||||
:description => 'Recover a Virtual Network by failing ' \
|
||||
'the pending action'
|
||||
}
|
||||
|
||||
DELETE = {
|
||||
:name => 'delete',
|
||||
:large => '--delete',
|
||||
:description => 'No recover action possible, delete the Virtual Network'
|
||||
}
|
||||
|
||||
########################################################################
|
||||
# Global Options
|
||||
########################################################################
|
||||
@ -428,4 +448,41 @@ CommandParser::CmdParser.new(ARGV) do
|
||||
|
||||
return 0
|
||||
end
|
||||
|
||||
recover_desc = <<-EOT.unindent
|
||||
Recovers a Virtual Network in ERROR state or waiting for a driver operation
|
||||
to complete.
|
||||
The recovery may be done by failing, succeeding or retrying the current operation.
|
||||
YOU NEED TO MANUALLY CHECK THE VN STATUS, to decide if the
|
||||
operation was successful or not, or if it can be retried.
|
||||
|
||||
States for success/failure recovers: LOCK_CREATE, LOCK_DELETE state.
|
||||
States for a retry recover: LOCK_CREATE, LOCK_DELETE state
|
||||
States for delete: Any but READY
|
||||
EOT
|
||||
|
||||
command :recover, recover_desc, [:range, :vmid_list],
|
||||
:options => [SUCCESS, FAILURE, DELETE] do
|
||||
if !options[:success].nil?
|
||||
result = 1
|
||||
elsif !options[:failure].nil?
|
||||
result = 0
|
||||
elsif !options[:delete].nil?
|
||||
result = 2
|
||||
else
|
||||
error_message = <<-EOT.unindent
|
||||
Need to specify the result of the pending action.
|
||||
\t--success recover the VN by succeeding the missing action.
|
||||
\t--failure recover the VN by failing the missing action.
|
||||
\t--delete no recover possible, delete the VN.
|
||||
EOT
|
||||
|
||||
STDERR.puts error_message
|
||||
exit(-1)
|
||||
end
|
||||
|
||||
helper.perform_actions(args[0], options, 'recovering') do |vn|
|
||||
vn.recover(result)
|
||||
end
|
||||
end
|
||||
end
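For reference, the recover subcommand added above takes exactly one of the new flags; a usage sketch (the Virtual Network ID is only illustrative):

    onevnet recover 42 --success   # assume the pending driver action finished correctly
    onevnet recover 42 --failure   # assume the pending driver action failed
    onevnet recover 42 --delete    # no recovery possible, remove the Virtual Network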
|
||||
|
@ -26,15 +26,17 @@ class EventManager
|
||||
|
||||
LOG_COMP = 'EM'
|
||||
|
||||
ACTIONS = {
|
||||
'WAIT_DEPLOY' => :wait_deploy,
|
||||
'WAIT_UNDEPLOY' => :wait_undeploy,
|
||||
'WAIT_SCALEUP' => :wait_scaleup,
|
||||
'WAIT_SCALEDOWN' => :wait_scaledown,
|
||||
'WAIT_ADD' => :wait_add,
|
||||
'WAIT_REMOVE' => :wait_remove,
|
||||
'WAIT_COOLDOWN' => :wait_cooldown
|
||||
}
|
||||
ACTIONS = [
|
||||
:wait_deploy_action,
|
||||
:wait_undeploy_action,
|
||||
:wait_scaleup_action,
|
||||
:wait_scaledown_action,
|
||||
:wait_add_action,
|
||||
:wait_remove_action,
|
||||
:wait_cooldown_action,
|
||||
:wait_deploy_nets_action,
|
||||
:wait_undeploy_nets_action
|
||||
]
|
||||
|
||||
FAILURE_STATES = %w[
|
||||
BOOT_FAILURE
|
||||
@ -80,20 +82,9 @@ class EventManager
|
||||
@subscriber_endpoint = @cloud_auth.conf[:subscriber_endpoint]
|
||||
|
||||
# Register Action Manager actions
|
||||
@am.register_action(ACTIONS['WAIT_DEPLOY'],
|
||||
method('wait_deploy_action'))
|
||||
@am.register_action(ACTIONS['WAIT_UNDEPLOY'],
|
||||
method('wait_undeploy_action'))
|
||||
@am.register_action(ACTIONS['WAIT_COOLDOWN'],
|
||||
method('wait_cooldown'))
|
||||
@am.register_action(ACTIONS['WAIT_SCALEUP'],
|
||||
method('wait_scaleup_action'))
|
||||
@am.register_action(ACTIONS['WAIT_ADD'],
|
||||
method('wait_add_action'))
|
||||
@am.register_action(ACTIONS['WAIT_REMOVE'],
|
||||
method('wait_remove_action'))
|
||||
@am.register_action(ACTIONS['WAIT_SCALEDOWN'],
|
||||
method('wait_scaledown_action'))
|
||||
ACTIONS.each do |m|
|
||||
@am.register_action(m, method(m.to_s))
|
||||
end
|
||||
|
||||
Thread.new { @am.start_listener }
|
||||
end
|
||||
@ -132,6 +123,42 @@ class EventManager
|
||||
end
|
||||
end
|
||||
|
||||
# Wait for networks to be ready
|
||||
#
|
||||
# @param client [OpenNebula::Client] Client to perform requests
|
||||
# @param service_id [Integer] Service ID
|
||||
# @param networks [Array] Network IDs to wait until ready
|
||||
def wait_deploy_nets_action(client, service_id, networks)
|
||||
Log.info LOG_COMP, "Waiting networks #{networks} to be (READY)"
|
||||
rc = wait_nets(networks, 'READY')
|
||||
|
||||
if rc[0]
|
||||
action = :deploy_nets_cb
|
||||
else
|
||||
action = :deploy_nets_failure_cb
|
||||
end
|
||||
|
||||
@lcm.trigger_action(action, service_id, client, service_id)
|
||||
end
|
||||
|
||||
# Wait for networks to be in DONE state
|
||||
#
|
||||
# @param client [OpenNebula::Client] Client to perform requests
|
||||
# @param service_id [Integer] Service ID
|
||||
# @param networks [Array] Network IDs to wait until ready
|
||||
def wait_undeploy_nets_action(client, service_id, networks)
|
||||
Log.info LOG_COMP, "Waiting networks #{networks} to be (DONE)"
|
||||
rc = wait_nets(networks, 'DONE')
|
||||
|
||||
if rc[0]
|
||||
action = :undeploy_nets_cb
|
||||
else
|
||||
action = :undeploy_nets_failure_cb
|
||||
end
|
||||
|
||||
@lcm.trigger_action(action, service_id, client, service_id)
|
||||
end
|
||||
|
||||
# Wait for nodes to be in DONE
|
||||
# @param [service_id] the service id
|
||||
# @param [role_name] the role name of the role which contains the VMs
|
||||
@ -264,7 +291,7 @@ class EventManager
|
||||
# @param [service_id] the service id
|
||||
# @param [role_name] the role name of the role which contains the VMs
|
||||
# @param [nodes] the list of nodes (VMs) to wait for
|
||||
def wait_cooldown(client, service_id, role_name, cooldown_time)
|
||||
def wait_cooldown_action(client, service_id, role_name, cooldown_time)
|
||||
Log.info LOG_COMP, "Waiting #{cooldown_time}s for cooldown for " \
|
||||
"service #{service_id} and role #{role_name}."
|
||||
|
||||
@ -309,10 +336,10 @@ class EventManager
|
||||
end
|
||||
|
||||
nodes.each do |node|
|
||||
subscribe(node, state, lcm_state, subscriber)
|
||||
subscribe(subscriber, 'VM', node, state, lcm_state)
|
||||
|
||||
(SUBSCRIBE_STATES + ['DONE']).each do |s|
|
||||
subscribe(node, s, 'LCM_INIT', subscriber)
|
||||
subscribe(subscriber, 'VM', node, s, 'LCM_INIT')
|
||||
end
|
||||
end
|
||||
|
||||
@ -339,10 +366,10 @@ class EventManager
|
||||
next if !nodes.empty? && rc_nodes[:failure].empty?
|
||||
|
||||
nodes.each do |id|
|
||||
unsubscribe(id, state, lcm_state, subscriber)
|
||||
unsubscribe(subscriber, 'VM', id, state, lcm_state)
|
||||
|
||||
(SUBSCRIBE_STATES + ['DONE']).each do |s|
|
||||
unsubscribe(id, s, 'LCM_INIT', subscriber)
|
||||
unsubscribe(subscriber, 'VM', id, s, 'LCM_INIT')
|
||||
end
|
||||
end
|
||||
|
||||
@ -364,6 +391,98 @@ class EventManager
|
||||
[true, rc_nodes]
|
||||
end
|
||||
|
||||
def wait_nets(networks, state)
|
||||
subscriber = gen_subscriber
|
||||
|
||||
rc_nets = { :successful => {}, :failure => {} }
|
||||
rc = check_nets(networks, state, subscriber)
|
||||
|
||||
# rc_nets[:successful] has the following structure
|
||||
#
|
||||
# vnet_id => boolean
|
||||
#
|
||||
# = true means the VNET was deleted by an external user
|
||||
# = false means the VNET reached the expected state
|
||||
rc_nets[:successful].merge!(rc[:successful])
|
||||
rc_nets[:failure].merge!(rc[:failure])
|
||||
|
||||
if networks.empty? && rc_nets[:failure].empty?
|
||||
subscriber.close
|
||||
|
||||
return [true, rc_nets]
|
||||
end
|
||||
|
||||
networks.each do |network|
|
||||
[state, 'DONE', 'ERROR'].each do |s|
|
||||
subscribe(subscriber, 'NET', network, s)
|
||||
end
|
||||
end
|
||||
|
||||
key = ''
|
||||
content = ''
|
||||
|
||||
until networks.empty?
|
||||
rc = subscriber.recv_string(key)
|
||||
rc = subscriber.recv_string(content) if rc != -1
|
||||
|
||||
# rubocop:disable Style/GuardClause
|
||||
if rc == -1 && ZMQ::Util.errno != ZMQ::EAGAIN
|
||||
# rubocop:enable Style/GuardClause
|
||||
next Log.error LOG_COMP, 'Error reading from subscriber.'
|
||||
elsif rc == -1
|
||||
Log.info LOG_COMP, "Timeout reached for VNET #{networks} =>"\
|
||||
" (#{state})"
|
||||
|
||||
rc = check_nets(networks, state, subscriber)
|
||||
|
||||
rc_nets[:successful].merge!(rc[:successful])
|
||||
rc_nets[:failure].merge!(rc[:failure])
|
||||
|
||||
next if !networks.empty? && rc_nets[:failure].empty?
|
||||
|
||||
networks.each do |network|
|
||||
[state, 'DONE', 'ERROR'].each do |s|
|
||||
unsubscribe(subscriber, 'NET', network, s)
|
||||
end
|
||||
end
|
||||
|
||||
# If any network is in ERROR the wait action will fail
|
||||
return [false, rc_nets] unless rc_nets[:failure].empty?
|
||||
|
||||
return [true, rc_nets] # (networks.empty? && fail_nets.empty?)
|
||||
end
|
||||
|
||||
# Read information from hook message
|
||||
id = retrieve_id(key)
|
||||
xml = Nokogiri::XML(Base64.decode64(content))
|
||||
h_state = xml.xpath('//HOOK_MESSAGE/STATE').text
|
||||
|
||||
Log.info LOG_COMP, "VNET #{id} reached (#{h_state})"
|
||||
|
||||
case h_state
|
||||
when 'DONE'
|
||||
rc_nets[:successful][id] = true
|
||||
when 'ERROR'
|
||||
Log.error LOG_COMP, "VNET #{id} is in ERROR state"
|
||||
rc_nets[:failure][id] = false
|
||||
when state
|
||||
rc_nets[:successful][id] = false
|
||||
end
|
||||
|
||||
[state, 'DONE', 'ERROR'].each do |s|
|
||||
unsubscribe(subscriber, 'NET', id, s)
|
||||
end
|
||||
|
||||
networks.delete(id)
|
||||
end
|
||||
|
||||
subscriber.close
|
||||
|
||||
return [false, rc_nets] unless rc_nets[:failure].empty?
|
||||
|
||||
[true, rc_nets]
|
||||
end
|
||||
|
||||
def wait_report_ready(nodes)
|
||||
rc_nodes = { :successful => {}, :failure => {} }
|
||||
rc = check_nodes_report(nodes)
|
||||
@ -385,7 +504,7 @@ class EventManager
|
||||
|
||||
nodes.each do |node|
|
||||
(SUBSCRIBE_STATES + ['DONE']).each do |s|
|
||||
subscribe(node, s, 'LCM_INIT', subscriber)
|
||||
subscribe(subscriber, 'VM', node, s, 'LCM_INIT')
|
||||
end
|
||||
end
|
||||
|
||||
@ -415,7 +534,7 @@ class EventManager
|
||||
|
||||
nodes.each do |node|
|
||||
(SUBSCRIBE_STATES + ['DONE']).each do |s|
|
||||
unsubscribe(node, s, 'LCM_INIT', subscriber)
|
||||
unsubscribe(subscriber, 'VM', node, s, 'LCM_INIT')
|
||||
end
|
||||
end
|
||||
|
||||
@ -439,7 +558,7 @@ class EventManager
|
||||
Log.info LOG_COMP, "Node #{rc[0]} reported ready"
|
||||
|
||||
(SUBSCRIBE_STATES + ['DONE']).each do |s|
|
||||
unsubscribe(rc[0], s, 'LCM_INIT', subscriber)
|
||||
unsubscribe(subscriber, 'VM', rc[0], s, 'LCM_INIT')
|
||||
end
|
||||
|
||||
nodes.delete(rc[0])
|
||||
@ -501,10 +620,10 @@ class EventManager
|
||||
|
||||
rc_nodes[:failure][id] = false
|
||||
else
|
||||
unsubscribe(id, state, lcm_state, subscriber)
|
||||
unsubscribe(subscriber, 'VM', id, state, lcm_state)
|
||||
|
||||
(SUBSCRIBE_STATES + ['DONE']).each do |s|
|
||||
unsubscribe(id, s, 'LCM_INIT', subscriber)
|
||||
unsubscribe(subscriber, 'VM', id, s, 'LCM_INIT')
|
||||
end
|
||||
|
||||
nodes.delete(id)
|
||||
@ -531,7 +650,7 @@ class EventManager
|
||||
|
||||
if vm_state == 'DONE' ||
|
||||
(vm_state == state && vm_lcm_state == lcm_state)
|
||||
unsubscribe(node, state, lcm_state, subscriber)
|
||||
unsubscribe(subscriber, 'VM', node, state, lcm_state)
|
||||
|
||||
rc_nodes[:successful][node] = true
|
||||
next true
|
||||
@ -554,6 +673,54 @@ class EventManager
|
||||
rc_nodes
|
||||
end
|
||||
|
||||
def check_nets(networks, state, subscriber)
|
||||
rc = { :successful => {}, :failure => {} }
|
||||
|
||||
networks.delete_if do |id|
|
||||
vnet = OpenNebula::VirtualNetwork.new_with_id(
|
||||
id,
|
||||
@cloud_auth.client
|
||||
)
|
||||
|
||||
if OpenNebula.is_error?(vnet.info)
|
||||
Log.info LOG_COMP, "VNET #{id} reached (#{state})"
|
||||
|
||||
[state, 'ERROR', 'DONE'].each do |s|
|
||||
unsubscribe(subscriber, 'NET', id, s)
|
||||
end
|
||||
|
||||
rc[:successful][id] = true
|
||||
next true
|
||||
end
|
||||
|
||||
vnet_state = OpenNebula::VirtualNetwork::VN_STATES[vnet.state]
|
||||
|
||||
if vnet_state == state
|
||||
Log.info LOG_COMP, "VNET #{id} reached (#{vnet_state})"
|
||||
|
||||
[state, 'ERROR', 'DONE'].each do |s|
|
||||
unsubscribe(subscriber, 'NET', id, s)
|
||||
end
|
||||
|
||||
rc[:successful][id] = true
|
||||
next true
|
||||
elsif vnet_state == 'ERROR'
|
||||
Log.error LOG_COMP, "VNET #{id} is in FAILURE state"
|
||||
|
||||
[state, 'ERROR', 'DONE'].each do |s|
|
||||
unsubscribe(subscriber, 'NET', id, s)
|
||||
end
|
||||
|
||||
rc[:failure][id] = false
|
||||
next true
|
||||
end
|
||||
|
||||
false
|
||||
end
|
||||
|
||||
rc
|
||||
end
|
||||
|
||||
def check_nodes_report(nodes)
|
||||
rc_nodes = { :successful => {}, :failure => {} }
|
||||
|
||||
@ -598,30 +765,34 @@ class EventManager
|
||||
end
|
||||
|
||||
############################################################################
|
||||
# Functionns to subscribe/unsuscribe to event changes on VM
|
||||
# Functions to subscribe/unsubscribe to event changes on VM/VNET
|
||||
############################################################################
|
||||
|
||||
def gen_subscriber
|
||||
subscriber = @context.socket(ZMQ::SUB)
|
||||
# Set timeout (TODO: add option to customize the timeout)
|
||||
|
||||
subscriber.setsockopt(ZMQ::RCVTIMEO, @wait_timeout * 10**3)
|
||||
subscriber.connect(@subscriber_endpoint)
|
||||
|
||||
subscriber
|
||||
end
|
||||
|
||||
def subscribe(vm_id, state, lcm_state, subscriber)
|
||||
subscriber.setsockopt(ZMQ::SUBSCRIBE,
|
||||
gen_filter(vm_id, state, lcm_state))
|
||||
def subscribe(subscriber, object, id, state, lcm_state = nil)
|
||||
subscriber.setsockopt(
|
||||
ZMQ::SUBSCRIBE,
|
||||
gen_filter(object, id, state, lcm_state)
|
||||
)
|
||||
end
|
||||
|
||||
def unsubscribe(vm_id, state, lcm_state, subscriber)
|
||||
subscriber.setsockopt(ZMQ::UNSUBSCRIBE,
|
||||
gen_filter(vm_id, state, lcm_state))
|
||||
def unsubscribe(subscriber, object, id, state, lcm_state = nil)
|
||||
subscriber.setsockopt(
|
||||
ZMQ::UNSUBSCRIBE,
|
||||
gen_filter(object, id, state, lcm_state)
|
||||
)
|
||||
end
|
||||
|
||||
def gen_filter(vm_id, state, lcm_state)
|
||||
"EVENT STATE VM/#{state}/#{lcm_state}/#{vm_id}"
|
||||
def gen_filter(object, id, state, lcm_state)
|
||||
"EVENT STATE #{object}/#{state}/#{lcm_state}/#{id}"
|
||||
end
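With the generalized filter above, the ZeroMQ subscription prefixes for VM and Virtual Network events look as follows (IDs are made up for illustration):

    gen_filter('VM',  42, 'ACTIVE', 'RUNNING') # => "EVENT STATE VM/ACTIVE/RUNNING/42"
    gen_filter('NET', 7,  'READY',  nil)       # => "EVENT STATE NET/READY//7"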
|
||||
|
||||
end
|
||||
|
@ -27,28 +27,33 @@ class ServiceLCM
|
||||
attr_reader :am
|
||||
|
||||
LOG_COMP = 'LCM'
|
||||
|
||||
ACTIONS = {
|
||||
ACTIONS = [
|
||||
# Callbacks
|
||||
'DEPLOY_CB' => :deploy_cb,
|
||||
'DEPLOY_FAILURE_CB' => :deploy_failure_cb,
|
||||
'UNDEPLOY_CB' => :undeploy_cb,
|
||||
'UNDEPLOY_FAILURE_CB' => :undeploy_failure_cb,
|
||||
'COOLDOWN_CB' => :cooldown_cb,
|
||||
'SCALEUP_CB' => :scaleup_cb,
|
||||
'SCALEUP_FAILURE_CB' => :scaleup_failure_cb,
|
||||
'SCALEDOWN_CB' => :scaledown_cb,
|
||||
'SCALEDOWN_FAILURE_CB' => :scaledown_failure_cb,
|
||||
'ADD_CB' => :add_cb,
|
||||
'ADD_FAILURE_CB' => :add_failure_cb,
|
||||
'REMOVE_CB' => :remove_cb,
|
||||
'REMOVE_FAILURE_CB' => :remove_failure_cb,
|
||||
:deploy_cb,
|
||||
:deploy_failure_cb,
|
||||
:undeploy_cb,
|
||||
:undeploy_failure_cb,
|
||||
:cooldown_cb,
|
||||
:scaleup_cb,
|
||||
:scaleup_failure_cb,
|
||||
:scaledown_cb,
|
||||
:scaledown_failure_cb,
|
||||
:add_cb,
|
||||
:add_failure_cb,
|
||||
:remove_cb,
|
||||
:remove_failure_cb,
|
||||
|
||||
# Network callbacks
|
||||
:deploy_nets_cb,
|
||||
:deploy_nets_failure_cb,
|
||||
:undeploy_nets_cb,
|
||||
:undeploy_nets_failure_cb,
|
||||
|
||||
# WD callbacks
|
||||
'ERROR_WD_CB' => :error_wd_cb,
|
||||
'DONE_WD_CB' => :done_wd_cb,
|
||||
'RUNNING_WD_CB' => :running_wd_cb
|
||||
}
|
||||
:error_wd_cb,
|
||||
:done_wd_cb,
|
||||
:running_wd_cb
|
||||
]
|
||||
|
||||
def initialize(client, concurrency, cloud_auth)
|
||||
@cloud_auth = cloud_auth
|
||||
@ -61,42 +66,12 @@ class ServiceLCM
|
||||
}
|
||||
|
||||
@event_manager = EventManager.new(em_conf).am
|
||||
|
||||
@wd = ServiceWD.new(em_conf)
|
||||
@wd = ServiceWD.new(em_conf)
|
||||
|
||||
# Register Action Manager actions
|
||||
@am.register_action(ACTIONS['DEPLOY_CB'],
|
||||
method('deploy_cb'))
|
||||
@am.register_action(ACTIONS['DEPLOY_FAILURE_CB'],
|
||||
method('deploy_failure_cb'))
|
||||
@am.register_action(ACTIONS['UNDEPLOY_CB'],
|
||||
method('undeploy_cb'))
|
||||
@am.register_action(ACTIONS['UNDEPLOY_FAILURE_CB'],
|
||||
method('undeploy_failure_cb'))
|
||||
@am.register_action(ACTIONS['SCALEUP_CB'],
|
||||
method('scaleup_cb'))
|
||||
@am.register_action(ACTIONS['SCALEUP_FAILURE_CB'],
|
||||
method('scaleup_failure_cb'))
|
||||
@am.register_action(ACTIONS['SCALEDOWN_CB'],
|
||||
method('scaledown_cb'))
|
||||
@am.register_action(ACTIONS['SCALEDOWN_FAILURE_CB'],
|
||||
method('scaledown_failure_cb'))
|
||||
@am.register_action(ACTIONS['COOLDOWN_CB'],
|
||||
method('cooldown_cb'))
|
||||
@am.register_action(ACTIONS['ADD_CB'],
|
||||
method('add_cb'))
|
||||
@am.register_action(ACTIONS['add_FAILURE_CB'],
|
||||
method('add_failure_cb'))
|
||||
@am.register_action(ACTIONS['REMOVE_CB'],
|
||||
method('remove_cb'))
|
||||
@am.register_action(ACTIONS['REMOVE_FAILURE_CB'],
|
||||
method('remove_failure_cb'))
|
||||
@am.register_action(ACTIONS['ERROR_WD_CB'],
|
||||
method('error_wd_cb'))
|
||||
@am.register_action(ACTIONS['DONE_WD_CB'],
|
||||
method('done_wd_cb'))
|
||||
@am.register_action(ACTIONS['RUNNING_WD_CB'],
|
||||
method('running_wd_cb'))
|
||||
ACTIONS.each do |m|
|
||||
@am.register_action(m, method(m.to_s))
|
||||
end
|
||||
|
||||
Thread.new { @am.start_listener }
|
||||
|
||||
@ -105,10 +80,11 @@ class ServiceLCM
|
||||
Thread.new { @wd.start(@srv_pool) }
|
||||
|
||||
Thread.new do
|
||||
auto_scaler = ServiceAutoScaler.new(@srv_pool,
|
||||
@cloud_auth,
|
||||
self)
|
||||
auto_scaler.start
|
||||
ServiceAutoScaler.new(
|
||||
@srv_pool,
|
||||
@cloud_auth,
|
||||
self
|
||||
).start
|
||||
end
|
||||
end
|
||||
|
||||
@ -223,13 +199,17 @@ class ServiceLCM
|
||||
rc
|
||||
end
|
||||
|
||||
# Create new service
|
||||
############################################################################
|
||||
# Life cycle manager actions
|
||||
############################################################################
|
||||
|
||||
# Deploy service networks
|
||||
#
|
||||
# @param client [OpenNebula::Client] Client executing action
|
||||
# @param service_id [Integer] Service ID
|
||||
#
|
||||
# @return [OpenNebula::Error] Error if any
|
||||
def deploy_action(client, service_id)
|
||||
def deploy_nets_action(client, service_id)
|
||||
rc = @srv_pool.get(service_id, client) do |service|
|
||||
# Create vnets only the first time the action is called
|
||||
if service.state == Service::STATE['PENDING']
|
||||
@ -243,6 +223,58 @@ class ServiceLCM
|
||||
end
|
||||
end
|
||||
|
||||
service.set_state(Service::STATE['DEPLOYING_NETS'])
|
||||
|
||||
@event_manager.trigger_action(
|
||||
:wait_deploy_nets_action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
service.networks(true)
|
||||
)
|
||||
|
||||
service.update
|
||||
end
|
||||
|
||||
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
|
||||
|
||||
rc
|
||||
end
|
||||
|
||||
# Undeploy service networks
|
||||
#
|
||||
# @param client [OpenNebula::Client] Client executing action
|
||||
# @param service_id [Integer] Service ID
|
||||
#
|
||||
# @return [OpenNebula::Error] Error if any
|
||||
def undeploy_nets_action(client, service_id)
|
||||
rc = @srv_pool.get(service_id, client) do |service|
|
||||
service.set_state(Service::STATE['UNDEPLOYING_NETS'])
|
||||
|
||||
@event_manager.trigger_action(
|
||||
:wait_undeploy_nets_action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
service.networks(false)
|
||||
)
|
||||
|
||||
service.update
|
||||
end
|
||||
|
||||
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
|
||||
|
||||
rc
|
||||
end
|
||||
|
||||
# Create new service
|
||||
#
|
||||
# @param client [OpenNebula::Client] Client executing action
|
||||
# @param service_id [Integer] Service ID
|
||||
#
|
||||
# @return [OpenNebula::Error] Error if any
|
||||
def deploy_action(client, service_id)
|
||||
rc = @srv_pool.get(service_id, client) do |service|
|
||||
set_deploy_strategy(service)
|
||||
|
||||
roles = service.roles_deploy
|
||||
@ -264,7 +296,7 @@ class ServiceLCM
|
||||
roles,
|
||||
'DEPLOYING',
|
||||
'FAILED_DEPLOYING',
|
||||
:wait_deploy,
|
||||
:wait_deploy_action,
|
||||
service.report_ready?)
|
||||
|
||||
if !OpenNebula.is_error?(rc)
|
||||
@ -316,7 +348,7 @@ class ServiceLCM
|
||||
roles,
|
||||
'UNDEPLOYING',
|
||||
'FAILED_UNDEPLOYING',
|
||||
:wait_undeploy)
|
||||
:wait_undeploy_action)
|
||||
|
||||
if !OpenNebula.is_error?(rc)
|
||||
service.set_state(Service::STATE['UNDEPLOYING'])
|
||||
@ -383,7 +415,7 @@ class ServiceLCM
|
||||
{ role_name => role },
|
||||
'SCALING',
|
||||
'FAILED_SCALING',
|
||||
:wait_scaleup,
|
||||
:wait_scaleup_action,
|
||||
service.report_ready?)
|
||||
elsif cardinality_diff < 0
|
||||
role.scale_way('DOWN')
|
||||
@ -392,7 +424,7 @@ class ServiceLCM
|
||||
{ role_name => role },
|
||||
'SCALING',
|
||||
'FAILED_SCALING',
|
||||
:wait_scaledown)
|
||||
:wait_scaledown_action)
|
||||
else
|
||||
break OpenNebula::Error.new(
|
||||
"Cardinality of #{role_name} is already at #{cardinality}"
|
||||
@ -438,6 +470,10 @@ class ServiceLCM
|
||||
|
||||
service.replace_client(client)
|
||||
recover_scale(client, service)
|
||||
elsif service.can_recover_deploy_nets?
|
||||
recover_nets(:wait_deploy_nets_action, client, service)
|
||||
elsif service.can_recover_undeploy_nets?
|
||||
recover_nets(:wait_undeploy_nets_action, client, service)
|
||||
elsif Service::STATE['COOLDOWN'] == service.state
|
||||
service.set_state(Service::STATE['RUNNING'])
|
||||
|
||||
@ -623,7 +659,7 @@ class ServiceLCM
|
||||
service.roles_deploy,
|
||||
'DEPLOYING',
|
||||
'FAILED_DEPLOYING',
|
||||
:wait_deploy,
|
||||
:wait_deploy_action,
|
||||
service.report_ready?)
|
||||
end
|
||||
|
||||
@ -657,7 +693,25 @@ class ServiceLCM
|
||||
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
|
||||
end
|
||||
|
||||
def deploy_nets_cb(client, service_id)
|
||||
deploy_action(client, service_id)
|
||||
end
|
||||
|
||||
def deploy_nets_failure_cb(client, service_id)
|
||||
rc = @srv_pool.get(service_id, client) do |service|
|
||||
# stop actions for the service if deploy fails
|
||||
@event_manager.cancel_action(service_id)
|
||||
|
||||
service.set_state(Service::STATE['FAILED_DEPLOYING_NETS'])
|
||||
service.update
|
||||
end
|
||||
|
||||
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
|
||||
end
|
||||
|
||||
def undeploy_cb(client, service_id, role_name, nodes)
|
||||
undeploy_nets = false
|
||||
|
||||
rc = @srv_pool.get(service_id, client) do |service|
|
||||
service.roles[role_name].set_state(Role::STATE['DONE'])
|
||||
|
||||
@ -674,7 +728,7 @@ class ServiceLCM
|
||||
"Virtual Networks #{rc}"
|
||||
end
|
||||
|
||||
service.delete
|
||||
undeploy_nets = true
|
||||
|
||||
break
|
||||
elsif service.strategy == 'straight'
|
||||
@ -684,12 +738,34 @@ class ServiceLCM
|
||||
service.roles_shutdown,
|
||||
'UNDEPLOYING',
|
||||
'FAILED_UNDEPLOYING',
|
||||
:wait_undeploy)
|
||||
:wait_undeploy_action)
|
||||
end
|
||||
|
||||
service.update
|
||||
end
|
||||
|
||||
undeploy_nets_action(client, service_id) if undeploy_nets
|
||||
|
||||
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
|
||||
end
|
||||
|
||||
def undeploy_nets_cb(client, service_id)
|
||||
rc = @srv_pool.get(service_id, client) do |service|
|
||||
service.delete
|
||||
end
|
||||
|
||||
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
|
||||
end
|
||||
|
||||
def undeploy_nets_failure_cb(client, service_id)
|
||||
rc = @srv_pool.get(service_id, client) do |service|
|
||||
# stop actions for the service if undeploy fails
|
||||
@event_manager.cancel_action(service_id)
|
||||
|
||||
service.set_state(Service::STATE['FAILED_UNDEPLOYING_NETS'])
|
||||
service.update
|
||||
end
|
||||
|
||||
Log.error LOG_COMP, rc.message if OpenNebula.is_error?(rc)
|
||||
end
|
||||
|
||||
@ -726,7 +802,7 @@ class ServiceLCM
|
||||
service.set_state(Service::STATE['COOLDOWN'])
|
||||
service.roles[role_name].set_state(Role::STATE['COOLDOWN'])
|
||||
|
||||
@event_manager.trigger_action(:wait_cooldown,
|
||||
@event_manager.trigger_action(:wait_cooldown_action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
@ -751,7 +827,7 @@ class ServiceLCM
|
||||
nodes[:successful].include?(node['deploy_id'])
|
||||
end
|
||||
|
||||
@event_manager.trigger_action(:wait_cooldown,
|
||||
@event_manager.trigger_action(:wait_cooldown_action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
@ -1091,7 +1167,7 @@ class ServiceLCM
|
||||
|
||||
nodes = role.recover_deploy(service.report_ready?)
|
||||
|
||||
@event_manager.trigger_action(:wait_deploy,
|
||||
@event_manager.trigger_action(:wait_deploy_action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
@ -1107,7 +1183,7 @@ class ServiceLCM
|
||||
|
||||
nodes = role.recover_undeploy
|
||||
|
||||
@event_manager.trigger_action(:wait_undeploy,
|
||||
@event_manager.trigger_action(:wait_undeploy_action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
@ -1123,7 +1199,7 @@ class ServiceLCM
|
||||
nodes, up = role.recover_scale(service.report_ready?)
|
||||
|
||||
if up
|
||||
@event_manager.trigger_action(:wait_scaleup,
|
||||
@event_manager.trigger_action(:wait_scaleup_action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
@ -1131,7 +1207,7 @@ class ServiceLCM
|
||||
nodes,
|
||||
service.report_ready?)
|
||||
else
|
||||
@event_manager.trigger_action(:wait_scaledown,
|
||||
@event_manager.trigger_action(:wait_scaledown_action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
@ -1141,6 +1217,18 @@ class ServiceLCM
|
||||
end
|
||||
end
|
||||
|
||||
def recover_nets(action, client, service)
|
||||
deploy = (action == :wait_deploy_nets_action)
|
||||
|
||||
@event_manager.trigger_action(
|
||||
action,
|
||||
service.id,
|
||||
client,
|
||||
service.id,
|
||||
service.networks(deploy)
|
||||
)
|
||||
end
|
||||
|
||||
def add_role(client, service, role)
|
||||
@wd.remove_service(service.id)
|
||||
|
||||
@ -1150,7 +1238,7 @@ class ServiceLCM
|
||||
{ role.name => role },
|
||||
'DEPLOYING',
|
||||
'FAILED_DEPLOYING',
|
||||
:wait_add,
|
||||
:wait_add_action,
|
||||
service.report_ready?)
|
||||
|
||||
if !OpenNebula.is_error?(rc)
|
||||
@ -1175,7 +1263,7 @@ class ServiceLCM
|
||||
{ role.name => role },
|
||||
'UNDEPLOYING',
|
||||
'FAILED_UNDEPLOYING',
|
||||
:wait_remove)
|
||||
:wait_remove_action)
|
||||
|
||||
if !OpenNebula.is_error?(rc)
|
||||
service.set_state(Service::STATE['UNDEPLOYING'])
|
||||
|
@ -24,17 +24,21 @@ module OpenNebula
|
||||
DOCUMENT_TYPE = 100
|
||||
|
||||
STATE = {
|
||||
'PENDING' => 0,
|
||||
'DEPLOYING' => 1,
|
||||
'RUNNING' => 2,
|
||||
'UNDEPLOYING' => 3,
|
||||
'WARNING' => 4,
|
||||
'DONE' => 5,
|
||||
'FAILED_UNDEPLOYING' => 6,
|
||||
'FAILED_DEPLOYING' => 7,
|
||||
'SCALING' => 8,
|
||||
'FAILED_SCALING' => 9,
|
||||
'COOLDOWN' => 10
|
||||
'PENDING' => 0,
|
||||
'DEPLOYING' => 1,
|
||||
'RUNNING' => 2,
|
||||
'UNDEPLOYING' => 3,
|
||||
'WARNING' => 4,
|
||||
'DONE' => 5,
|
||||
'FAILED_UNDEPLOYING' => 6,
|
||||
'FAILED_DEPLOYING' => 7,
|
||||
'SCALING' => 8,
|
||||
'FAILED_SCALING' => 9,
|
||||
'COOLDOWN' => 10,
|
||||
'DEPLOYING_NETS' => 11,
|
||||
'UNDEPLOYING_NETS' => 12,
|
||||
'FAILED_DEPLOYING_NETS' => 13,
|
||||
'FAILED_UNDEPLOYING_NETS' => 14
|
||||
}
|
||||
|
||||
STATE_STR = %w[
|
||||
@ -49,6 +53,10 @@ module OpenNebula
|
||||
SCALING
|
||||
FAILED_SCALING
|
||||
COOLDOWN
|
||||
DEPLOYING_NETS
|
||||
UNDEPLOYING_NETS
|
||||
FAILED_DEPLOYING_NETS
|
||||
FAILED_UNDEPLOYING_NETS
|
||||
]
|
||||
|
||||
TRANSIENT_STATES = %w[
|
||||
@ -56,12 +64,16 @@ module OpenNebula
|
||||
UNDEPLOYING
|
||||
SCALING
|
||||
COOLDOWN
|
||||
DEPLOYING_NETS
|
||||
UNDEPLOYING_NETS
|
||||
]
|
||||
|
||||
FAILED_STATES = %w[
|
||||
FAILED_DEPLOYING
|
||||
FAILED_UNDEPLOYING
|
||||
FAILED_SCALING
|
||||
FAILED_DEPLOYING_NETS
|
||||
FAILED_UNDEPLOYING_NETS
|
||||
]
|
||||
|
||||
RECOVER_DEPLOY_STATES = %w[
|
||||
@ -73,6 +85,7 @@ module OpenNebula
|
||||
RECOVER_UNDEPLOY_STATES = %w[
|
||||
FAILED_UNDEPLOYING
|
||||
UNDEPLOYING
|
||||
FAILED_UNDEPLOYING_NETS
|
||||
]
|
||||
|
||||
RECOVER_SCALE_STATES = %w[
|
||||
@ -80,6 +93,13 @@ module OpenNebula
|
||||
SCALING
|
||||
]
|
||||
|
||||
RECOVER_DEPLOY_NETS_STATES = %w[DEPLOYING_NETS FAILED_DEPLOYING_NETS]
|
||||
|
||||
RECOVER_UNDEPLOY_NETS_STATES = %w[
|
||||
UNDEPLOYING_NETS
|
||||
FAILED_UNDEPLOYING_NETS
|
||||
]
|
||||
|
||||
# List of attributes that can't be changed in update operation
|
||||
#
|
||||
# custom_attrs: it only has sense when deploying, not in running
|
||||
@ -168,6 +188,14 @@ module OpenNebula
|
||||
RECOVER_SCALE_STATES.include? STATE_STR[state]
|
||||
end
|
||||
|
||||
def can_recover_deploy_nets?
|
||||
RECOVER_DEPLOY_NETS_STATES.include?(STATE_STR[state])
|
||||
end
|
||||
|
||||
def can_recover_undeploy_nets?
|
||||
RECOVER_UNDEPLOY_NETS_STATES.include?(STATE_STR[state])
|
||||
end
|
||||
|
||||
# Return true if the service is running
|
||||
# @return true if the service is runnning, false otherwise
|
||||
def running?
|
||||
@ -237,6 +265,24 @@ module OpenNebula
|
||||
true
|
||||
end
|
||||
|
||||
# Returns virtual networks IDs
|
||||
# @return [Array] Array of integers containing the IDs
|
||||
def networks(deploy)
|
||||
ret = []
|
||||
|
||||
return ret unless @body['networks_values']
|
||||
|
||||
@body['networks_values'].each do |vnet|
|
||||
vnet.each do |_, net|
|
||||
next if net.keys.first == 'id' && !deploy
|
||||
|
||||
ret << net['id'].to_i
|
||||
end
|
||||
end
|
||||
|
||||
ret
|
||||
end
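To make the filtering in networks above concrete, here is a hedged sketch of what a service body's 'networks_values' array may look like (names and IDs are illustrative). Networks referenced with 'id' pre-exist and are only waited on during deployment, while 'template_id' and 'reserve_from' entries are created by the flow and therefore also undeployed by it:

    networks_values = [
      { 'public'   => { 'id' => '5' } },                      # pre-existing VNET
      { 'private'  => { 'template_id' => '3', 'id' => 8 } },  # instantiated by the flow
      { 'reserved' => { 'reserve_from' => '2', 'id' => 9 } }  # reservation made by the flow
    ]

    # networks(true)  => [5, 8, 9]  every network, waited on while deploying
    # networks(false) => [8, 9]     only flow-managed networks, deleted while undeploying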
|
||||
|
||||
# Create a new service based on the template provided
|
||||
# @param [String] template_json
|
||||
# @return [nil, OpenNebula::Error] nil in case of success, Error
|
||||
@ -570,17 +616,22 @@ module OpenNebula
|
||||
|
||||
return if body['networks_values'].nil?
|
||||
|
||||
body['networks_values'].each do |net|
|
||||
rc = create_vnet(net) if net[net.keys[0]].key?('template_id')
|
||||
body['networks_values'].each do |vnet|
|
||||
vnet.each do |name, net|
|
||||
key = net.keys.first
|
||||
|
||||
if OpenNebula.is_error?(rc)
|
||||
return rc
|
||||
end
|
||||
case key
|
||||
when 'id'
|
||||
next
|
||||
when 'template_id'
|
||||
rc = create_vnet(name, net)
|
||||
when 'reserve_from'
|
||||
rc = reserve(name, net)
|
||||
end
|
||||
|
||||
rc = reserve(net) if net[net.keys[0]].key?('reserve_from')
|
||||
return rc if OpenNebula.is_error?(rc)
|
||||
|
||||
if OpenNebula.is_error?(rc)
|
||||
return rc
|
||||
net['id'] = rc
|
||||
end
|
||||
end if deploy
|
||||
|
||||
@ -593,22 +644,25 @@ module OpenNebula
|
||||
end
|
||||
|
||||
def delete_networks
|
||||
vnets = @body['networks_values']
|
||||
vnets = @body['networks_values']
|
||||
vnets_failed = []
|
||||
|
||||
return if vnets.nil?
|
||||
|
||||
vnets.each do |vnet|
|
||||
next unless vnet[vnet.keys[0]].key?('template_id') ||
|
||||
vnet[vnet.keys[0]].key?('reserve_from')
|
||||
vnet.each do |_, net|
|
||||
key = net.keys.first
|
||||
|
||||
vnet_id = vnet[vnet.keys[0]]['id'].to_i
|
||||
next unless %w[template_id reserve_from].include?(key)
|
||||
|
||||
rc = OpenNebula::VirtualNetwork
|
||||
.new_with_id(vnet_id, @client).delete
|
||||
rc = OpenNebula::VirtualNetwork.new_with_id(
|
||||
net['id'],
|
||||
@client
|
||||
).delete
|
||||
|
||||
if OpenNebula.is_error?(rc)
|
||||
vnets_failed << vnet_id
|
||||
next unless OpenNebula.is_error?(rc)
|
||||
|
||||
vnets_failed << net['id']
|
||||
end
|
||||
end
|
||||
|
||||
@ -677,44 +731,32 @@ module OpenNebula
|
||||
@body['log'] = @body['log'].last(MAX_LOG)
|
||||
end
|
||||
|
||||
def create_vnet(net)
|
||||
def create_vnet(name, net)
|
||||
extra = ''
|
||||
extra = net['extra'] if net.key? 'extra'
|
||||
|
||||
extra = net[net.keys[0]]['extra'] if net[net.keys[0]].key? 'extra'
|
||||
|
||||
vntmpl_id = OpenNebula::VNTemplate
|
||||
.new_with_id(net[net.keys[0]]['template_id']
|
||||
.to_i, @client).instantiate(get_vnet_name(net), extra)
|
||||
|
||||
# TODO, check which error should be returned
|
||||
return vntmpl_id if OpenNebula.is_error?(vntmpl_id)
|
||||
|
||||
net[net.keys[0]]['id'] = vntmpl_id
|
||||
|
||||
true
|
||||
OpenNebula::VNTemplate.new_with_id(
|
||||
net['template_id'].to_i,
|
||||
@client
|
||||
).instantiate(get_vnet_name(name), extra)
|
||||
end
|
||||
|
||||
def reserve(net)
|
||||
get_vnet_name(net)
|
||||
extra = net[net.keys[0]]['extra'] if net[net.keys[0]].key? 'extra'
|
||||
def reserve(name, net)
|
||||
extra = ''
|
||||
extra = net['extra'] if net.key? 'extra'
|
||||
|
||||
return false if !extra || extra.empty?
|
||||
|
||||
extra.concat("\nNAME=\"#{get_vnet_name(net)}\"\n")
|
||||
extra.concat("\nNAME=\"#{get_vnet_name(name)}\"\n")
|
||||
|
||||
reserve_id = OpenNebula::VirtualNetwork
|
||||
.new_with_id(net[net.keys[0]]['reserve_from']
|
||||
.to_i, @client).reserve_with_extra(extra)
|
||||
|
||||
return reserve_id if OpenNebula.is_error?(reserve_id)
|
||||
|
||||
net[net.keys[0]]['id'] = reserve_id
|
||||
|
||||
true
|
||||
OpenNebula::VirtualNetwork.new_with_id(
|
||||
net['reserve_from'].to_i,
|
||||
@client
|
||||
).reserve_with_extra(extra)
|
||||
end
|
||||
|
||||
def get_vnet_name(net)
|
||||
"#{net.keys[0]}-#{id}"
|
||||
"#{net}-#{id}"
|
||||
end
|
||||
|
||||
def resolve_attributes(template)
|
||||
|
@ -701,7 +701,7 @@ post '/service_template/:id/action' do
|
||||
return internal_error(service.message, GENERAL_EC)
|
||||
else
|
||||
# Starts service deployment async
|
||||
rc = lcm.deploy_action(@client, service.id)
|
||||
rc = lcm.deploy_nets_action(@client, service.id)
|
||||
|
||||
if OpenNebula.is_error?(rc)
|
||||
return internal_error(rc.message, one_error_to_http(rc.errno))
|
||||
|
@ -107,6 +107,10 @@ module HEMHook
|
||||
when 'STATE'
|
||||
object = event.xpath('//HOOK_OBJECT')[0].text.upcase
|
||||
|
||||
# Fix inconsistency in Virtual Network object name
|
||||
# and root xml node
|
||||
object = 'VNET' if object == 'NET'
|
||||
|
||||
template = event.xpath("//#{object}")[0].to_s
|
||||
template = Base64.strict_encode64(template)
|
||||
end
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "HookStateHost.h"
|
||||
#include "HookStateVM.h"
|
||||
#include "HookStateImage.h"
|
||||
#include "HookStateVirtualNetwork.h"
|
||||
#include "HookLog.h"
|
||||
#include "OneDB.h"
|
||||
|
||||
@ -387,6 +388,10 @@ int Hook::set_hook(HookType hook_type, string& error)
|
||||
_hook = new HookStateImage();
|
||||
break;
|
||||
|
||||
case PoolObjectSQL::NET:
|
||||
_hook = new HookStateVirtualNetwork();
|
||||
break;
|
||||
|
||||
default:
|
||||
error = "Invalid resource type: " + resource;
|
||||
return -1;
|
||||
|
125
src/hm/HookStateVirtualNetwork.cc
Normal file
@ -0,0 +1,125 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2021, OpenNebula Project, OpenNebula Systems */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#include "HookStateVirtualNetwork.h"
|
||||
#include "NebulaLog.h"
|
||||
#include "SSLUtil.h"
|
||||
|
||||
using std::string;
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
bool HookStateVirtualNetwork::trigger(VirtualNetwork * vn)
|
||||
{
|
||||
return vn->has_changed_state();
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
string HookStateVirtualNetwork::format_message(VirtualNetwork * vn)
|
||||
{
|
||||
std::ostringstream oss;
|
||||
string vn_xml;
|
||||
|
||||
oss << "<HOOK_MESSAGE>"
|
||||
<< "<HOOK_TYPE>STATE</HOOK_TYPE>"
|
||||
<< "<HOOK_OBJECT>NET</HOOK_OBJECT>"
|
||||
<< "<STATE>" << VirtualNetwork::state_to_str(vn->get_state()) << "</STATE>"
|
||||
<< "<RESOURCE_ID>" << vn->get_oid() << "</RESOURCE_ID>"
|
||||
<< vn->to_xml(vn_xml)
|
||||
<< "</HOOK_MESSAGE>";
|
||||
|
||||
string base64;
|
||||
ssl_util::base64_encode(oss.str(), base64);
|
||||
|
||||
return base64;
|
||||
}
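The message built above is what state hooks and the OneFlow EventManager receive, Base64 encoded, for every Virtual Network state transition. A minimal sketch of how a Ruby consumer can read it, mirroring the Nokogiri-based parsing used in the EventManager changes of this commit (content holds the raw payload):

    require 'base64'
    require 'nokogiri'

    xml = Nokogiri::XML(Base64.decode64(content))

    state = xml.xpath('//HOOK_MESSAGE/STATE').text        # e.g. "READY"
    vn_id = xml.xpath('//HOOK_MESSAGE/RESOURCE_ID').text  # e.g. "7"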
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int HookStateVirtualNetwork::parse_template(Template * tmpl, string& error_str)
|
||||
{
|
||||
string state_str;
|
||||
|
||||
tmpl->get("STATE", state_str);
|
||||
tmpl->erase("STATE");
|
||||
|
||||
state = VirtualNetwork::str_to_state(state_str);
|
||||
if (state == VirtualNetwork::INIT)
|
||||
{
|
||||
error_str = "Invalid STATE: " + state_str;
|
||||
return -1;
|
||||
}
|
||||
|
||||
tmpl->add("STATE", VirtualNetwork::state_to_str(state));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int HookStateVirtualNetwork::from_template(const Template * tmpl, string& error_str)
|
||||
{
|
||||
string state_str;
|
||||
|
||||
if ( tmpl->get("STATE", state_str) )
|
||||
{
|
||||
VirtualNetwork::VirtualNetworkState _state = VirtualNetwork::str_to_state(state_str);
|
||||
|
||||
if (_state == VirtualNetwork::INIT)
|
||||
{
|
||||
error_str = "Invalid or unknown STATE attribute";
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
error_str = "STATE attribute not found or invalid";
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int HookStateVirtualNetwork::post_update_template(Template * tmpl, string& error)
|
||||
{
|
||||
string new_state_str;
|
||||
|
||||
if ( tmpl->get("STATE", new_state_str) )
|
||||
{
|
||||
VirtualNetwork::VirtualNetworkState new_state = VirtualNetwork::str_to_state(new_state_str);
|
||||
|
||||
if ( new_state != VirtualNetwork::INIT )
|
||||
{
|
||||
state = new_state;
|
||||
tmpl->replace("STATE", new_state_str);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
error = "The STATE attribute is not defined or it's invalid.";
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@ -28,6 +28,7 @@ source_files=[
|
||||
'HookStateVM.cc',
|
||||
'HookStateHost.cc',
|
||||
'HookStateImage.cc',
|
||||
'HookStateVirtualNetwork.cc',
|
||||
'HookManager.cc',
|
||||
'HookManagerProtocol.cc',
|
||||
'ExecuteHook.cc',
|
||||
|
@ -48,6 +48,12 @@ int IPAMManager::start()
|
||||
register_action(IPAMManagerMessages::FREE_ADDRESS,
|
||||
bind(&IPAMManager::_notify_request, this, _1));
|
||||
|
||||
register_action(IPAMManagerMessages::VNET_CREATE,
|
||||
bind(&IPAMManager::_vnet_create, this, _1));
|
||||
|
||||
register_action(IPAMManagerMessages::VNET_DELETE,
|
||||
bind(&IPAMManager::_vnet_delete, this, _1));
|
||||
|
||||
register_action(IPAMManagerMessages::LOG,
|
||||
&IPAMManager::_log);
|
||||
|
||||
@ -92,6 +98,24 @@ void IPAMManager::send_request(IPAMManagerMessages type, IPAMRequest& ir)
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void IPAMManager::send_message(IPAMManagerMessages type,
|
||||
int oid,
|
||||
const string& xml)
|
||||
{
|
||||
auto ipammd = get();
|
||||
|
||||
if (ipammd == nullptr)
|
||||
{
|
||||
NebulaLog::error("IPM", "Unable to find IPAM Manager driver");
|
||||
return;
|
||||
}
|
||||
|
||||
ipam_msg_t msg(type, "", oid, xml);
|
||||
ipammd->write(msg);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void IPAMManager::trigger_register_address_range(IPAMRequest& ir)
|
||||
{
|
||||
trigger([&] {
|
||||
@ -135,6 +159,20 @@ void IPAMManager::trigger_free_address(IPAMRequest& ir)
|
||||
});
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void IPAMManager::trigger_vnet_create(int vnid, const std::string& xml64)
|
||||
{
|
||||
send_message(IPAMManagerMessages::VNET_CREATE, vnid, xml64);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void IPAMManager::trigger_vnet_delete(int vnid, const std::string& xml64)
|
||||
{
|
||||
send_message(IPAMManagerMessages::VNET_DELETE, vnid, xml64);
|
||||
}
|
||||
|
||||
/* ************************************************************************** */
|
||||
/* MAD Loading */
|
||||
/* ************************************************************************** */
|
||||
@ -160,6 +198,15 @@ int IPAMManager::load_drivers(const std::vector<const VectorAttribute*>& _mads)
|
||||
|
||||
ipam_conf.replace("NAME", ipam_driver_name);
|
||||
|
||||
// Set default for threads
|
||||
int threads = 0;
|
||||
ipam_conf.vector_value("THREADS", threads);
|
||||
|
||||
if ( threads < 16 )
|
||||
{
|
||||
ipam_conf.replace("THREADS", 16);
|
||||
}
|
||||
|
||||
if ( load_driver(&ipam_conf) != 0 )
|
||||
{
|
||||
NebulaLog::error("ImM", "Unable to load IPAM Manager driver");
|
||||
|
@ -16,6 +16,9 @@
|
||||
|
||||
#include "IPAMManager.h"
|
||||
#include "NebulaLog.h"
|
||||
#include "Nebula.h"
|
||||
#include "ClusterPool.h"
|
||||
#include "VirtualNetworkPool.h"
|
||||
|
||||
#include <sstream>
|
||||
|
||||
@ -58,6 +61,144 @@ void IPAMManager::_notify_request(unique_ptr<ipam_msg_t> msg)
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void IPAMManager::_vnet_create(unique_ptr<ipam_msg_t> msg)
|
||||
{
|
||||
ostringstream os;
|
||||
|
||||
os << "Message received: ";
|
||||
msg->write_to(os);
|
||||
|
||||
NebulaLog::debug("IPM", os.str());
|
||||
|
||||
auto vnpool = Nebula::instance().get_vnpool();
|
||||
|
||||
int vn_id = msg->oid();
|
||||
auto vn = vnpool->get(vn_id);
|
||||
|
||||
if (!vn)
|
||||
{
|
||||
NebulaLog::error("IPM", "Received VNET_CREATE response for non-existing "
|
||||
"VNET " + to_string(vn_id));
|
||||
return;
|
||||
}
|
||||
|
||||
if (vn->get_state() != VirtualNetwork::LOCK_CREATE)
|
||||
{
|
||||
NebulaLog::error("IPM", "Received VNET_CREATE but VNET " + to_string(vn_id)
|
||||
+ " is in wrong state " + VirtualNetwork::state_to_str(vn->get_state()));
|
||||
return;
|
||||
}
|
||||
|
||||
std::string info = msg->payload64();
|
||||
|
||||
if (msg->status() == "SUCCESS")
|
||||
{
|
||||
string error_str;
|
||||
|
||||
if (!info.empty())
|
||||
{
|
||||
if ( vn->append_template(info, false, error_str) != 0 )
|
||||
{
|
||||
vn->set_state(VirtualNetwork::ERROR);
|
||||
|
||||
vn->set_template_error_message(error_str);
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
vn->set_state(VirtualNetwork::READY);
|
||||
|
||||
vn->clear_template_error_message();
|
||||
|
||||
// Get the Address Ranges
|
||||
vector<VectorAttribute *> ars;
|
||||
|
||||
int num_ars = vn->remove_template_attribute("AR", ars);
|
||||
int rc = vn->add_var(ars, error_str);
|
||||
|
||||
for (int i=0; i < num_ars; i++)
|
||||
{
|
||||
delete ars[i];
|
||||
}
|
||||
|
||||
if (rc != 0)
|
||||
{
|
||||
vn->set_state(VirtualNetwork::ERROR);
|
||||
|
||||
vn->set_template_error_message(error_str);
|
||||
|
||||
NebulaLog::error("IPM", "Error creating address range for VNET "
|
||||
+ to_string(vn->get_oid()) + ": " + error_str);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
vn->set_state(VirtualNetwork::ERROR);
|
||||
|
||||
vn->set_template_error_message(info);
|
||||
|
||||
NebulaLog::error("IPM", "VNET " + to_string(vn_id) +
|
||||
", vnet_create failed: " + info);
|
||||
}
|
||||
|
||||
vnpool->update(vn.get());
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void IPAMManager::_vnet_delete(unique_ptr<ipam_msg_t> msg)
|
||||
{
|
||||
ostringstream os;
|
||||
|
||||
os << "Message received: ";
|
||||
msg->write_to(os);
|
||||
|
||||
NebulaLog::debug("IPM", os.str());
|
||||
|
||||
auto vnpool = Nebula::instance().get_vnpool();
|
||||
|
||||
auto oid = msg->oid();
|
||||
auto vn = vnpool->get(oid);
|
||||
|
||||
if (!vn)
|
||||
{
|
||||
NebulaLog::error("IPM", "Received VNET_DELETE response for non-existing "
|
||||
"VNET " + to_string(oid));
|
||||
return;
|
||||
}
|
||||
|
||||
if (vn->get_state() != VirtualNetwork::LOCK_DELETE)
|
||||
{
|
||||
NebulaLog::error("IPM", "Received VNET_DELETE but VNET " + to_string(oid)
|
||||
+ " is in wrong state " + VirtualNetwork::state_to_str(vn->get_state()));
|
||||
return;
|
||||
}
|
||||
|
||||
if (msg->status() == "SUCCESS")
|
||||
{
|
||||
vnpool->delete_success(move(vn));
|
||||
}
|
||||
else
|
||||
{
|
||||
std::string info = msg->payload64();
|
||||
|
||||
vn->set_state(VirtualNetwork::ERROR);
|
||||
vn->set_template_error_message(info);
|
||||
|
||||
vnpool->update(vn.get());
|
||||
|
||||
NebulaLog::error("IPM", "VNET " + to_string(oid) +
|
||||
", vnet_delete failed: " + info);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void IPAMManager::_log(unique_ptr<ipam_msg_t> msg)
|
||||
{
|
||||
NebulaLog::log("IPM", log_type(msg->status()[0]), msg->payload());
|
||||
|
@ -64,7 +64,9 @@ class IPAMDriver < OpenNebulaDriver
|
||||
:unregister_address_range => "UNREGISTER_ADDRESS_RANGE",
|
||||
:allocate_address => "ALLOCATE_ADDRESS",
|
||||
:get_address => "GET_ADDRESS",
|
||||
:free_address => "FREE_ADDRESS"
|
||||
:free_address => "FREE_ADDRESS",
|
||||
:vnet_create => "VNET_CREATE",
|
||||
:vnet_delete => "VNET_DELETE"
|
||||
}
|
||||
|
||||
# Init the driver
|
||||
@ -78,7 +80,9 @@ class IPAMDriver < OpenNebulaDriver
|
||||
ACTION[:unregister_address_range] => nil,
|
||||
ACTION[:allocate_address] => nil,
|
||||
ACTION[:get_address] => nil,
|
||||
ACTION[:free_address] => nil
|
||||
ACTION[:free_address] => nil,
|
||||
ACTION[:vnet_create] => nil,
|
||||
ACTION[:vnet_delete] => nil
|
||||
}
|
||||
}.merge!(options)
|
||||
|
||||
@ -106,6 +110,10 @@ class IPAMDriver < OpenNebulaDriver
|
||||
register_action(ACTION[:get_address].to_sym, method("get_address"))
|
||||
|
||||
register_action(ACTION[:free_address].to_sym, method("free_address"))
|
||||
|
||||
register_action(ACTION[:vnet_create].to_sym, method("vnet_create"))
|
||||
|
||||
register_action(ACTION[:vnet_delete].to_sym, method("vnet_delete"))
|
||||
end
|
||||
|
||||
def register_address_range(id, drv_message)
|
||||
@ -128,6 +136,14 @@ class IPAMDriver < OpenNebulaDriver
|
||||
do_ipam_action(id, :free_address, drv_message)
|
||||
end
|
||||
|
||||
def vnet_create(id, drv_message)
|
||||
do_vnet_action(id, :vnet_create, drv_message)
|
||||
end
|
||||
|
||||
def vnet_delete(id, drv_message)
|
||||
do_vnet_action(id, :vnet_delete, drv_message)
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def do_ipam_action(id, action, arguments)
|
||||
@ -135,7 +151,6 @@ class IPAMDriver < OpenNebulaDriver
|
||||
message = Base64.decode64(arguments)
|
||||
xml_doc = REXML::Document.new(message)
|
||||
|
||||
xml_doc.root
|
||||
ipam = xml_doc.elements['IPAM_DRIVER_ACTION_DATA/AR/IPAM_MAD'].text.strip
|
||||
raise if ipam.empty?
|
||||
rescue
|
||||
@ -168,6 +183,37 @@ class IPAMDriver < OpenNebulaDriver
|
||||
return false
|
||||
end
|
||||
end
|
||||
|
||||
def do_vnet_action(id, action, arguments)
|
||||
begin
|
||||
message = Base64.decode64(arguments)
|
||||
xml_doc = REXML::Document.new(message)
|
||||
|
||||
xml_doc.root
|
||||
vn_mad = xml_doc.elements['VNET/VN_MAD'].text.strip
|
||||
raise if vn_mad.empty?
|
||||
rescue
|
||||
send_message(ACTION[action], RESULT[:failure], id,
|
||||
"Cannot perform #{action}, cannot find VN driver")
|
||||
return
|
||||
end
|
||||
|
||||
#return if not is_available?(vn_mad, id, action)
|
||||
|
||||
path = File.join(@local_scripts_path, '../vnm/')
|
||||
path = File.join(path, vn_mad)
|
||||
cmd = File.join(path, ACTION[action].downcase)
|
||||
cmd << " " << id
|
||||
|
||||
rc = LocalCommand.run(cmd, log_method(id), arguments)
|
||||
|
||||
result, info = get_info_from_execution(rc)
|
||||
|
||||
info = Base64::encode64(info).strip.delete("\n")
|
||||
|
||||
send_message(ACTION[action], result, id, info)
|
||||
end
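do_vnet_action resolves the network driver from VN_MAD and executes the matching vnet_create or vnet_delete script under ../vnm/<driver>/, passing the VNET ID as an argument. A purely hypothetical placeholder for such a script is sketched below; it assumes the Base64 driver message arrives on standard input, and the drivers shipped with this change may differ:

    #!/usr/bin/env ruby
    # Hypothetical <vnm driver>/vnet_create placeholder
    # ARGV[0] is the Virtual Network ID; the driver message is assumed on stdin

    require 'base64'
    require 'rexml/document'

    vnet_id  = ARGV[0]
    vnet_xml = REXML::Document.new(Base64.decode64(STDIN.read))
    bridge   = vnet_xml.elements['VNET/BRIDGE']&.text # hypothetical lookup

    # ... driver specific provisioning would go here ...

    # On success, anything written to stdout is sent back to the core and is
    # expected to be appended to the Virtual Network template
    puts "NOTE = \"created by placeholder driver for VNET #{vnet_id} on #{bridge}\""
    exit 0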
|
||||
|
||||
end
|
||||
|
||||
################################################################################
|
||||
|
@ -18,6 +18,7 @@ package virtualnetwork
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
|
||||
"github.com/OpenNebula/one/src/oca/go/src/goca/schemas/shared"
|
||||
)
|
||||
@ -42,6 +43,7 @@ type VirtualNetwork struct {
|
||||
Bridge string `xml:"BRIDGE,omitempty"`
|
||||
BridgeType string `xml:"BRIDGE_TYPE,omitempty"` // minOccurs=0
|
||||
ParentNetworkID string `xml:"PARENT_NETWORK_ID,omitempty"`
|
||||
StateRaw int `xml:"STATE,omitempty"`
|
||||
VNMad string `xml:"VN_MAD"`
|
||||
PhyDev string `xml:"PHYDEV,omitempty"`
|
||||
VlanID string `xml:"VLAN_ID,omitempty"` // minOccurs=0
|
||||
@ -93,3 +95,63 @@ type Lease struct {
|
||||
VNet int `xml:"VNET,omitempty"`
|
||||
VRouter int `xml:"VROUTER,omitempty"`
|
||||
}
|
||||
|
||||
// State is the state of the Virtual Network
|
||||
type State int
|
||||
|
||||
const (
|
||||
// Init Virtual Network is being initialized
|
||||
Init State = iota
|
||||
|
||||
// Ready Virtual Network is ready to be used
|
||||
Ready
|
||||
|
||||
// LockCreate Virtual Network driver vnet_create pending
|
||||
LockCreate
|
||||
|
||||
// LockDelete Virtual Network driver vnet_delete pending
|
||||
LockDelete
|
||||
|
||||
// Done Virtual Network finalized
|
||||
Done
|
||||
|
||||
// Error Virtual Network is in error state
|
||||
Error
|
||||
)
|
||||
|
||||
func (s State) isValid() bool {
|
||||
if s >= Init && s <= Error {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// String returns the string version of the State
|
||||
func (s State) String() string {
|
||||
return [...]string{
|
||||
"INIT",
|
||||
"READY",
|
||||
"LOCK_CREATE",
|
||||
"LOCK_DELETE",
|
||||
"DONE",
|
||||
"ERROR",
|
||||
}[s]
|
||||
}
|
||||
|
||||
// State looks up the state of the Virtual Network and returns the State
|
||||
func (vn *VirtualNetwork) State() (State, error) {
|
||||
state := State(vn.StateRaw)
|
||||
if !state.isValid() {
|
||||
return -1, fmt.Errorf("Virtual Network State: this state value is not currently handled: %d\n", vn.StateRaw)
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// StateString returns the state in string format
|
||||
func (vn *VirtualNetwork) StateString() (string, error) {
|
||||
state := State(vn.StateRaw)
|
||||
if !state.isValid() {
|
||||
return "", fmt.Errorf("Virtual Network State: this state value is not currently handled: %d\n", vn.StateRaw)
|
||||
}
|
||||
return state.String(), nil
|
||||
}
|
@ -230,3 +230,25 @@ func (vc *VirtualNetworkController) Unlock() error {
|
||||
_, err := vc.c.Client.Call("one.vn.unlock", vc.ID)
|
||||
return err
|
||||
}
|
||||
|
||||
// Recover recovers a stuck Virtual Network
|
||||
// * op: (0) failure, (1) success, (2) delete
|
||||
func (vc *VirtualNetworkController) Recover(op int) error {
|
||||
_, err := vc.c.Client.Call("one.vn.recover", vc.ID, op)
|
||||
return err
|
||||
}
|
||||
|
||||
// RecoverFailure forces a failure
|
||||
func (vc *VirtualNetworkController) RecoverFailure() error {
|
||||
return vc.Recover(0)
|
||||
}
|
||||
|
||||
// RecoverSuccess forces a success
|
||||
func (vc *VirtualNetworkController) RecoverSuccess() error {
|
||||
return vc.Recover(1)
|
||||
}
|
||||
|
||||
// RecoverDelete deletes the network, calling the driver cleanup action
|
||||
func (vc *VirtualNetworkController) RecoverDelete() error {
|
||||
return vc.Recover(2)
|
||||
}
|
||||
|
@ -49,6 +49,20 @@ func createVirtualNetwork(t *testing.T) (*vn.VirtualNetwork, int) {
|
||||
return vnet, id
|
||||
}
|
||||
|
||||
func WaitState(t *testing.T, vnetC *VirtualNetworkController, state string) {
|
||||
wait := WaitResource(func() bool {
|
||||
vnet, _ := vnetC.Info(false)
|
||||
|
||||
st, _ := vnet.StateString()
|
||||
return st == state
|
||||
})
|
||||
|
||||
if !wait {
|
||||
t.Error("Virtual Network should be in ERROR state")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestVirtualNetwork(t *testing.T) {
|
||||
var err error
|
||||
|
||||
@ -129,7 +143,7 @@ func TestVirtualNetwork(t *testing.T) {
|
||||
gname = vnet.GName
|
||||
|
||||
if "serveradmin" != uname {
|
||||
t.Error("Virtual network owner is not oenadmin")
|
||||
t.Error("Virtual network owner is not oneadmin")
|
||||
}
|
||||
|
||||
// Compare with caller group
|
||||
@ -143,3 +157,33 @@ func TestVirtualNetwork(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVirtualNetworkRecover(t *testing.T) {
|
||||
var err error
|
||||
|
||||
vnTpl := "NAME = vn_invalid_ar\n" +
|
||||
"BRIDGE = vbr0\n" +
|
||||
"VN_MAD = dummy\n" +
|
||||
"NETWORK_ADDRESS = 192.168.0.0\n"+
|
||||
"AR = [ TYPE = IP4, IP = 192.168.0.1, SIZE = -1 ]\n"
|
||||
|
||||
id, err := testCtrl.VirtualNetworks().Create(vnTpl, -1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Get Virtual Network by ID
|
||||
vnetC := testCtrl.VirtualNetwork(id)
|
||||
|
||||
WaitState(t, vnetC, "ERROR")
|
||||
|
||||
vnetC.RecoverSuccess()
|
||||
|
||||
WaitState(t, vnetC, "READY")
|
||||
|
||||
// Delete the Virtual Network
|
||||
err = vnetC.Delete()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
@ -44,6 +44,13 @@ public class VirtualNetwork extends PoolElement{
|
||||
private static final String FREEAR = METHOD_PREFIX + "free_ar";
|
||||
private static final String LOCK = METHOD_PREFIX + "lock";
|
||||
private static final String UNLOCK = METHOD_PREFIX + "unlock";
|
||||
private static final String RECOVER = METHOD_PREFIX + "recover";
|
||||
|
||||
private static final String[] VN_STATES =
|
||||
{"INIT", "READY", "LOCK_CREATE", "LOCK_DELETE", "DONE", "ERROR"};
|
||||
|
||||
private static final String[] SHORT_VN_STATES =
|
||||
{"init", "rdy", "lock", "lock", "done", "err"};
|
||||
|
||||
/**
|
||||
* Creates a new virtual network representation.
|
||||
@ -403,6 +410,20 @@ public class VirtualNetwork extends PoolElement{
|
||||
return client.call(UNLOCK, id);
|
||||
}
|
||||
|
||||
/**
|
||||
* Recovers a stuck Virtual Network.
|
||||
*
|
||||
* @param client XML-RPC Client.
|
||||
* @param id The Virtual Network ID of the target instance.
|
||||
* @param operation to recover the Virtual Network: (0) failure, (1) success,
|
||||
* (2) delete
|
||||
* @return If an error occurs the error message contains the reason.
|
||||
*/
|
||||
public static OneResponse recover(Client client, int id, int operation)
|
||||
{
|
||||
return client.call(RECOVER, id, operation);
|
||||
}
|
||||
|
||||
// =================================
|
||||
// Instanced object XML-RPC methods
|
||||
// =================================
|
||||
@ -741,8 +762,46 @@ public class VirtualNetwork extends PoolElement{
|
||||
return unlock(client, id);
|
||||
}
|
||||
|
||||
/**
|
||||
* Recovers a stuck Virtual Network.
|
||||
*
|
||||
* @param operation to recover the Virtual Network: (0) failure, (1) success,
|
||||
* (2) delete
|
||||
* @return If an error occurs the error message contains the reason.
|
||||
*/
|
||||
public OneResponse recover(int operation)
|
||||
{
|
||||
return recover(client, id, operation);
|
||||
}
|
||||
|
||||
// =================================
|
||||
// Helpers
|
||||
// =================================
|
||||
|
||||
/**
|
||||
* Returns the state of the Virtual Network.
|
||||
* <br>
|
||||
* The method {@link VirtualNetwork#info()} must be called before.
|
||||
*
|
||||
* @return The state of the Virtual Network.
|
||||
*/
|
||||
public String stateString()
|
||||
{
|
||||
int state = state();
|
||||
return state != -1 ? VN_STATES[state] : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the short string state of the Virtual Network.
|
||||
* <br>
|
||||
* The method {@link VirtualNetwork#info()} must be called before.
|
||||
*
|
||||
* @return The short string state of the Virtual Network.
|
||||
*/
|
||||
public String shortStateStr()
|
||||
{
|
||||
int state = state();
|
||||
return state != -1 ? SHORT_VN_STATES[state] : null;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -45,17 +45,21 @@ module Role
|
||||
]
|
||||
|
||||
STATE = {
|
||||
'PENDING' => 0,
|
||||
'DEPLOYING' => 1,
|
||||
'RUNNING' => 2,
|
||||
'UNDEPLOYING' => 3,
|
||||
'WARNING' => 4,
|
||||
'DONE' => 5,
|
||||
'FAILED_UNDEPLOYING' => 6,
|
||||
'FAILED_DEPLOYING' => 7,
|
||||
'SCALING' => 8,
|
||||
'FAILED_SCALING' => 9,
|
||||
'COOLDOWN' => 10
|
||||
'PENDING' => 0,
|
||||
'DEPLOYING' => 1,
|
||||
'RUNNING' => 2,
|
||||
'UNDEPLOYING' => 3,
|
||||
'WARNING' => 4,
|
||||
'DONE' => 5,
|
||||
'FAILED_UNDEPLOYING' => 6,
|
||||
'FAILED_DEPLOYING' => 7,
|
||||
'SCALING' => 8,
|
||||
'FAILED_SCALING' => 9,
|
||||
'COOLDOWN' => 10,
|
||||
'DEPLOYING_NETS' => 11,
|
||||
'UNDEPLOYING_NETS' => 12,
|
||||
'FAILED_DEPLOYING_NETS' => 13,
|
||||
'FAILED_UNDEPLOYING_NETS' => 14
|
||||
}
|
||||
|
||||
STATE_STR = [
|
||||
@ -69,7 +73,11 @@ module Role
|
||||
'FAILED_DEPLOYING',
|
||||
'SCALING',
|
||||
'FAILED_SCALING',
|
||||
'COOLDOWN'
|
||||
'COOLDOWN',
|
||||
'DEPLOYING_NETS',
|
||||
'UNDEPLOYING_NETS',
|
||||
'FAILED_DEPLOYING_NETS',
|
||||
'FAILED_UNDEPLOYING_NETS'
|
||||
]
|
||||
|
||||
# Returns the string representation of the role state
|
||||
@ -83,17 +91,21 @@ end
|
||||
module Service
|
||||
|
||||
STATE = {
|
||||
'PENDING' => 0,
|
||||
'DEPLOYING' => 1,
|
||||
'RUNNING' => 2,
|
||||
'UNDEPLOYING' => 3,
|
||||
'WARNING' => 4,
|
||||
'DONE' => 5,
|
||||
'FAILED_UNDEPLOYING' => 6,
|
||||
'FAILED_DEPLOYING' => 7,
|
||||
'SCALING' => 8,
|
||||
'FAILED_SCALING' => 9,
|
||||
'COOLDOWN' => 10
|
||||
'PENDING' => 0,
|
||||
'DEPLOYING' => 1,
|
||||
'RUNNING' => 2,
|
||||
'UNDEPLOYING' => 3,
|
||||
'WARNING' => 4,
|
||||
'DONE' => 5,
|
||||
'FAILED_UNDEPLOYING' => 6,
|
||||
'FAILED_DEPLOYING' => 7,
|
||||
'SCALING' => 8,
|
||||
'FAILED_SCALING' => 9,
|
||||
'COOLDOWN' => 10,
|
||||
'DEPLOYING_NETS' => 11,
|
||||
'UNDEPLOYING_NETS' => 12,
|
||||
'FAILED_DEPLOYING_NETS' => 13,
|
||||
'FAILED_UNDEPLOYING_NETS' => 14
|
||||
}
|
||||
|
||||
STATE_STR = [
|
||||
@ -107,7 +119,11 @@ module Service
|
||||
'FAILED_DEPLOYING',
|
||||
'SCALING',
|
||||
'FAILED_SCALING',
|
||||
'COOLDOWN'
|
||||
'COOLDOWN',
|
||||
'DEPLOYING_NETS',
|
||||
'UNDEPLOYING_NETS',
|
||||
'FAILED_DEPLOYING_NETS',
|
||||
'FAILED_UNDEPLOYING_NETS'
|
||||
]
|
||||
|
||||
# Returns the string representation of the service state
|
||||
|
@ -40,7 +40,19 @@ module OpenNebula
|
||||
:reserve => "vn.reserve",
|
||||
:free_ar => "vn.free_ar",
|
||||
:lock => "vn.lock",
|
||||
:unlock => "vn.unlock"
|
||||
:unlock => "vn.unlock",
|
||||
:recover => "vn.recover"
|
||||
}
|
||||
|
||||
VN_STATES=%w{INIT READY LOCK_CREATE LOCK_DELETE DONE ERROR}
|
||||
|
||||
SHORT_VN_STATES={
|
||||
"INIT" => "init",
|
||||
"READY" => "rdy",
|
||||
"LOCK_CREATE" => "lock",
|
||||
"LOCK_DELETE" => "lock",
|
||||
"DONE" => "done",
|
||||
"ERROR" => "err"
|
||||
}
|
||||
|
||||
# Creates a VirtualNetwork description with just its identifier
|
||||
@ -311,6 +323,16 @@ module OpenNebula
|
||||
return call(VN_METHODS[:rename], @pe_id, name)
|
||||
end
|
||||
|
||||
# Recovers a stuck Virtual Network
|
||||
#
|
||||
# @param result [Integer] Recover with failure (0), success (1),
|
||||
# delete (2)
|
||||
# @return [nil, OpenNebula::Error] nil in case of success, Error
|
||||
# otherwise
|
||||
def recover(result)
|
||||
return call(VN_METHODS[:recover], @pe_id, result)
|
||||
end
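# Usage sketch (illustrative only, not part of the bindings): recover a
# Virtual Network stuck in LOCK_CREATE, LOCK_DELETE or ERROR. The id (0)
# and the operation code (1, success) are example values.
#
#   vnet = OpenNebula::VirtualNetwork.new_with_id(0, OpenNebula::Client.new)
#   rc   = vnet.recover(1) # 0: failure, 1: success, 2: delete
#   STDERR.puts rc.message if OpenNebula.is_error?(rc)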
|
||||
|
||||
#######################################################################
|
||||
# Helpers to get VirtualNetwork information
|
||||
#######################################################################
|
||||
@ -340,6 +362,21 @@ module OpenNebula
|
||||
return array
|
||||
end
|
||||
|
||||
# Returns the state of the Virtual Network (numeric value)
|
||||
def state
|
||||
self['STATE'].to_i
|
||||
end
|
||||
|
||||
# Returns the state of the Virtual Network (string value)
|
||||
def state_str
|
||||
VN_STATES[state]
|
||||
end
|
||||
|
||||
# Returns the state of the Virtual Network (short string value)
|
||||
def short_state_str
|
||||
SHORT_VN_STATES[state_str]
|
||||
end
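# Illustrative example of the helpers above, assuming +vnet+ was built with
# new_with_id and a valid Client; info must be called first.
#
#   vnet.info
#   puts "#{vnet.state} #{vnet.state_str} #{vnet.short_state_str}"
#   # e.g. "1 READY rdy"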
|
||||
|
||||
private
|
||||
def set_publish(published)
|
||||
group_u = published ? 1 : 0
|
||||
|
@ -86,6 +86,8 @@ const EString<IPAMManagerMessages> ipam_msg_t::_type_str({
|
||||
{"GET_ADDRESS", IPAMManagerMessages::GET_ADDRESS},
|
||||
{"ALLOCATE_ADDRESS", IPAMManagerMessages::ALLOCATE_ADDRESS},
|
||||
{"FREE_ADDRESS", IPAMManagerMessages::FREE_ADDRESS},
|
||||
{"VNET_CREATE", IPAMManagerMessages::VNET_CREATE},
|
||||
{"VNET_DELETE", IPAMManagerMessages::VNET_DELETE},
|
||||
{"LOG", IPAMManagerMessages::LOG},
|
||||
});
|
||||
|
||||
|
@ -360,6 +360,7 @@ void RequestManager::register_xml_methods()
|
||||
xmlrpc_c::methodPtr vn_hold(new VirtualNetworkHold());
|
||||
xmlrpc_c::methodPtr vn_release(new VirtualNetworkRelease());
|
||||
xmlrpc_c::methodPtr vn_reserve(new VirtualNetworkReserve());
|
||||
xmlrpc_c::methodPtr vn_recover(new VirtualNetworkRecover());
|
||||
|
||||
// Update Template Methods
|
||||
xmlrpc_c::methodPtr image_update(new ImageUpdateTemplate());
|
||||
@ -697,6 +698,7 @@ void RequestManager::register_xml_methods()
|
||||
RequestManagerRegistry.addMethod("one.vn.rename", vn_rename);
|
||||
RequestManagerRegistry.addMethod("one.vn.lock", vn_lock);
|
||||
RequestManagerRegistry.addMethod("one.vn.unlock", vn_unlock);
|
||||
RequestManagerRegistry.addMethod("one.vn.recover", vn_recover);
|
||||
|
||||
RequestManagerRegistry.addMethod("one.vnpool.info", vnpool_info);
|
||||
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include "FedReplicaManager.h"
|
||||
#include "ImageManager.h"
|
||||
#include "InformationManager.h"
|
||||
#include "IPAMManager.h"
|
||||
#include "MarketPlaceManager.h"
|
||||
|
||||
#include "ClusterPool.h"
|
||||
@ -323,13 +324,23 @@ int VirtualNetworkDelete::drop(std::unique_ptr<PoolObjectSQL> object, bool r, Re
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch(vnet->get_state())
|
||||
{
|
||||
case VirtualNetwork::LOCK_CREATE:
|
||||
case VirtualNetwork::LOCK_DELETE:
|
||||
case VirtualNetwork::DONE:
|
||||
att.resp_msg = "Can not remove a Virtual Network, wrong state "
|
||||
+ vnet->state_to_str(vnet->get_state());
|
||||
return -1;
|
||||
case VirtualNetwork::INIT:
|
||||
case VirtualNetwork::READY:
|
||||
case VirtualNetwork::ERROR:
|
||||
break;
|
||||
}
|
||||
|
||||
Nebula& nd = Nebula::instance();
|
||||
VirtualNetworkPool * vnpool = nd.get_vnpool();
|
||||
|
||||
int pvid = vnet->get_parent();
|
||||
int uid = vnet->get_uid();
|
||||
int gid = vnet->get_gid();
|
||||
|
||||
// Delete all address ranges to call IPAM if needed
|
||||
string error_msg;
|
||||
|
||||
@ -342,63 +353,15 @@ int VirtualNetworkDelete::drop(std::unique_ptr<PoolObjectSQL> object, bool r, Re
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = RequestManagerDelete::drop(std::move(object), false, att);
|
||||
vnet->set_state(VirtualNetwork::LOCK_DELETE);
|
||||
|
||||
if (pvid != -1)
|
||||
{
|
||||
int freed = 0;
|
||||
if (auto vnet = pool->get<VirtualNetwork>(pvid))
|
||||
{
|
||||
freed = vnet->free_addr_by_owner(PoolObjectSQL::NET, oid);
|
||||
vnpool->update(vnet);
|
||||
|
||||
pool->update(vnet.get());
|
||||
}
|
||||
else
|
||||
{
|
||||
return rc;
|
||||
}
|
||||
string xml64;
|
||||
vnet->to_xml64(xml64);
|
||||
|
||||
if (freed > 0)
|
||||
{
|
||||
ostringstream oss;
|
||||
Template tmpl;
|
||||
|
||||
for (int i= 0 ; i < freed ; i++)
|
||||
{
|
||||
oss << " NIC = [ NETWORK_ID = " << pvid << " ]" << endl;
|
||||
}
|
||||
|
||||
tmpl.parse_str_or_xml(oss.str(), att.resp_msg);
|
||||
|
||||
Quotas::quota_del(Quotas::NETWORK, uid, gid, &tmpl);
|
||||
}
|
||||
}
|
||||
|
||||
if (rc != 0)
|
||||
{
|
||||
return rc;
|
||||
}
|
||||
|
||||
// Remove virtual network from VDC
|
||||
int zone_id = nd.get_zone_id();
|
||||
|
||||
VdcPool * vdcpool = nd.get_vdcpool();
|
||||
|
||||
std::string error;
|
||||
std::vector<int> vdcs;
|
||||
|
||||
vdcpool->list(vdcs);
|
||||
|
||||
for (int vdcId : vdcs)
|
||||
{
|
||||
if ( auto vdc = vdcpool->get(vdcId) )
|
||||
{
|
||||
if ( vdc->del_vnet(zone_id, oid, error) == 0 )
|
||||
{
|
||||
vdcpool->update(vdc.get());
|
||||
}
|
||||
}
|
||||
}
|
||||
auto ipamm = nd.get_ipamm();
|
||||
ipamm->trigger_vnet_delete(oid, xml64);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include "RequestManagerVirtualNetwork.h"
|
||||
#include "VirtualNetworkTemplate.h"
|
||||
#include "ClusterPool.h"
|
||||
#include "IPAMManager.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
@ -56,6 +57,15 @@ void RequestManagerVirtualNetwork::
|
||||
return;
|
||||
}
|
||||
|
||||
if (vn->get_state() != VirtualNetwork::READY)
|
||||
{
|
||||
att.resp_msg = "Could not execute " + method_name +
|
||||
"Virtual Network is in wrong state: "
|
||||
+ vn->state_to_str(vn->get_state());
|
||||
failure_response(ACTION, att);
|
||||
return;
|
||||
}
|
||||
|
||||
rc = leases_action(vn.get(), &tmpl, att, att.resp_msg);
|
||||
|
||||
if ( rc < 0 )
|
||||
@ -110,6 +120,15 @@ void VirtualNetworkRmAddressRange::
|
||||
// -------------------------------------------------------------------------
|
||||
if ( auto vn = pool->get<VirtualNetwork>(id) )
|
||||
{
|
||||
if (vn->get_state() != VirtualNetwork::READY)
|
||||
{
|
||||
att.resp_msg = "Could not remove Adress Range, "
|
||||
"Virtual Network is in wrong state: "
|
||||
+ vn->state_to_str(vn->get_state());
|
||||
failure_response(ACTION, att);
|
||||
return;
|
||||
}
|
||||
|
||||
parent = vn->get_parent();
|
||||
parent_ar = vn->get_ar_parent(ar_id);
|
||||
|
||||
@ -482,3 +501,134 @@ void VirtualNetworkReserve::request_execute(
|
||||
|
||||
success_response(rid, att);
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
void VirtualNetworkRecover::request_execute(
|
||||
xmlrpc_c::paramList const& paramList, RequestAttributes& att)
|
||||
{
|
||||
int id = xmlrpc_c::value_int(paramList.getInt(1));
|
||||
int op = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
|
||||
Nebula& nd = Nebula::instance();
|
||||
auto ipamm = nd.get_ipamm();
|
||||
auto vnpool = static_cast<VirtualNetworkPool*>(pool);
|
||||
|
||||
ostringstream oss;
|
||||
|
||||
if (op < 0 || op > 2)
|
||||
{
|
||||
att.resp_msg = "Wrong recovery operation code";
|
||||
failure_response(ACTION, att);
return;
|
||||
}
|
||||
|
||||
if ( basic_authorization(id, att) == false )
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
auto vn = pool->get<VirtualNetwork>(id);
|
||||
|
||||
if ( vn == nullptr )
|
||||
{
|
||||
att.resp_id = id;
|
||||
failure_response(NO_EXISTS, att);
|
||||
return;
|
||||
}
|
||||
|
||||
auto state = vn->get_state();
|
||||
|
||||
if (state == VirtualNetwork::READY)
|
||||
{
|
||||
att.resp_msg = "Unable to recover from " + vn->state_to_str(state);
|
||||
failure_response(INTERNAL, att);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (op)
|
||||
{
|
||||
case 0: //recover-failure
|
||||
switch(state)
|
||||
{
|
||||
case VirtualNetwork::INIT:
|
||||
case VirtualNetwork::LOCK_CREATE:
|
||||
case VirtualNetwork::LOCK_DELETE:
|
||||
vn->set_state(VirtualNetwork::ERROR);
|
||||
vn->set_template_error_message("Failure forced by user");
|
||||
pool->update(vn.get());
|
||||
break;
|
||||
|
||||
case VirtualNetwork::DONE:
|
||||
case VirtualNetwork::READY:
|
||||
case VirtualNetwork::ERROR:
|
||||
oss << "Could not perform 'recover failure' on VN " << id
|
||||
<< ", wrong state " << vn->state_to_str(state) << ".";
|
||||
|
||||
att.resp_msg = oss.str();
|
||||
failure_response(INTERNAL, att);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
|
||||
case 1: //recover-success
|
||||
vn->clear_template_error_message();
|
||||
|
||||
switch(state)
|
||||
{
|
||||
case VirtualNetwork::INIT:
|
||||
case VirtualNetwork::LOCK_CREATE:
|
||||
case VirtualNetwork::ERROR:
|
||||
vn->set_state(VirtualNetwork::READY);
|
||||
pool->update(vn.get());
|
||||
break;
|
||||
|
||||
case VirtualNetwork::LOCK_DELETE:
|
||||
vn->set_state(VirtualNetwork::DONE);
|
||||
vnpool->delete_success(move(vn));
|
||||
break;
|
||||
|
||||
case VirtualNetwork::DONE:
|
||||
case VirtualNetwork::READY:
|
||||
oss << "Could not perform 'recover success' on VN " << id
|
||||
<< ", wrong state " << vn->state_to_str(state) << ".";
|
||||
|
||||
att.resp_msg = oss.str();
|
||||
failure_response(INTERNAL, att);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
|
||||
case 2: //delete
|
||||
switch(state)
|
||||
{
|
||||
case VirtualNetwork::INIT:
|
||||
case VirtualNetwork::LOCK_CREATE:
|
||||
case VirtualNetwork::LOCK_DELETE:
|
||||
case VirtualNetwork::ERROR:
|
||||
{
|
||||
string xml64;
|
||||
vn->to_xml64(xml64);
|
||||
|
||||
vnpool->delete_success(move(vn));
|
||||
|
||||
ipamm->trigger_vnet_delete(id, xml64);
|
||||
}
|
||||
break;
|
||||
|
||||
case VirtualNetwork::DONE:
|
||||
case VirtualNetwork::READY:
|
||||
oss << "Could not perform 'recover delete' on VN " << id
|
||||
<< ", wrong state " << vn->state_to_str(state) << ".";
|
||||
|
||||
att.resp_msg = oss.str();
|
||||
failure_response(INTERNAL, att);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
success_response(id, att);
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -19,6 +19,26 @@ define(function(require) {
|
||||
|
||||
var RESOURCE = "VNET";
|
||||
|
||||
var STATES = {
|
||||
INIT : 0,
|
||||
READY : 1,
|
||||
LOCK_CREATE : 2,
|
||||
LOCK_DELETE : 3,
|
||||
LOCKED : 4,
|
||||
DONE : 5,
|
||||
ERROR : 6
|
||||
};
|
||||
|
||||
var STATES_STR = [
|
||||
"INIT",
|
||||
"READY",
|
||||
"LOCK_CREATE",
|
||||
"LOCK_DELETE",
|
||||
"LOCKED",
|
||||
"DONE",
|
||||
"ERROR"
|
||||
];
|
||||
|
||||
var Network = {
|
||||
"resource": RESOURCE,
|
||||
"create": function(params) {
|
||||
@ -97,6 +117,10 @@ define(function(require) {
|
||||
"unlock" : function(params) {
|
||||
OpenNebulaAction.simple_action(params, RESOURCE, "unlock");
|
||||
},
|
||||
"stateStr": function(stateId) {
|
||||
return STATES_STR[stateId];
|
||||
},
|
||||
"STATES": STATES
|
||||
}
|
||||
|
||||
return Network;
|
||||
|
@ -36,21 +36,29 @@ define(function(require) {
|
||||
Locale.tr("FAILED_DEPLOYING"),
|
||||
Locale.tr("SCALING"),
|
||||
Locale.tr("FAILED_SCALING"),
|
||||
Locale.tr("COOLDOWN")
|
||||
Locale.tr("COOLDOWN"),
|
||||
Locale.tr("DEPLOYING_NETS"),
|
||||
Locale.tr("UNDEPLOYING_NETS"),
|
||||
Locale.tr("FAILED_DEPLOYING_NETS"),
|
||||
Locale.tr("FAILED_UNDEPLOYING_NETS")
|
||||
];
|
||||
|
||||
var STATES = {
|
||||
PENDING : 0,
|
||||
DEPLOYING : 1,
|
||||
RUNNING : 2,
|
||||
UNDEPLOYING : 3,
|
||||
WARNING : 4,
|
||||
DONE : 5,
|
||||
FAILED_UNDEPLOYING : 6,
|
||||
FAILED_DEPLOYING : 7,
|
||||
SCALING : 8,
|
||||
FAILED_SCALING : 9,
|
||||
COOLDOWN : 10
|
||||
PENDING : 0,
|
||||
DEPLOYING : 1,
|
||||
RUNNING : 2,
|
||||
UNDEPLOYING : 3,
|
||||
WARNING : 4,
|
||||
DONE : 5,
|
||||
FAILED_UNDEPLOYING : 6,
|
||||
FAILED_DEPLOYING : 7,
|
||||
SCALING : 8,
|
||||
FAILED_SCALING : 9,
|
||||
COOLDOWN : 10,
|
||||
DEPLOYING_NETS : 11,
|
||||
UNDEPLOYING_NETS : 12,
|
||||
FAILED_DEPLOYING_NETS : 13,
|
||||
FAILED_UNDEPLOYING_NETS : 14
|
||||
};
|
||||
|
||||
var Service = {
|
||||
|
@ -57,6 +57,18 @@ define(function(require) {
|
||||
STATE_ACTIONS[OpenNebulaFlow.STATES.COOLDOWN] =
|
||||
['Role.scale_dialog','Role.terminate', 'Role.terminate_hard'];
|
||||
|
||||
STATE_ACTIONS[OpenNebulaFlow.STATES.DEPLOYING_NETS] =
|
||||
['Role.scale_dialog','Role.terminate', 'Role.terminate_hard'];
|
||||
|
||||
STATE_ACTIONS[OpenNebulaFlow.STATES.UNDEPLOYING_NETS] =
|
||||
['Role.scale_dialog','Role.terminate', 'Role.terminate_hard'];
|
||||
|
||||
STATE_ACTIONS[OpenNebulaFlow.STATES.FAILED_DEPLOYING_NETS] =
|
||||
['Role.scale_dialog','Role.terminate', 'Role.terminate_hard'];
|
||||
|
||||
STATE_ACTIONS[OpenNebulaFlow.STATES.FAILED_UNDEPLOYING_NETS] =
|
||||
['Role.scale_dialog','Role.terminate', 'Role.terminate_hard'];
|
||||
|
||||
return {
|
||||
'disableStateButton': disableStateButton,
|
||||
'enableStateButton': enableStateButton,
|
||||
|
@ -687,6 +687,22 @@ define(function(require) {
|
||||
state_color = "off";
|
||||
state_str = Locale.tr("DONE");
|
||||
break;
|
||||
case OpenNebula.Service.STATES.DEPLOYING_NETS:
|
||||
state_color = "deploying";
|
||||
state_str = Locale.tr("DEPLOYING NETS");
|
||||
break;
|
||||
case OpenNebula.Service.STATES.UNDEPLOYING_NETS:
|
||||
state_color = "deploying";
|
||||
state_str = Locale.tr("UNDEPLOYING NETS");
|
||||
break;
|
||||
case OpenNebula.Service.STATES.FAILED_DEPLOYING_NETS:
|
||||
state_color = "error";
|
||||
state_str = Locale.tr("FAILED DEPLOYING NETS");
|
||||
break;
|
||||
case OpenNebula.Service.STATES.FAILED_UNDEPLOYING_NETS:
|
||||
state_color = "error";
|
||||
state_str = Locale.tr("FAILED UNDEPLOYING NETS");
|
||||
break;
|
||||
default:
|
||||
state_color = "powering_off";
|
||||
state_str = Locale.tr("UNKNOWN");
|
||||
|
@ -71,6 +71,7 @@ define(function(require) {
|
||||
Locale.tr("Name"),
|
||||
Locale.tr("Owner"),
|
||||
Locale.tr("Group"),
|
||||
Locale.tr("Status"),
|
||||
Locale.tr("Reservation"),
|
||||
Locale.tr("Cluster"),
|
||||
Locale.tr("Bridge"),
|
||||
@ -155,6 +156,7 @@ define(function(require) {
|
||||
element.NAME,
|
||||
element.UNAME,
|
||||
element.GNAME,
|
||||
OpenNebulaNetwork.stateStr(element.STATE),
|
||||
element.PARENT_NETWORK_ID.length ? Locale.tr("Yes") : Locale.tr("No"),
|
||||
clusters,
|
||||
element.BRIDGE,
|
||||
|
@ -110,6 +110,7 @@ define(function(require) {
|
||||
|
||||
return TemplateInfo({
|
||||
'element': this.element,
|
||||
'stateStr': OpenNebulaNetwork.stateStr(this.element.STATE),
|
||||
'renameTrHTML': renameTrHTML,
|
||||
'reservationTrHTML': reservationTrHTML,
|
||||
'permissionsTableHTML': permissionsTableHTML,
|
||||
|
@ -30,6 +30,11 @@
|
||||
</tr>
|
||||
{{{renameTrHTML}}}
|
||||
{{{reservationTrHTML}}}
|
||||
<tr>
|
||||
<td class="key_td">{{tr "State"}}</td>
|
||||
<td class="value_td">{{stateStr}}</td>
|
||||
<td></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td class="key_td">{{tr "VLAN ID"}}</td>
|
||||
<td class="value_td">{{valOrDefault element.VLAN_ID "--"}}</td>
|
||||
|
@ -72,7 +72,20 @@ define(function(require) {
|
||||
FAILED_DEPLOYING:"#ec5840",
|
||||
SCALING:"#4DBBD3",
|
||||
FAILED_SCALING:"#ec5840",
|
||||
COOLDOWN:"#4DBBD3"
|
||||
COOLDOWN:"#4DBBD3",
|
||||
DEPLOYING_NETS:"#4DBBD3",
|
||||
UNDEPLOYING_NETS:"#4DBBD3",
|
||||
FAILED_DEPLOYING_NETS:"#ec5840",
|
||||
FAILED_UNDEPLOYING_NETS:"#ec5840"
|
||||
},
|
||||
VNET:{
|
||||
INIT: "#4DBBD3",
|
||||
READY: "#3adb76",
|
||||
LOCK_CREATE: "lightsalmon",
|
||||
LOCK_DELETE: "lightsalmon",
|
||||
LOCKED: "lightsalmon",
|
||||
DONE: "#ec5840",
|
||||
ERROR: "#ec5840"
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -380,7 +380,7 @@ module VCenterDriver
|
||||
# rubocop:enable Layout/LineLength
|
||||
end
|
||||
end
|
||||
VCenterDriver::VcImporter.register_hooks
|
||||
|
||||
rescue Interrupt
|
||||
puts "\n"
|
||||
exit 0 # Ctrl+C
|
||||
@ -396,16 +396,6 @@ module VCenterDriver
|
||||
end
|
||||
end
|
||||
|
||||
def self.register_hooks
|
||||
hooks_path = HOOK_LOCATION + '/vcenter/templates'
|
||||
client = OpenNebula::Client.new
|
||||
hook = OpenNebula::Hook.new(OpenNebula::Hook.build_xml, client)
|
||||
hook_files = Dir["#{hooks_path}/*.tmpl"]
|
||||
hook_files.each do |hook_file|
|
||||
hook.allocate(File.open(hook_file).read)
|
||||
end
|
||||
end
|
||||
|
||||
def self.sanitize(text)
|
||||
bad_chars = ['|']
|
||||
bad_chars.each do |bad_char|
|
||||
|
@ -46,6 +46,8 @@ VirtualNetwork::VirtualNetwork(int _uid,
|
||||
unique_ptr<VirtualNetworkTemplate> _vn_template):
|
||||
PoolObjectSQL(-1,NET,"",_uid,_gid,_uname,_gname,one_db::vn_table),
|
||||
Clusterable(_cluster_ids),
|
||||
state(LOCK_CREATE),
|
||||
prev_state(INIT),
|
||||
bridge(""),
|
||||
vlan_id_automatic(false),
|
||||
outer_vlan_id_automatic(false),
|
||||
@ -189,7 +191,6 @@ int VirtualNetwork::parse_phydev_vlans(const Template* tmpl, const string& vn_ma
|
||||
|
||||
int VirtualNetwork::insert(SqlDB * db, string& error_str)
|
||||
{
|
||||
vector<VectorAttribute *> ars;
|
||||
ostringstream ose;
|
||||
|
||||
string sg_str, vis;
|
||||
@ -198,7 +199,7 @@ int VirtualNetwork::insert(SqlDB * db, string& error_str)
|
||||
string name;
|
||||
string prefix;
|
||||
|
||||
int rc, num_ars;
|
||||
int rc;
|
||||
|
||||
ostringstream oss;
|
||||
|
||||
@ -314,23 +315,6 @@ int VirtualNetwork::insert(SqlDB * db, string& error_str)
|
||||
|
||||
add_template_attribute("BRIDGE", bridge);
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
// Get the Address Ranges
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
num_ars = remove_template_attribute("AR", ars);
|
||||
rc = add_var(ars, error_str);
|
||||
|
||||
for (int i=0; i < num_ars; i++)
|
||||
{
|
||||
delete ars[i];
|
||||
}
|
||||
|
||||
if ( rc != 0)
|
||||
{
|
||||
goto error_ar;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
// Add default Security Group
|
||||
//--------------------------------------------------------------------------
|
||||
@ -378,7 +362,6 @@ error_vn_mad:
|
||||
|
||||
error_parse:
|
||||
error_db:
|
||||
error_ar:
|
||||
error_common:
|
||||
NebulaLog::log("VNM", Log::ERROR, error_str);
|
||||
return -1;
|
||||
@ -600,7 +583,9 @@ string& VirtualNetwork::to_xml_extended(string& xml, bool extended_and_check,
|
||||
perms_to_xml(perm_str) <<
|
||||
Clusterable::to_xml(clusters_xml) <<
|
||||
"<BRIDGE>" << one_util::escape_xml(bridge) << "</BRIDGE>"
|
||||
"<BRIDGE_TYPE>" << one_util::escape_xml(bridge_type) << "</BRIDGE_TYPE>";
|
||||
"<BRIDGE_TYPE>" << one_util::escape_xml(bridge_type) << "</BRIDGE_TYPE>"
|
||||
"<STATE>" << one_util::escape_xml(state) << "</STATE>"
|
||||
"<PREV_STATE>" << one_util::escape_xml(prev_state) << "</PREV_STATE>";
|
||||
|
||||
if (parent_vid != -1)
|
||||
{
|
||||
@ -694,6 +679,7 @@ int VirtualNetwork::from_xml(const string &xml_str)
|
||||
|
||||
int rc = 0;
|
||||
|
||||
int int_state;
|
||||
int int_vlan_id_automatic;
|
||||
int int_outer_vlan_id_automatic;
|
||||
|
||||
@ -708,6 +694,13 @@ int VirtualNetwork::from_xml(const string &xml_str)
|
||||
rc += xpath(gname, "/VNET/GNAME", "not_found");
|
||||
rc += xpath(name, "/VNET/NAME", "not_found");
|
||||
rc += xpath(bridge, "/VNET/BRIDGE","not_found");
|
||||
rc += xpath(int_state, "/VNET/STATE", 0);
|
||||
|
||||
state = static_cast<VirtualNetworkState>(int_state);
|
||||
|
||||
rc += xpath(int_state, "/VNET/PREV_STATE", 0);
|
||||
|
||||
prev_state = static_cast<VirtualNetworkState>(int_state);
|
||||
|
||||
rc += lock_db_from_xml();
|
||||
|
||||
|
@ -21,7 +21,12 @@
|
||||
#include "PoolObjectAuth.h"
|
||||
#include "AuthManager.h"
|
||||
#include "AddressRange.h"
|
||||
#include "IPAMManager.h"
|
||||
#include "VirtualMachineNic.h"
|
||||
#include "ClusterPool.h"
|
||||
#include "HookManager.h"
|
||||
#include "HookStateVirtualNetwork.h"
|
||||
#include "VdcPool.h"
|
||||
|
||||
#include <sstream>
|
||||
#include <ctype.h>
|
||||
@ -143,24 +148,32 @@ int VirtualNetworkPool::allocate (
|
||||
// Insert the VN in the DB
|
||||
*oid = PoolSQL::allocate(vn, error_str);
|
||||
|
||||
// Get a free VLAN_ID from the pool if needed
|
||||
if ( *oid != -1 )
|
||||
if ( *oid == -1)
|
||||
{
|
||||
if ( auto vnet = get(*oid) )
|
||||
{
|
||||
if ( set_vlan_id(vnet.get()) != 0 )
|
||||
{
|
||||
error_str = "Cannot automatically assign VLAN_ID to network.";
|
||||
drop(vnet.get(), error_str);
|
||||
return *oid;
|
||||
}
|
||||
|
||||
*oid = -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
// Get a free VLAN_ID from the pool if needed
|
||||
if ( auto vnet = get(*oid) )
|
||||
{
|
||||
if ( set_vlan_id(vnet.get()) != 0 )
|
||||
{
|
||||
error_str = "An error occurred while allocating the virtual network.";
|
||||
error_str = "Cannot automatically assign VLAN_ID to network.";
|
||||
drop(vnet.get(), error_str);
|
||||
|
||||
goto error_common;
|
||||
}
|
||||
|
||||
string xml64;
|
||||
vnet->to_xml64(xml64);
|
||||
|
||||
auto ipamm = Nebula::instance().get_ipamm();
|
||||
ipamm->trigger_vnet_create(*oid, xml64);
|
||||
}
|
||||
else
|
||||
{
|
||||
error_str = "An error occurred while allocating the virtual network.";
|
||||
goto error_common;
|
||||
}
|
||||
|
||||
return *oid;
|
||||
@ -182,6 +195,30 @@ error_common:
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int VirtualNetworkPool::update(PoolObjectSQL * objsql)
|
||||
{
|
||||
VirtualNetwork * vn = dynamic_cast<VirtualNetwork *>(objsql);
|
||||
|
||||
if ( vn == nullptr )
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ( HookStateVirtualNetwork::trigger(vn) )
|
||||
{
|
||||
std::string event = HookStateVirtualNetwork::format_message(vn);
|
||||
|
||||
Nebula::instance().get_hm()->trigger_send_event(event);
|
||||
}
|
||||
|
||||
vn->set_prev_state();
|
||||
|
||||
return vn->update(db);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
unique_ptr<VirtualNetwork> VirtualNetworkPool::get_nic_by_name(
|
||||
VirtualMachineNic * nic,
|
||||
const string& name,
|
||||
@ -799,3 +836,112 @@ int VirtualNetworkPool::reserve_addr_by_mac(int pid, int rid, unsigned int rsize
|
||||
return add_ar(rid, rar, err);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void VirtualNetworkPool::delete_success(std::unique_ptr<VirtualNetwork> vn)
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
|
||||
int oid = vn->get_oid();
|
||||
int pvid = vn->get_parent();
|
||||
int uid = vn->get_uid();
|
||||
int gid = vn->get_gid();
|
||||
|
||||
set<int> cluster_ids = vn->get_cluster_ids();
|
||||
|
||||
vn->set_state(VirtualNetwork::DONE);
|
||||
vn->clear_template_error_message();
|
||||
|
||||
update(vn.get());
|
||||
|
||||
string err;
|
||||
auto rc = drop(vn.get(), err);
|
||||
|
||||
if ( rc != 0 )
|
||||
{
|
||||
NebulaLog::error("IPM", "Unable to delete Virtual Network id = "
|
||||
+ to_string(oid));
|
||||
return;
|
||||
}
|
||||
|
||||
vn.reset();
|
||||
|
||||
// Remove from clusters
|
||||
auto clpool = nd.get_clpool();
|
||||
|
||||
for (auto cid : cluster_ids)
|
||||
{
|
||||
if ( auto cluster = clpool->get(cid) )
|
||||
{
|
||||
rc = clpool->del_from_cluster(PoolObjectSQL::NET, cluster.get(), oid, err);
|
||||
|
||||
if ( rc < 0 )
|
||||
{
|
||||
NebulaLog::error("IPM", "Unable to remove Virtual Network id="
|
||||
+ to_string(oid)
|
||||
+ " from cluster id=" + to_string(cluster->get_oid())
|
||||
+ ", error: " + err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Release from parent network
|
||||
if (pvid != -1)
|
||||
{
|
||||
int freed = 0;
|
||||
|
||||
if (auto vnet = get(pvid))
|
||||
{
|
||||
freed = vnet->free_addr_by_owner(PoolObjectSQL::NET, oid);
|
||||
|
||||
update(vnet.get());
|
||||
}
|
||||
else
|
||||
{
|
||||
NebulaLog::error("IPM", "VN " + to_string(oid) +
|
||||
" unable to free resources from parent network id=" +
|
||||
to_string(pvid) + ", it doesn't exists");
|
||||
}
|
||||
|
||||
if (freed > 0)
|
||||
{
|
||||
ostringstream oss;
|
||||
Template tmpl;
|
||||
|
||||
for (int i= 0 ; i < freed ; i++)
|
||||
{
|
||||
oss << " NIC = [ NETWORK_ID = " << pvid << " ]" << endl;
|
||||
}
|
||||
|
||||
tmpl.parse_str_or_xml(oss.str(), err);
|
||||
|
||||
Quotas::quota_del(Quotas::NETWORK, uid, gid, &tmpl);
|
||||
}
|
||||
}
|
||||
|
||||
// Remove virtual network from VDC
|
||||
int zone_id = nd.get_zone_id();
|
||||
|
||||
VdcPool * vdcpool = nd.get_vdcpool();
|
||||
|
||||
std::vector<int> vdcs;
|
||||
|
||||
vdcpool->list(vdcs);
|
||||
|
||||
for (int vdcId : vdcs)
|
||||
{
|
||||
if ( auto vdc = vdcpool->get(vdcId) )
|
||||
{
|
||||
if ( vdc->del_vnet(zone_id, oid, err) == 0 )
|
||||
{
|
||||
vdcpool->update(vdc.get());
|
||||
}
|
||||
else
|
||||
{
|
||||
NebulaLog::error("IPM", "Unable to remove Virtual Network id="
|
||||
+ to_string(oid) + " from VDC, error: " + err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
1
src/vnm_mad/remotes/802.1Q/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/802.1Q/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/bridge/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/bridge/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/dummy/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/dummy/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/ebtables/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/ebtables/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/elastic/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/elastic/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/fw/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/fw/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/nodeport/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/nodeport/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/ovswitch/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/ovswitch/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/ovswitch_vxlan/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/ovswitch_vxlan/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
455
src/vnm_mad/remotes/vcenter/virtual_network_xml.rb
Executable file
@ -0,0 +1,455 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
class VirtualNetworkXML
|
||||
#-----------------------------------------------------------------------
|
||||
# Class attributes
|
||||
# - @vnet_xml [REXML] of the vnet
|
||||
# - @oneclient [OpenNebula::Client] to use oned API
|
||||
# - @clusters [Array] of vCenter clusters with following hash
|
||||
# :cli VIClient object for the vCenter
|
||||
# :ccr Cluster resource
|
||||
# :dc Datacenter for the cluster
|
||||
# :uuid vCenter instance uuid
|
||||
#-----------------------------------------------------------------------
|
||||
def initialize(xml64)
|
||||
@oneclient = OpenNebula::Client.new
|
||||
|
||||
vnet_raw = Base64.decode64(xml64)
|
||||
@vnet_xml = REXML::Document.new(vnet_raw).root
|
||||
|
||||
raise "Error parsing XML document: #{vnet_raw}" unless @vnet_xml
|
||||
|
||||
@debug = VCenterDriver::CONFIG[:debug_information]
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Initialize Clusters
|
||||
#-----------------------------------------------------------------------
|
||||
@clusters = []
|
||||
vc_ids = []
|
||||
|
||||
# TODO NSX?
|
||||
self.each('CLUSTERS/ID') do |cid|
|
||||
cxml = OpenNebula::Cluster.build_xml(cid)
|
||||
cluster = OpenNebula::Cluster.new(cxml, @oneclient)
|
||||
|
||||
rc = cluster.info
|
||||
next if OpenNebula.is_error?(rc)
|
||||
|
||||
hid = cluster.host_ids[0]
|
||||
next unless hid
|
||||
hxml = OpenNebula::Host.build_xml(hid)
|
||||
|
||||
host = OpenNebula::Host.new(hxml, @oneclient)
|
||||
|
||||
rc = host.info
|
||||
next if OpenNebula.is_error?(rc)
|
||||
|
||||
ref = host['TEMPLATE/VCENTER_CCR_REF']
|
||||
cli = VCenterDriver::VIClient.new_from_host(hid)
|
||||
ccr = VCenterDriver::ClusterComputeResource.new_from_ref(ref, cli)
|
||||
|
||||
@clusters << {
|
||||
:hid => hid,
|
||||
:cli => cli,
|
||||
:ccr => ccr,
|
||||
:dc => ccr.datacenter,
|
||||
:uuid => cli.vim.serviceContent.about.instanceUuid
|
||||
}
|
||||
|
||||
vc_ids << host['TEMPLATE/VCENTER_INSTANCE_ID']
|
||||
end
|
||||
|
||||
raise "Clusters for VNET don't include any Host" if @clusters.empty?
|
||||
|
||||
raise "Multiple vCenter on VNET Clusters" unless vc_ids.uniq.length == 1
|
||||
end
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
# XML helpers
|
||||
#---------------------------------------------------------------------------
|
||||
def [](xpath)
|
||||
e = @vnet_xml.elements["#{xpath}"]
|
||||
e.text.strip
|
||||
rescue StandardError
|
||||
""
|
||||
end
|
||||
|
||||
def each(xpath, &block)
|
||||
@vnet_xml.elements.each("#{xpath}") { |elem|
|
||||
txt = elem.text
|
||||
block.call(txt.strip) if txt && !txt.empty?
|
||||
}
|
||||
end
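# Illustrative use of the XML helpers above (values are hypothetical and a
# reachable oned with vCenter hosts is assumed by the constructor):
#
#   vnet = VirtualNetworkXML.new(Base64.strict_encode64(File.read('vnet.xml')))
#   vnet['TEMPLATE/VCENTER_SWITCH_NAME']        # => "one-vswitch" or ""
#   vnet.each('CLUSTERS/ID') {|cid| puts cid }  # one line per cluster id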
|
||||
|
||||
#---------------------------------------------------------------------------
|
||||
# Methods to create port groups
|
||||
#---------------------------------------------------------------------------
|
||||
|
||||
# Creates a port group in each ESX host; raises an exception with an
|
||||
# error description
|
||||
# @return String with the parameters to add to the OpenNebula VNET
|
||||
def create_pg
|
||||
#-----------------------------------------------------------------------
|
||||
# Get parameters needed to create the network
|
||||
#-----------------------------------------------------------------------
|
||||
vlan_id = self['VLAN_ID'] || "0"
|
||||
pg_name = self['BRIDGE']
|
||||
pnics = self['TEMPLATE/PHYDEV']
|
||||
sw_name = self['TEMPLATE/VCENTER_SWITCH_NAME']
|
||||
mtu = self['TEMPLATE/MTU']
|
||||
nports = self['TEMPLATE/VCENTER_SWITCH_NPORTS']
|
||||
|
||||
nports = 128 if nports.empty?
|
||||
mtu = nil if mtu.empty?
|
||||
pnics = nil if pnics.empty?
|
||||
|
||||
esxs = []
|
||||
newpgs = []
|
||||
errors = []
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Initialize ESX references for all ESX hosts in all Clusters
|
||||
#-----------------------------------------------------------------------
|
||||
@clusters.each do |cluster|
|
||||
cli = cluster[:cli]
|
||||
|
||||
cluster[:ccr]['host'].each do |host|
|
||||
esxs << VCenterDriver::ESXHost.new_from_ref(host._ref, cli)
|
||||
end
|
||||
end
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Check the PG does not exist and create it on all ESX hosts
|
||||
#-----------------------------------------------------------------------
|
||||
esxs.each do |esx|
|
||||
pg = esx.pg_exists(pg_name)
|
||||
raise "Port Group #{pg_name} already exists" if pg
|
||||
end
|
||||
|
||||
esxs.each do |esx|
|
||||
begin
|
||||
apnic = nil
|
||||
apnic = esx.available_pnics if pnics
|
||||
|
||||
vs = esx.vss_exists(sw_name)
|
||||
|
||||
esx.create_vss(sw_name, nports, pnics, mtu, apnic) unless vs
|
||||
|
||||
newpgs << esx.create_pg(pg_name, sw_name, vlan_id)
|
||||
rescue StandardError => e
|
||||
msg = "\tHost #{esx['name']}. Reason: \"#{e.message}\".\n"
|
||||
msg << "#{e.backtrace}\n" if @debug
|
||||
|
||||
errors << msg
|
||||
end
|
||||
end
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Sanity check: all new_pg references should be the same
|
||||
# Rollback PG creation in case of any error
|
||||
#-----------------------------------------------------------------------
|
||||
unless errors.empty?
|
||||
message = "Error adding port group to hosts:\n"
|
||||
message << errors.join
|
||||
|
||||
esxs.each do |esx|
|
||||
begin
|
||||
esx.network_rollback
|
||||
rescue StandardError => e
|
||||
message << "Error in rollback for #{esx['name']}: #{e.message}\n"
|
||||
end
|
||||
end
|
||||
|
||||
raise message
|
||||
end
|
||||
|
||||
raise "Different PG refs!:\n#{newpgs}" if newpgs.uniq.length != 1
|
||||
|
||||
"VCENTER_NET_REF = \"#{newpgs[0]}\"\n"\
|
||||
"VCENTER_INSTANCE_ID = \"#{@clusters[0][:uuid]}\"\n"
|
||||
end
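# For reference, the returned snippet is appended to the VNET template by
# the driver; the values below are hypothetical:
#
#   VCENTER_NET_REF = "network-101"
#   VCENTER_INSTANCE_ID = "a1b2c3d4-0000-0000-0000-000000000000"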
|
||||
|
||||
# Deletes a port group in each ESX
|
||||
def delete_pg
|
||||
pg_name = self['BRIDGE']
|
||||
sw_name = self['TEMPLATE/VCENTER_SWITCH_NAME']
|
||||
|
||||
raise "Missing BRIDGE from VNET template" if pg_name.empty?
|
||||
raise "Missing VCENTER_SWTICH_NAME from VNET template" if sw_name.empty?
|
||||
|
||||
errors = []
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Iterate over all clusters and hosts to remove PG
|
||||
#-----------------------------------------------------------------------
|
||||
@clusters.each do |cluster|
|
||||
cli = cluster[:cli]
|
||||
|
||||
cluster[:ccr]['host'].each do |host|
|
||||
begin
|
||||
esx = VCenterDriver::ESXHost.new_from_ref(host._ref, cli)
|
||||
|
||||
next unless esx.pg_exists(pg_name)
|
||||
|
||||
sw = esx.remove_pg(pg_name)
|
||||
|
||||
next if !sw || sw_name != sw
|
||||
|
||||
vswitch = esx.vss_exists(sw_name)
|
||||
next unless vswitch
|
||||
|
||||
# Remove switch if the port group being removed is the last one
|
||||
esx.remove_vss(sw_name) if vswitch.portgroup.empty?
|
||||
|
||||
rescue StandardError => e
|
||||
msg = "\tHost #{esx._ref}. Reason: \"#{e.message}\".\n"
|
||||
msg << "#{e.backtrace}\n" if @debug
|
||||
|
||||
errors << msg
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Report errors raised during PG removal
|
||||
#-----------------------------------------------------------------------
|
||||
unless errors.empty?
|
||||
message = "Error deleting port group from hosts:\n"
|
||||
message << errors.join
|
||||
|
||||
raise message
|
||||
end
|
||||
end
|
||||
|
||||
# Creates a distributed port group in a datacenter; raises an exception with an
|
||||
# error description
|
||||
# @return String with the parameters to add to the OpenNebula VNET
|
||||
def create_dpg
|
||||
#-----------------------------------------------------------------------
|
||||
# Get parameters needed to create the network
|
||||
#-----------------------------------------------------------------------
|
||||
vlan_id = self['VLAN_ID'] || "0"
|
||||
pg_name = self['BRIDGE']
|
||||
pnics = self['TEMPLATE/PHYDEV']
|
||||
sw_name = self['TEMPLATE/VCENTER_SWITCH_NAME']
|
||||
mtu = self['TEMPLATE/MTU']
|
||||
nports = self['TEMPLATE/VCENTER_SWITCH_NPORTS']
|
||||
|
||||
nports = 128 if nports.empty?
|
||||
mtu = nil if mtu.empty?
|
||||
pnics = nil if pnics.empty?
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Use first cluster/dc to check the distributed portgroup
|
||||
#-----------------------------------------------------------------------
|
||||
dc = @clusters[0][:dc]
|
||||
|
||||
raise "vCenter Dataceter not initialized" unless dc
|
||||
|
||||
net_folder = dc.network_folder
|
||||
net_folder.fetch!
|
||||
|
||||
dpg = dc.dpg_exists(pg_name, net_folder)
|
||||
|
||||
# Disallow changes of switch name for existing pg
|
||||
raise "Port group #{pg_name} already exists" if dpg
|
||||
|
||||
# Get distributed virtual switch if it exists
|
||||
dvs = dc.dvs_exists(sw_name, net_folder)
|
||||
dvs = dc.create_dvs(sw_name, pnics, mtu) unless dvs
|
||||
|
||||
raise "Cannot create Distributed Virtual Switch" unless dvs
|
||||
|
||||
# Creates distributed port group
|
||||
# TODO raise?
|
||||
new_dpg = dc.create_dpg(dvs, pg_name, vlan_id, nports)
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Attach dpg to esxi hosts for each cluster
|
||||
#-----------------------------------------------------------------------
|
||||
errors = []
|
||||
|
||||
@clusters.each do |cluster|
|
||||
cli = cluster[:cli]
|
||||
|
||||
cluster[:ccr]['host'].each do |host|
|
||||
begin
|
||||
esx = VCenterDriver::ESXHost.new_from_ref(host._ref, cli)
|
||||
|
||||
avail_pnics = nil
|
||||
avail_pnics = esx.available_pnics if pnics
|
||||
|
||||
esx.assign_proxy_switch(dvs, sw_name, pnics, avail_pnics)
|
||||
rescue StandardError => e
|
||||
msg = "\tHost #{host._ref}. Reason: \"#{e.message}\".\n"
|
||||
msg << "#{e.backtrace}\n" if @debug
|
||||
|
||||
errors << msg
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Rollback DPG creation in case of any error
|
||||
#-----------------------------------------------------------------------
|
||||
unless errors.empty?
|
||||
message = "Error adding distributed port group to hosts:\n"
|
||||
message << errors.join
|
||||
|
||||
dc.network_rollback
|
||||
|
||||
raise message
|
||||
end
|
||||
|
||||
"VCENTER_NET_REF = \"#{new_dpg}\"\n"\
|
||||
"VCENTER_INSTANCE_ID = \"#{@clusters[0][:uuid]}\"\n"
|
||||
end
|
||||
|
||||
def delete_dpg
|
||||
pg_name = self['BRIDGE']
|
||||
sw_name = self['TEMPLATE/VCENTER_SWITCH_NAME']
|
||||
|
||||
raise "Missing BRIDGE from VNET template" if pg_name.empty?
|
||||
raise "Missing VCENTER_SWTICH_NAME from VNET template" if sw_name.empty?
|
||||
|
||||
@clusters.each do |cluster|
|
||||
dc = cluster[:dc]
|
||||
|
||||
# Explore network folder in search of dpg and dvs
|
||||
net_folder = dc.network_folder
|
||||
net_folder.fetch!
|
||||
|
||||
# Get distributed port group and dvs if they exist
|
||||
dvs = dc.dvs_exists(sw_name, net_folder)
|
||||
dpg = dc.dpg_exists(pg_name, net_folder)
|
||||
dc.remove_dpg(dpg) if dpg
|
||||
|
||||
if dvs && dvs.item.summary.portgroupName.size == 1 &&
|
||||
dvs.item.summary.portgroupName[0] == "#{sw_name}-uplink-pg"
|
||||
dc.remove_dvs(dvs)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Creates an NSX-V logical switch (virtual wire); raises an exception with an
|
||||
# error description
|
||||
# @return String with the parameters to add to the OpenNebula VNET
|
||||
def create_nsxv
|
||||
#-----------------------------------------------------------------------
|
||||
# Get NSX parameters needed to create the network
|
||||
#-----------------------------------------------------------------------
|
||||
ls_name = self['NAME']
|
||||
ls_description = self['TEMPLATE/DESCRIPTION']
|
||||
tz_id = self['TEMPLATE/NSX_TZ_ID']
|
||||
rep_mode = self['TEMPLATE/NSX_REP_MODE']
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Use first cluster/dc to create the virtual wire
|
||||
#-----------------------------------------------------------------------
|
||||
host_id = @clusters[0][:hid]
|
||||
uuid = @clusters[0][:uuid]
|
||||
|
||||
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
|
||||
|
||||
vwire_spec =
|
||||
"<virtualWireCreateSpec>\
|
||||
<name>#{ls_name}</name>\
|
||||
<description>#{ls_description}</description>\
|
||||
<tenantId>virtual wire tenant</tenantId>\
|
||||
<controlPlaneMode>#{rep_mode}</controlPlaneMode>\
|
||||
<guestVlanAllowed>false</guestVlanAllowed>\
|
||||
</virtualWireCreateSpec>"
|
||||
|
||||
lsw = NSXDriver::VirtualWire.new(nsx_client, nil, tz_id, vwire_spec)
|
||||
|
||||
"VCENTER_NET_REF = '#{lsw.ls_vcenter_ref}'\n"\
|
||||
"VCENTER_INSTANCE_ID = '#{uuid}'\n"\
|
||||
"NSX_ID = '#{lsw.ls_id}'\n"\
|
||||
"NSX_VNI = '#{lsw.ls_vni}'\n"\
|
||||
"BRIDGE = '#{lsw.ls_name}'\n"
|
||||
end
|
||||
|
||||
def delete_nsxv
|
||||
host_id = @clusters[0][:hid]
|
||||
ls_id = self['TEMPLATE/NSX_ID']
|
||||
|
||||
raise "Missing NSX_ID attribute in the virtual network" unless ls_id
|
||||
|
||||
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
|
||||
|
||||
lswitch = NSXDriver::VirtualWire.new(nsx_client, ls_id, nil, nil)
|
||||
lswitch.delete_logical_switch
|
||||
end
|
||||
|
||||
# Creates an NSX-T opaque network (logical switch); raises an exception with an
|
||||
# error description
|
||||
# @return String with the parameters to add to the OpenNebula VNET
|
||||
def create_nsxt
|
||||
#-----------------------------------------------------------------------
|
||||
# Get NSX parameters needed to create the network
|
||||
#-----------------------------------------------------------------------
|
||||
ls_name = self['NAME']
|
||||
ls_description = self['TEMPLATE/DESCRIPTION']
|
||||
tz_id = self['TEMPLATE/NSX_TZ_ID']
|
||||
rep_mode = self['TEMPLATE/NSX_REP_MODE']
|
||||
admin_status = self['TEMPLATE/NSX_ADMIN_STATUS']
|
||||
|
||||
#-----------------------------------------------------------------------
|
||||
# Use first cluster/dc to create the virtual wire
|
||||
#-----------------------------------------------------------------------
|
||||
host_id = @clusters[0][:hid]
|
||||
uuid = @clusters[0][:uuid]
|
||||
dc = @clusters[0][:dc]
|
||||
|
||||
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
|
||||
|
||||
opaque_spec = %(
|
||||
{
|
||||
"transport_zone_id": "#{tz_id}",
|
||||
"replication_mode": "#{rep_mode}",
|
||||
"admin_state": "#{admin_status}",
|
||||
"display_name": "#{ls_name}",
|
||||
"description": "#{ls_description}"
|
||||
}
|
||||
)
|
||||
|
||||
lsw = NSXDriver::OpaqueNetwork.new(nsx_client, nil, tz_id, opaque_spec)
|
||||
|
||||
vnet_ref = dc.nsx_network(lsw.ls_id,
|
||||
VCenterDriver::Network::NETWORK_TYPE_NSXT)
|
||||
|
||||
"VCENTER_NET_REF = '#{vnet_ref}'\n"\
|
||||
"VCENTER_INSTANCE_ID = '#{uuid}'\n"\
|
||||
"NSX_ID = '#{lsw.ls_id}'\n"\
|
||||
"NSX_VNI = '#{lsw.ls_vni}'\n"\
|
||||
"BRIDGE = '#{lsw.ls_name}'\n"
|
||||
end
|
||||
|
||||
def delete_nsxt
|
||||
host_id = @clusters[0][:hid]
|
||||
ls_id = self['TEMPLATE/NSX_ID']
|
||||
|
||||
raise "Missing NSX_ID attribute in the virtual network" unless ls_id
|
||||
|
||||
nsx_client = NSXDriver::NSXClient.new_from_id(host_id)
|
||||
|
||||
lswitch = NSXDriver::OpaqueNetwork.new(nsx_client, ls_id, nil, nil)
|
||||
lswitch.delete_logical_switch
|
||||
end
|
||||
end
|
105
src/vnm_mad/remotes/vcenter/vnet_create
Executable file
@ -0,0 +1,105 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
# Define libraries location
|
||||
ONE_LOCATION = ENV['ONE_LOCATION']
|
||||
|
||||
if !ONE_LOCATION
|
||||
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
|
||||
GEMS_LOCATION = '/usr/share/one/gems'
|
||||
VMDIR = '/var/lib/one'
|
||||
CONFIG_FILE = '/var/lib/one/config'
|
||||
else
|
||||
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
|
||||
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
|
||||
VMDIR = ONE_LOCATION + '/var'
|
||||
CONFIG_FILE = ONE_LOCATION + '/var/config'
|
||||
end
|
||||
|
||||
# %%RUBYGEMS_SETUP_BEGIN%%
|
||||
if File.directory?(GEMS_LOCATION)
|
||||
real_gems_path = File.realpath(GEMS_LOCATION)
|
||||
if !defined?(Gem) || Gem.path != [real_gems_path]
|
||||
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
|
||||
|
||||
# Suppress warnings from Rubygems
|
||||
# https://github.com/OpenNebula/one/issues/5379
|
||||
begin
|
||||
verb = $VERBOSE
|
||||
$VERBOSE = nil
|
||||
require 'rubygems'
|
||||
Gem.use_paths(real_gems_path)
|
||||
ensure
|
||||
$VERBOSE = verb
|
||||
end
|
||||
end
|
||||
end
|
||||
# %%RUBYGEMS_SETUP_END%%
|
||||
|
||||
$LOAD_PATH << RUBY_LIB_LOCATION
|
||||
|
||||
# Hook dependencies
|
||||
require 'opennebula'
|
||||
require 'vcenter_driver'
|
||||
require 'base64'
|
||||
require 'nsx_driver'
|
||||
|
||||
require_relative 'virtual_network_xml'
|
||||
|
||||
# Driver Message
|
||||
# VNET_CREATE vnet_id
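#
# Invocation sketch (for illustration only; the exact wiring is done by oned):
# the VNET id comes in ARGV[0] and the base64-encoded VNET XML on STDIN, e.g.
#
#   onevnet show -x 3 | base64 -w0 | ./vnet_create 3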
|
||||
|
||||
begin
|
||||
_vnet_id = ARGV[0]
|
||||
vnet_xml = VirtualNetworkXML.new(STDIN.read)
|
||||
|
||||
estr = "Error importing network:"
|
||||
|
||||
vnmad = vnet_xml['VN_MAD']
|
||||
raise "#{estr} driver is not vcenter" unless vnmad == 'vcenter'
|
||||
|
||||
unmanaged = vnet_xml['TEMPLATE/OPENNEBULA_MANAGED'].casecmp('NO') == 0
|
||||
raise "#{estr} OPENNEBULA_MANAGED is set to NO" if unmanaged
|
||||
|
||||
imported = vnet_xml['TEMPLATE/VCENTER_IMPORTED']
|
||||
raise "#{estr} network is already imported" unless imported.empty?
|
||||
|
||||
pg_type = vnet_xml['TEMPLATE/VCENTER_PORTGROUP_TYPE']
|
||||
|
||||
raise "#{estr} Missing port group type (VCENTER_PORTGROUP_TYPE)" if pg_type.empty?
|
||||
|
||||
extra_info = case pg_type
|
||||
when VCenterDriver::Network::NETWORK_TYPE_PG
|
||||
vnet_xml.create_pg
|
||||
when VCenterDriver::Network::NETWORK_TYPE_DPG
|
||||
vnet_xml.create_dpg
|
||||
when VCenterDriver::Network::NETWORK_TYPE_NSXV
|
||||
vnet_xml.create_nsxv
|
||||
when VCenterDriver::Network::NETWORK_TYPE_NSXT
|
||||
vnet_xml.create_nsxt
|
||||
else
|
||||
raise "Unknown portgroup type"
|
||||
end
|
||||
|
||||
STDOUT.puts extra_info
|
||||
rescue StandardError => e
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
exit(-1)
|
||||
end
|
||||
|
97
src/vnm_mad/remotes/vcenter/vnet_delete
Executable file
@ -0,0 +1,97 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2021, OpenNebula Project, OpenNebula Systems #
|
||||
# #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
|
||||
# not use this file except in compliance with the License. You may obtain #
|
||||
# a copy of the License at #
|
||||
# #
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 #
|
||||
# #
|
||||
# Unless required by applicable law or agreed to in writing, software #
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, #
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
ONE_LOCATION = ENV['ONE_LOCATION']
|
||||
|
||||
if !ONE_LOCATION
|
||||
RUBY_LIB_LOCATION = '/usr/lib/one/ruby'
|
||||
GEMS_LOCATION = '/usr/share/one/gems'
|
||||
VMDIR = '/var/lib/one'
|
||||
CONFIG_FILE = '/var/lib/one/config'
|
||||
else
|
||||
RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby'
|
||||
GEMS_LOCATION = ONE_LOCATION + '/share/gems'
|
||||
VMDIR = ONE_LOCATION + '/var'
|
||||
CONFIG_FILE = ONE_LOCATION + '/var/config'
|
||||
end
|
||||
|
||||
# %%RUBYGEMS_SETUP_BEGIN%%
|
||||
if File.directory?(GEMS_LOCATION)
|
||||
real_gems_path = File.realpath(GEMS_LOCATION)
|
||||
if !defined?(Gem) || Gem.path != [real_gems_path]
|
||||
$LOAD_PATH.reject! {|l| l =~ /vendor_ruby/ }
|
||||
|
||||
# Suppress warnings from Rubygems
|
||||
# https://github.com/OpenNebula/one/issues/5379
|
||||
begin
|
||||
verb = $VERBOSE
|
||||
$VERBOSE = nil
|
||||
require 'rubygems'
|
||||
Gem.use_paths(real_gems_path)
|
||||
ensure
|
||||
$VERBOSE = verb
|
||||
end
|
||||
end
|
||||
end
|
||||
# %%RUBYGEMS_SETUP_END%%
|
||||
|
||||
$LOAD_PATH << RUBY_LIB_LOCATION
|
||||
|
||||
require 'opennebula'
|
||||
require 'vcenter_driver'
|
||||
require 'base64'
|
||||
require 'nsx_driver'
|
||||
|
||||
require_relative 'virtual_network_xml'
|
||||
|
||||
begin
|
||||
_vnet_id = ARGV[0]
|
||||
vnet_xml = VirtualNetworkXML.new(STDIN.read)
|
||||
|
||||
estr = "Error importing network:"
|
||||
|
||||
vnmad = vnet_xml['VN_MAD']
|
||||
raise "#{estr} driver is not vcenter" unless vnmad == 'vcenter'
|
||||
|
||||
unmanaged = vnet_xml['TEMPLATE/OPENNEBULA_MANAGED'].casecmp('NO') == 0
|
||||
raise "#{estr} OPENNEBULA_MANAGED is set to NO" if unmanaged
|
||||
|
||||
imported = vnet_xml['TEMPLATE/VCENTER_IMPORTED']
|
||||
raise "#{estr} network is already imported" unless imported.empty?
|
||||
|
||||
pg_type = vnet_xml['TEMPLATE/VCENTER_PORTGROUP_TYPE']
|
||||
|
||||
raise "#{estr} Missing port group type (VCENTER_PORTGROUP_TYPE)" if pg_type.empty?
|
||||
|
||||
case pg_type
|
||||
when VCenterDriver::Network::NETWORK_TYPE_PG
|
||||
vnet_xml.delete_pg
|
||||
when VCenterDriver::Network::NETWORK_TYPE_DPG
|
||||
vnet_xml.delete_dpg
|
||||
when VCenterDriver::Network::NETWORK_TYPE_NSXV
|
||||
vnet_xml.delete_nsxv
|
||||
when VCenterDriver::Network::NETWORK_TYPE_NSXT
|
||||
vnet_xml.delete_nsxt
|
||||
else
|
||||
raise "Unknown portgroup type"
|
||||
end
|
||||
rescue StandardError => e
|
||||
STDERR.puts e.message
|
||||
STDERR.puts e.backtrace if VCenterDriver::CONFIG[:debug_information]
|
||||
exit(-1)
|
||||
end
|
1
src/vnm_mad/remotes/vxlan/vnet_create
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|
1
src/vnm_mad/remotes/vxlan/vnet_delete
Symbolic link
@ -0,0 +1 @@
|
||||
../common/dummy.sh
|