Mirror of https://github.com/OpenNebula/one.git (synced 2025-03-20 10:50:08 +03:00)

Merge branch 'fixes-5.12.0.4' into one-5.12
Commit cb0864de08
@ -79,10 +79,10 @@ public:
|
||||
* @param op The operation to be authorized
|
||||
* @return true if the authorization is granted by any rule
|
||||
*/
|
||||
const bool authorize(int uid,
|
||||
const set<int>& user_groups,
|
||||
const PoolObjectAuth& obj_perms,
|
||||
AuthRequest::Operation op);
|
||||
bool authorize(int uid,
|
||||
const set<int>& user_groups,
|
||||
const PoolObjectAuth& obj_perms,
|
||||
AuthRequest::Operation op) const;
|
||||
|
||||
/**
|
||||
* Takes an authorization request for oneadmin
|
||||
@ -92,8 +92,8 @@ public:
|
||||
* @param op The operation to be authorized
|
||||
* @return true if the authorization is granted for oneadmin
|
||||
*/
|
||||
const bool oneadmin_authorize(const PoolObjectAuth& obj_perms,
|
||||
AuthRequest::Operation op);
|
||||
bool oneadmin_authorize(const PoolObjectAuth& obj_perms,
|
||||
AuthRequest::Operation op) const;
|
||||
|
||||
/**
|
||||
* Adds a new rule to the ACL rule set
|
||||
@ -285,7 +285,7 @@ private:
|
||||
long long resource_oid_mask,
|
||||
long long resource_gid_mask,
|
||||
long long resource_cid_mask,
|
||||
const multimap<long long, AclRule*>& rules);
|
||||
const multimap<long long, AclRule*>& rules) const;
|
||||
/**
|
||||
* Wrapper for match_rules. It will check if any rules in the temporary
|
||||
* multimap or in the internal one grants permission.
|
||||
@ -313,7 +313,7 @@ private:
|
||||
long long individual_obj_type,
|
||||
long long group_obj_type,
|
||||
long long cluster_obj_type,
|
||||
const multimap<long long, AclRule*> &tmp_rules);
|
||||
const multimap<long long, AclRule*> &tmp_rules) const;
|
||||
/**
|
||||
* Deletes all rules that match the user mask
|
||||
*
|
||||
@ -353,17 +353,17 @@ private:
/**
* Function to lock the manager
*/
void lock()
void lock() const
{
pthread_mutex_lock(&mutex);
pthread_mutex_lock(const_cast<pthread_mutex_t *>(&mutex));
};

/**
* Function to unlock the manager
*/
void unlock()
void unlock() const
{
pthread_mutex_unlock(&mutex);
pthread_mutex_unlock(const_cast<pthread_mutex_t *>(&mutex));
};

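The hunks in this header make the read-only AclManager entry points (authorize, oneadmin_authorize, match_rules) const; since lock()/unlock() are then called on a const object, they cast the constness away from the pthread mutex before locking. A minimal sketch of that pattern follows; the class and member names are illustrative, not part of the patch, and declaring the mutex mutable would be an equivalent alternative.

#include <pthread.h>

// Illustrative sketch only: const query methods that still serialize access
// through a pthread mutex by casting away constness, as in the hunk above.
class RuleTable
{
public:
    bool authorize(int uid) const
    {
        lock();
        bool granted = (uid == 0);   // placeholder decision logic
        unlock();
        return granted;
    }

private:
    void lock() const
    {
        pthread_mutex_lock(const_cast<pthread_mutex_t *>(&mutex));
    }

    void unlock() const
    {
        pthread_mutex_unlock(const_cast<pthread_mutex_t *>(&mutex));
    }

    pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
};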
// -------------------------------------------------------------------------
|
||||
|
@ -459,8 +459,8 @@ public:
|
||||
void replace(const string& name, const string& value);
|
||||
|
||||
/**
|
||||
* Removes given the vector attribute
|
||||
* @param name of the vector attribute
|
||||
* Removes the given attribute from the vector
|
||||
* @param name of the attribute
|
||||
*/
|
||||
void remove(const string& name);
|
||||
|
||||
|
@ -244,10 +244,12 @@ public:
|
||||
*
|
||||
* @param oss the output stream to dump the pool contents
|
||||
* @param where filter for the objects, defaults to all
|
||||
* @param seconds Retrieve monitor records in the last seconds
|
||||
*
|
||||
* @return 0 on success
|
||||
*/
|
||||
int dump_monitoring(string& oss, const string& where);
|
||||
int dump_monitoring(std::string& oss, const std::string& where,
|
||||
const int seconds);
|
||||
|
||||
/**
|
||||
* Dumps the HOST monitoring information for a single HOST
|
||||
@ -263,7 +265,7 @@ public:
|
||||
|
||||
filter << "oid = " << hostid;
|
||||
|
||||
return dump_monitoring(oss, filter.str());
|
||||
return dump_monitoring(oss, filter.str(), -1);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -135,6 +135,18 @@ public:
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class VirtualMachinePoolInfoSet : public RequestManagerPoolInfoFilter
|
||||
{
|
||||
public:
|
||||
VirtualMachinePoolInfoSet();
|
||||
|
||||
void request_execute(
|
||||
xmlrpc_c::paramList const& paramList, RequestAttributes& att) override;
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class VirtualMachinePoolAccounting : public RequestManagerPoolInfoFilter
|
||||
{
|
||||
public:
|
||||
|
@ -634,7 +634,7 @@ public:
|
||||
* function MUST be called before this one.
|
||||
* @return the action that closed the current history record
|
||||
*/
|
||||
const VMActions::Action get_action() const
|
||||
VMActions::Action get_action() const
|
||||
{
|
||||
return history->action;
|
||||
};
|
||||
@ -643,7 +643,7 @@ public:
|
||||
* Returns the action that closed the history record in the previous host
|
||||
* @return the action that closed the history record in the previous host
|
||||
*/
|
||||
const VMActions::Action get_previous_action() const
|
||||
VMActions::Action get_previous_action() const
|
||||
{
|
||||
return previous_history->action;
|
||||
};
|
||||
@ -847,6 +847,19 @@ public:
|
||||
previous_history->req_id = rid;
|
||||
};
|
||||
|
||||
/**
|
||||
* Release the previous VNC port when a VM is migrated to another cluster
|
||||
* (GRAPHICS/PREVIOUS_PORT present)
|
||||
*/
|
||||
void release_previous_vnc_port();
|
||||
|
||||
/**
|
||||
* Frees current PORT from **current** cluster and sets it to PREVIOUS_PORT
|
||||
* (which is allocated in previous cluster). This function is called when
|
||||
* the migration fails.
|
||||
*/
|
||||
void rollback_previous_vnc_port();
|
||||
|
||||
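The two declarations above pair with the LifeCycleManager changes later in this diff: on a successful cross-cluster migration the VNC port reserved in the previous cluster is released, and when the migration fails the current port is rolled back to PREVIOUS_PORT. A simplified, self-contained sketch of that decision; the types and names here are illustrative, not the real VirtualMachine interface.

// Illustrative only: the rule applied by the migration hunks further down.
struct MiniVM
{
    int hid          = 0;   // current host
    int previous_hid = 0;   // host before migration

    void release_previous_vnc_port()  { /* free port held in the old cluster */ }
    void rollback_previous_vnc_port() { /* restore the old cluster port */ }
};

void finish_migration(MiniVM& vm, bool success)
{
    if (vm.hid == vm.previous_hid)
    {
        return; // same host, the VNC port never moved
    }

    if (success)
    {
        vm.release_previous_vnc_port();
    }
    else
    {
        vm.rollback_previous_vnc_port();
    }
}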
// ------------------------------------------------------------------------
|
||||
// Template & Object Representation
|
||||
// ------------------------------------------------------------------------
|
||||
|
@ -324,10 +324,12 @@ public:
|
||||
*
|
||||
* @param oss the output stream to dump the pool contents
|
||||
* @param where filter for the objects, defaults to all
|
||||
* @param seconds Retrieve monitor records in the last seconds
|
||||
*
|
||||
* @return 0 on success
|
||||
*/
|
||||
int dump_monitoring(string& oss, const string& where);
|
||||
int dump_monitoring(std::string& oss, const std::string& where,
|
||||
const int seconds);
|
||||
|
||||
/**
|
||||
* Dumps the VM monitoring information for a single VM
|
||||
@ -343,7 +345,7 @@ public:
|
||||
|
||||
filter << "oid = " << vmid;
|
||||
|
||||
return dump_monitoring(oss, filter.str());
|
||||
return dump_monitoring(oss, filter.str(), -1);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -41,6 +41,16 @@ public:
|
||||
|
||||
VirtualMachineTemplate(VirtualMachineTemplate& vmt):Template(vmt){};
|
||||
|
||||
VirtualMachineTemplate& operator=(const VirtualMachineTemplate& t)
|
||||
{
|
||||
if (this != &t)
|
||||
{
|
||||
Template::operator=(t);
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
void set_xml_root(const char * _xml_root)
|
||||
{
|
||||
Template::set_xml_root(_xml_root);
|
||||
|
@ -2542,7 +2542,8 @@ src/sunstone/public/locale/languages/tr_datatable.txt"
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
ONEGATE_FILES="src/onegate/onegate-server.rb \
|
||||
src/onegate/config.ru"
|
||||
src/onegate/config.ru \
|
||||
share/onegate/onegate"
|
||||
|
||||
ONEGATE_BIN_FILES="src/onegate/bin/onegate-server"
|
||||
|
||||
|
share/install_gems/Fedora33/Gemfile.lock (new file, 233 lines)
@ -0,0 +1,233 @@
|
||||
GEM
|
||||
remote: https://rubygems.org/
|
||||
specs:
|
||||
activesupport (4.2.11.3)
|
||||
i18n (~> 0.7)
|
||||
minitest (~> 5.1)
|
||||
thread_safe (~> 0.3, >= 0.3.4)
|
||||
tzinfo (~> 1.1)
|
||||
amazon-ec2 (0.9.17)
|
||||
xml-simple (>= 1.0.12)
|
||||
android_key_attestation (0.3.0)
|
||||
augeas (0.6.4)
|
||||
awrence (1.1.1)
|
||||
aws-eventstream (1.1.0)
|
||||
aws-partitions (1.385.0)
|
||||
aws-sdk-cloudwatch (1.45.0)
|
||||
aws-sdk-core (~> 3, >= 3.109.0)
|
||||
aws-sigv4 (~> 1.1)
|
||||
aws-sdk-core (3.109.1)
|
||||
aws-eventstream (~> 1, >= 1.0.2)
|
||||
aws-partitions (~> 1, >= 1.239.0)
|
||||
aws-sigv4 (~> 1.1)
|
||||
jmespath (~> 1.0)
|
||||
aws-sdk-ec2 (1.202.0)
|
||||
aws-sdk-core (~> 3, >= 3.109.0)
|
||||
aws-sigv4 (~> 1.1)
|
||||
aws-sdk-kms (1.39.0)
|
||||
aws-sdk-core (~> 3, >= 3.109.0)
|
||||
aws-sigv4 (~> 1.1)
|
||||
aws-sdk-s3 (1.83.1)
|
||||
aws-sdk-core (~> 3, >= 3.109.0)
|
||||
aws-sdk-kms (~> 1)
|
||||
aws-sigv4 (~> 1.1)
|
||||
aws-sigv4 (1.2.2)
|
||||
aws-eventstream (~> 1, >= 1.0.2)
|
||||
azure_mgmt_compute (0.20.0)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
azure_mgmt_monitor (0.17.6)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
azure_mgmt_network (0.24.1)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
azure_mgmt_resources (0.18.0)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
azure_mgmt_storage (0.22.0)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
bindata (2.4.8)
|
||||
builder (3.2.4)
|
||||
cbor (0.5.9.6)
|
||||
chunky_png (1.3.13)
|
||||
concurrent-ruby (1.1.7)
|
||||
configparser (0.1.7)
|
||||
cose (1.2.0)
|
||||
cbor (~> 0.5.9)
|
||||
openssl-signature_algorithm (~> 1.0)
|
||||
curb (0.9.10)
|
||||
daemons (1.3.1)
|
||||
dalli (2.7.11)
|
||||
domain_name (0.5.20190701)
|
||||
unf (>= 0.0.5, < 1.0.0)
|
||||
eventmachine (1.2.7)
|
||||
faraday (0.17.3)
|
||||
multipart-post (>= 1.2, < 3)
|
||||
faraday-cookie_jar (0.0.7)
|
||||
faraday (>= 0.8.0)
|
||||
http-cookie (~> 1.0.0)
|
||||
faraday_middleware (0.14.0)
|
||||
faraday (>= 0.7.4, < 1.0)
|
||||
ffi (1.13.1)
|
||||
ffi-rzmq (2.0.7)
|
||||
ffi-rzmq-core (>= 1.0.7)
|
||||
ffi-rzmq-core (1.0.7)
|
||||
ffi
|
||||
gnuplot (2.6.2)
|
||||
hashie (4.1.0)
|
||||
highline (1.7.10)
|
||||
http-cookie (1.0.3)
|
||||
domain_name (~> 0.5)
|
||||
i18n (0.9.5)
|
||||
concurrent-ruby (~> 1.0)
|
||||
inflection (1.0.0)
|
||||
ipaddress (0.8.3)
|
||||
jmespath (1.4.0)
|
||||
json (2.3.1)
|
||||
jwt (2.2.2)
|
||||
memcache-client (1.8.5)
|
||||
mime-types (3.3.1)
|
||||
mime-types-data (~> 3.2015)
|
||||
mime-types-data (3.2020.0512)
|
||||
mini_portile2 (2.4.0)
|
||||
minitest (5.14.2)
|
||||
ms_rest (0.7.6)
|
||||
concurrent-ruby (~> 1.0)
|
||||
faraday (>= 0.9, < 2.0.0)
|
||||
timeliness (~> 0.3.10)
|
||||
ms_rest_azure (0.12.0)
|
||||
concurrent-ruby (~> 1.0)
|
||||
faraday (>= 0.9, < 2.0.0)
|
||||
faraday-cookie_jar (~> 0.0.6)
|
||||
ms_rest (~> 0.7.6)
|
||||
multipart-post (2.1.1)
|
||||
mustermann (1.1.1)
|
||||
ruby2_keywords (~> 0.0.1)
|
||||
mysql2 (0.5.3)
|
||||
net-ldap (0.16.3)
|
||||
nokogiri (1.10.10)
|
||||
mini_portile2 (~> 2.4.0)
|
||||
openssl (2.2.0)
|
||||
openssl-signature_algorithm (1.0.0)
|
||||
optimist (3.0.1)
|
||||
ox (2.13.4)
|
||||
parse-cron (0.1.4)
|
||||
pg (1.2.3)
|
||||
polyglot (0.3.5)
|
||||
public_suffix (4.0.6)
|
||||
rack (2.2.3)
|
||||
rack-protection (2.1.0)
|
||||
rack
|
||||
rbvmomi (2.2.0)
|
||||
builder (~> 3.0)
|
||||
json (>= 1.8)
|
||||
nokogiri (~> 1.5)
|
||||
optimist (~> 3.0)
|
||||
rotp (6.2.0)
|
||||
rqrcode (1.1.2)
|
||||
chunky_png (~> 1.0)
|
||||
rqrcode_core (~> 0.1)
|
||||
rqrcode_core (0.1.2)
|
||||
ruby2_keywords (0.0.2)
|
||||
safety_net_attestation (0.4.0)
|
||||
jwt (~> 2.0)
|
||||
securecompare (1.0.0)
|
||||
sequel (5.37.0)
|
||||
sinatra (2.1.0)
|
||||
mustermann (~> 1.0)
|
||||
rack (~> 2.2)
|
||||
rack-protection (= 2.1.0)
|
||||
tilt (~> 2.0)
|
||||
sqlite3 (1.4.2)
|
||||
thin (1.7.2)
|
||||
daemons (~> 1.0, >= 1.0.9)
|
||||
eventmachine (~> 1.0, >= 1.0.4)
|
||||
rack (>= 1, < 3)
|
||||
thread_safe (0.3.6)
|
||||
tilt (2.0.10)
|
||||
timeliness (0.3.10)
|
||||
tpm-key_attestation (0.10.0)
|
||||
bindata (~> 2.4)
|
||||
openssl-signature_algorithm (~> 1.0)
|
||||
treetop (1.6.11)
|
||||
polyglot (~> 0.3)
|
||||
tzinfo (1.2.7)
|
||||
thread_safe (~> 0.1)
|
||||
unf (0.1.4)
|
||||
unf_ext
|
||||
unf_ext (0.0.7.7)
|
||||
uuidtools (2.2.0)
|
||||
vsphere-automation-cis (0.4.7)
|
||||
vsphere-automation-runtime (~> 0.4.6)
|
||||
vsphere-automation-runtime (0.4.7)
|
||||
vsphere-automation-vcenter (0.4.7)
|
||||
vsphere-automation-cis (~> 0.4.6)
|
||||
vsphere-automation-runtime (~> 0.4.6)
|
||||
webauthn (2.4.0)
|
||||
android_key_attestation (~> 0.3.0)
|
||||
awrence (~> 1.1)
|
||||
bindata (~> 2.4)
|
||||
cbor (~> 0.5.9)
|
||||
cose (~> 1.1)
|
||||
openssl (~> 2.0)
|
||||
safety_net_attestation (~> 0.4.0)
|
||||
securecompare (~> 1.0)
|
||||
tpm-key_attestation (~> 0.10.0)
|
||||
xml-simple (1.1.5)
|
||||
xmlrpc (0.3.0)
|
||||
zendesk_api (1.28.0)
|
||||
faraday (>= 0.9.0, < 2.0.0)
|
||||
hashie (>= 3.5.2, < 5.0.0)
|
||||
inflection
|
||||
mime-types
|
||||
multipart-post (~> 2.0)
|
||||
|
||||
PLATFORMS
|
||||
ruby
|
||||
|
||||
DEPENDENCIES
|
||||
activesupport (~> 4.2)
|
||||
amazon-ec2
|
||||
augeas (~> 0.6)
|
||||
aws-sdk-cloudwatch
|
||||
aws-sdk-ec2 (>= 1.151)
|
||||
aws-sdk-s3
|
||||
azure_mgmt_compute
|
||||
azure_mgmt_monitor
|
||||
azure_mgmt_network
|
||||
azure_mgmt_resources
|
||||
azure_mgmt_storage
|
||||
configparser
|
||||
curb
|
||||
dalli
|
||||
faraday (~> 0.15)
|
||||
faraday_middleware (~> 0.12)
|
||||
ffi-rzmq (~> 2.0.7)
|
||||
gnuplot
|
||||
highline (~> 1.7)
|
||||
i18n (~> 0.9)
|
||||
ipaddress (~> 0.8.3)
|
||||
json
|
||||
memcache-client
|
||||
mysql2
|
||||
net-ldap
|
||||
nokogiri
|
||||
ox
|
||||
parse-cron
|
||||
pg
|
||||
public_suffix
|
||||
rack
|
||||
rbvmomi (~> 2.2.0)
|
||||
rotp
|
||||
rqrcode
|
||||
sequel
|
||||
sinatra
|
||||
sqlite3
|
||||
thin
|
||||
treetop (>= 1.6.3)
|
||||
uuidtools
|
||||
vsphere-automation-cis (~> 0.4.6)
|
||||
vsphere-automation-vcenter (~> 0.4.6)
|
||||
webauthn
|
||||
xmlrpc
|
||||
zendesk_api
|
||||
|
||||
BUNDLED WITH
|
||||
1.17.3
|
share/install_gems/Ubuntu2010/Gemfile.lock (new file, 233 lines)
@ -0,0 +1,233 @@
|
||||
GEM
|
||||
remote: https://rubygems.org/
|
||||
specs:
|
||||
activesupport (4.2.11.3)
|
||||
i18n (~> 0.7)
|
||||
minitest (~> 5.1)
|
||||
thread_safe (~> 0.3, >= 0.3.4)
|
||||
tzinfo (~> 1.1)
|
||||
amazon-ec2 (0.9.17)
|
||||
xml-simple (>= 1.0.12)
|
||||
android_key_attestation (0.3.0)
|
||||
augeas (0.6.4)
|
||||
awrence (1.1.1)
|
||||
aws-eventstream (1.1.0)
|
||||
aws-partitions (1.385.0)
|
||||
aws-sdk-cloudwatch (1.45.0)
|
||||
aws-sdk-core (~> 3, >= 3.109.0)
|
||||
aws-sigv4 (~> 1.1)
|
||||
aws-sdk-core (3.109.1)
|
||||
aws-eventstream (~> 1, >= 1.0.2)
|
||||
aws-partitions (~> 1, >= 1.239.0)
|
||||
aws-sigv4 (~> 1.1)
|
||||
jmespath (~> 1.0)
|
||||
aws-sdk-ec2 (1.202.0)
|
||||
aws-sdk-core (~> 3, >= 3.109.0)
|
||||
aws-sigv4 (~> 1.1)
|
||||
aws-sdk-kms (1.39.0)
|
||||
aws-sdk-core (~> 3, >= 3.109.0)
|
||||
aws-sigv4 (~> 1.1)
|
||||
aws-sdk-s3 (1.83.1)
|
||||
aws-sdk-core (~> 3, >= 3.109.0)
|
||||
aws-sdk-kms (~> 1)
|
||||
aws-sigv4 (~> 1.1)
|
||||
aws-sigv4 (1.2.2)
|
||||
aws-eventstream (~> 1, >= 1.0.2)
|
||||
azure_mgmt_compute (0.20.0)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
azure_mgmt_monitor (0.17.6)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
azure_mgmt_network (0.24.1)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
azure_mgmt_resources (0.18.0)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
azure_mgmt_storage (0.22.0)
|
||||
ms_rest_azure (~> 0.12.0)
|
||||
bindata (2.4.8)
|
||||
builder (3.2.4)
|
||||
cbor (0.5.9.6)
|
||||
chunky_png (1.3.12)
|
||||
concurrent-ruby (1.1.7)
|
||||
configparser (0.1.7)
|
||||
cose (1.2.0)
|
||||
cbor (~> 0.5.9)
|
||||
openssl-signature_algorithm (~> 1.0)
|
||||
curb (0.9.10)
|
||||
daemons (1.3.1)
|
||||
dalli (2.7.11)
|
||||
domain_name (0.5.20190701)
|
||||
unf (>= 0.0.5, < 1.0.0)
|
||||
eventmachine (1.2.7)
|
||||
faraday (0.17.3)
|
||||
multipart-post (>= 1.2, < 3)
|
||||
faraday-cookie_jar (0.0.7)
|
||||
faraday (>= 0.8.0)
|
||||
http-cookie (~> 1.0.0)
|
||||
faraday_middleware (0.14.0)
|
||||
faraday (>= 0.7.4, < 1.0)
|
||||
ffi (1.13.1)
|
||||
ffi-rzmq (2.0.7)
|
||||
ffi-rzmq-core (>= 1.0.7)
|
||||
ffi-rzmq-core (1.0.7)
|
||||
ffi
|
||||
gnuplot (2.6.2)
|
||||
hashie (4.1.0)
|
||||
highline (1.7.10)
|
||||
http-cookie (1.0.3)
|
||||
domain_name (~> 0.5)
|
||||
i18n (0.9.5)
|
||||
concurrent-ruby (~> 1.0)
|
||||
inflection (1.0.0)
|
||||
ipaddress (0.8.3)
|
||||
jmespath (1.4.0)
|
||||
json (2.3.1)
|
||||
jwt (2.2.2)
|
||||
memcache-client (1.8.5)
|
||||
mime-types (3.3.1)
|
||||
mime-types-data (~> 3.2015)
|
||||
mime-types-data (3.2020.0512)
|
||||
mini_portile2 (2.4.0)
|
||||
minitest (5.14.2)
|
||||
ms_rest (0.7.6)
|
||||
concurrent-ruby (~> 1.0)
|
||||
faraday (>= 0.9, < 2.0.0)
|
||||
timeliness (~> 0.3.10)
|
||||
ms_rest_azure (0.12.0)
|
||||
concurrent-ruby (~> 1.0)
|
||||
faraday (>= 0.9, < 2.0.0)
|
||||
faraday-cookie_jar (~> 0.0.6)
|
||||
ms_rest (~> 0.7.6)
|
||||
multipart-post (2.1.1)
|
||||
mustermann (1.1.1)
|
||||
ruby2_keywords (~> 0.0.1)
|
||||
mysql2 (0.5.3)
|
||||
net-ldap (0.16.3)
|
||||
nokogiri (1.10.10)
|
||||
mini_portile2 (~> 2.4.0)
|
||||
openssl (2.2.0)
|
||||
openssl-signature_algorithm (1.0.0)
|
||||
optimist (3.0.1)
|
||||
ox (2.13.4)
|
||||
parse-cron (0.1.4)
|
||||
pg (1.2.3)
|
||||
polyglot (0.3.5)
|
||||
public_suffix (4.0.6)
|
||||
rack (2.2.3)
|
||||
rack-protection (2.1.0)
|
||||
rack
|
||||
rbvmomi (2.2.0)
|
||||
builder (~> 3.0)
|
||||
json (>= 1.8)
|
||||
nokogiri (~> 1.5)
|
||||
optimist (~> 3.0)
|
||||
rotp (6.2.0)
|
||||
rqrcode (1.1.2)
|
||||
chunky_png (~> 1.0)
|
||||
rqrcode_core (~> 0.1)
|
||||
rqrcode_core (0.1.2)
|
||||
ruby2_keywords (0.0.2)
|
||||
safety_net_attestation (0.4.0)
|
||||
jwt (~> 2.0)
|
||||
securecompare (1.0.0)
|
||||
sequel (5.37.0)
|
||||
sinatra (2.1.0)
|
||||
mustermann (~> 1.0)
|
||||
rack (~> 2.2)
|
||||
rack-protection (= 2.1.0)
|
||||
tilt (~> 2.0)
|
||||
sqlite3 (1.4.2)
|
||||
thin (1.7.2)
|
||||
daemons (~> 1.0, >= 1.0.9)
|
||||
eventmachine (~> 1.0, >= 1.0.4)
|
||||
rack (>= 1, < 3)
|
||||
thread_safe (0.3.6)
|
||||
tilt (2.0.10)
|
||||
timeliness (0.3.10)
|
||||
tpm-key_attestation (0.10.0)
|
||||
bindata (~> 2.4)
|
||||
openssl-signature_algorithm (~> 1.0)
|
||||
treetop (1.6.11)
|
||||
polyglot (~> 0.3)
|
||||
tzinfo (1.2.7)
|
||||
thread_safe (~> 0.1)
|
||||
unf (0.1.4)
|
||||
unf_ext
|
||||
unf_ext (0.0.7.7)
|
||||
uuidtools (2.2.0)
|
||||
vsphere-automation-cis (0.4.7)
|
||||
vsphere-automation-runtime (~> 0.4.6)
|
||||
vsphere-automation-runtime (0.4.7)
|
||||
vsphere-automation-vcenter (0.4.7)
|
||||
vsphere-automation-cis (~> 0.4.6)
|
||||
vsphere-automation-runtime (~> 0.4.6)
|
||||
webauthn (2.4.0)
|
||||
android_key_attestation (~> 0.3.0)
|
||||
awrence (~> 1.1)
|
||||
bindata (~> 2.4)
|
||||
cbor (~> 0.5.9)
|
||||
cose (~> 1.1)
|
||||
openssl (~> 2.0)
|
||||
safety_net_attestation (~> 0.4.0)
|
||||
securecompare (~> 1.0)
|
||||
tpm-key_attestation (~> 0.10.0)
|
||||
xml-simple (1.1.5)
|
||||
xmlrpc (0.3.0)
|
||||
zendesk_api (1.28.0)
|
||||
faraday (>= 0.9.0, < 2.0.0)
|
||||
hashie (>= 3.5.2, < 5.0.0)
|
||||
inflection
|
||||
mime-types
|
||||
multipart-post (~> 2.0)
|
||||
|
||||
PLATFORMS
|
||||
ruby
|
||||
|
||||
DEPENDENCIES
|
||||
activesupport (~> 4.2)
|
||||
amazon-ec2
|
||||
augeas (~> 0.6)
|
||||
aws-sdk-cloudwatch
|
||||
aws-sdk-ec2 (>= 1.151)
|
||||
aws-sdk-s3
|
||||
azure_mgmt_compute
|
||||
azure_mgmt_monitor
|
||||
azure_mgmt_network
|
||||
azure_mgmt_resources
|
||||
azure_mgmt_storage
|
||||
configparser
|
||||
curb
|
||||
dalli
|
||||
faraday (~> 0.15)
|
||||
faraday_middleware (~> 0.12)
|
||||
ffi-rzmq (~> 2.0.7)
|
||||
gnuplot
|
||||
highline (~> 1.7)
|
||||
i18n (~> 0.9)
|
||||
ipaddress (~> 0.8.3)
|
||||
json
|
||||
memcache-client
|
||||
mysql2
|
||||
net-ldap
|
||||
nokogiri
|
||||
ox
|
||||
parse-cron
|
||||
pg
|
||||
public_suffix
|
||||
rack
|
||||
rbvmomi (~> 2.2.0)
|
||||
rotp
|
||||
rqrcode
|
||||
sequel
|
||||
sinatra
|
||||
sqlite3
|
||||
thin
|
||||
treetop (>= 1.6.3)
|
||||
uuidtools
|
||||
vsphere-automation-cis (~> 0.4.6)
|
||||
vsphere-automation-vcenter (~> 0.4.6)
|
||||
webauthn
|
||||
xmlrpc
|
||||
zendesk_api
|
||||
|
||||
BUNDLED WITH
|
||||
1.17.3
|
@ -573,6 +573,9 @@ Layout/SpaceAroundMethodCallOperator:
|
||||
Layout/EmptyLinesAroundAttributeAccessor:
|
||||
Enabled: true
|
||||
|
||||
Layout/BeginEndAlignment:
|
||||
Enabled: true
|
||||
|
||||
#######
|
||||
# STYLE
|
||||
#######
|
||||
@ -719,6 +722,34 @@ Style/RedundantRegexpCharacterClass:
|
||||
Style/RedundantRegexpEscape:
|
||||
Enabled: False
|
||||
|
||||
Style/CombinableLoops:
|
||||
Enabled: false
|
||||
|
||||
Style/ExplicitBlockArgument:
|
||||
Enabled: true
|
||||
|
||||
Style/GlobalStdStream:
|
||||
Enabled: false
|
||||
|
||||
Style/KeywordParametersOrder:
|
||||
Enabled: true
|
||||
|
||||
Style/OptionalBooleanParameter:
|
||||
Enabled: false
|
||||
|
||||
Style/RedundantSelfAssignment:
|
||||
Enabled: true
|
||||
|
||||
Style/SingleArgumentDig:
|
||||
Enabled: true
|
||||
|
||||
Style/SoleNestedConditional:
|
||||
Enabled: false
|
||||
|
||||
Style/StringConcatenation:
|
||||
Enabled: false
|
||||
>>>>>>> one-5.12-new
|
||||
|
||||
######
|
||||
# LINT
|
||||
######
|
||||
@ -777,6 +808,54 @@ Lint/DuplicateElsifCondition:
|
||||
Lint/MixedRegexpCaptureTypes:
|
||||
Enabled: True
|
||||
|
||||
Lint/BinaryOperatorWithIdenticalOperands:
|
||||
Enabled: true
|
||||
|
||||
Lint/ConstantDefinitionInBlock:
|
||||
Enabled: false
|
||||
|
||||
Lint/DuplicateRequire:
|
||||
Enabled: true
|
||||
|
||||
Lint/DuplicateRescueException:
|
||||
Enabled: true
|
||||
|
||||
Lint/EmptyConditionalBody:
|
||||
Enabled: true
|
||||
|
||||
Lint/EmptyFile:
|
||||
Enabled: true
|
||||
|
||||
Lint/FloatComparison:
|
||||
Enabled: false
|
||||
|
||||
Lint/IdentityComparison:
|
||||
Enabled: true
|
||||
|
||||
Lint/MissingSuper:
|
||||
Enabled: true
|
||||
|
||||
Lint/OutOfRangeRegexpRef:
|
||||
Enabled: true
|
||||
|
||||
Lint/SelfAssignment:
|
||||
Enabled: true
|
||||
|
||||
Lint/TopLevelReturnWithArgument:
|
||||
Enabled: true
|
||||
Lint/TrailingCommaInAttributeDeclaration:
|
||||
Enabled: true
|
||||
|
||||
Lint/UnreachableLoop:
|
||||
Enabled: true
|
||||
|
||||
Lint/UselessMethodDefinition:
|
||||
Enabled: true
|
||||
|
||||
Lint/UselessTimes:
|
||||
Enabled: true
|
||||
|
||||
>>>>>>> one-5.12-new
|
||||
#########
|
||||
# METRICS
|
||||
########
|
||||
|
@ -394,6 +394,35 @@ module OneGate
|
||||
end
|
||||
end
|
||||
|
||||
# Virtual Router module
|
||||
module VirtualRouter
|
||||
|
||||
def self.print(json_hash, _extended = false)
|
||||
OneGate.print_header('VROUTER ' + json_hash['VROUTER']['ID'])
|
||||
OneGate.print_key_value('NAME', json_hash['VROUTER']['NAME'])
|
||||
|
||||
vms_ids = Array(json_hash['VROUTER']['VMS']['ID'])
|
||||
|
||||
vms = vms_ids.join(',')
|
||||
|
||||
OneGate.print_key_value('VMS', vms)
|
||||
puts
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
# Virtual Network module
|
||||
module VirtualNetwork
|
||||
|
||||
def self.print(json_hash, _extended = false)
|
||||
OneGate.print_header('VNET')
|
||||
OneGate.print_key_value('ID', json_hash['VNET']['ID'])
|
||||
|
||||
puts
|
||||
end
|
||||
|
||||
end
|
||||
|
||||
class Client
|
||||
def initialize(opts={})
|
||||
@vmid = ENV["VMID"]
|
||||
@ -473,8 +502,8 @@ module OneGate
|
||||
|
||||
def self.parse_json(response)
|
||||
if CloudClient::is_error?(response)
|
||||
puts "ERROR: "
|
||||
puts response.message
|
||||
STDERR.puts 'ERROR: '
|
||||
STDERR.puts response.message
|
||||
exit -1
|
||||
else
|
||||
return JSON.parse(response.body)
|
||||
@ -537,6 +566,10 @@ Available commands
|
||||
$ onegate service show [--json][--extended]
|
||||
|
||||
$ onegate service scale --role ROLE --cardinality CARDINALITY
|
||||
|
||||
$ onegate vrouter show [--json]
|
||||
|
||||
$ onegate vnet show VNETID [--json][--extended]
|
||||
EOT
|
||||
end
|
||||
end
|
||||
@ -576,7 +609,7 @@ OptionParser.new do |opts|
|
||||
end
|
||||
|
||||
opts.on("-h", "--help", "Show this message") do
|
||||
puts OneGate.help_str
|
||||
STDERR.puts OneGate.help_str
|
||||
exit
|
||||
end
|
||||
end.parse!
|
||||
@ -601,7 +634,7 @@ when "vm"
|
||||
end
|
||||
when "update"
|
||||
if !options[:data] && !options[:erase]
|
||||
puts "You have to provide the data as a param (--data, --erase)"
|
||||
STDERR.puts 'You have to provide the data as a param (--data, --erase)'
|
||||
exit -1
|
||||
end
|
||||
|
||||
@ -618,8 +651,8 @@ when "vm"
|
||||
end
|
||||
|
||||
if CloudClient::is_error?(response)
|
||||
puts "ERROR: "
|
||||
puts response.message
|
||||
STDERR.puts 'ERROR: '
|
||||
STDERR.puts response.message
|
||||
exit -1
|
||||
end
|
||||
when "resume",
|
||||
@ -649,18 +682,18 @@ when "vm"
|
||||
response = client.post("/vms/"+ARGV[2]+"/action", action_hash.to_json)
|
||||
|
||||
if CloudClient::is_error?(response)
|
||||
puts "ERROR: "
|
||||
puts response.message
|
||||
STDERR.puts 'ERROR: '
|
||||
STDERR.puts response.message
|
||||
exit -1
|
||||
end
|
||||
else
|
||||
puts "You have to provide a VM ID"
|
||||
STDERR.puts 'You have to provide a VM ID'
|
||||
exit -1
|
||||
end
|
||||
else
|
||||
puts OneGate.help_str
|
||||
puts
|
||||
puts "Action #{ARGV[1]} not supported"
|
||||
STDERR.puts OneGate.help_str
|
||||
STDERR.puts
|
||||
STDERR.puts "Action #{ARGV[1]} not supported"
|
||||
exit -1
|
||||
end
|
||||
when "service"
|
||||
@ -693,18 +726,79 @@ when "service"
|
||||
}.to_json)
|
||||
|
||||
if CloudClient::is_error?(response)
|
||||
puts "ERROR: "
|
||||
puts response.message
|
||||
STDERR.puts 'ERROR: '
|
||||
STDERR.puts response.message
|
||||
exit -1
|
||||
end
|
||||
else
|
||||
puts OneGate.help_str
|
||||
puts
|
||||
puts "Action #{ARGV[1]} not supported"
|
||||
STDERR.puts OneGate.help_str
|
||||
STDERR.puts
|
||||
STDERR.puts "Action #{ARGV[1]} not supported"
|
||||
exit -1
|
||||
end
|
||||
when 'vrouter'
|
||||
case ARGV[1]
|
||||
when 'show'
|
||||
if options[:extended]
|
||||
extra = {}
|
||||
extra['extended'] = true
|
||||
|
||||
extra = URI.encode_www_form(extra)
|
||||
end
|
||||
|
||||
response = client.get('/vrouter', extra)
|
||||
json_hash = OneGate.parse_json(response)
|
||||
|
||||
if options[:json]
|
||||
puts JSON.pretty_generate(json_hash)
|
||||
else
|
||||
if options[:extended]
|
||||
OneGate::VirtualRouter.print(json_hash, true)
|
||||
else
|
||||
OneGate::VirtualRouter.print(json_hash)
|
||||
end
|
||||
end
|
||||
else
|
||||
STDERR.puts OneGate.help_str
|
||||
STDERR.puts
|
||||
STDERR.puts "Action #{ARGV[1]} not supported"
|
||||
exit(-1)
|
||||
end
|
||||
when 'vnet'
|
||||
case ARGV[1]
|
||||
when 'show'
|
||||
if ARGV[2]
|
||||
if options[:extended]
|
||||
extra = {}
|
||||
extra['extended'] = true
|
||||
|
||||
extra = URI.encode_www_form(extra)
|
||||
end
|
||||
|
||||
response = client.get('/vnet/'+ARGV[2], extra)
|
||||
json_hash = OneGate.parse_json(response)
|
||||
|
||||
if options[:json]
|
||||
puts JSON.pretty_generate(json_hash)
|
||||
else
|
||||
if options[:extended]
|
||||
OneGate::VirtualNetwork.print(json_hash, true)
|
||||
else
|
||||
OneGate::VirtualNetwork.print(json_hash)
|
||||
end
|
||||
end
|
||||
else
|
||||
STDERR.puts 'You have to provide a VNET ID'
|
||||
exit -1
|
||||
end
|
||||
else
|
||||
STDERR.puts OneGate.help_str
|
||||
STDERR.puts
|
||||
STDERR.puts "Action #{ARGV[1]} not supported"
|
||||
exit(-1)
|
||||
end
|
||||
else
|
||||
puts OneGate.help_str
|
||||
STDERR.puts OneGate.help_str
|
||||
exit -1
|
||||
end
|
||||
|
||||
|
@ -217,11 +217,11 @@ AclManager::~AclManager()
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
const bool AclManager::authorize(
|
||||
bool AclManager::authorize(
|
||||
int uid,
|
||||
const set<int>& user_groups,
|
||||
const PoolObjectAuth& obj_perms,
|
||||
AuthRequest::Operation op)
|
||||
AuthRequest::Operation op) const
|
||||
{
|
||||
bool auth = false;
|
||||
|
||||
@ -420,9 +420,9 @@ const bool AclManager::authorize(
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
const bool AclManager::oneadmin_authorize(
|
||||
bool AclManager::oneadmin_authorize(
|
||||
const PoolObjectAuth& obj_perms,
|
||||
AuthRequest::Operation op)
|
||||
AuthRequest::Operation op) const
|
||||
{
|
||||
if (static_cast<long long int>(op) & 0x10LL) //No lockable object
|
||||
{
|
||||
@ -449,7 +449,7 @@ bool AclManager::match_rules_wrapper(
|
||||
long long individual_obj_type,
|
||||
long long group_obj_type,
|
||||
long long cluster_obj_type,
|
||||
const multimap<long long, AclRule*> &tmp_rules)
|
||||
const multimap<long long, AclRule*> &tmp_rules) const
|
||||
{
|
||||
bool auth = false;
|
||||
|
||||
@ -525,7 +525,7 @@ bool AclManager::match_rules(
|
||||
long long resource_oid_mask,
|
||||
long long resource_gid_mask,
|
||||
long long resource_cid_mask,
|
||||
const multimap<long long, AclRule*> &rules)
|
||||
const multimap<long long, AclRule*> &rules) const
|
||||
|
||||
{
|
||||
bool auth = false;
|
||||
|
@ -56,17 +56,17 @@ begin
|
||||
|
||||
user = URI_PARSER.unescape(xml['/AUTHN/USERNAME'])
|
||||
secret = URI_PARSER.unescape(xml['/AUTHN/SECRET'])
|
||||
rescue
|
||||
STDERR.puts "Invalid XML input"
|
||||
rescue StandardError
|
||||
STDERR.puts 'Invalid XML input'
|
||||
exit(-1)
|
||||
end
|
||||
|
||||
options=YAML.load(File.read(ETC_LOCATION+'/auth/ldap_auth.conf'))
|
||||
|
||||
user_full = user
|
||||
order,user = get_server_order(options, user)
|
||||
order, user = get_server_order(options, user)
|
||||
|
||||
STDERR.puts "Using group of servers: #{servers.join(', ')}" if order.length>1
|
||||
STDERR.puts "Using group of servers: #{order.join(' ')}" if order.length>1
|
||||
|
||||
order.each do |servers|
|
||||
servers.each do |server_name|
|
||||
@ -74,7 +74,7 @@ order.each do |servers|
|
||||
|
||||
server_conf=options[server_name]
|
||||
if !server_conf
|
||||
STDERR.puts "Configuration for server not found"
|
||||
STDERR.puts 'Configuration for server not found'
|
||||
break
|
||||
end
|
||||
|
||||
@ -95,7 +95,7 @@ order.each do |servers|
|
||||
|
||||
if !user_uid.nil? && user_uid.downcase != user.downcase
|
||||
STDERR.puts "User \"#{user}\" and \"#{user_uid}\" "\
|
||||
"differes (leading/trailing whitespace)"
|
||||
'differes (leading/trailing whitespace)'
|
||||
break
|
||||
end
|
||||
|
||||
@ -110,7 +110,7 @@ order.each do |servers|
|
||||
groups = ldap.get_groups
|
||||
if groups.empty?
|
||||
if !server_conf[:mapping_default]
|
||||
STDERR.puts "User does not belong to a mapped group"
|
||||
STDERR.puts 'User does not belong to a mapped group'
|
||||
break
|
||||
else
|
||||
groups = [server_conf[:mapping_default]]
|
||||
@ -126,7 +126,7 @@ order.each do |servers|
|
||||
puts "ldap #{escaped_user} #{escaped_secret} #{group_list}"
|
||||
exit
|
||||
else
|
||||
STDERR.puts "Bad user/password"
|
||||
STDERR.puts 'Bad user/password'
|
||||
break
|
||||
end
|
||||
end
|
||||
|
@ -532,7 +532,7 @@ module CLIHelper
# @param data [Array] Array with data to show
# @param del [Char] CSV delimiter
def print_csv_data(data, del)
del ? del = del : del = ','
del ||= ','

data.each do |l|
result = []

@ -20,6 +20,10 @@ require 'one_helper/onevm_helper'
|
||||
# CLI helper for oneimage command
|
||||
class OneImageHelper < OpenNebulaHelper::OneHelper
|
||||
|
||||
# This list contains prefixes that should skip adding user home to the path
|
||||
# This must have the same content as the case $FROM in downloader.sh
|
||||
PREFIXES = %w[http https ssh s3 rbd vcenter lxd docker]
|
||||
|
||||
TEMPLATE_OPTIONS=[
|
||||
{
|
||||
:name => 'name',
|
||||
@ -82,7 +86,7 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
|
||||
:description => 'Path of the image file',
|
||||
:format => String,
|
||||
:proc => lambda do |o, _options|
|
||||
next [0, o] if o.match(%r{^(https?|docker)://})
|
||||
next [0, o] if o.match(%r{^(#{PREFIXES.join('|')})://})
|
||||
|
||||
if o[0, 1]=='/'
|
||||
path=o
|
||||
|
@ -201,6 +201,10 @@ class OneUserHelper < OpenNebulaHelper::OneHelper
|
||||
login_client = self.get_login_client(username, options)
|
||||
end
|
||||
|
||||
if (login_client.is_a? Array) && login_client[0] == -1
|
||||
return login_client
|
||||
end
|
||||
|
||||
user = OpenNebula::User.new(User.build_xml, login_client)
|
||||
|
||||
egid = options[:group] || -1
|
||||
|
@ -100,7 +100,8 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
|
||||
if File.file?(o)
|
||||
options[:file] = o
|
||||
else
|
||||
exit - 1
|
||||
STDERR.puts "File `#{options[:file]}` doesn't exist"
|
||||
exit(-1)
|
||||
end
|
||||
}
|
||||
}
|
||||
|
@ -155,6 +155,7 @@ CommandParser::CmdParser.new(ARGV) do
|
||||
begin
|
||||
args = helper.parse_opts(options)
|
||||
args[:filter] = true
|
||||
args[:short] = true
|
||||
vi_client = VCenterDriver::VIClient.new_from_host(options[:host])
|
||||
importer = VCenterDriver::VcImporter
|
||||
.new_child(helper.client, vi_client, options[:object])
|
||||
@ -185,6 +186,7 @@ CommandParser::CmdParser.new(ARGV) do
|
||||
begin
|
||||
args = helper.parse_opts(options)
|
||||
args[:filter] = false
|
||||
args[:short] = true
|
||||
vi_client = VCenterDriver::VIClient.new_from_host(options[:host])
|
||||
importer = VCenterDriver::VcImporter
|
||||
.new_child(helper.client, vi_client, options[:object])
|
||||
|
@ -106,8 +106,9 @@ if VCenterDriver::FileHelper.is_remote_or_needs_unpack?(img_path)
|
||||
downsh_args << '--convert vmdk'
|
||||
|
||||
downloader = "#{File.dirname(__FILE__)}/../downloader.sh #{downsh_args}"
|
||||
b64 = Base64.encode64(drv_action.to_xml).gsub!("\n", '')
|
||||
|
||||
rc = system("#{downloader} #{img_path} #{temp_file}")
|
||||
rc = system("DRV_ACTION=#{b64} #{downloader} #{img_path} #{temp_file}")
|
||||
|
||||
if there_is_not_system_error?(rc)
|
||||
STDERR.puts "Error downloading #{img_path}"
|
||||
@ -171,17 +172,17 @@ files_to_upload.each_with_index do |f, index|
|
||||
|
||||
cmd = "#{File.dirname(__FILE__)}/../vcenter_uploader.rb #{uploader_args}"
|
||||
|
||||
stdout_str, status = Open3.capture2(cmd)
|
||||
target, stderr, status = Open3.capture3(cmd)
|
||||
|
||||
if last_file_to_upload?(index, files_to_upload)
|
||||
puts stdout_str
|
||||
if !status.success?
|
||||
STDERR.puts "Cannot upload file #{f}: #{stderr}"
|
||||
FileUtils.rm_rf(temp_file) if temp_file
|
||||
exit(-1)
|
||||
end
|
||||
|
||||
next if status.success?
|
||||
|
||||
STDERR.puts "Cannot upload file #{f}"
|
||||
FileUtils.rm_rf(temp_file) if temp_file
|
||||
exit(-1)
|
||||
if last_file_to_upload?(index, files_to_upload)
|
||||
puts "#{target.gsub("\n", '')} vmdk"
|
||||
end
|
||||
end
|
||||
|
||||
FileUtils.rm_rf(temp_file) if temp_file
|
||||
|
@ -397,8 +397,8 @@ class EventManager
|
||||
rc_nodes = { :successful => [], :failure => [] }
|
||||
|
||||
nodes.delete_if do |node|
|
||||
vm = OpenNebula::VirtualMachine.new_with_id(node, @cloud_auth.client)
|
||||
|
||||
vm = OpenNebula::VirtualMachine.new_with_id(node,
|
||||
@cloud_auth.client)
|
||||
vm.info
|
||||
|
||||
vm_lcm_state = OpenNebula::VirtualMachine::LCM_STATE[vm.lcm_state]
|
||||
|
@ -778,9 +778,9 @@ module OpenNebula
|
||||
scheduled_pol ||= []
|
||||
|
||||
scheduled_pol.each do |policy|
|
||||
diff = scale_time?(policy)
|
||||
diff, cooldown_duration = scale_time?(policy)
|
||||
|
||||
return [diff, 0] if diff != 0
|
||||
return [diff, cooldown_duration] if diff != 0
|
||||
end
|
||||
|
||||
elasticity_pol.each do |policy|
|
||||
@ -959,10 +959,11 @@ module OpenNebula
|
||||
|
||||
new_cardinality = calculate_new_cardinality(elasticity_pol)
|
||||
|
||||
return new_cardinality - cardinality
|
||||
return [new_cardinality - cardinality,
|
||||
elasticity_pol['cooldown']]
|
||||
end
|
||||
|
||||
0
|
||||
[0, elasticity_pol['cooldown']]
|
||||
end
|
||||
|
||||
# Returns a positive, 0, or negative number of nodes to adjust,
|
||||
|
@ -284,7 +284,7 @@ module OpenNebula
|
||||
if OpenNebula.is_error?(rc)
|
||||
log_info("Error deleting vnet #{net_id}: #{rc}")
|
||||
end
|
||||
end
|
||||
end if networks
|
||||
|
||||
super()
|
||||
end
|
||||
|
@ -141,7 +141,7 @@ int HookLog::dump_log(std::string &xml_log)
|
||||
int HookLog::drop(SqlDB *db, const int hook_id)
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
|
||||
oss << "DELETE FROM " << table << " WHERE hkid =" << hook_id;
|
||||
|
||||
return db->exec_wr(oss);
|
||||
@ -164,7 +164,7 @@ int HookLog::add(int hkid, int hkrc, std::string &xml_result)
|
||||
|
||||
cb.set_callback(&query_output);
|
||||
|
||||
oss << "SELECT IFNULL(MAX(exeid), -1), COUNT(*) FROM hook_log" << " WHERE hkid = " << hkid;
|
||||
oss << "SELECT coalesce(MAX(exeid), -1), COUNT(*) FROM " << table << " WHERE hkid = " << hkid;
|
||||
|
||||
int rc = db->exec_rd(oss, &cb);
|
||||
|
||||
|
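The change above swaps the vendor-specific IFNULL for standard COALESCE and takes the table name from the class constant instead of hard-coding hook_log. A small sketch of the resulting query builder; the helper function name is illustrative.

#include <sstream>
#include <string>

// Illustrative helper: builds the same kind of query as the hunk above,
// using COALESCE (portable across MySQL and SQLite) and a table parameter.
std::string next_exeid_query(const std::string& table, int hkid)
{
    std::ostringstream oss;

    oss << "SELECT coalesce(MAX(exeid), -1), COUNT(*) FROM " << table
        << " WHERE hkid = " << hkid;

    return oss.str();
}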
@ -165,20 +165,72 @@ int HostPool::update(PoolObjectSQL * objsql)
|
||||
|
||||
int HostPool::dump_monitoring(
|
||||
string& oss,
|
||||
const string& where)
|
||||
const string& where,
|
||||
const int seconds)
|
||||
{
|
||||
ostringstream cmd;
|
||||
|
||||
cmd << "SELECT " << one_db::host_monitor_table << ".body FROM "
|
||||
<< one_db::host_monitor_table << " INNER JOIN " << one_db::host_table
|
||||
<< " ON hid = oid";
|
||||
|
||||
if ( !where.empty() )
|
||||
switch(seconds)
|
||||
{
|
||||
cmd << " WHERE " << where;
|
||||
}
|
||||
case 0: //Get last monitor value
|
||||
/*
|
||||
* SELECT host_monitoring.body
|
||||
* FROM host_monitoring
|
||||
* INNER JOIN (
|
||||
* SELECT hid, MAX(last_mon_time) as last_mon_time
|
||||
* FROM host_monitoring
|
||||
* GROUP BY hid
|
||||
* ) lmt on lmt.hid = host_monitoring.hid AND lmt.last_mon_time = host_monitoring.last_mon_time
|
||||
* INNER JOIN host_pool ON host_monitoring.hid = oid
|
||||
* ORDER BY oid;
|
||||
*/
|
||||
cmd << "SELECT " << one_db::host_monitor_table << ".body "
|
||||
<< "FROM " << one_db::host_monitor_table << " INNER JOIN ("
|
||||
<< "SELECT hid, MAX(last_mon_time) as last_mon_time FROM "
|
||||
<< one_db::host_monitor_table << " GROUP BY hid) as lmt "
|
||||
<< "ON lmt.hid = " << one_db::host_monitor_table << ".hid "
|
||||
<< "AND lmt.last_mon_time = " << one_db::host_monitor_table
|
||||
<< ".last_mon_time INNER JOIN " << one_db::host_table
|
||||
<< " ON " << one_db::host_monitor_table << ".hid = oid";
|
||||
|
||||
cmd << " ORDER BY hid, " << one_db::host_monitor_table << ".last_mon_time;";
|
||||
if ( !where.empty() )
|
||||
{
|
||||
cmd << " WHERE " << where;
|
||||
}
|
||||
|
||||
cmd << " ORDER BY oid";
|
||||
|
||||
break;
|
||||
|
||||
case -1: //Get all monitoring
|
||||
cmd << "SELECT " << one_db::host_monitor_table << ".body FROM "
|
||||
<< one_db::host_monitor_table << " INNER JOIN " << one_db::host_table
|
||||
<< " ON hid = oid";
|
||||
|
||||
if ( !where.empty() )
|
||||
{
|
||||
cmd << " WHERE " << where;
|
||||
}
|
||||
|
||||
cmd << " ORDER BY hid, " << one_db::host_monitor_table << ".last_mon_time;";
|
||||
|
||||
break;
|
||||
|
||||
default: //Get monitor in last s seconds
|
||||
cmd << "SELECT " << one_db::host_monitor_table << ".body FROM "
|
||||
<< one_db::host_monitor_table << " INNER JOIN " << one_db::host_table
|
||||
<< " ON hid = oid WHERE " << one_db::host_monitor_table
|
||||
<< ".last_mon_time > " << time(nullptr) - seconds;
|
||||
|
||||
if ( !where.empty() )
|
||||
{
|
||||
cmd << " AND " << where;
|
||||
}
|
||||
|
||||
cmd << " ORDER BY hid, " << one_db::host_monitor_table << ".last_mon_time;";
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
return PoolSQL::dump(oss, "MONITORING_DATA", cmd);
|
||||
}
|
||||
|
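The reworked HostPool::dump_monitoring above picks one of three queries based on the new seconds argument: 0 returns only the newest record per host, -1 returns the full history (the previous behaviour), and any other value restricts the history to that many seconds. Illustrative caller code follows; the pool type is templated only so the sketch stands alone.

#include <sstream>
#include <string>

// Illustrative usage of the three 'seconds' modes added above. The pool type
// is a template parameter so this sketch is self-contained; in the code base
// it would be HostPool (VirtualMachinePool gains the same signature).
template <class Pool>
void dump_examples(Pool* pool)
{
    std::string xml;

    pool->dump_monitoring(xml, "", 0);    // newest record per host
    pool->dump_monitoring(xml, "", -1);   // complete monitoring history
    pool->dump_monitoring(xml, "", 600);  // records from the last 10 minutes

    // The single-host wrapper in HostPool.h keeps the old behaviour:
    std::ostringstream filter;
    filter << "oid = " << 42;
    pool->dump_monitoring(xml, filter.str(), -1);
}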
@ -642,7 +642,7 @@ void HostShareNode::set_hugepage(unsigned long size, unsigned int nr,
return;
}

HugePage h = {size, nr, fr, usage};
HugePage h = {size, nr, fr, usage, 0};

pages.insert(make_pair(h.size_kb, h));

@ -1349,12 +1349,14 @@ void HostShareNUMA::del(HostShareCapacity &sr)

if ( pt != mem_node.pages.end() )
{
pt->second.usage -= num_hp;

if ( pt->second.usage < 0 )
if ( pt->second.usage < num_hp )
{
pt->second.usage = 0;
}
else
{
pt->second.usage -= num_hp;
}

mem_node.update_hugepages();
}

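The second hunk above reorders the check so the usage counter is never driven below zero: the old code subtracted first and only then compared with 0, a test that cannot fire (and the subtraction can wrap) if the counter is unsigned, which the clamp suggests. A self-contained sketch of the clamped decrement, with illustrative names:

// Sketch of the clamp applied above: never let the page-usage counter go
// below zero (or wrap, if it is unsigned) when releasing num_hp pages.
void release_hugepages(unsigned long& usage, unsigned long num_hp)
{
    if (usage < num_hp)
    {
        usage = 0;
    }
    else
    {
        usage -= num_hp;
    }
}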
@ -307,30 +307,19 @@ class Cluster
|
||||
end
|
||||
|
||||
def cluster_monitoring
|
||||
metrics = @cluster.item.collect(*(CLUSTER_PROPERTIES[0..4]))
|
||||
resource_usage_summary = @cluster.item.GetResourceUsage()
|
||||
|
||||
total_cpu = metrics[0].to_f
|
||||
num_cpu_cores = metrics[1].to_f
|
||||
effective_cpu = metrics[2].to_f
|
||||
total_memory = metrics[3].to_i
|
||||
effective_mem = metrics[4].to_i
|
||||
total_cpu = resource_usage_summary.cpuCapacityMHz.to_i
|
||||
used_cpu = resource_usage_summary.cpuUsedMHz.to_i
|
||||
total_memory = resource_usage_summary.memCapacityMB.to_i
|
||||
used_mem = resource_usage_summary.memUsedMB.to_i
|
||||
|
||||
if num_cpu_cores > 0
|
||||
mhz_core = total_cpu / num_cpu_cores
|
||||
eff_core = effective_cpu / mhz_core
|
||||
else
|
||||
eff_core = 0
|
||||
end
|
||||
|
||||
free_cpu = (eff_core * 100).to_i
|
||||
used_cpu = (total_cpu - free_cpu).to_i
|
||||
|
||||
total_mem = total_memory / 1024
|
||||
free_mem = effective_mem * 1024
|
||||
free_cpu = total_cpu - used_cpu
|
||||
free_mem = total_memory - used_mem
|
||||
|
||||
unindent(<<-EOS)
|
||||
HYPERVISOR = vcenter
|
||||
USEDMEMORY = "#{(total_mem - free_mem)}"
|
||||
USEDMEMORY = "#{(used_mem * 1024)}"
|
||||
FREEMEMORY = "#{free_mem}"
|
||||
USEDCPU = "#{used_cpu}"
|
||||
FREECPU = "#{free_cpu}"
|
||||
@ -825,7 +814,7 @@ class ClusterSet
|
||||
next unless (Time.now.to_i - last_mon) > probe_frequency
|
||||
|
||||
# Refresh the vCenter connection in the least frequent probe
|
||||
if probe_name.eql?('system_host')
|
||||
if probe_name.eql?(:system_host)
|
||||
c[:cluster].connect_vcenter
|
||||
end
|
||||
|
||||
|
@ -658,9 +658,10 @@ void Image::disk_attribute(VirtualMachineDisk * disk,

for (it = inherit_attrs.begin(); it != inherit_attrs.end(); it++)
{
string current_val = disk->vector_value(*it);
get_template_attribute(*it, inherit_val);

if (!inherit_val.empty())
if (current_val.empty() && !inherit_val.empty())
{
disk->replace(*it, inherit_val);
}

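With the hunk above, an image attribute listed in inherit_attrs is copied into the disk only when the disk does not already define it, so values set explicitly in the VM template are no longer overwritten. A minimal sketch of that rule with simplified, illustrative types:

#include <map>
#include <string>

// Illustrative only: copy an inherited value into the disk attribute map
// unless the disk already carries its own value, as in the hunk above.
void inherit_attr(std::map<std::string, std::string>& disk,
                  const std::string& name,
                  const std::string& image_value)
{
    if (disk[name].empty() && !image_value.empty())
    {
        disk[name] = image_value;
    }
}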
@ -334,6 +334,8 @@ void LifeCycleManager::migrate_action(const LCMAction& la)
|
||||
if ( vm->get_hid() != vm->get_previous_hid() )
|
||||
{
|
||||
hpool->del_capacity(vm->get_previous_hid(), sr);
|
||||
|
||||
vm->release_previous_vnc_port();
|
||||
}
|
||||
|
||||
vm->set_stime(the_time);
|
||||
|
@ -54,6 +54,8 @@ void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm)
|
||||
if ( vm->get_hid() != vm->get_previous_hid() )
|
||||
{
|
||||
hpool->del_capacity(vm->get_previous_hid(), sr);
|
||||
|
||||
vm->release_previous_vnc_port();
|
||||
}
|
||||
|
||||
vmpool->update(vm);
|
||||
@ -84,6 +86,8 @@ void LifeCycleManager::revert_migrate_after_failure(VirtualMachine* vm)
|
||||
if ( vm->get_hid() != vm->get_previous_hid() )
|
||||
{
|
||||
hpool->del_capacity(vm->get_hid(), sr);
|
||||
|
||||
vm->rollback_previous_vnc_port();
|
||||
}
|
||||
|
||||
vm->set_previous_etime(the_time);
|
||||
@ -261,6 +265,8 @@ void LifeCycleManager::deploy_success_action(int vid)
|
||||
|
||||
hpool->del_capacity(vm->get_previous_hid(), sr);
|
||||
|
||||
vm->release_previous_vnc_port();
|
||||
|
||||
vm->set_state(VirtualMachine::RUNNING);
|
||||
|
||||
if ( !vmm->is_keep_snapshots(vm->get_vmm_mad()) )
|
||||
@ -349,6 +355,8 @@ void LifeCycleManager::deploy_failure_action(int vid)
|
||||
|
||||
hpool->del_capacity(vm->get_hid(), sr);
|
||||
|
||||
vm->rollback_previous_vnc_port();
|
||||
|
||||
// --- Add new record by copying the previous one
|
||||
|
||||
vm->cp_previous_history();
|
||||
|
@ -209,7 +209,7 @@ int MarketPlaceAppPool::import(const std::string& t64, int mp_id,
|
||||
|
||||
app->market_id = mp_id;
|
||||
app->market_name = mp_name;
|
||||
app->zone_id = Nebula::instance().get_zone_id();
|
||||
app->zone_id = Nebula::instance().get_zone_id();
|
||||
|
||||
if ( !PoolObjectSQL::name_is_valid(app->name, error_str) )
|
||||
{
|
||||
@ -234,7 +234,8 @@ int MarketPlaceAppPool::import(const std::string& t64, int mp_id,
|
||||
{
|
||||
app_id = mp_aux->oid;
|
||||
|
||||
if ( mp_aux->version != app->version || mp_aux->md5 != app->md5 )
|
||||
if ( mp_aux->version != app->version || mp_aux->md5 != app->md5 ||
|
||||
mp_aux->source != app->source )
|
||||
{
|
||||
mp_aux->from_template64(t64, error_str);
|
||||
update(mp_aux);
|
||||
|
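The import path above now also refreshes the stored marketplace app when its source differs, not only when version or md5 changed. A small sketch of the comparison; the struct and field names are illustrative.

#include <string>

// Illustrative comparison only: re-import the app metadata when any of
// version, md5 or (newly) source changed, mirroring the condition above.
struct AppMeta
{
    std::string version;
    std::string md5;
    std::string source;
};

bool needs_refresh(const AppMeta& stored, const AppMeta& incoming)
{
    return stored.version != incoming.version ||
           stored.md5     != incoming.md5     ||
           stored.source  != incoming.source;
}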
@ -111,10 +111,17 @@ end
|
||||
|
||||
cmd = "#{UTILS_PATH}/downloader.sh #{import_source} -"
|
||||
|
||||
Open3.popen3(cmd) do |_, o, _, _|
|
||||
Open3.popen3(cmd) do |_, o, e, _|
|
||||
body = o.read(read_length)
|
||||
|
||||
if o.eof?
|
||||
error = Thread.new { e.read }.value
|
||||
|
||||
unless error.empty?
|
||||
STDERR.puts error
|
||||
exit 1
|
||||
end
|
||||
|
||||
s3.put_object(body)
|
||||
else
|
||||
s3.create_multipart_upload
|
||||
|
@ -88,7 +88,7 @@ public:
*/
void write(const std::string& str) const
{
::write(to_drv, str.c_str(), str.size());
(void) ::write(to_drv, str.c_str(), str.size());
};

/**

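The cast to void above only silences the -Wunused-result warning for ::write. Shown purely as an illustration of the alternative (not what the patch does), a stricter variant would loop on partial writes and surface errors to the caller:

#include <string>
#include <unistd.h>

// Illustrative alternative to the (void) cast: retry on short writes and
// report failure to the caller instead of discarding the result.
bool write_all(int fd, const std::string& str)
{
    size_t done = 0;

    while (done < str.size())
    {
        ssize_t n = ::write(fd, str.data() + done, str.size() - done);

        if (n <= 0)
        {
            return false;
        }

        done += static_cast<size_t>(n);
    }

    return true;
}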
@ -159,6 +159,20 @@ func (vc *VMsController) InfoExtendedFilter(f *VMFilter) (*vm.Pool, error) {
|
||||
return vmPool, nil
|
||||
}
|
||||
|
||||
// InfoSet connects to OpenNebula and fetches a VM_POOL containing the VMs in vmIds
|
||||
func (vc *VMsController) InfoSet(vmIds string, extended bool) (*vm.Pool, error) {
|
||||
response, err := vc.c.Client.Call("one.vmpool.infoset", vmIds, extended)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vmPool := &vm.Pool{}
|
||||
err = xml.Unmarshal([]byte(response.Body()), vmPool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return vmPool, nil
|
||||
}
|
||||
|
||||
// Info connects to OpenNebula and fetches the information of the VM
|
||||
func (vc *VMController) Info(decrypt bool) (*vm.VM, error) {
|
||||
response, err := vc.c.Client.Call("one.vm.info", vc.ID, decrypt)
|
||||
|
@ -34,6 +34,7 @@ public class VirtualMachinePool extends Pool implements Iterable<VirtualMachine>
|
||||
private static final String ELEMENT_NAME = "VM";
|
||||
private static final String INFO_METHOD = "vmpool.info";
|
||||
private static final String INFO_EXTENDED_METHOD = "vmpool.infoextended";
|
||||
private static final String INFO_SET_METHOD = "vmpool.infoset";
|
||||
private static final String MONITORING = "vmpool.monitoring";
|
||||
|
||||
/**
|
||||
@ -133,6 +134,20 @@ public class VirtualMachinePool extends Pool implements Iterable<VirtualMachine>
|
||||
return client.call(INFO_EXTENDED_METHOD, filter, -1, -1, NOT_DONE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves all of the Virtual Machines in the vm_ids list.
|
||||
*
|
||||
* @param client XML-RPC Client.
|
||||
* @param vm_ids Comma separated list of VM IDs.
|
||||
* @param extended If true the extended body is retrieved.
|
||||
* @return If successful the message contains the string
|
||||
* with the information returned by OpenNebula.
|
||||
*/
|
||||
public static OneResponse info_extended(Client client, int vm_ids, boolean extended)
|
||||
{
|
||||
return client.call(INFO_SET_METHOD, vm_ids, extended);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves all the Virtual Machines in the pool.
|
||||
*
|
||||
|
@ -23,14 +23,14 @@ module OpenNebula
|
||||
# Constants and Class attribute accessors
|
||||
#######################################################################
|
||||
|
||||
|
||||
VM_POOL_METHODS = {
|
||||
:info => "vmpool.info",
|
||||
:info_extended => "vmpool.infoextended",
|
||||
:monitoring => "vmpool.monitoring",
|
||||
:accounting => "vmpool.accounting",
|
||||
:showback => "vmpool.showback",
|
||||
:calculate_showback => "vmpool.calculateshowback"
|
||||
:info => 'vmpool.info',
|
||||
:info_extended => 'vmpool.infoextended',
|
||||
:info_set => 'vmpool.infoset',
|
||||
:monitoring => 'vmpool.monitoring',
|
||||
:accounting => 'vmpool.accounting',
|
||||
:showback => 'vmpool.showback',
|
||||
:calculate_showback => 'vmpool.calculateshowback'
|
||||
}
|
||||
|
||||
# Constants for info queries (include/RequestManagerPoolInfoFilter.h)
|
||||
@ -41,7 +41,6 @@ module OpenNebula
|
||||
# Class constructor & Pool Methods
|
||||
#######################################################################
|
||||
|
||||
|
||||
# +client+ a Client object that represents a XML-RPC connection
|
||||
# +user_id+ is to refer to a Pool with VirtualMachines from that user
|
||||
def initialize(client, user_id=0)
|
||||
@ -102,21 +101,21 @@ module OpenNebula
|
||||
end
|
||||
|
||||
# Define info methods shortcuts for different filters
|
||||
# info_all()
|
||||
# info_all()
|
||||
# info_all!()
|
||||
# info_all_extended
|
||||
# info_all_extended
|
||||
# info_all_extended!()
|
||||
# info_mine()
|
||||
# info_mine()
|
||||
# info_mine!()
|
||||
# info_mine_extended
|
||||
# info_mine_extended
|
||||
# info_mine_extended!()
|
||||
# info_group()
|
||||
# info_group()
|
||||
# info_group!()
|
||||
# info_group_extended
|
||||
# info_group_extended
|
||||
# info_group_extended!()
|
||||
# info_primary_group()
|
||||
# info_primary_group()
|
||||
# info_primary_group!()
|
||||
# info_primary_group_extended
|
||||
# info_primary_group_extended
|
||||
# info_primary_group_extended!()
|
||||
%w[mine all group primary_group].each do |ifilter|
|
||||
const_name = "OpenNebula::Pool::INFO_#{ifilter.upcase}"
|
||||
@ -160,6 +159,14 @@ module OpenNebula
|
||||
default_args[:query])
|
||||
end
|
||||
|
||||
# Retrieves the set of VMs especified in vm_ids
|
||||
#
|
||||
# @param [String] comma separated list of vm ids.
|
||||
# @param [Boolean] if true extended body is retrieved.
|
||||
#
|
||||
def info_set(vm_ids, extended)
|
||||
xmlrpc_info(VM_POOL_METHODS[:info_set], vm_ids, extended)
|
||||
end
|
||||
|
||||
# Retrieves the monitoring data for all the VMs in the pool
|
||||
#
|
||||
|
@ -50,6 +50,7 @@ require 'rubygems'
|
||||
require 'sinatra'
|
||||
require 'yaml'
|
||||
require 'json'
|
||||
require 'set'
|
||||
|
||||
require 'CloudAuth'
|
||||
require 'CloudServer'
|
||||
@ -75,6 +76,11 @@ RESTRICTED_ACTIONS = [
|
||||
'reboot'
|
||||
]
|
||||
|
||||
# Attrs of the Virtual Network template that will be retrieved
|
||||
# with onegate vnet | get /vnet/:id requests.
|
||||
VNET_TEMPLATE_ATTRIBUTES = %w[NETWORK_ADDRESS NETWORK_MASK GATEWAY GATEWAY6 DNS
|
||||
GUEST_MTU CONTEXT_FORCE_IPV4 SEARCH_DOMAIN]
|
||||
|
||||
include OpenNebula
|
||||
|
||||
begin
|
||||
@ -162,13 +168,24 @@ helpers do
|
||||
|
||||
def get_requested_vm(requested_vm_id, request_env, client)
|
||||
source_vm = get_source_vm(request_env, client)
|
||||
if source_vm['ID'].to_i != requested_vm_id
|
||||
|
||||
return source_vm if Integer(source_vm['ID']) == requested_vm_id
|
||||
|
||||
if !source_vm['USER_TEMPLATE/SERVICE_ID'].nil?
|
||||
service_id = source_vm['USER_TEMPLATE/SERVICE_ID']
|
||||
check_vm_in_service(requested_vm_id, service_id, client)
|
||||
|
||||
requested_vm = get_vm(requested_vm_id, client)
|
||||
get_vm(requested_vm_id, client)
|
||||
elsif !source_vm['TEMPLATE/VROUTER_ID'].nil?
|
||||
vrouter_id = source_vm['TEMPLATE/VROUTER_ID']
|
||||
vrouter_hash = get_vrouter(vrouter_id, client).to_hash
|
||||
check_vm_in_vrouter(requested_vm_id, vrouter_hash, source_vm)
|
||||
get_vm(requested_vm_id, client)
|
||||
else
|
||||
requested_vm = source_vm
|
||||
error_msg = 'This VM does not belong to any Virtual Router or '\
|
||||
'Service, so it cannot retrieve information '\
|
||||
'from other VMs'
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
end
|
||||
|
||||
@ -241,6 +258,48 @@ helpers do
|
||||
service.body
|
||||
end
|
||||
|
||||
def get_vrouter(vrouter_id, client)
|
||||
begin
|
||||
vrouter_id = Integer(vrouter_id)
|
||||
rescue TypeError
|
||||
error_msg = 'Empty or invalid VROUTER_ID'
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
|
||||
vrouter = VirtualRouter.new_with_id(vrouter_id, client)
|
||||
rc = vrouter.info
|
||||
|
||||
if OpenNebula.is_error?(rc)
|
||||
error_msg = "Virtual router #{vrouter_id} not found"
|
||||
logger.error {error_msg}
|
||||
halt 404, error_msg
|
||||
end
|
||||
|
||||
vrouter
|
||||
end
|
||||
|
||||
def get_vnet(vnet_id, client)
|
||||
begin
|
||||
vnet_id = Integer(vnet_id)
|
||||
rescue TypeError
|
||||
error_msg = 'Empty or invalid VNET_ID'
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
|
||||
vnet = VirtualNetwork.new_with_id(vnet_id, client)
|
||||
rc = vnet.info
|
||||
|
||||
if OpenNebula.is_error?(rc)
|
||||
error_msg = "Virtual network #{vnet_id} not found"
|
||||
logger.error {error_msg}
|
||||
halt 404, error_msg
|
||||
end
|
||||
|
||||
vnet
|
||||
end
|
||||
|
||||
def parse_json(json_str, root_element)
|
||||
begin
|
||||
hash = JSON.parse(json_str)
|
||||
@ -294,6 +353,10 @@ helpers do
|
||||
end
|
||||
end
|
||||
|
||||
def get_vnet_template_attributes
|
||||
$conf[:vnet_template_attributes] || VNET_TEMPLATE_ATTRIBUTES
|
||||
end
|
||||
|
||||
# Check if the source VM is part of a service and if the requested
|
||||
# VM is part of the same Service as the source VM.
|
||||
#
|
||||
@ -326,6 +389,91 @@ helpers do
|
||||
return response
|
||||
end
|
||||
|
||||
# Check if the source VM is part of a virtual router and if the
|
||||
# requested VM is part of the same virtual router as the source VM.
|
||||
# If false a halt is triggered
|
||||
#
|
||||
# @param requested_vm_id [Integer]
|
||||
# @param vrouter_hash [Hash]
|
||||
# @param source_vm [OpenNebula::VirtualMachine]
|
||||
#
|
||||
def check_vm_in_vrouter(requested_vm_id, vrouter_hash, source_vm)
|
||||
# Check that the user has not spoofed the VROUTER_ID
|
||||
vrouter_vm_ids = Array(vrouter_hash['VROUTER']['VMS']['ID']).map! do |vm|
|
||||
Integer(vm)
|
||||
end
|
||||
|
||||
if !vrouter_vm_ids.include?(requested_vm_id) ||
|
||||
!vrouter_vm_ids.include?(source_vm.id)
|
||||
|
||||
error_msg = "Virtual Router #{vrouter_hash['VROUTER']['ID']} does "\
|
||||
"not contain VM #{requested_vm_id}."
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
end
|
||||
|
||||
# Check if the requested VNET can be accessed from the curren VROUTER.
|
||||
# If false a halt is triggered.
|
||||
#
|
||||
# @param req_vnet [OpenNebula::VirtualNetwork]
|
||||
# @param vrouter_hash [Hash]
|
||||
# @param client [OpenNebula::Client]
|
||||
#
|
||||
def check_vnet_in_vrouter(req_vnet, vrouter_hash, client)
|
||||
valid_vnets = Set[]
|
||||
|
||||
# Get VR nics
|
||||
nics = vrouter_hash['VROUTER']['TEMPLATE']['NIC']
|
||||
|
||||
if !nics.is_a?(Array)
|
||||
nics = [nics]
|
||||
end
|
||||
|
||||
# Get only one nic if multiple nic in same network
|
||||
nics.uniq! {|n| n['NETWORK_ID'] }
|
||||
|
||||
have_access = false
|
||||
nics.each do |nic|
|
||||
# Get nic VNET
|
||||
nic_vnet = get_vnet(nic['NETWORK_ID'], client)
|
||||
|
||||
# Provide access to nic's VNET
|
||||
valid_vnets.add(Integer(nic['NETWORK_ID']))
|
||||
# Provide access to nic's VNET parent (if exists)
|
||||
if !nic_vnet['PARENT_NETWORK_ID'].nil? &&
|
||||
!nic_vnet['PARENT_NETWORK_ID'].empty?
|
||||
valid_vnets.add(Integer(nic_vnet['PARENT_NETWORK_ID']))
|
||||
end
|
||||
# Provide access to nic's VNET childs
|
||||
xpath = '//LEASE/VNET'
|
||||
childs = nic_vnet.retrieve_xmlelements(xpath)
|
||||
|
||||
childs.each do |c|
|
||||
valid_vnets.add(Integer(c.text))
|
||||
end
|
||||
# Provide access to VNETs with same parent as NIC network
|
||||
if !valid_vnets.include?(req_vnet.id)
|
||||
# Get requested vnet parent
|
||||
if !req_vnet['PARENT_NETWORK_ID'].empty?
|
||||
req_parent = Integer(req_vnet['PARENT_NETWORK_ID'])
|
||||
end
|
||||
|
||||
next unless valid_vnets.include?(req_parent)
|
||||
end
|
||||
|
||||
have_access = true
|
||||
break
|
||||
end
|
||||
|
||||
return if have_access
|
||||
|
||||
error_msg = "Virtual Network #{req_vnet.id} cannot be retrieved"\
|
||||
" from Virtual router #{vrouter_hash['VROUTER']['ID']}."
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
|
||||
# Escape data from user
|
||||
def scape_attr(attr)
|
||||
ret = ''
|
||||
@ -420,7 +568,7 @@ helpers do
|
||||
end
|
||||
end
|
||||
|
||||
NIC_VALID_KEYS = %w(IP IP6_LINK IP6_SITE IP6_GLOBAL NETWORK MAC NAME PARENT)
|
||||
NIC_VALID_KEYS = %w(IP IP6_LINK IP6_SITE IP6_GLOBAL NETWORK MAC NAME PARENT EXTERNAL)
|
||||
USER_TEMPLATE_INVALID_KEYS = %w(SCHED_MESSAGE)
|
||||
|
||||
def build_vm_hash(vm_hash)
|
||||
@ -514,6 +662,161 @@ def build_service_hash(service_hash, client = nil, extended = false)
|
||||
}
|
||||
end
|
||||
|
||||
def build_vrouter_hash(vrouter_hash, _client = nil, _extended = false)
|
||||
vrouter = {
|
||||
'VROUTER' => {
|
||||
'NAME' => vrouter_hash['VROUTER']['NAME'],
|
||||
'ID' => vrouter_hash['VROUTER']['ID'],
|
||||
'VMS' => vrouter_hash['VROUTER']['VMS'],
|
||||
'TEMPLATE' => vrouter_hash['VROUTER']['TEMPLATE']
|
||||
}
|
||||
}
|
||||
|
||||
# Manage special cases (arrays)
|
||||
if !vrouter['VROUTER']['TEMPLATE']['NIC'].is_a?(Array)
|
||||
if vrouter['VROUTER']['TEMPLATE']['NIC'].nil?
|
||||
vrouter['VROUTER']['TEMPLATE']['NIC'] = []
|
||||
else
|
||||
vrouter['VROUTER']['TEMPLATE']['NIC'] = [
|
||||
vrouter['VROUTER']['TEMPLATE']['NIC']
|
||||
]
|
||||
end
|
||||
end
|
||||
|
||||
if !vrouter_hash['VROUTER']['VMS']['ID'].is_a?(Array)
|
||||
if vrouter_hash['VROUTER']['VMS']['ID'].nil?
|
||||
vrouter_hash['VROUTER']['VMS']['ID'] = []
|
||||
else
|
||||
vrouter_hash['VROUTER']['VMS']['ID'] = [
|
||||
vrouter_hash['VROUTER']['VMS']['ID']
|
||||
]
|
||||
end
|
||||
end
|
||||
|
||||
vrouter
|
||||
end
|
||||
|
||||
VNET_ATTRIBUTES = %w[ID NAME USED_LEASES VROUTERS PARENT_NETWORK_ID AR_POOL]
|
||||
|
||||
def process_vnet(vnet_hash)
|
||||
template = {}
|
||||
|
||||
get_vnet_template_attributes.each do |key|
|
||||
value = vnet_hash['VNET']['TEMPLATE'][key]
|
||||
template[key] = value unless value.nil?
|
||||
end
|
||||
|
||||
vnet = {}
|
||||
VNET_ATTRIBUTES.each do |key|
|
||||
vnet[key] = vnet_hash['VNET'][key]
|
||||
end
|
||||
|
||||
vnet['TEMPLATE'] = template
|
||||
|
||||
# Manage special cases (arrays)
|
||||
if !vnet['AR_POOL']['AR'].is_a?(Array)
|
||||
if vnet['AR_POOL']['AR'].nil?
|
||||
vnet['AR_POOL']['AR'] = []
|
||||
else
|
||||
vnet['AR_POOL']['AR'] = [vnet['AR_POOL']['AR']]
|
||||
end
|
||||
end
|
||||
|
||||
vnet['AR_POOL']['AR'].each do |ar|
|
||||
if !ar['LEASES']['LEASE'].is_a?(Array)
|
||||
if ar['LEASES']['LEASE'].nil?
|
||||
ar['LEASES']['LEASE'] = []
|
||||
else
|
||||
ar['LEASES']['LEASE'] = [ar['LEASES']['LEASE']]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if !vnet['VROUTERS']['ID'].is_a?(Array)
|
||||
if vnet['VROUTERS']['ID'].nil?
|
||||
!vnet['VROUTERS']['ID'] = []
|
||||
else
|
||||
vnet['VROUTERS']['ID'] = [vnet['VROUTERS']['ID']]
|
||||
end
|
||||
end
|
||||
|
||||
vnet
|
||||
end
|
||||
|
||||
def build_vnet_hash(vnet, client, extended)
|
||||
# if extended flag is not set
|
||||
if extended.nil? || extended.downcase != 'true'
|
||||
vnet = vnet.to_hash
|
||||
vnet['VNET'] = process_vnet(vnet)
|
||||
|
||||
return vnet
|
||||
end
|
||||
|
||||
vm_pool = VirtualMachinePool.new(client)
|
||||
|
||||
# get VMs that are using the VNET
|
||||
vms = ''
|
||||
vnet.retrieve_xmlelements('//LEASE/VM').each do |vm|
|
||||
vms << ',' unless vms.empty?
|
||||
vms << vm.text
|
||||
end
|
||||
|
||||
vnet = vnet.to_hash
|
||||
vnet['VNET'] = process_vnet(vnet)
|
||||
|
||||
rc = vm_pool.info_set(vms, true)
|
||||
if OpenNebula.is_error?(rc)
|
||||
logger.error {"vmpool.info error: #{rc.message}"}
|
||||
halt 404, rc.message
|
||||
end
|
||||
|
||||
# Get ARs array
|
||||
ars = vnet['VNET']['AR_POOL']['AR']
|
||||
# rubocop:disable Style/ArrayCoercion
|
||||
ars = [ars] unless ars.is_a?(Array)
|
||||
|
||||
ars.each do |ar|
|
||||
leases = ar['LEASES']['LEASE']
|
||||
|
||||
next if leases.nil?
|
||||
|
||||
leases = [leases] unless leases.is_a?(Array)
|
||||
# rubocop:enable Style/ArrayCoercion
|
||||
|
||||
leases.each do |lease|
|
||||
next if lease['VM'].nil?
|
||||
|
||||
# Get the corresponding VM from pool
|
||||
xpath = "/VM_POOL/VM[ID=#{lease['VM']}]"
|
||||
vm = vm_pool.retrieve_xmlelements(xpath)[0]
|
||||
|
||||
# Get corresponding NIC from VM (MAC should be unique)
|
||||
xpath = "./TEMPLATE/NIC[MAC=\"#{lease['MAC']}\"]"
|
||||
nic = vm.retrieve_xmlelements(xpath)[0]
|
||||
|
||||
if nic.nil?
|
||||
xpath = "./TEMPLATE/NIC_ALIAS[MAC=\"#{lease['MAC']}\"]"
|
||||
nic = vm.retrieve_xmlelements(xpath)[0]
|
||||
|
||||
# get parent network
|
||||
xpath = "./TEMPLATE/NIC[NIC_ID=\"#{nic['PARENT_ID']}\"]/NETWORK_ID"
|
||||
parent_id = vm.retrieve_xmlelements(xpath)[0].text
|
||||
|
||||
# Get ALIAS extended info
|
||||
lease['PARENT'] = nic['PARENT']
|
||||
lease['PARENT_NETWORK_ID'] = parent_id
|
||||
lease['EXTERNAL'] = !nic['EXTERNAL'].nil? &&
|
||||
nic['EXTERNAL'].downcase == 'yes'
|
||||
end
|
||||
|
||||
# Get extended info
|
||||
lease['NIC_NAME'] = nic['NAME']
|
||||
end
|
||||
end
|
||||
|
||||
vnet
|
||||
end
|
||||
|
||||
get '/' do
|
||||
client = authenticate(request.env, params)
|
||||
|
||||
@ -565,6 +868,70 @@ get '/service' do
|
||||
[200, response.to_json]
|
||||
end
|
||||
|
||||
get '/vrouter' do
|
||||
check_permissions(:vrouter, :show)
|
||||
client = authenticate(request.env, params)
|
||||
|
||||
source_vm = get_source_vm(request.env, client)
|
||||
vrouter_id = source_vm['TEMPLATE/VROUTER_ID']
|
||||
vrouter_hash = get_vrouter(vrouter_id, client).to_hash
|
||||
|
||||
check_vm_in_vrouter(Integer(source_vm['ID']), vrouter_hash, source_vm)
|
||||
|
||||
response = build_vrouter_hash(vrouter_hash, client, params['extended']) rescue nil
|
||||
if response.nil?
|
||||
error_msg = "Virtual router #{vrouter_id} is empty."
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
|
||||
[200, response.to_json]
|
||||
end
|
||||
|
||||
get '/vnet/:id' do
|
||||
check_permissions(:vnet, :show_by_id)
|
||||
client = authenticate(request.env, params)
|
||||
|
||||
# Check :id is an integer
|
||||
vnet_id = begin
|
||||
Integer(params[:id])
|
||||
rescue ArgumentError
|
||||
error_msg = "Invalid id format (ID: #{params[:id]}). "\
|
||||
'ID must be an integer.'
|
||||
logger.error { error_msg }
|
||||
halt 400, error_msg
|
||||
end
|
||||
|
||||
source_vm = get_source_vm(request.env, client)
|
||||
vrouter_id = source_vm['TEMPLATE/VROUTER_ID']
|
||||
|
||||
# Check if current VM is a VROUTER
|
||||
if vrouter_id.nil? || vrouter_id.empty?
|
||||
error_msg = 'Virtual networks information can only be' \
|
||||
' retrieved from Virtual Routers.'
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
|
||||
# Retrieve VROUTER information
|
||||
vrouter_hash = get_vrouter(vrouter_id, client).to_hash
|
||||
check_vm_in_vrouter(Integer(source_vm['ID']), vrouter_hash, source_vm)
|
||||
|
||||
# Retrieve requested VNET
|
||||
req_vnet = get_vnet(Integer(vnet_id), client)
|
||||
check_vnet_in_vrouter(req_vnet, vrouter_hash, client)
|
||||
|
||||
response = build_vnet_hash(req_vnet, client, params['extended']) rescue nil
|
||||
|
||||
if response.nil?
|
||||
error_msg = "Virtual router #{vrouter_hash['VROUTER']['ID']} is empty."
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
|
||||
[200, response.to_json]
|
||||
end
|
||||
|
||||
get '/vms/:id' do
|
||||
check_permissions(:vm, :show_by_id)
|
||||
client = authenticate(request.env, params)
|
||||
@ -617,3 +984,11 @@ put '/vms/:id' do
|
||||
|
||||
[200, ""]
|
||||
end
|
||||
|
||||
%w[get head post put delete options patch].each do |method|
|
||||
send method, '/*' do
|
||||
error_msg = 'OneGate server doesn\'t support this feature'
|
||||
logger.error {error_msg}
|
||||
halt 400, error_msg
|
||||
end
|
||||
end
|
||||
|
@ -32,6 +32,7 @@ class OneProvisionLoopException < RuntimeError
|
||||
attr_reader :text
|
||||
|
||||
def initialize(text = nil)
|
||||
super
|
||||
@text = text
|
||||
end
|
||||
|
||||
|
@ -241,9 +241,11 @@ int RequestManager::setup_socket()
|
||||
int rc;
|
||||
int yes = 1;
|
||||
|
||||
struct addrinfo hints = {0};
|
||||
struct addrinfo hints;
|
||||
struct addrinfo * result;
|
||||
|
||||
memset(&hints, 0, sizeof hints);
|
||||
|
||||
hints.ai_family = AF_UNSPEC;
|
||||
hints.ai_socktype = SOCK_STREAM;
|
||||
hints.ai_flags = AI_PASSIVE;
|
||||
@ -488,6 +490,7 @@ void RequestManager::register_xml_methods()
|
||||
xmlrpc_c::methodPtr datastorepool_info(new DatastorePoolInfo());
|
||||
xmlrpc_c::methodPtr vm_pool_info(new VirtualMachinePoolInfo());
|
||||
xmlrpc_c::methodPtr vm_pool_info_extended(new VirtualMachinePoolInfoExtended());
|
||||
xmlrpc_c::methodPtr vm_pool_info_set(new VirtualMachinePoolInfoSet());
|
||||
xmlrpc_c::methodPtr template_pool_info(new TemplatePoolInfo());
|
||||
xmlrpc_c::methodPtr vnpool_info(new VirtualNetworkPoolInfo());
|
||||
xmlrpc_c::methodPtr vntemplate_pool_info(new VirtualNetworkTemplatePoolInfo());
|
||||
@ -615,6 +618,7 @@ void RequestManager::register_xml_methods()
|
||||
|
||||
RequestManagerRegistry.addMethod("one.vmpool.info", vm_pool_info);
|
||||
RequestManagerRegistry.addMethod("one.vmpool.infoextended", vm_pool_info_extended);
|
||||
RequestManagerRegistry.addMethod("one.vmpool.infoset", vm_pool_info_set);
|
||||
RequestManagerRegistry.addMethod("one.vmpool.accounting", vm_pool_acct);
|
||||
RequestManagerRegistry.addMethod("one.vmpool.monitoring", vm_pool_monitoring);
|
||||
RequestManagerRegistry.addMethod("one.vmpool.showback", vm_pool_showback);
|
||||
|
@ -368,6 +368,47 @@ void VirtualMachinePoolInfo::request_execute(
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
VirtualMachinePoolInfoSet::VirtualMachinePoolInfoSet()
|
||||
: RequestManagerPoolInfoFilter("one.vmpool.infoset",
|
||||
"Returns a virtual machine instances set",
|
||||
"A:ss")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_vmpool();
|
||||
auth_object = PoolObjectSQL::VM;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
void VirtualMachinePoolInfoSet::request_execute(
|
||||
xmlrpc_c::paramList const& paramList,
|
||||
RequestAttributes& att)
|
||||
{
|
||||
std::string ids_str = xmlrpc_c::value_string(paramList.getString(1));
|
||||
extended = xmlrpc_c::value_boolean(paramList.getBoolean(2));
|
||||
|
||||
ostringstream and_filter;
|
||||
ostringstream oss;
|
||||
std::set<unsigned int> ids;
|
||||
|
||||
one_util::split_unique(ids_str, ',', ids);
|
||||
|
||||
if (ids.empty())
|
||||
{
|
||||
std::string empty_pool = "<VM_POOL></VM_POOL>";
|
||||
success_response(empty_pool, att);
|
||||
return;
|
||||
}
|
||||
|
||||
and_filter << "oid in (" << one_util::join(ids, ',') << ")";
|
||||
|
||||
dump(att, -2, -1, -1, and_filter.str(), "");
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
VirtualMachinePoolAccounting::VirtualMachinePoolAccounting()
|
||||
: RequestManagerPoolInfoFilter("one.vmpool.accounting",
|
||||
"Returns the virtual machine history records",
|
||||
@ -499,9 +540,10 @@ void VirtualMachinePoolMonitoring::request_execute(
|
||||
|
||||
string oss;
|
||||
string where;
|
||||
string and_clause = "";
|
||||
int rc;
|
||||
|
||||
int seconds = -1;
|
||||
|
||||
if ( filter_flag < GROUP )
|
||||
{
|
||||
att.resp_msg = "Incorrect filter_flag";
|
||||
@ -511,33 +553,12 @@ void VirtualMachinePoolMonitoring::request_execute(
|
||||
|
||||
if (paramList.size() > 2)
|
||||
{
|
||||
ostringstream oss;
|
||||
int s = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
|
||||
switch (s)
|
||||
{
|
||||
case 0: //Get last monitor value
|
||||
oss << one_db::vm_monitor_table << ".last_poll = "
|
||||
<< "(SELECT MAX(last_poll) FROM " << one_db::vm_monitor_table
|
||||
<< " AS t WHERE t.vmid = " << one_db::vm_monitor_table << ".vmid)";
|
||||
|
||||
and_clause = oss.str();
|
||||
break;
|
||||
|
||||
case -1: //Get all monitoring
|
||||
and_clause = "";
|
||||
break;
|
||||
|
||||
default: //Get monitor in last s seconds
|
||||
oss << one_db::vm_monitor_table << ".last_poll > " << time(nullptr) - s;
|
||||
and_clause = oss.str();
|
||||
break;
|
||||
}
|
||||
seconds = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
}
|
||||
|
||||
where_filter(att, filter_flag, -1, -1, and_clause, "", false, false, false, where);
|
||||
where_filter(att, filter_flag, -1, -1, "", "", false, false, false, where);
|
||||
|
||||
rc = (static_cast<VirtualMachinePool *>(pool))->dump_monitoring(oss, where);
|
||||
rc = (static_cast<VirtualMachinePool *>(pool))->dump_monitoring(oss, where, seconds);
|
||||
|
||||
if ( rc != 0 )
|
||||
{
|
||||
@ -719,41 +740,19 @@ void HostPoolMonitoring::request_execute(
|
||||
{
|
||||
string oss;
|
||||
string where;
|
||||
string and_clause = "";
|
||||
|
||||
int rc;
|
||||
|
||||
int seconds = -1;
|
||||
|
||||
if (paramList.size() > 1)
|
||||
{
|
||||
ostringstream oss;
|
||||
int s = xmlrpc_c::value_int(paramList.getInt(1));
|
||||
|
||||
switch (s)
|
||||
{
|
||||
case 0: //Get last monitor value
|
||||
oss << one_db::host_monitor_table << ".last_mon_time = "
|
||||
<< "(SELECT MAX(last_mon_time) FROM " << one_db::host_monitor_table
|
||||
<< " AS t WHERE t.hid = " << one_db::host_monitor_table << ".hid)";
|
||||
|
||||
and_clause = oss.str();
|
||||
break;
|
||||
|
||||
case -1: //Get all monitoring
|
||||
and_clause = "";
|
||||
break;
|
||||
|
||||
default: //Get monitor in last s seconds
|
||||
oss << one_db::host_monitor_table << ".last_mon_time > " << time(nullptr) - s;
|
||||
and_clause = oss.str();
|
||||
break;
|
||||
}
|
||||
|
||||
and_clause = oss.str();
|
||||
seconds = xmlrpc_c::value_int(paramList.getInt(1));
|
||||
}
|
||||
|
||||
where_filter(att, ALL, -1, -1, and_clause, "", false, false, false, where);
|
||||
where_filter(att, ALL, -1, -1, "", "", false, false, false, where);
|
||||
|
||||
rc = (static_cast<HostPool *>(pool))->dump_monitoring(oss, where);
|
||||
rc = (static_cast<HostPool *>(pool))->dump_monitoring(oss, where, seconds);
|
||||
|
||||
if ( rc != 0 )
|
||||
{
|
||||
|
@ -505,6 +505,8 @@ void VirtualMachineAction::request_execute(xmlrpc_c::paramList const& paramList,
|
||||
|
||||
VirtualMachine * vm;
|
||||
|
||||
RequestAttributes& att_aux(att);
|
||||
|
||||
// Compatibility with 4.x
|
||||
if (action_st == "shutdown-hard" || action_st == "delete" )
|
||||
{
|
||||
@ -520,12 +522,12 @@ void VirtualMachineAction::request_execute(xmlrpc_c::paramList const& paramList,
|
||||
// Update the authorization level for the action
|
||||
att.set_auth_op(action);
|
||||
|
||||
if (vm_authorization(id, 0, 0, att, 0, 0, 0) == false)
|
||||
if ((vm = get_vm_ro(id, att)) == nullptr)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if ((vm = get_vm(id, att)) == nullptr)
|
||||
if (vm_authorization(id, 0, 0, att, 0, 0, 0) == false)
|
||||
{
|
||||
return;
|
||||
}
|
||||
@ -542,35 +544,6 @@ void VirtualMachineAction::request_execute(xmlrpc_c::paramList const& paramList,
|
||||
return;
|
||||
}
|
||||
|
||||
// Generate quota information for resume action
|
||||
RequestAttributes& att_aux(att);
|
||||
|
||||
if (action == VMActions::RESUME_ACTION)
|
||||
{
|
||||
vm->get_template_attribute("MEMORY", memory);
|
||||
vm->get_template_attribute("CPU", cpu);
|
||||
|
||||
quota_tmpl.add("RUNNING_MEMORY", memory);
|
||||
quota_tmpl.add("RUNNING_CPU", cpu);
|
||||
quota_tmpl.add("RUNNING_VMS", 1);
|
||||
|
||||
quota_tmpl.add("VMS", 0);
|
||||
quota_tmpl.add("MEMORY", 0);
|
||||
quota_tmpl.add("CPU", 0);
|
||||
|
||||
att_aux.uid = vm->get_uid();
|
||||
att_aux.gid = vm->get_gid();
|
||||
}
|
||||
|
||||
vm->unlock();
|
||||
|
||||
if (action == VMActions::RESUME_ACTION && !quota_authorization("a_tmpl,
|
||||
Quotas::VIRTUALMACHINE, att_aux, att.resp_msg))
|
||||
{
|
||||
failure_response(ACTION, att);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (action)
|
||||
{
|
||||
case VMActions::TERMINATE_ACTION:
|
||||
@ -592,7 +565,36 @@ void VirtualMachineAction::request_execute(xmlrpc_c::paramList const& paramList,
|
||||
rc = dm->suspend(id, att, error);
|
||||
break;
|
||||
case VMActions::RESUME_ACTION:
|
||||
// Generate quota information for resume action
|
||||
vm->get_template_attribute("MEMORY", memory);
|
||||
vm->get_template_attribute("CPU", cpu);
|
||||
|
||||
quota_tmpl.add("RUNNING_MEMORY", memory);
|
||||
quota_tmpl.add("RUNNING_CPU", cpu);
|
||||
quota_tmpl.add("RUNNING_VMS", 1);
|
||||
|
||||
quota_tmpl.add("VMS", 0);
|
||||
quota_tmpl.add("MEMORY", 0);
|
||||
quota_tmpl.add("CPU", 0);
|
||||
|
||||
att_aux.uid = vm->get_uid();
|
||||
att_aux.gid = vm->get_gid();
|
||||
|
||||
|
||||
if (!quota_authorization("a_tmpl, Quotas::VIRTUALMACHINE, att_aux, att.resp_msg))
|
||||
{
|
||||
vm->unlock();
|
||||
failure_response(ACTION, att);
|
||||
return;
|
||||
}
|
||||
|
||||
rc = dm->resume(id, att, error);
|
||||
|
||||
if (rc < 0)
|
||||
{
|
||||
quota_rollback("a_tmpl, Quotas::VIRTUALMACHINE, att_aux);
|
||||
}
|
||||
|
||||
break;
|
||||
case VMActions::REBOOT_ACTION:
|
||||
rc = dm->reboot(id, false, att, error);
|
||||
@ -623,6 +625,8 @@ void VirtualMachineAction::request_execute(xmlrpc_c::paramList const& paramList,
|
||||
break;
|
||||
}
|
||||
|
||||
vm->unlock();
|
||||
|
||||
switch (rc)
|
||||
{
|
||||
case 0:
|
||||
@ -742,6 +746,52 @@ int set_vnc_port(VirtualMachine *vm, int cluster_id, RequestAttributes& att)
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
static int set_migrate_vnc_port(VirtualMachine *vm, int cluster_id, bool keep)
|
||||
{
|
||||
ClusterPool * cpool = Nebula::instance().get_clpool();
|
||||
|
||||
VectorAttribute * graphics = vm->get_template_attribute("GRAPHICS");
|
||||
|
||||
unsigned int previous_port;
|
||||
unsigned int port;
|
||||
|
||||
int rc;
|
||||
|
||||
// Do not update VM if no GRAPHICS or GRAPHICS/PORT defined
|
||||
if (graphics == nullptr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (graphics->vector_value("PORT", previous_port) != 0)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
//live migrations need to keep VNC port
|
||||
if (keep)
|
||||
{
|
||||
rc = cpool->set_vnc_port(cluster_id, previous_port);
|
||||
|
||||
port = previous_port;
|
||||
}
|
||||
else
|
||||
{
|
||||
rc = cpool->get_vnc_port(cluster_id, vm->get_oid(), port);
|
||||
}
|
||||
|
||||
if ( rc != 0 )
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
graphics->replace("PREVIOUS_PORT", previous_port);
|
||||
graphics->replace("PORT", port);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
@ -1063,6 +1113,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
|
||||
PoolObjectAuth * auth_ds_perms;
|
||||
|
||||
int c_hid;
|
||||
int c_cluster_id;
|
||||
int c_ds_id;
|
||||
string c_tm_mad, tm_mad;
|
||||
bool c_is_public_cloud;
|
||||
@ -1282,6 +1333,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
|
||||
}
|
||||
|
||||
c_is_public_cloud = host->is_public_cloud();
|
||||
c_cluster_id = host->get_cluster_id();
|
||||
|
||||
host->unlock();
|
||||
|
||||
@ -1380,7 +1432,8 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
oss << "Cannot migrate VM [" << id << "] to host [" << hid << "] and system datastore [" << ds_id << "]. Host is in cluster ["
|
||||
oss << "Cannot migrate VM [" << id << "] to host [" << hid
|
||||
<< "] and system datastore [" << ds_id << "]. Host is in cluster ["
|
||||
<< cluster_id << "], and the datastore is in cluster ["
|
||||
<< one_util::join(ds_cluster_ids, ',') << "]";
|
||||
|
||||
@ -1392,6 +1445,22 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
|
||||
return;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Request a new VNC port in the new cluster
|
||||
// -------------------------------------------------------------------------
|
||||
if ( c_cluster_id != cluster_id )
|
||||
{
|
||||
if ( set_migrate_vnc_port(vm, cluster_id, live) == -1 )
|
||||
{
|
||||
att.resp_msg = "No free VNC port available in the new cluster";
|
||||
failure_response(ACTION, att);
|
||||
|
||||
vm->unlock();
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// Add a new history record and update volatile DISK attributes
|
||||
// ------------------------------------------------------------------------
|
||||
@ -1416,7 +1485,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList
|
||||
// Migrate the VM
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
if (live == true && vm->get_lcm_state() == VirtualMachine::RUNNING )
|
||||
if (live && vm->get_lcm_state() == VirtualMachine::RUNNING )
|
||||
{
|
||||
dm->live_migrate(vm, att);
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ public:
|
||||
* @param obj, pointer to the object to schedule
|
||||
*
|
||||
*/
|
||||
const void schedule(ObjectXML * obj)
|
||||
void schedule(ObjectXML * obj)
|
||||
{
|
||||
vector<float> priority;
|
||||
const vector<Resource *> resources = get_match_resources(obj);
|
||||
|
@ -202,6 +202,9 @@ MySqlDB::MySqlDB(const string& s, int p, const string& u, const string& _p,
|
||||
{
|
||||
connections[i] = mysql_init(NULL);
|
||||
|
||||
bool reconnect = true;
|
||||
mysql_options(connections[i], MYSQL_OPT_RECONNECT, &reconnect);
|
||||
|
||||
rc = mysql_real_connect(connections[i], server.c_str(), user.c_str(),
|
||||
password.c_str(), 0, port, NULL, 0);
|
||||
|
||||
|
@ -163,6 +163,29 @@ int PostgreSqlDB::exec_ext(std::ostringstream& cmd, Callbackable *obj, bool quie
|
||||
|
||||
PGresult* res = PQexec(conn, c_str);
|
||||
|
||||
if ( PQstatus(conn) == CONNECTION_BAD )
|
||||
{
|
||||
PQreset(conn);
|
||||
|
||||
if ( PQstatus(conn) == CONNECTION_BAD )
|
||||
{
|
||||
NebulaLog::error("ONE", "Lost connection to DB, unable to reconnect");
|
||||
|
||||
PQclear(res);
|
||||
free_db_connection(conn);
|
||||
|
||||
return SqlDB::CONNECTION;
|
||||
}
|
||||
else
|
||||
{
|
||||
NebulaLog::info("ONE", "Succesfully reconnected to DB");
|
||||
|
||||
// Re-execute the query
|
||||
PQclear(res);
|
||||
res = PQexec(conn, c_str);
|
||||
}
|
||||
}
|
||||
|
||||
if ( PQresultStatus(res) != PGRES_COMMAND_OK &&
|
||||
PQresultStatus(res) != PGRES_TUPLES_OK )
|
||||
{
|
||||
|
@ -138,8 +138,12 @@ class SunstoneServer < CloudServer
|
||||
if OpenNebula.is_error?(rc)
|
||||
return [500, rc.to_json]
|
||||
else
|
||||
resource.info
|
||||
return [201, resource.to_json]
|
||||
rc = resource.info
|
||||
if OpenNebula.is_error?(rc)
|
||||
return [201, "{\"#{kind.upcase}\": {\"ID\": \"#{resource.id}\"}}"]
|
||||
else
|
||||
return [201, resource.to_json]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@ -217,6 +221,7 @@ class SunstoneServer < CloudServer
|
||||
return [404, resource.to_json]
|
||||
end
|
||||
rc = resource.perform_action(action_json)
|
||||
|
||||
if OpenNebula.is_error?(rc)
|
||||
return [500, rc.to_json]
|
||||
else
|
||||
|
@ -29,7 +29,7 @@ define(function(require) {
|
||||
var _html = function(host, hostShareFlag) {
|
||||
var hostShare = hostShareFlag ? host : host && host.HOST_SHARE;
|
||||
var hostMonitoring = hostShareFlag ? host : host.MONITORING && host.MONITORING.CAPACITY
|
||||
var maxCPU = parseInt(hostShare.TOTAL_CPU||0);
|
||||
var maxCPU = parseInt(hostShare.MAX_CPU||0);
|
||||
var infoStr;
|
||||
var allocatedCPUBar
|
||||
if (hostShare.CPU_USAGE) {
|
||||
|
@ -209,16 +209,19 @@ define(function(require) {
|
||||
});
|
||||
|
||||
// Custom Adapter Type
|
||||
var custom_attrs = ["vcenter_adapter_type",
|
||||
"vcenter_disk_type",
|
||||
"img_dev_prefix",
|
||||
"img_driver"];
|
||||
var custom_attrs = [
|
||||
"vcenter_adapter_type",
|
||||
"vcenter_disk_type",
|
||||
"img_dev_prefix",
|
||||
"img_driver"
|
||||
];
|
||||
|
||||
for (var i in custom_attrs){
|
||||
var field = custom_attrs[i];
|
||||
$(custom_attrs).each(function(_, field) {
|
||||
$('input[name="custom_'+field+'"]',context).parent().hide();
|
||||
$('select#'+field,context).change(function(){
|
||||
|
||||
$('select#'+field, context).on("change", function() {
|
||||
var field = $(this).attr('name');
|
||||
|
||||
if ($(this).val() == "custom"){
|
||||
$('input[name="custom_'+field+'"]',context).parent().show();
|
||||
$('input[name="custom_'+field+'"]',context).attr('required', '');
|
||||
@ -227,7 +230,7 @@ define(function(require) {
|
||||
$('input[name="custom_'+field+'"]',context).removeAttr('required');
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
$('#img_path,#img_size,#file-uploader', context).closest('.row').hide();
|
||||
|
||||
|
@ -19,30 +19,30 @@ define(function(require) {
|
||||
DEPENDENCIES
|
||||
*/
|
||||
|
||||
var BaseFormPanel = require('utils/form-panels/form-panel');
|
||||
var Sunstone = require('sunstone');
|
||||
var Locale = require('utils/locale');
|
||||
var Notifier = require('utils/notifier');
|
||||
var Tips = require('utils/tips');
|
||||
var ImagesTable = require('tabs/images-tab/datatable');
|
||||
var MarketPlacesTable = require('tabs/marketplaces-tab/datatable');
|
||||
var Config = require('sunstone-config');
|
||||
var WizardFields = require('utils/wizard-fields');
|
||||
var OpenNebula = require('opennebula');
|
||||
var BaseFormPanel = require("utils/form-panels/form-panel");
|
||||
var Sunstone = require("sunstone");
|
||||
var Locale = require("utils/locale");
|
||||
var Notifier = require("utils/notifier");
|
||||
var Tips = require("utils/tips");
|
||||
var ImagesTable = require("tabs/images-tab/datatable");
|
||||
var MarketPlacesTable = require("tabs/marketplaces-tab/datatable");
|
||||
var Config = require("sunstone-config");
|
||||
var WizardFields = require("utils/wizard-fields");
|
||||
var OpenNebula = require("opennebula");
|
||||
|
||||
/*
|
||||
TEMPLATES
|
||||
*/
|
||||
|
||||
var TemplateWizardHTML = require('hbs!./create/wizard');
|
||||
var TemplateAdvancedHTML = require('hbs!./create/advanced');
|
||||
var TemplateWizardHTML = require("hbs!./create/wizard");
|
||||
var TemplateAdvancedHTML = require("hbs!./create/advanced");
|
||||
|
||||
/*
|
||||
CONSTANTS
|
||||
*/
|
||||
|
||||
var FORM_PANEL_ID = require('./create/formPanelId');
|
||||
var TAB_ID = require('../tabId');
|
||||
var FORM_PANEL_ID = require("./create/formPanelId");
|
||||
var TAB_ID = require("../tabId");
|
||||
|
||||
/*
|
||||
CONSTRUCTOR
|
||||
@ -52,52 +52,48 @@ define(function(require) {
|
||||
this.formPanelId = FORM_PANEL_ID;
|
||||
this.tabId = TAB_ID;
|
||||
this.actions = {
|
||||
'create': {
|
||||
'title': Locale.tr("Create MarketPlace App"),
|
||||
'buttonText': Locale.tr("Create"),
|
||||
'resetButton': true
|
||||
"create": {
|
||||
"title": Locale.tr("Create MarketPlace App"),
|
||||
"buttonText": Locale.tr("Create"),
|
||||
"resetButton": true
|
||||
},
|
||||
'export': {
|
||||
'title': Locale.tr("Create MarketPlace App from Image"),
|
||||
'buttonText': Locale.tr("Create"),
|
||||
'resetButton': true
|
||||
"export": {
|
||||
"title": Locale.tr("Create MarketPlace App from Image"),
|
||||
"buttonText": Locale.tr("Create"),
|
||||
"resetButton": true
|
||||
}
|
||||
};
|
||||
|
||||
this.imagesTable = new ImagesTable(
|
||||
FORM_PANEL_ID + 'imagesTable',
|
||||
{ 'select': true,
|
||||
'selectOptions': {
|
||||
'filter_fn': function(image) {
|
||||
FORM_PANEL_ID + "imagesTable",
|
||||
{ "select": true,
|
||||
"selectOptions": {
|
||||
"filter_fn": function(image) {
|
||||
return OpenNebula.Datastore.isMarketExportSupported(image.DATASTORE_ID);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
this.marketPlacesTable = new MarketPlacesTable(
|
||||
FORM_PANEL_ID + 'marketPlacesTable',
|
||||
{ 'select': true,
|
||||
'selectOptions': {
|
||||
'filter_fn': function(market) {
|
||||
FORM_PANEL_ID + "marketPlacesTable",
|
||||
{ "select": true,
|
||||
"selectOptions": {
|
||||
"filter_fn": function(market) {
|
||||
var valid = market.ZONE_ID == config.zone_id;
|
||||
|
||||
if (valid){
|
||||
valid = $(config.oned_conf.MARKET_MAD_CONF)
|
||||
valid = $(config.oned_conf.MARKET_MAD_CONF)
|
||||
.filter(function(_, marketMad){
|
||||
return marketMad.NAME == market.MARKET_MAD && marketMad.APP_ACTIONS.indexOf('create') !== -1;
|
||||
return marketMad.NAME == market.MARKET_MAD && marketMad.APP_ACTIONS.indexOf("create") !== -1;
|
||||
}).length > 0;
|
||||
}
|
||||
|
||||
return valid;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
this.marketPlacesTableAdvanced = new MarketPlacesTable(
|
||||
FORM_PANEL_ID + 'marketPlacesTableAdvanced',
|
||||
{ 'select': true,
|
||||
'selectOptions': {
|
||||
'filter_fn': function(market) {
|
||||
FORM_PANEL_ID + "marketPlacesTableAdvanced",
|
||||
{ "select": true,
|
||||
"selectOptions": {
|
||||
"filter_fn": function(market) {
|
||||
return market.ZONE_ID == config.zone_id;
|
||||
}
|
||||
}
|
||||
@ -125,16 +121,16 @@ define(function(require) {
|
||||
|
||||
function _htmlWizard() {
|
||||
return TemplateWizardHTML({
|
||||
'formPanelId': this.formPanelId,
|
||||
'imagesTableHTML': this.imagesTable.dataTableHTML,
|
||||
'marketPlacesTableHTML': this.marketPlacesTable.dataTableHTML
|
||||
"formPanelId": this.formPanelId,
|
||||
"imagesTableHTML": this.imagesTable.dataTableHTML,
|
||||
"marketPlacesTableHTML": this.marketPlacesTable.dataTableHTML
|
||||
});
|
||||
}
|
||||
|
||||
function _htmlAdvanced() {
|
||||
return TemplateAdvancedHTML({
|
||||
'formPanelId': this.formPanelId,
|
||||
'marketPlacesTableAdvancedHTML': this.marketPlacesTableAdvanced.dataTableHTML
|
||||
"formPanelId": this.formPanelId,
|
||||
"marketPlacesTableAdvancedHTML": this.marketPlacesTableAdvanced.dataTableHTML
|
||||
});
|
||||
}
|
||||
|
||||
@ -151,8 +147,8 @@ define(function(require) {
|
||||
function _setImageId(imageId) {
|
||||
var selectedResources = {
|
||||
ids : imageId
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
this.imagesTable.selectResourceTableSelect(selectedResources);
|
||||
}
|
||||
|
||||
@ -165,11 +161,11 @@ define(function(require) {
|
||||
this.marketPlacesTableAdvanced.initialize();
|
||||
|
||||
this.imagesTable.idInput().
|
||||
attr('required', '').
|
||||
attr('wizard_field', 'ORIGIN_ID');
|
||||
attr("required", "").
|
||||
attr("wizard_field", "ORIGIN_ID");
|
||||
|
||||
this.marketPlacesTable.idInput().attr('required', '');
|
||||
this.marketPlacesTableAdvanced.idInput().attr('required', '');
|
||||
this.marketPlacesTable.idInput().attr("required", "");
|
||||
this.marketPlacesTableAdvanced.idInput().attr("required", "");
|
||||
}
|
||||
|
||||
|
||||
@ -187,7 +183,7 @@ define(function(require) {
|
||||
}
|
||||
|
||||
function _submitAdvanced(context) {
|
||||
var template = $('#template', context).val();
|
||||
var template = $("#template", context).val();
|
||||
var marketPlaceAppObj = {
|
||||
"marketplaceapp" : {
|
||||
"marketplaceapp_raw" : template
|
||||
|
@ -254,38 +254,52 @@ define(function(require) {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
$.each(that.service_template_json.DOCUMENT.TEMPLATE.BODY.roles, function(index, role){
|
||||
var temp_role = role;
|
||||
var temp_role = {};
|
||||
$.extend( temp_role, role);
|
||||
|
||||
var div_id = "user_input_role_"+index;
|
||||
var tmp_json = {};
|
||||
var stringCustomValues = TemplateUtils.templateToString(extra_info.merge_template.custom_attrs_values);
|
||||
|
||||
$.extend( tmp_json, WizardFields.retrieve($("#"+div_id, context)) );
|
||||
role.user_inputs_values = tmp_json;
|
||||
temp_role.user_inputs_values = tmp_json;
|
||||
|
||||
var stringCustomValues = TemplateUtils.templateToString(extra_info.merge_template.custom_attrs_values);
|
||||
if (stringCustomValues) {
|
||||
(temp_role.vm_template_contents)
|
||||
? temp_role.vm_template_contents += stringCustomValues
|
||||
: temp_role.vm_template_contents = stringCustomValues;
|
||||
}
|
||||
$("#instantiate_service_role_user_inputs").find("select").each(function(key, vm_group){
|
||||
|
||||
$("#instantiate_service_role_user_inputs").find("select").each(function(_, vm_group){
|
||||
var element = $(vm_group);
|
||||
rolevm_group = element.attr("data-role");
|
||||
vm_group_value = element.children("option:selected").val();
|
||||
|
||||
if(rolevm_group && role.name && rolevm_group === role.name && vm_group_value){
|
||||
if(temp_role.vm_template_contents === undefined){
|
||||
temp_role.vm_template_contents = "";
|
||||
}
|
||||
temp_role.vm_template_contents += TemplateUtils.templateToString({VMGROUP:{ROLE:role.name,VMGROUP_ID:vm_group_value}});
|
||||
|
||||
temp_role.vm_template_contents += TemplateUtils.templateToString({
|
||||
VMGROUP:{
|
||||
ROLE: role.name,
|
||||
VMGROUP_ID: vm_group_value
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
if(charters && charters.length){
|
||||
if(temp_role.vm_template_contents !== undefined){
|
||||
temp_role.vm_template_contents += charters;
|
||||
}else{
|
||||
temp_role.vm_template_contents = charters;
|
||||
}
|
||||
(temp_role.vm_template_contents !== undefined)
|
||||
? temp_role.vm_template_contents += charters
|
||||
: temp_role.vm_template_contents = charters;
|
||||
}
|
||||
|
||||
extra_info.merge_template.roles.push(temp_role);
|
||||
});
|
||||
|
||||
charters = "";
|
||||
}
|
||||
if (!service_name.length){ //empty name
|
||||
|
@ -22,7 +22,7 @@ define(function(require) {
|
||||
var Config = require("sunstone-config");
|
||||
var Locale = require("utils/locale");
|
||||
var Tips = require("utils/tips");
|
||||
var ImageTable = require("tabs/images-tab/datatable")
|
||||
var ImageTable = require("tabs/images-tab/datatable");
|
||||
var WizardFields = require("utils/wizard-fields");
|
||||
var UniqueId = require("utils/unique-id");
|
||||
var TemplateUtils = require("utils/template-utils");
|
||||
@ -82,6 +82,7 @@ define(function(require) {
|
||||
"select_callback": function(aData, options) {
|
||||
// If the image is selected by Id, avoid overwriting it with name+uname
|
||||
if ($("#IMAGE_ID", context).val() != aData[options.id_index]) {
|
||||
$("input[wizard_field]", context).val("");
|
||||
$("#IMAGE_ID", context).val("");
|
||||
$("#IMAGE", context).val(aData[options.name_index]);
|
||||
$("#IMAGE_UNAME", context).val(aData[options.uname_index]);
|
||||
@ -90,7 +91,7 @@ define(function(require) {
|
||||
}
|
||||
}
|
||||
});
|
||||
$("table#"+this.imageTable.dataTableId).css("table-layout", "fixed")
|
||||
$("table#"+this.imageTable.dataTableId).css("table-layout", "fixed");
|
||||
that.imageTable.refreshResourceTableSelect();
|
||||
|
||||
// Select Image or Volatile disk. The div is hidden depending on the selection, and the
|
||||
@ -209,7 +210,7 @@ define(function(require) {
|
||||
tmpl.SIZE = tmpl.SIZE * 1048576;
|
||||
tmpl.SIZE = tmpl.SIZE.toString();
|
||||
}
|
||||
|
||||
|
||||
var formatKvm = $("#FORMAT_KVM", context).val();
|
||||
var formatVcenter = $("#FORMAT_VCENTER", context).val();
|
||||
|
||||
@ -247,7 +248,7 @@ define(function(require) {
|
||||
if (templateJSON.IMAGE_ID != undefined) {
|
||||
var selectedResources = {
|
||||
ids : templateJSON.IMAGE_ID
|
||||
}
|
||||
};
|
||||
|
||||
this.imageTable.selectResourceTableSelect(selectedResources);
|
||||
|
||||
@ -257,7 +258,7 @@ define(function(require) {
|
||||
name: templateJSON.IMAGE,
|
||||
uname: templateJSON.IMAGE_UNAME
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
this.imageTable.selectResourceTableSelect(selectedResources);
|
||||
}
|
||||
|
@ -19,19 +19,21 @@ define(function(require) {
|
||||
DEPENDENCIES
|
||||
*/
|
||||
|
||||
var Locale = require('utils/locale');
|
||||
var Utils = require('../utils/common');
|
||||
var ResourcesTab = require('../utils/resources-tab');
|
||||
var OpenNebulaZone = require('opennebula/zone');
|
||||
var Locale = require("utils/locale");
|
||||
var Utils = require("../utils/common");
|
||||
var ResourcesTab = require("../utils/resources-tab");
|
||||
var OpenNebulaZone = require("opennebula/zone");
|
||||
var OpenNebula = require("opennebula");
|
||||
|
||||
/*
|
||||
CONSTANTS
|
||||
*/
|
||||
|
||||
var TAB_ID = require('../tabId');
|
||||
var PANEL_ID = require('./resources/panelId');
|
||||
var TAB_ID = require("../tabId");
|
||||
var PANEL_ID = require("./resources/panelId");
|
||||
var RESOURCE = "Vdc";
|
||||
var XML_ROOT = "VDC";
|
||||
var ZONES = [];
|
||||
|
||||
/*
|
||||
CONSTRUCTOR
|
||||
@ -50,6 +52,7 @@ define(function(require) {
|
||||
Panel.PANEL_ID = PANEL_ID;
|
||||
Panel.prototype.html = _html;
|
||||
Panel.prototype.setup = _setup;
|
||||
Panel.prototype.onShow = _onShow;
|
||||
|
||||
return Panel;
|
||||
|
||||
@ -61,9 +64,28 @@ define(function(require) {
|
||||
return this.resourcesTab.html();
|
||||
}
|
||||
|
||||
function _onShow(context){
|
||||
var that = this;
|
||||
var renderZones = "";
|
||||
$("select.vdc_zones_select", context).empty();
|
||||
if(ZONES && ZONES.length){
|
||||
ZONES.map(function(zone){
|
||||
if(zone.ZONE){
|
||||
renderZones += "<option value=\""+zone.ZONE.ID+"\">"+zone.ZONE.NAME+"</option>";
|
||||
}
|
||||
});
|
||||
$("select.vdc_zones_select", context).append(renderZones);
|
||||
}
|
||||
}
|
||||
|
||||
function _setup(context) {
|
||||
var that = this;
|
||||
var indexed_resources = Utils.indexedVdcResources(this.element);
|
||||
var indexed_resources = Utils.indexedVdcResources(that.element);
|
||||
if(indexed_resources && !indexed_resources.ZONE){
|
||||
OpenNebula.Zone.list({success:function(request, obj_list){
|
||||
ZONES = obj_list;
|
||||
}});
|
||||
}
|
||||
$.each(indexed_resources, function(zone_id,objects){
|
||||
that.resourcesTab.addResourcesZone(
|
||||
zone_id,
|
||||
@ -71,7 +93,6 @@ define(function(require) {
|
||||
context,
|
||||
indexed_resources);
|
||||
});
|
||||
|
||||
that.resourcesTab.setup(context);
|
||||
that.resourcesTab.onShow(context);
|
||||
}
|
||||
|
@ -19,21 +19,21 @@ define(function(require) {
|
||||
DEPENDENCIES
|
||||
*/
|
||||
|
||||
var OpennebulaVM = require('opennebula/vm');
|
||||
var BaseDialog = require('utils/dialogs/dialog');
|
||||
var TemplateHTML = require('hbs!./attach-nic/html');
|
||||
var Sunstone = require('sunstone');
|
||||
var Notifier = require('utils/notifier');
|
||||
var Tips = require('utils/tips');
|
||||
var NicTab = require('tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab');
|
||||
var WizardFields = require('utils/wizard-fields');
|
||||
var OpennebulaVM = require("opennebula/vm");
|
||||
var BaseDialog = require("utils/dialogs/dialog");
|
||||
var TemplateHTML = require("hbs!./attach-nic/html");
|
||||
var Sunstone = require("sunstone");
|
||||
var Notifier = require("utils/notifier");
|
||||
var Tips = require("utils/tips");
|
||||
var NicTab = require("tabs/templates-tab/form-panels/create/wizard-tabs/network/nic-tab");
|
||||
var WizardFields = require("utils/wizard-fields");
|
||||
|
||||
/*
|
||||
CONSTANTS
|
||||
*/
|
||||
|
||||
var DIALOG_ID = require('./attach-nic/dialogId');
|
||||
var TAB_ID = require('../tabId')
|
||||
var DIALOG_ID = require("./attach-nic/dialogId");
|
||||
var TAB_ID = require("../tabId");
|
||||
|
||||
/*
|
||||
CONSTRUCTOR
|
||||
@ -42,7 +42,7 @@ define(function(require) {
|
||||
function Dialog() {
|
||||
this.dialogId = DIALOG_ID;
|
||||
|
||||
this.nicTab = new NicTab(DIALOG_ID + 'NickTab');
|
||||
this.nicTab = new NicTab(DIALOG_ID + "NickTab");
|
||||
|
||||
BaseDialog.call(this);
|
||||
};
|
||||
@ -64,8 +64,8 @@ define(function(require) {
|
||||
|
||||
function _html() {
|
||||
return TemplateHTML({
|
||||
'dialogId': this.dialogId,
|
||||
'nicTabHTML': this.nicTab.html()
|
||||
"dialogId": this.dialogId,
|
||||
"nicTabHTML": this.nicTab.html()
|
||||
});
|
||||
}
|
||||
|
||||
@ -84,8 +84,9 @@ define(function(require) {
|
||||
$("#parent", context).toggle(this.checked);
|
||||
});
|
||||
|
||||
$('#' + DIALOG_ID + 'Form', context).submit(function() {
|
||||
$("#" + DIALOG_ID + "Form", context).submit(function() {
|
||||
var templateJSON = that.nicTab.retrieve(context);
|
||||
var selectedNetwork = Object.keys(templateJSON).length > 0 && templateJSON.constructor === Object;
|
||||
|
||||
if($("#cb_attach_rdp", context).prop("checked")) {
|
||||
templateJSON.RDP = "YES";
|
||||
@ -96,17 +97,21 @@ define(function(require) {
|
||||
|
||||
var obj = {
|
||||
"NIC_ALIAS": templateJSON
|
||||
}
|
||||
};
|
||||
} else {
|
||||
var obj = {
|
||||
"NIC": templateJSON
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Sunstone.runAction('VM.attachnic', that.element.ID, obj);
|
||||
if(selectedNetwork){
|
||||
Sunstone.runAction("VM.attachnic", that.element.ID, obj);
|
||||
Sunstone.getDialog(DIALOG_ID).hide();
|
||||
Sunstone.getDialog(DIALOG_ID).reset();
|
||||
}else{
|
||||
Notifier.notifyError("Select a network");
|
||||
}
|
||||
|
||||
Sunstone.getDialog(DIALOG_ID).hide();
|
||||
Sunstone.getDialog(DIALOG_ID).reset();
|
||||
return false;
|
||||
});
|
||||
|
||||
@ -117,9 +122,9 @@ define(function(require) {
|
||||
this.setNames( {tabId: TAB_ID} );
|
||||
|
||||
this.nicTab.onShow(context);
|
||||
$('#cb_attach_alias').prop('checked', false).change();
|
||||
|
||||
var showRdp = false, template = this.element.TEMPLATE
|
||||
$("#cb_attach_alias").prop("checked", false).change();
|
||||
|
||||
var showRdp = false, template = this.element.TEMPLATE;
|
||||
if (template.NIC) {
|
||||
showRdp = OpennebulaVM.hasRDP(template.NIC);
|
||||
|
||||
@ -133,7 +138,7 @@ define(function(require) {
|
||||
}
|
||||
|
||||
function _setElement(element) {
|
||||
this.element = element
|
||||
this.element = element;
|
||||
}
|
||||
|
||||
function _setNicsNames(nicsNames) {
|
||||
|
@ -348,18 +348,8 @@ define(function(require) {
|
||||
|
||||
if (Config.isTabActionEnabled("vms-tab", "VM.disk_resize")) {
|
||||
if (validateState(that,"VM.disk_resize") && !disk.CONTEXT) {
|
||||
if(Array.isArray(that.element.HISTORY_RECORDS.HISTORY)){
|
||||
var historyLenght = that.element.HISTORY_RECORDS.HISTORY.length - 1;
|
||||
if(that.element.LCM_STATE != "3" || that.element.HISTORY_RECORDS.HISTORY[historyLenght].VM_MAD != "vcenter"){
|
||||
actions += ('<a class="disk_resize nowrap" >\
|
||||
<i class="fas fa-expand-arrows-alt fa-fw" title="Resize"></i></a>');
|
||||
}
|
||||
} else {
|
||||
if(that.element.LCM_STATE != "3" || that.element.HISTORY_RECORDS.HISTORY.VM_MAD != "vcenter"){
|
||||
actions += ('<a class="disk_resize nowrap" >\
|
||||
<i class="fas fa-expand-arrows-alt fa-fw" title="Resize"></i></a>');
|
||||
}
|
||||
}
|
||||
actions += ('<a class="disk_resize nowrap" >\
|
||||
<i class="fas fa-expand-arrows-alt fa-fw" title="Resize"></i></a>');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -109,7 +109,8 @@ define(function(require) {
|
||||
"min-width":"8rem",
|
||||
"font-family": '"Lato","Helvetica Neue",Helvetica,Roboto,Arial,sans-serif',
|
||||
"font-weight": "100",
|
||||
"color":"#000"
|
||||
"color":"#000",
|
||||
"font-weight": "bold"
|
||||
};
|
||||
$(".describeCharter").off("mouseenter").on("mouseenter",function(e){
|
||||
$(this).find(".charterInfo").remove();
|
||||
@ -121,7 +122,7 @@ define(function(require) {
|
||||
if(typeof date === "number"){
|
||||
$(this).append(
|
||||
$("<div/>",{"class":classInfo}).css(styleTips).append(
|
||||
$("<b/>").css({"display":"inline"}).text(action).add(
|
||||
$("<div/>").css({"display":"inline"}).text(action).add(
|
||||
$("<div/>").css({"display":"inline"}).text(
|
||||
" "+Locale.tr("will run in")+" "+ScheduleActions.parseTime(date)
|
||||
)
|
||||
|
@ -184,6 +184,20 @@ define(function(require) {
|
||||
|
||||
if ( !nic["NETWORK_MODE"] || ( nic["NETWORK_MODE"] && nic["NETWORK_MODE"] !== "auto" ) )
|
||||
{
|
||||
var ip4 = $("input.manual_ip4", $(this)).val();
|
||||
if (ip4 != undefined){
|
||||
delete nic["IP"];
|
||||
if (ip4 != ""){
|
||||
nic["IP"] = ip4;
|
||||
}
|
||||
}
|
||||
var ip6 = $("input.manual_ip6", $(this)).val();
|
||||
if (ip6 != undefined){
|
||||
delete nic["IP6"];
|
||||
if (ip6 != ""){
|
||||
nic["IP6"] = ip6;
|
||||
}
|
||||
}
|
||||
var val = $(this).data("vnetsTable").retrieveResourceTableSelect();
|
||||
var tempNetwork = nic["NETWORK"];
|
||||
var preserveNetwork = false;
|
||||
@ -217,20 +231,6 @@ define(function(require) {
|
||||
if ($("input.floating_ip", $(this)).prop("checked")){
|
||||
nic["FLOATING_IP"] = "YES";
|
||||
}
|
||||
var ip4 = $("input.manual_ip4", $(this)).val();
|
||||
if (ip4 != undefined){
|
||||
delete nic["IP"];
|
||||
if (ip4 != ""){
|
||||
nic["IP"] = ip4;
|
||||
}
|
||||
}
|
||||
var ip6 = $("input.manual_ip6", $(this)).val();
|
||||
if (ip6 != undefined){
|
||||
delete nic["IP6"];
|
||||
if (ip6 != ""){
|
||||
nic["IP6"] = ip6;
|
||||
}
|
||||
}
|
||||
delete nic["VROUTER_MANAGEMENT"];
|
||||
if ($("input.management", $(this)).prop("checked")){
|
||||
nic["VROUTER_MANAGEMENT"] = "YES";
|
||||
|
@ -22,7 +22,10 @@ define(function(require) {
|
||||
};
|
||||
|
||||
function _getExtraInfo(context) {
|
||||
var custom_attrs_json = WizardFields.retrieve($(".custom_attr_class", context));
|
||||
var custom_attrs_json = WizardFields.retrieve(
|
||||
$("#instantiate_service_user_inputs .custom_attr_class", context)
|
||||
);
|
||||
|
||||
var networks_json = WizardFields.retrieve($(".network_attrs_class", context));
|
||||
var typePrefix = "type_";
|
||||
|
||||
|
@ -16,7 +16,7 @@
|
||||
|
||||
{{#if searchDropdownHTML}}
|
||||
<div id="{{dataTableSearchId}}-wrapper" class="input-group">
|
||||
<input class="input-group-field" id="{{dataTableSearchId}}" name="{{dataTableSearchId}}" type="search" placeholder="{{tr "Search"}}" />
|
||||
<input class="input-group-field" id="{{dataTableSearchId}}" name="{{dataTableSearchId}}" autocomplete="off" type="search" placeholder="{{tr "Search"}}" />
|
||||
<div class="input-group-button">
|
||||
<i class="fas fa-filter search-button" data-toggle="{{dataTableSearchId}}-dropdown"></i>
|
||||
</div>
|
||||
|
@ -362,8 +362,7 @@ helpers do
|
||||
logger.info { 'Unauthorized login attempt' }
|
||||
return [401, '']
|
||||
end
|
||||
|
||||
client = $cloud_auth.client(result)
|
||||
client = $cloud_auth.client(result, $conf[:one_xmlrpc])
|
||||
user_id = OpenNebula::User::SELF
|
||||
|
||||
user = OpenNebula::User.new_with_id(user_id, client)
|
||||
@ -381,7 +380,6 @@ helpers do
|
||||
if !two_factor_auth_token || two_factor_auth_token == ""
|
||||
return [202, { code: "two_factor_auth", uid: user.id }.to_json]
|
||||
end
|
||||
serverResponse =
|
||||
isTwoFactorAuthSuccessful = false
|
||||
if isHOTPConfigured && Sunstone2FAuth.authenticate(user[TWO_FACTOR_AUTH_SECRET_XPATH], two_factor_auth_token)
|
||||
isTwoFactorAuthSuccessful = true
|
||||
|
@ -3657,3 +3657,50 @@ void VirtualMachine::get_quota_template(VirtualMachineTemplate& quota_tmpl,
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void VirtualMachine::release_previous_vnc_port()
|
||||
{
|
||||
ClusterPool * cpool = Nebula::instance().get_clpool();
|
||||
|
||||
VectorAttribute * graphics = get_template_attribute("GRAPHICS");
|
||||
|
||||
unsigned int previous_port;
|
||||
|
||||
if (graphics == nullptr ||
|
||||
graphics->vector_value("PREVIOUS_PORT", previous_port) != 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
cpool->release_vnc_port(previous_history->cid, previous_port);
|
||||
|
||||
graphics->remove("PREVIOUS_PORT");
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
void VirtualMachine::rollback_previous_vnc_port()
|
||||
{
|
||||
ClusterPool * cpool = Nebula::instance().get_clpool();
|
||||
|
||||
VectorAttribute * graphics = get_template_attribute("GRAPHICS");
|
||||
|
||||
unsigned int previous_port;
|
||||
unsigned int port;
|
||||
|
||||
if (graphics == nullptr ||
|
||||
graphics->vector_value("PREVIOUS_PORT", previous_port) != 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if ( graphics->vector_value("PORT", port) == 0 )
|
||||
{
|
||||
cpool->release_vnc_port(history->cid, port);
|
||||
}
|
||||
|
||||
graphics->replace("PORT", previous_port);
|
||||
|
||||
graphics->remove("PREVIOUS_PORT");
|
||||
};
|
||||
|
||||
>>>>>>> one-5.12-new
|
||||
|
@ -413,10 +413,11 @@ int VirtualMachine::parse_graphics(string& error_str, Template * tmpl)
|
||||
}
|
||||
|
||||
string random_passwd = graphics->vector_value("RANDOM_PASSWD");
|
||||
string password = graphics->vector_value("PASSWD");
|
||||
|
||||
if ( !random_passwd.empty() )
|
||||
if ( !random_passwd.empty() && password.empty() )
|
||||
{
|
||||
string password = one_util::random_password();
|
||||
password = one_util::random_password();
|
||||
|
||||
if ( graphics->vector_value("TYPE") == "SPICE" )
|
||||
{
|
||||
|
@ -334,21 +334,72 @@ int VirtualMachinePool::dump_showback(string& oss,
|
||||
|
||||
int VirtualMachinePool::dump_monitoring(
|
||||
string& oss,
|
||||
const string& where)
|
||||
const string& where,
|
||||
const int seconds)
|
||||
{
|
||||
ostringstream cmd;
|
||||
|
||||
cmd << "SELECT " << one_db::vm_monitor_table << ".body FROM "
|
||||
<< one_db::vm_monitor_table
|
||||
<< " INNER JOIN " << one_db::vm_table
|
||||
<< " WHERE vmid = oid";
|
||||
|
||||
if ( !where.empty() )
|
||||
switch (seconds)
|
||||
{
|
||||
cmd << " AND " << where;
|
||||
}
|
||||
case 0: //Get last monitor value
|
||||
/*
|
||||
* SELECT vm_monitoring.body
|
||||
* FROM vm_monitoring
|
||||
* INNER JOIN (
|
||||
* SELECT vmid, MAX(last_poll) as last_poll
|
||||
* FROM vm_monitoring
|
||||
* GROUP BY vmid
|
||||
* ) lpt on lpt.vmid = vm_monitoring.vmid AND lpt.last_poll = vm_monitoring.last_poll
|
||||
* INNER JOIN vm_pool ON vm_monitoring.vmid = oid
|
||||
* ORDER BY oid;
|
||||
*/
|
||||
cmd << "SELECT " << one_db::vm_monitor_table << ".body "
|
||||
<< "FROM " << one_db::vm_monitor_table << " INNER JOIN ("
|
||||
<< "SELECT vmid, MAX(last_poll) as last_poll FROM "
|
||||
<< one_db::vm_monitor_table << " GROUP BY vmid) as lpt "
|
||||
<< "ON lpt.vmid = " << one_db::vm_monitor_table << ".vmid "
|
||||
<< "AND lpt.last_poll = " << one_db::vm_monitor_table
|
||||
<< ".last_poll INNER JOIN " << one_db::vm_table
|
||||
<< " ON " << one_db::vm_monitor_table << ".vmid = oid";
|
||||
|
||||
cmd << " ORDER BY vmid, " << one_db::vm_monitor_table << ".last_poll;";
|
||||
if ( !where.empty() )
|
||||
{
|
||||
cmd << " WHERE " << where;
|
||||
}
|
||||
|
||||
cmd << " ORDER BY oid";
|
||||
|
||||
break;
|
||||
|
||||
case -1: //Get all monitoring
|
||||
cmd << "SELECT " << one_db::vm_monitor_table << ".body FROM "
|
||||
<< one_db::vm_monitor_table << " INNER JOIN " << one_db::vm_table
|
||||
<< " ON vmid = oid";
|
||||
|
||||
if ( !where.empty() )
|
||||
{
|
||||
cmd << " WHERE " << where;
|
||||
}
|
||||
|
||||
cmd << " ORDER BY vmid, " << one_db::vm_monitor_table << ".last_poll;";
|
||||
|
||||
break;
|
||||
|
||||
default: //Get monitor in last s seconds
|
||||
cmd << "SELECT " << one_db::vm_monitor_table << ".body FROM "
|
||||
<< one_db::vm_monitor_table << " INNER JOIN " << one_db::vm_table
|
||||
<< " ON vmid = oid WHERE " << one_db::vm_monitor_table
|
||||
<< ".last_poll > " << time(nullptr) - seconds;
|
||||
|
||||
if ( !where.empty() )
|
||||
{
|
||||
cmd << " ANS " << where;
|
||||
}
|
||||
|
||||
cmd << " ORDER BY vmid, " << one_db::vm_monitor_table << ".last_poll;";
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
return PoolSQL::dump(oss, "MONITORING_DATA", cmd);
|
||||
}
|
||||
@ -966,7 +1017,7 @@ void VirtualMachinePool::delete_attach_disk(int vid)
|
||||
void VirtualMachinePool::delete_attach_nic(int vid)
|
||||
{
|
||||
VirtualMachine * vm;
|
||||
VirtualMachineNic * nic;
|
||||
VirtualMachineNic * nic, * p_nic;
|
||||
|
||||
int uid;
|
||||
int gid;
|
||||
@ -1010,6 +1061,18 @@ void VirtualMachinePool::delete_attach_nic(int vid)
|
||||
nic->vector_value("ALIAS_ID", alias_id);
|
||||
|
||||
vm->clear_nic_alias_context(parent_id, alias_id);
|
||||
|
||||
p_nic = vm->get_nic(parent_id);
|
||||
|
||||
// As NIC is an alias, parent ALIAS_IDS array should be updated
|
||||
// to remove the alias_id
|
||||
std::set<int> p_a_ids;
|
||||
|
||||
one_util::split_unique(p_nic->vector_value("ALIAS_IDS"), ',', p_a_ids);
|
||||
|
||||
p_a_ids.erase(nic_id);
|
||||
|
||||
p_nic->replace("ALIAS_IDS", one_util::join(p_a_ids, ','));
|
||||
}
|
||||
|
||||
uid = vm->get_uid();
|
||||
|
@ -30,13 +30,12 @@ DATA=`virsh --connect $LIBVIRT_URI create $DEP_FILE`
|
||||
if [ "x$?" = "x0" ]; then
|
||||
|
||||
DOMAIN_ID=$(echo $DATA | sed 's/Domain //' | sed 's/ created from .*$//')
|
||||
echo $DOMAIN_ID
|
||||
UUID=$(virsh --connect $LIBVIRT_URI dominfo $DOMAIN_ID | grep UUID: | awk '{print $2}')
|
||||
echo $UUID
|
||||
|
||||
# redefine potential snapshots
|
||||
for SNAPSHOT_MD_XML in $(ls ${DEP_FILE_LOCATION}/snap-*.xml 2>/dev/null); do
|
||||
|
||||
# query UUID, but only once
|
||||
UUID=${UUID:-$(virsh --connect $LIBVIRT_URI dominfo $DOMAIN_ID | grep UUID: | awk '{print $2}')}
|
||||
|
||||
# replace uuid in the snapshot metadata xml
|
||||
sed -i "s%<uuid>[[:alnum:]-]*</uuid>%<uuid>$UUID</uuid>%" $SNAPSHOT_MD_XML
|
||||
|
@ -39,6 +39,7 @@ class FirecrackerConfiguration < Hash
|
||||
FIRECRACKERRC = '../../etc/vmm/firecracker/firecrackerrc'
|
||||
|
||||
def initialize
|
||||
super
|
||||
replace(DEFAULT_CONFIGURATION)
|
||||
|
||||
begin
|
||||
|
@ -54,11 +54,6 @@ module NSXDriver
|
||||
# ATTRIBUTES
|
||||
attr_reader :one_section_name
|
||||
|
||||
# CONSTRUCTOR
|
||||
def initialize(nsx_client)
|
||||
super(nsx_client)
|
||||
end
|
||||
|
||||
def self.new_child(nsx_client)
|
||||
case nsx_client
|
||||
when NSXTClient
|
||||
|
@ -22,10 +22,6 @@ module NSXDriver
|
||||
|
||||
# CONSTRUCTOR
|
||||
|
||||
def initialize(nsx_client)
|
||||
super(nsx_client)
|
||||
end
|
||||
|
||||
def self.new_child(nsx_client, id = nil)
|
||||
case nsx_client.nsx_type.upcase
|
||||
when NSXConstants::NSXT
|
||||
|
@ -25,12 +25,6 @@ module NSXDriver
|
||||
attr_reader :display_name
|
||||
attr_reader :description
|
||||
|
||||
# CONSTRUCTOR
|
||||
|
||||
def initialize(nsx_client)
|
||||
super(nsx_client)
|
||||
end
|
||||
|
||||
def ls?; end
|
||||
|
||||
# Get logical switch's name
|
||||
|
@ -21,11 +21,6 @@ module NSXDriver
|
||||
# ATTRIBUTES
|
||||
attr_reader :tz_id
|
||||
|
||||
# CONSTRUCTOR
|
||||
def initialize(nsx_client)
|
||||
super(nsx_client)
|
||||
end
|
||||
|
||||
def self.new_child(nsx_client)
|
||||
case nsx_client.nsx_type.upcase
|
||||
when NSXConstants::NSXT
|
||||
|
@ -372,12 +372,18 @@ class DatacenterFolder
|
||||
end
|
||||
|
||||
# Determine if a network must be excluded from the list
|
||||
def exclude_network?(vc_network, one_host, args)
|
||||
def exclude_network?(vc_network, one_host, args, vc_network_hash)
|
||||
|
||||
vc_network_ref = vc_network_hash[:vc_network_ref]
|
||||
vc_network_name = vc_network_hash[:vc_network_name]
|
||||
vc_network_host = vc_network_hash[:vc_network_host]
|
||||
vc_network_tag = vc_network_hash[:vc_network_tag]
|
||||
|
||||
# Exclude some networks if filter = true
|
||||
if args[:filter]
|
||||
if ( one_host && one_host['TEMPLATE/NSX_PASSWORD'].nil?)
|
||||
# Only NSX-V and NSX-T can be excluded
|
||||
network_type = VCenterDriver::Network.get_network_type(vc_network)
|
||||
network_type = VCenterDriver::Network.get_network_type(vc_network, vc_network_name)
|
||||
if network_type == VCenterDriver::Network::NETWORK_TYPE_NSXT ||\
|
||||
network_type == VCenterDriver::Network::NETWORK_TYPE_NSXV
|
||||
|
||||
@ -385,17 +391,17 @@ class DatacenterFolder
|
||||
end
|
||||
end
|
||||
# Exclude networks without hosts
|
||||
if vc_network['host'].empty?
|
||||
if vc_network_host.empty?
|
||||
return true
|
||||
end
|
||||
# Exclude DVS uplinks
|
||||
unless vc_network['tag'].empty?
|
||||
if vc_network['tag'][0][:key] == 'SYSTEM/DVS.UPLINKPG'
|
||||
unless vc_network_tag.empty?
|
||||
if vc_network_tag[0][:key] == 'SYSTEM/DVS.UPLINKPG'
|
||||
return true
|
||||
end
|
||||
end
|
||||
# Exclude portgroup used for VXLAN communication in NSX
|
||||
if vc_network['name'].match(/^vxw-vmknicPg-dvs-(.*)/)
|
||||
if vc_network_name.match(/^vxw-vmknicPg-dvs-(.*)/)
|
||||
return true
|
||||
end
|
||||
return false
|
||||
@ -411,67 +417,85 @@ class DatacenterFolder
one_host,
args)

full_process = !args[:short]

vc_network_ref = vc_network._ref
vc_network_name = vc_network.name
vc_network_host = vc_network['host']
vc_network_tag = vc_network['tag']

vc_network_hash = {}
vc_network_hash[:vc_network_ref] = vc_network_ref
vc_network_hash[:vc_network_name] = vc_network_name
vc_network_hash[:vc_network_host] = vc_network_host
vc_network_hash[:vc_network_tag] = vc_network_tag

# Initialize network hash
network = {}
# Add name to network hash
network[vc_network._ref] = {'name' => vc_network.name}
network[vc_network_ref] = {'name' => vc_network_name}
# By default no network is excluded
network[vc_network._ref][:excluded] = false
network[vc_network_ref][:excluded] = false

# Initialize opts hash used to inject data into one template
opts = {}

# Add network type to network hash
network_type = VCenterDriver::Network.get_network_type(vc_network)
network[vc_network._ref][:network_type] = network_type
if full_process
# Add network type to network hash
network_type = VCenterDriver::Network.get_network_type(vc_network, vc_network_name)
network[vc_network_ref][:network_type] = network_type
end

# Determine if the network must be excluded
network[vc_network._ref][:excluded] = exclude_network?(vc_network,
network[vc_network_ref][:excluded] = exclude_network?(vc_network,
one_host,
args)
return nil if network[vc_network._ref][:excluded] == true
args,
vc_network_hash)
return nil if network[vc_network_ref][:excluded] == true

case network[vc_network._ref][:network_type]
if full_process
case network[vc_network_ref][:network_type]
# Distributed PortGroups
when VCenterDriver::Network::NETWORK_TYPE_DPG
network[vc_network._ref][:sw_name] = \
network[vc_network_ref][:sw_name] = \
vc_network.config.distributedVirtualSwitch.name
# For DistributedVirtualPortgroups there is networks and uplinks
network[vc_network._ref][:uplink] = \
network[vc_network_ref][:uplink] = \
vc_network.config.uplink
#network[vc_network._ref][:uplink] = false
#network[vc_network_ref][:uplink] = false
# NSX-V PortGroups
when VCenterDriver::Network::NETWORK_TYPE_NSXV
network[vc_network._ref][:sw_name] = \
network[vc_network_ref][:sw_name] = \
vc_network.config.distributedVirtualSwitch.name
# For NSX-V ( is the same as DistributedVirtualPortgroups )
# there is networks and uplinks
network[vc_network._ref][:uplink] = \
network[vc_network_ref][:uplink] = \
vc_network.config.uplink
network[vc_network._ref][:uplink] = false
network[vc_network_ref][:uplink] = false
# Standard PortGroups
when VCenterDriver::Network::NETWORK_TYPE_PG
# There is no uplinks for standard portgroups, so all Standard
# PortGroups are networks and no uplinks
network[vc_network._ref][:uplink] = false
network[vc_network._ref][:sw_name] = vSwitch(vc_network)
network[vc_network_ref][:uplink] = false
network[vc_network_ref][:sw_name] = vSwitch(vc_network)
# NSX-T PortGroups
when VCenterDriver::Network::NETWORK_TYPE_NSXT
network[vc_network._ref][:sw_name] = \
network[vc_network_ref][:sw_name] = \
vc_network.summary.opaqueNetworkType
# There is no uplinks for NSX-T networks, so all NSX-T networks
# are networks and no uplinks
network[vc_network._ref][:uplink] = false
network[vc_network_ref][:uplink] = false
else
raise 'Unknown network type: ' \
"#{network[vc_network._ref][:network_type]}"
"#{network[vc_network_ref][:network_type]}"
end
end

# Multicluster nets support
network[vc_network._ref][:clusters] = {}
network[vc_network._ref][:clusters][:refs] = []
network[vc_network._ref][:clusters][:one_ids] = []
network[vc_network._ref][:clusters][:names] = []
network[vc_network_ref][:clusters] = {}
network[vc_network_ref][:clusters][:refs] = []
network[vc_network_ref][:clusters][:one_ids] = []
network[vc_network_ref][:clusters][:names] = []

# Get hosts related to this network and add them if is not
# excluded
@ -479,43 +503,52 @@ class DatacenterFolder
vc_hosts.each do |vc_host|
# Get vCenter Cluster
vc_cluster = vc_host.parent
vc_cluster_ref = vc_cluster._ref
vc_cluster_name = vc_cluster.name
# Get one host from each vCenter cluster
one_host = VCenterDriver::VIHelper
.find_by_ref(OpenNebula::HostPool,
"TEMPLATE/VCENTER_CCR_REF",
vc_cluster._ref,
vc_cluster_ref,
vcenter_uuid)
# Check if network is excluded from each host
next if exclude_network?(vc_network,one_host,args)
next if exclude_network?(vc_network,one_host,args, vc_network_hash)
# Insert vCenter cluster ref
network[vc_network._ref][:clusters][:refs] << vc_cluster._ref
network[vc_network_ref][:clusters][:refs] << vc_cluster_ref
# Insert OpenNebula cluster id
cluster_id = one_cluster_id(one_host)
network[vc_network._ref][:clusters][:one_ids] << cluster_id
network[vc_network_ref][:clusters][:one_ids] << cluster_id
# Insert vCenter cluster name
network[vc_network._ref][:clusters][:names] << vc_cluster.name
opts[:dc_name] = vc_cluster.name
network[vc_network_ref][:clusters][:names] << vc_cluster_name

opts[:dc_name] = vc_cluster_name
end

# Remove duplicate entries
network[vc_network._ref][:clusters][:refs].uniq!
network[vc_network._ref][:clusters][:one_ids].uniq!
network[vc_network._ref][:clusters][:names].uniq!
network[vc_network_ref][:clusters][:refs].uniq!
network[vc_network_ref][:clusters][:one_ids].uniq!
network[vc_network_ref][:clusters][:names].uniq!

# Mark network as processed
network[vc_network._ref][:processed] = true
network[vc_network_ref][:processed] = true

if full_process
# General net_info related to datacenter
opts[:vcenter_uuid] = vcenter_uuid
opts[:vcenter_instance_name] = vcenter_instance_name
opts[:network_name] = network[vc_network._ref]['name']
opts[:network_name] = network[vc_network_ref]['name']
opts[:network_ref] = network.keys.first
opts[:network_type] = network[vc_network._ref][:network_type]
opts[:sw_name] = network[vc_network._ref][:sw_name]
opts[:network_type] = network[vc_network_ref][:network_type]
opts[:sw_name] = network[vc_network_ref][:sw_name]

network[vc_network._ref] = \
network[vc_network._ref].merge(VCenterDriver::Network
network[vc_network_ref] = \
network[vc_network_ref].merge(VCenterDriver::Network
.to_one_template(opts))
else
network[vc_network_ref][:ref] = vc_network_ref
network[vc_network_ref][:name] = network[vc_network_ref]['name']
end

network
end
@ -49,11 +49,11 @@ class FileHelper
end

def self.is_remote_or_needs_unpack?(file)
return !is_remote?(file).nil? || needs_unpack?(file)
return !remote?(file).nil? || needs_unpack?(file)
end

def self.is_remote?(file)
file.match(%r{^https?://})
def self.remote?(file)
file.match(%r{^https?://}) || file.match(%r{^s3?://})
end

def self.is_vmdk?(file)
@ -203,7 +203,7 @@ class FileHelper
descriptor_name = File.basename vcenter_url.path
temp_folder = VAR_LOCATION + "/vcenter/" + descriptor_name + "/"
FileUtils.mkdir_p(temp_folder) if !File.directory?(temp_folder)

image_path = File.dirname(vcenter_url.host+vcenter_url.path)
self.download_vmdks(files_to_download, image_path, temp_folder, ds)
@ -248,10 +248,10 @@ class Network
one_vn
end

def self.get_network_type(network)
def self.get_network_type(network, network_name)
case network
when RbVmomi::VIM::DistributedVirtualPortgroup
if network['name']
if network_name
.match(/^vxw-dvs-(.*)-virtualwire-(.*)-sid-(.*)/)
VCenterDriver::Network::NETWORK_TYPE_NSXV
else
@ -383,7 +383,7 @@ class NetImporter < VCenterDriver::VcImporter
end

def get_list(args = {})
dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client)
dc_folder = VCenterDriver::DatacenterFolder.new(@vi_client)

# OpenNebula's VirtualNetworkPool
npool = VCenterDriver::VIHelper.one_pool(OpenNebula::VirtualNetworkPool, false)
@ -20,10 +20,6 @@ module VirtualMachineDevice

attr_reader :size

def initialize(id, one_res, vc_res)
super(id, one_res, vc_res)
end

# Create the OpenNebula disk representation
# Allow us to create the class without vCenter representation
# example: attached disks not synced with vCenter
@ -18,10 +18,6 @@ module VirtualMachineDevice
# Nic class
class Nic < Device

def initialize(id, one_res, vc_res)
super(id, one_res, vc_res)
end

# Create the OpenNebula nic representation
# Allow as to create the class without vCenter representation
# example: attached nics not synced with vCenter
@ -465,7 +465,10 @@ module VirtualMachineMonitor
info_disks.each do |disk|
next if disk[1].no_exists?

str_info << "DISK_#{disk[0]}_ACTUAL_PATH=\"[" <<
# Delete special characters
name = disk[0].gsub(/[^0-9A-Za-z]/, '_')

str_info << "DISK_#{name}_ACTUAL_PATH=\"[" <<
disk[1].ds.name << '] ' << disk[1].path << '" ' << "\n"
end
@ -835,7 +835,7 @@ class Template

res[:net_name] = deviceNetwork.name
res[:net_ref] = deviceNetwork._ref
res[:pg_type] = VCenterDriver::Network.get_network_type(deviceNetwork)
res[:pg_type] = VCenterDriver::Network.get_network_type(deviceNetwork, res[:net_name])
res[:network] = deviceNetwork

res
@ -58,6 +58,7 @@ class VCenterConf < Hash
}

def initialize
super
replace(DEFAULT_CONFIGURATION)
begin
vcenterrc_path = "#{VAR_LOCATION}/remotes/etc/vmm/vcenter/vcenterrc"
@ -1266,9 +1266,10 @@ void AddressRange::set_vnet(VectorAttribute *nic, const vector<string> &inherit)

for (it = inherit.begin(); it != inherit.end(); it++)
{
string current_val = nic->vector_value(*it);
string inherit_val = attr->vector_value(*it);

if (!inherit_val.empty())
if (current_val.empty() && !inherit_val.empty())
{
nic->replace((*it).c_str(), inherit_val);
}
@ -1525,7 +1526,7 @@ int AddressRange::free_addr(PoolObjectSQL::ObjectType ot, int obid,

unsigned int index = mac_i[0] - mac[0];

if (index < 0 || index >= size)
if ( mac[0] > mac_i[0] || index >= size)
{
return -1;
}
@ -1561,7 +1562,7 @@ int AddressRange::free_addr_by_ip(PoolObjectSQL::ObjectType ot, int obid,

unsigned int index = ip_i - ip;

if (index < 0 || index >= size)
if (ip > ip_i || index >= size)
{
return -1;
}
@ -1596,7 +1597,7 @@ int AddressRange::free_addr_by_ip6(PoolObjectSQL::ObjectType ot, int obid,

unsigned int index = ip_i[0] - ip6[0];

if (index < 0 || index >= size || ip6[3] != ip_i[3] || ip6[2] != ip_i[2]
if (ip6[0] > ip_i[0] || index >= size || ip6[3] != ip_i[3] || ip6[2] != ip_i[2]
|| ip6[1] != ip_i[1])
{
return -1;
@ -1663,7 +1664,7 @@ int AddressRange::free_addr_by_range(PoolObjectSQL::ObjectType ot, int obid,

string error_msg;

if ((0 <= index) && (index < size))
if ((mac[0] <= mac_i[0]) && (index < size))
{
map<unsigned int, long long>::iterator it = allocated.find(index);
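A minimal standalone C++ sketch (not OpenNebula code; the variable names and values are illustrative) of why the old "index < 0" guards in the free_addr* hunks above were dead code: the index is an unsigned int, so subtracting a larger value wraps around instead of going negative, and the patch therefore compares the raw operands before subtracting.

#include <iostream>

int main()
{
    unsigned int first_addr = 10; // assumed first address of the range
    unsigned int asked_addr = 7;  // assumed address below the range

    unsigned int index = asked_addr - first_addr; // wraps to a huge positive value

    std::cout << (index < 0) << "\n";              // prints 0: the old check can never fire
    std::cout << (first_addr > asked_addr) << "\n"; // prints 1: the new check rejects it

    return 0;
}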
@ -850,16 +850,6 @@ int VirtualNetwork::nic_attribute(
nic->replace("BRIDGE_TYPE", bridge_type);
}

for (it = inherit_attrs.begin(); it != inherit_attrs.end(); it++)
{
PoolObjectSQL::get_template_attribute(*it, inherit_val);

if (!inherit_val.empty())
{
nic->replace(*it, inherit_val);
}
}

//--------------------------------------------------------------------------
// Get the lease from the Virtual Network
//--------------------------------------------------------------------------
@ -899,6 +889,17 @@ int VirtualNetwork::nic_attribute(
inherit_attrs);
}

for (it = inherit_attrs.begin(); it != inherit_attrs.end(); it++)
{
string current_val = nic->vector_value(*it);
PoolObjectSQL::get_template_attribute(*it, inherit_val);

if (current_val.empty() && !inherit_val.empty())
{
nic->replace(*it, inherit_val);
}
}

//--------------------------------------------------------------------------
// Copy the security group IDs
//--------------------------------------------------------------------------
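A minimal sketch (plain std::map stand-ins, not the real VectorAttribute/PoolObjectSQL classes) of the inheritance rule the AddressRange and VirtualNetwork hunks above introduce: a value coming from the network template is copied into the NIC only when the NIC does not already define that attribute.

#include <iostream>
#include <map>
#include <string>

int main()
{
    // Assumed example attributes: the user set MTU on the NIC explicitly
    std::map<std::string, std::string> nic  = { {"MTU", "1400"} };
    std::map<std::string, std::string> vnet = { {"MTU", "1500"}, {"VLAN_ID", "100"} };

    for (const auto& attr : vnet)
    {
        const std::string current_val = nic[attr.first];

        if (current_val.empty() && !attr.second.empty())
        {
            nic[attr.first] = attr.second; // inherit only attributes the NIC left empty
        }
    }

    std::cout << nic["MTU"] << " " << nic["VLAN_ID"] << "\n"; // prints: 1400 100

    return 0;
}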