diff --git a/.gitignore b/.gitignore index 4dc9354b9a..8a549e71de 100644 --- a/.gitignore +++ b/.gitignore @@ -28,7 +28,7 @@ src/vmm_mad/remotes/lxd/tests/ src/oca/python/pyone/bindings src/oca/python/build/ src/oca/python/dist/ -src/oca/python/opennebula.egg-info/ +src/oca/python/pyone.egg-info/ src/oca/python/doc/ src/docker_machine/pkg diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000000..e73b1c1b51 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,27 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +dist: xenial +language: generic +before_install: + - sudo apt-get install -y libsystemd-dev bash-completion bison debhelper default-jdk flex javahelper libmysql++-dev libsqlite3-dev libssl-dev libws-commons-util-java libxml2-dev libxmlrpc3-client-java libxmlrpc3-common-java libxslt1-dev libcurl4-openssl-dev ruby scons libxmlrpc-c++8-dev npm libvncserver-dev + - gem install rubocop + - sudo npm install -g bower + - sudo npm install -g grunt + - sudo npm install -g grunt-cli + - (cd src/sunstone/public && npm install && bower install) +script: + - set -o errexit; source .travis/smoke_tests.sh diff --git a/.travis/README.md b/.travis/README.md new file mode 100644 index 0000000000..2c7562b5a2 --- /dev/null +++ b/.travis/README.md @@ -0,0 +1,10 @@ +## Travis smoke tests + +The `.travis/tests` directory contains scripts for each smoke test. + +The smoke_test.sh script is called which iterates on each script, and it exits and logs on any failure. To add more tests, simply create a new file on `.travis/tests`. + +Each test should: + + - have a number as prefix to define the order. Renaming is allowed, the rule is to execute the less costly tests (in time) first + - return 0 on success, other number on error diff --git a/.travis/smoke_tests.sh b/.travis/smoke_tests.sh new file mode 100755 index 0000000000..b7638830b0 --- /dev/null +++ b/.travis/smoke_tests.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +#------------------------------------------------------------------------------- +# Smoke tests for OpenNebula, to be triggered by travis or manually +# It executes all scripts in 'tests' folder and expects 0 exit code +#------------------------------------------------------------------------------- + +# default parameters values + +LOG_FILE='smoke_tests.results' + +check_test() { + local TEST=$1 + + echo "Executing test $TEST" >> ${LOG_FILE} + eval $TEST >> ${LOG_FILE} 2>&1 + RC=$? + echo "RC for $TEST is $RC" + return $RC +} + +for smoke_test in .travis/tests/*.sh; do + check_test "$smoke_test" || break +done + +if [ $RC == 0 ]; then + echo "All tests OK!" +else + echo "Test failed: "$smoke_test + echo "Log follows:" + cat $LOG_FILE +fi + +exit $RC diff --git a/.travis/tests/01-rubocop.sh b/.travis/tests/01-rubocop.sh new file mode 100755 index 0000000000..8a85967068 --- /dev/null +++ b/.travis/tests/01-rubocop.sh @@ -0,0 +1,19 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# lint ruby code + +ln -s share/linters/.rubocop.yml . && rubocop diff --git a/.travis/tests/02-scons.sh b/.travis/tests/02-scons.sh new file mode 100755 index 0000000000..b148eb2372 --- /dev/null +++ b/.travis/tests/02-scons.sh @@ -0,0 +1,19 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +# check that OpenNebula compiles + +scons sunstone=yes mysql=yes systemd=yes new_xmlrpc=yes diff --git a/.travis/tests/03-oned.sh b/.travis/tests/03-oned.sh new file mode 100755 index 0000000000..54fe1bb7e4 --- /dev/null +++ b/.travis/tests/03-oned.sh @@ -0,0 +1,33 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# install oned system wide + +sudo ./install.sh -u travis + +# Set credentials +mkdir $HOME/.one +echo "oneadmin:opennebula" > $HOME/.one/one_auth + +# Install gems +/usr/share/one/install_gems --yes + +# start oned + +one start + +# check it's up +timeout 60 sh -c 'until nc -z $0 $1; do sleep 1; done' localhost 2633 diff --git a/.travis/tests/04-sunstone.sh b/.travis/tests/04-sunstone.sh new file mode 100755 index 0000000000..dc37e263b2 --- /dev/null +++ b/.travis/tests/04-sunstone.sh @@ -0,0 +1,30 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# start sunstone +sunstone-server start + +# check it's up +RC=`timeout 60 sh -c 'until nc -z $0 $1; do sleep 1; done' localhost 9869` + +echo "Sunstone log" +cat /var/log/one/sunstone.log +echo +echo "Sunstone error log" +cat /var/log/one/sunstone.error +echo "---------" + +exit $RC diff --git a/include/Attribute.h b/include/Attribute.h index 2b88e00cfa..7a12c7c24b 100644 --- a/include/Attribute.h +++ b/include/Attribute.h @@ -83,7 +83,11 @@ public: * by the calling function. * @return a string (allocated in the heap) holding the attribute value. */ - virtual string * to_xml() const = 0; + virtual void to_xml(std::ostringstream& s) const = 0; + + virtual void to_json(std::ostringstream& s) const = 0; + + virtual void to_token(std::ostringstream& s) const = 0; /** * Builds a new attribute from a string. @@ -158,17 +162,31 @@ public: * * attribute_value * - * The string MUST be freed by the calling function. - * @return a string (allocated in the heap) holding the attribute value. + * @paran s the stream to write the attribute. 
*/ - string * to_xml() const + void to_xml(std::ostringstream& s) const { - string * xml = new string; + s << "<" << attribute_name << ">" << one_util::escape_xml(attribute_value) + << ""<< attribute_name << ">"; - *xml = "<" + name() + ">" + one_util::escape_xml(attribute_value) + - ""+ name() + ">"; + } - return xml; + void to_json(std::ostringstream& s) const + { + one_util::escape_json(attribute_value, s); + } + + void to_token(std::ostringstream& s) const + { + if (attribute_name.empty() || attribute_value.empty()) + { + return; + } + + one_util::escape_token(attribute_name, s); + s << "="; + one_util::escape_token(attribute_value, s); + s << std::endl; } /** @@ -350,12 +368,11 @@ public: * The string MUST be freed by the calling function. * @return a string (allocated in the heap) holding the attribute value. */ - string * to_xml() const; + void to_xml(std::ostringstream& s) const; - /** - * Same as above but the attribute is written in an string stream; - */ - void to_xml(ostringstream &oss) const; + void to_json(std::ostringstream& s) const; + + void to_token(std::ostringstream& s) const; /** * Builds a new attribute from a string of the form: diff --git a/include/ExtendedAttribute.h b/include/ExtendedAttribute.h index 31d96caf6f..447e54bdf9 100644 --- a/include/ExtendedAttribute.h +++ b/include/ExtendedAttribute.h @@ -92,9 +92,19 @@ protected: return va->marshall(_sep); }; - string * to_xml() const + void to_xml(std::ostringstream& s) const { - return va->to_xml(); + return va->to_xml(s); + }; + + void to_json(std::ostringstream& s) const + { + return va->to_json(s); + }; + + void to_token(std::ostringstream& s) const + { + return va->to_token(s); }; void unmarshall(const std::string& sattr, const char * _sep = 0) diff --git a/include/History.h b/include/History.h index 7c6f03fac3..3e45417d50 100644 --- a/include/History.h +++ b/include/History.h @@ -258,6 +258,10 @@ private: */ string& to_xml(string& xml, bool database) const; + string& to_json(string& 
json) const; + + string& to_token(string& text) const; + /** * Rebuilds the object from an xml node * @param node The xml node pointer diff --git a/include/LifeCycleManager.h b/include/LifeCycleManager.h index 5f80676b10..1ef784e71d 100644 --- a/include/LifeCycleManager.h +++ b/include/LifeCycleManager.h @@ -310,7 +310,9 @@ private: // ------------------------------------------------------------------------- // Internal Actions, triggered by OpenNebula components & drivers // ------------------------------------------------------------------------- - void start_prolog_migrate(VirtualMachine* vm, int vid); + void start_prolog_migrate(VirtualMachine* vm); + + void revert_migrate_after_failure(VirtualMachine* vm); void save_success_action(int vid); void save_failure_action(int vid); diff --git a/include/LogDB.h b/include/LogDB.h index 43d8f82fe0..1eb46362ed 100644 --- a/include/LogDB.h +++ b/include/LogDB.h @@ -251,6 +251,11 @@ public: int next_federated(int index); + bool fts_available() + { + return db->fts_available(); + } + protected: int exec(std::ostringstream& cmd, Callbackable* obj, bool quiet) { @@ -416,6 +421,11 @@ public: return _logdb->limit_support(); } + bool fts_available() + { + return _logdb->fts_available(); + } + protected: int exec(std::ostringstream& cmd, Callbackable* obj, bool quiet) { diff --git a/include/MySqlDB.h b/include/MySqlDB.h index bd25ee2dca..87f7ef4a79 100644 --- a/include/MySqlDB.h +++ b/include/MySqlDB.h @@ -84,6 +84,11 @@ public: */ bool limit_support(); + /** + * Return true if the backend allows FTS index + */ + bool fts_available(); + protected: /** * Wraps the mysql_query function call @@ -171,6 +176,8 @@ public: bool limit_support(){return true;}; + bool fts_available(){return false;}; + protected: int exec(ostringstream& cmd, Callbackable* obj, bool quiet){return -1;}; }; diff --git a/include/Nebula.h b/include/Nebula.h index 508f9cd31a..01715ce4e9 100644 --- a/include/Nebula.h +++ b/include/Nebula.h @@ -362,7 +362,7 @@ 
public: */ static string code_version() { - return "5.7.85"; // bump version + return "5.7.90"; // bump version } /** @@ -371,7 +371,7 @@ public: */ static string shared_db_version() { - return "5.7.80"; + return "5.6.0"; } /** @@ -578,6 +578,15 @@ public: return nebula_configuration->to_xml(xml); }; + /** + * Gets the database backend type + * @return database backend type + */ + string get_db_backend() const + { + return db_backend_type; + } + // ----------------------------------------------------------------------- // Default Quotas // ----------------------------------------------------------------------- @@ -695,7 +704,7 @@ private: "/DEFAULT_GROUP_QUOTAS/NETWORK_QUOTA", "/DEFAULT_GROUP_QUOTAS/IMAGE_QUOTA", "/DEFAULT_GROUP_QUOTAS/VM_QUOTA"), - system_db(0), logdb(0), fed_logdb(0), + system_db(0), db_backend_type("sqlite"), logdb(0), fed_logdb(0), vmpool(0), hpool(0), vnpool(0), upool(0), ipool(0), gpool(0), tpool(0), dspool(0), clpool(0), docpool(0), zonepool(0), secgrouppool(0), vdcpool(0), vrouterpool(0), marketpool(0), apppool(0), vmgrouppool(0), @@ -819,6 +828,7 @@ private: // --------------------------------------------------------------- SystemDB * system_db; + string db_backend_type; // --------------------------------------------------------------- // Nebula Pools diff --git a/include/NebulaUtil.h b/include/NebulaUtil.h index b02fcd48b3..e350812b50 100644 --- a/include/NebulaUtil.h +++ b/include/NebulaUtil.h @@ -208,6 +208,11 @@ namespace one_util { return escape(v, "'", "'"); } + + void escape_json(const std::string& str, std::ostringstream& s); + + void escape_token(const std::string& str, std::ostringstream& s); + /** * Checks if a strings matches a regular expression * @@ -236,7 +241,8 @@ namespace one_util const std::string& replacement); template - std::set set_intersection(const std::set &first, const std::set &second) + std::set set_intersection(const std::set &first, const std::set + &second) { std::set output; @@ -247,48 +253,48 @@ namespace 
one_util return output; } - /** + /** * Compress the input string unsing zlib * @param in input string * @param bool64 true to base64 encode output * @return pointer to the compressed sting (must be freed) or 0 in case * of error */ - std::string * zlib_compress(const std::string& in, bool base64); + std::string * zlib_compress(const std::string& in, bool base64); - /** + /** * Decompress the input string unsing zlib * @param in input string * @param base64 true if the input is base64 encoded * @return pointer to the decompressed sting (must be freed) or 0 in case * of error */ - std::string * zlib_decompress(const std::string& in, bool base64); + std::string * zlib_decompress(const std::string& in, bool base64); - extern "C" void sslmutex_lock_callback(int mode, int type, char *file, - int line); + extern "C" void sslmutex_lock_callback(int mode, int type, char *file, + int line); - extern "C" unsigned long sslmutex_id_callback(); + extern "C" unsigned long sslmutex_id_callback(); - class SSLMutex - { - public: - static void initialize(); + class SSLMutex + { + public: + static void initialize(); - static void finalize(); + static void finalize(); - private: - friend void sslmutex_lock_callback(int mode, int type, char *file, - int line); + private: + friend void sslmutex_lock_callback(int mode, int type, char *file, + int line); - SSLMutex(); + SSLMutex(); - ~SSLMutex(); + ~SSLMutex(); - static SSLMutex * ssl_mutex; + static SSLMutex * ssl_mutex; - static std::vector vmutex; - }; + static std::vector vmutex; + }; }; #endif /* _NEBULA_UTIL_H_ */ diff --git a/include/PoolSQL.h b/include/PoolSQL.h index a641b2adef..af90515cf1 100644 --- a/include/PoolSQL.h +++ b/include/PoolSQL.h @@ -251,6 +251,35 @@ public: static void oid_filter(int start_id, int end_id, string& filter); + + /** + * This function returns a legal SQL string that can be used in an SQL + * statement. 
The string is encoded to an escaped SQL string, taking into + * account the current character set of the connection. + * @param str the string to be escaped + * @return a valid SQL string or NULL in case of failure + */ + char * escape_str(const string& str) + { + return db->escape_str(str); + } + + /** + * Frees a previously scaped string + * @param str pointer to the str + */ + void free_str(char * str) + { + db->free_str(str); + } + + /** + * Return true if FTS is available. + */ + bool is_fts_available() + { + return db->fts_available(); + } protected: /** diff --git a/include/RequestManagerChmod.h b/include/RequestManagerChmod.h index ba304df443..53c19a2593 100644 --- a/include/RequestManagerChmod.h +++ b/include/RequestManagerChmod.h @@ -103,7 +103,7 @@ class VirtualNetworkTemplateChmod : public RequestManagerChmod public: VirtualNetworkTemplateChmod(): RequestManagerChmod("one.vntemplate.chmod", "Changes permission bits of a " - "virtual network template", "A:siiiiiiiiiib") + "virtual network template") { Nebula& nd = Nebula::instance(); pool = nd.get_vntpool(); diff --git a/include/RequestManagerClone.h b/include/RequestManagerClone.h index 3430b6914a..eccab5a0f2 100644 --- a/include/RequestManagerClone.h +++ b/include/RequestManagerClone.h @@ -132,7 +132,7 @@ class VNTemplateClone : public RequestManagerClone public: VNTemplateClone(): RequestManagerClone("one.vntemplate.clone", - "Clone a virtual network template", "A:sisb") + "Clone a virtual network template", "A:sis") { Nebula& nd = Nebula::instance(); pool = nd.get_vntpool(); @@ -144,9 +144,9 @@ public: ~VNTemplateClone(){}; ErrorCode request_execute(int source_id, const string &name, int &new_id, - bool recursive, const string& s_uattrs, RequestAttributes& att) + const string& s_uattrs, RequestAttributes& att) { - return clone(source_id, name, new_id, recursive, s_uattrs, att); + return clone(source_id, name, new_id, false, s_uattrs, att); }; protected: diff --git a/include/RequestManagerDelete.h 
b/include/RequestManagerDelete.h index b32dc045ec..a8efea092e 100644 --- a/include/RequestManagerDelete.h +++ b/include/RequestManagerDelete.h @@ -121,7 +121,7 @@ class VirtualNetworkTemplateDelete : public RequestManagerDelete public: VirtualNetworkTemplateDelete(): RequestManagerDelete("one.vntemplate.delete", - "A:sib", + "A:si", "Deletes a virtual network template") { Nebula& nd = Nebula::instance(); @@ -133,7 +133,7 @@ public: ErrorCode request_execute(int oid, bool recursive, RequestAttributes& att) { - return delete_object(oid, recursive, att, auth_op); + return delete_object(oid, false, att, auth_op); } }; diff --git a/include/RequestManagerPoolInfoFilter.h b/include/RequestManagerPoolInfoFilter.h index 247643cd6b..1e30a9eb90 100644 --- a/include/RequestManagerPoolInfoFilter.h +++ b/include/RequestManagerPoolInfoFilter.h @@ -113,7 +113,7 @@ public: VirtualMachinePoolInfo(): RequestManagerPoolInfoFilter("one.vmpool.info", "Returns the virtual machine instances pool", - "A:siiii") + "A:siiiis") { Nebula& nd = Nebula::instance(); pool = nd.get_vmpool(); diff --git a/include/SqlDB.h b/include/SqlDB.h index 9c78c8887e..64b8a4b3b8 100644 --- a/include/SqlDB.h +++ b/include/SqlDB.h @@ -96,6 +96,11 @@ public: */ virtual bool limit_support() = 0; + /** + * Return true if the backend allows FTS index + */ + virtual bool fts_available() = 0; + protected: /** * Performs a DB transaction diff --git a/include/SqliteDB.h b/include/SqliteDB.h index b46f4ec356..13c2974cca 100644 --- a/include/SqliteDB.h +++ b/include/SqliteDB.h @@ -79,6 +79,10 @@ public: */ bool limit_support(); + bool fts_available() + { + return false; + } protected: /** * Wraps the sqlite3_exec function call, and locks the DB mutex. 
diff --git a/include/Template.h b/include/Template.h index dac30fd7e7..aa086d671b 100644 --- a/include/Template.h +++ b/include/Template.h @@ -170,6 +170,10 @@ public: */ string& to_xml(string& xml) const; + string& to_json(string& xml) const; + + string& to_token(string& xml) const; + /** * Writes the template in a plain text string * @param str string that hold the template representation diff --git a/include/VirtualMachine.h b/include/VirtualMachine.h index 42f21dfebe..cbf67c4a6a 100644 --- a/include/VirtualMachine.h +++ b/include/VirtualMachine.h @@ -984,6 +984,14 @@ public: // ------------------------------------------------------------------------ // Timers & Requirements // ------------------------------------------------------------------------ + /** + * @return time when the VM was created (in epoch) + */ + time_t get_stime() const + { + return stime; + }; + /** * Gets time from last information polling. * @return time of last poll (epoch) or 0 if never polled @@ -1274,8 +1282,7 @@ public: * @param uid for template owner * @param ar the AuthRequest object * @param tmpl the virtual machine template - * @param - * lock for check if the resource is lock or not + * @param check_lock for check if the resource is lock or not */ static void set_auth_request(int uid, AuthRequest& ar, VirtualMachineTemplate *tmpl, bool check_lock); @@ -1884,6 +1891,10 @@ private: */ string& to_xml_extended(string& xml, int n_history) const; + string& to_json(string& json) const; + + string& to_token(string& text) const; + // ------------------------------------------------------------------------- // Attribute Parser // ------------------------------------------------------------------------- diff --git a/include/VirtualMachineManager.h b/include/VirtualMachineManager.h index 387116a602..1a61253d42 100644 --- a/include/VirtualMachineManager.h +++ b/include/VirtualMachineManager.h @@ -188,6 +188,21 @@ public: return vmd->is_keep_snapshots(); } + /** + * Returns a pointer to a 
Virtual Machine Manager driver. The driver is + * searched by its name. + * @param name the name of the driver + * @return the VM driver owned by uid with attribute name equal to value + * or 0 in not found + */ + const VirtualMachineManagerDriver * get( + const string& name) + { + string _name("NAME"); + return static_cast + (MadManager::get(0,_name,name)); + }; + private: /** * Thread id for the Virtual Machine Manager @@ -256,21 +271,6 @@ private: (MadManager::get(0,name,value)); }; - /** - * Returns a pointer to a Virtual Machine Manager driver. The driver is - * searched by its name. - * @param name the name of the driver - * @return the VM driver owned by uid with attribute name equal to value - * or 0 in not found - */ - const VirtualMachineManagerDriver * get( - const string& name) - { - string _name("NAME"); - return static_cast - (MadManager::get(0,_name,name)); - }; - // ------------------------------------------------------------------------- // Action Listener interface // ------------------------------------------------------------------------- diff --git a/include/VirtualMachineManagerDriver.h b/include/VirtualMachineManagerDriver.h index 59fb6ef80d..d6f8de6026 100644 --- a/include/VirtualMachineManagerDriver.h +++ b/include/VirtualMachineManagerDriver.h @@ -103,6 +103,14 @@ public: return keep_snapshots; } + /** + * @return true if datastore live migration + */ + bool is_ds_live_migration() const + { + return ds_live_migration; + } + protected: /** * Gets a configuration attr from driver configuration file (single @@ -159,6 +167,11 @@ private: */ bool keep_snapshots; + /** + * Set to true if live migration between datastores is allowed. 
+ */ + bool ds_live_migration; + /** * Pointer to the Virtual Machine Pool, to access VMs */ diff --git a/install.sh b/install.sh index a14c090816..f24ec12280 100755 --- a/install.sh +++ b/install.sh @@ -219,6 +219,7 @@ fi SHARE_DIRS="$SHARE_LOCATION/examples \ $SHARE_LOCATION/websockify \ + $SHARE_LOCATION/websockify/websockify \ $SHARE_LOCATION/esx-fw-vnc \ $SHARE_LOCATION/oneprovision" @@ -253,6 +254,7 @@ LIB_DIRS="$LIB_LOCATION/ruby \ VAR_DIRS="$VAR_LOCATION/remotes \ $VAR_LOCATION/remotes/etc \ + $VAR_LOCATION/remotes/etc/tm/fs_lvm \ $VAR_LOCATION/remotes/etc/datastore/ceph \ $VAR_LOCATION/remotes/etc/im/kvm-probes.d \ $VAR_LOCATION/remotes/etc/im/lxd-probes.d \ @@ -445,6 +447,7 @@ INSTALL_FILES=( TM_FILES:$VAR_LOCATION/remotes/tm TM_SHARED_FILES:$VAR_LOCATION/remotes/tm/shared TM_FS_LVM_FILES:$VAR_LOCATION/remotes/tm/fs_lvm + TM_FS_LVM_ETC_FILES:$VAR_LOCATION/remotes/etc/tm/fs_lvm/fs_lvm.conf TM_QCOW2_FILES:$VAR_LOCATION/remotes/tm/qcow2 TM_SSH_FILES:$VAR_LOCATION/remotes/tm/ssh TM_CEPH_FILES:$VAR_LOCATION/remotes/tm/ceph @@ -477,7 +480,8 @@ INSTALL_FILES=( NETWORK_OVSWITCH_VXLAN_FILES:$VAR_LOCATION/remotes/vnm/ovswitch_vxlan NETWORK_VCENTER_FILES:$VAR_LOCATION/remotes/vnm/vcenter EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples - WEBSOCKIFY_SHARE_FILES:$SHARE_LOCATION/websockify + WEBSOCKIFY_SHARE_RUN_FILES:$SHARE_LOCATION/websockify + WEBSOCKIFY_SHARE_MODULE_FILES:$SHARE_LOCATION/websockify/websockify ESX_FW_VNC_SHARE_FILES:$SHARE_LOCATION/esx-fw-vnc INSTALL_GEMS_SHARE_FILES:$SHARE_LOCATION ONETOKEN_SHARE_FILE:$SHARE_LOCATION @@ -999,6 +1003,7 @@ IM_PROBES_LXD_PROBES_FILES="src/im_mad/remotes/lxd-probes.d/lxd.rb \ src/im_mad/remotes/lxd-probes.d/pci.rb \ src/im_mad/remotes/lxd-probes.d/monitor_ds.sh \ src/im_mad/remotes/lxd-probes.d/version.sh \ + src/im_mad/remotes/lxd-probes.d/profiles.sh \ src/im_mad/remotes/lxd-probes.d/collectd-client-shepherd.sh" IM_PROBES_LXD_FILES="src/im_mad/remotes/lxd.d/collectd-client_control.sh \ @@ -1171,6 +1176,9 @@ 
TM_FS_LVM_FILES="src/tm_mad/fs_lvm/activate \ src/tm_mad/fs_lvm/delete \ src/tm_mad/fs_lvm/resize" +TM_FS_LVM_ETC_FILES="src/tm_mad/fs_lvm/fs_lvm.conf" + + TM_QCOW2_FILES="src/tm_mad/qcow2/clone \ src/tm_mad/qcow2/delete \ src/tm_mad/qcow2/ln \ @@ -1457,8 +1465,7 @@ ONEDB_SHARED_MIGRATOR_FILES="src/onedb/shared/2.0_to_2.9.80.rb \ src/onedb/shared/5.3.80_to_5.4.0.rb \ src/onedb/shared/5.4.0_to_5.4.1.rb \ src/onedb/shared/5.4.1_to_5.5.80.rb \ - src/onedb/shared/5.5.80_to_5.6.0.rb \ - src/onedb/shared/5.6.0_to_5.7.80.rb" + src/onedb/shared/5.5.80_to_5.6.0.rb" ONEDB_LOCAL_MIGRATOR_FILES="src/onedb/local/4.5.80_to_4.7.80.rb \ src/onedb/local/4.7.80_to_4.9.80.rb \ @@ -1531,9 +1538,12 @@ EXAMPLE_SHARE_FILES="share/examples/vm.template \ # Files required to interact with the websockify server #------------------------------------------------------------------------------- -WEBSOCKIFY_SHARE_FILES="share/websockify/websocketproxy.py \ - share/websockify/websocket.py \ - share/websockify/websockify" +WEBSOCKIFY_SHARE_RUN_FILES="share/websockify/run" +WEBSOCKIFY_SHARE_MODULE_FILES="share/websockify/websockify/__init__.py \ + share/websockify/websockify/auth_plugins.py \ + share/websockify/websockify/token_plugins.py \ + share/websockify/websockify/websocket.py \ + share/websockify/websockify/websocketproxy.py" #------------------------------------------------------------------------------- # Installation packages for ESX hosts to enable VNC ports diff --git a/share/doc/xsd/vm.xsd b/share/doc/xsd/vm.xsd index f533e34c3c..948ec8ec3c 100644 --- a/share/doc/xsd/vm.xsd +++ b/share/doc/xsd/vm.xsd @@ -27,15 +27,10 @@ - - - diff --git a/share/etc/oned.conf b/share/etc/oned.conf index c1055fafc5..6b6c8abe82 100644 --- a/share/etc/oned.conf +++ b/share/etc/oned.conf @@ -566,7 +566,7 @@ VM_MAD = [ ARGUMENTS = "-t 15 -r 0 kvm", DEFAULT = "vmm_exec/vmm_exec_kvm.conf", TYPE = "kvm", - KEEP_SNAPSHOTS = "no", + KEEP_SNAPSHOTS = "yes", IMPORTED_VMS_ACTIONS = "terminate, terminate-hard, hold, 
release, suspend, resume, delete, reboot, reboot-hard, resched, unresched, disk-attach, disk-detach, nic-attach, nic-detach, snapshot-create, snapshot-delete" @@ -611,13 +611,14 @@ VM_MAD = [ # -w Timeout in seconds to execute external commands (default unlimited) #------------------------------------------------------------------------------- VM_MAD = [ - NAME = "vcenter", - SUNSTONE_NAME = "VMWare vCenter", - EXECUTABLE = "one_vmm_sh", - ARGUMENTS = "-p -t 15 -r 0 vcenter -s sh", - DEFAULT = "vmm_exec/vmm_exec_vcenter.conf", - TYPE = "xml", - KEEP_SNAPSHOTS = "yes", + NAME = "vcenter", + SUNSTONE_NAME = "VMWare vCenter", + EXECUTABLE = "one_vmm_sh", + ARGUMENTS = "-p -t 15 -r 0 vcenter -s sh", + DEFAULT = "vmm_exec/vmm_exec_vcenter.conf", + TYPE = "xml", + KEEP_SNAPSHOTS = "yes", + DS_LIVE_MIGRATION = "yes", IMPORTED_VMS_ACTIONS = "terminate, terminate-hard, hold, release, suspend, resume, delete, reboot, reboot-hard, resched, unresched, poweroff, poweroff-hard, disk-attach, disk-detach, nic-attach, nic-detach, @@ -1307,7 +1308,7 @@ TM_MAD_CONF = [ TM_MAD_CONF = [ NAME = "ceph", LN_TARGET = "NONE", CLONE_TARGET = "SELF", SHARED = "YES", DS_MIGRATE = "NO", DRIVER = "raw", ALLOW_ORPHANS="mixed", - TM_MAD_SYSTEM = "ssh", LN_TARGET_SSH = "SYSTEM", CLONE_TARGET_SSH = "SYSTEM", + TM_MAD_SYSTEM = "ssh,shared", LN_TARGET_SSH = "SYSTEM", CLONE_TARGET_SSH = "SYSTEM", DISK_TYPE_SSH = "FILE", TM_MAD_SYSTEM = "shared", LN_TARGET_SHARED = "NONE", CLONE_TARGET_SHARED = "SELF", DISK_TYPE_SHARED = "RBD" ] diff --git a/share/install_gems/CentOS7/Gemfile.lock b/share/install_gems/CentOS7/Gemfile.lock index 8d9f639072..dc432bd824 100644 --- a/share/install_gems/CentOS7/Gemfile.lock +++ b/share/install_gems/CentOS7/Gemfile.lock @@ -1,23 +1,23 @@ GEM remote: https://rubygems.org/ specs: - activesupport (4.2.10) + activesupport (4.2.11) i18n (~> 0.7) minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - addressable (2.5.2) + addressable (2.6.0) public_suffix (>= 2.0.2, < 
4.0) amazon-ec2 (0.9.17) xml-simple (>= 1.0.12) - aws-sdk (2.11.39) - aws-sdk-resources (= 2.11.39) - aws-sdk-core (2.11.39) + aws-sdk (2.11.212) + aws-sdk-resources (= 2.11.212) + aws-sdk-core (2.11.212) aws-sigv4 (~> 1.0) jmespath (~> 1.0) - aws-sdk-resources (2.11.39) - aws-sdk-core (= 2.11.39) - aws-sigv4 (1.0.2) + aws-sdk-resources (2.11.212) + aws-sdk-core (= 2.11.212) + aws-sigv4 (1.0.3) azure (0.7.10) addressable (~> 2.3) azure-core (~> 0.1) @@ -27,46 +27,46 @@ GEM nokogiri (~> 1.6) systemu (~> 2.6) thor (~> 0.19) - azure-core (0.1.14) + azure-core (0.1.15) faraday (~> 0.9) faraday_middleware (~> 0.10) nokogiri (~> 1.6) builder (3.2.3) - concurrent-ruby (1.1.3) + concurrent-ruby (1.1.4) configparser (0.1.7) - curb (0.9.4) - daemons (1.2.6) - eventmachine (1.2.5) - faraday (0.15.0) + curb (0.9.8) + daemons (1.3.1) + eventmachine (1.2.7) + faraday (0.15.4) multipart-post (>= 1.2, < 3) - faraday_middleware (0.12.2) + faraday_middleware (0.13.0) faraday (>= 0.7.4, < 1.0) - hashie (3.5.7) + hashie (3.6.0) highline (1.7.10) i18n (0.9.5) concurrent-ruby (~> 1.0) inflection (1.0.0) jmespath (1.4.0) memcache-client (1.8.5) - mime-types (3.1) + mime-types (3.2.2) mime-types-data (~> 3.2015) - mime-types-data (3.2016.0521) + mime-types-data (3.2018.0812) mini_portile2 (2.1.0) minitest (5.11.3) multipart-post (2.0.0) - mysql2 (0.5.1) + mysql2 (0.5.2) net-ldap (0.16.1) nokogiri (1.6.8.1) mini_portile2 (~> 2.1.0) - ox (2.9.2) + ox (2.10.0) parse-cron (0.1.4) polyglot (0.3.5) public_suffix (2.0.5) - rack (1.6.10) + rack (1.6.11) rack-protection (1.5.5) rack scrub_rb (1.0.1) - sequel (5.7.1) + sequel (5.17.0) sinatra (1.4.8) rack (~> 1.5) rack-protection (~> 1.4) @@ -77,12 +77,12 @@ GEM daemons (~> 1.0, >= 1.0.9) eventmachine (~> 1.0, >= 1.0.4) rack (>= 1, < 3) - thor (0.20.0) + thor (0.20.3) thread_safe (0.3.6) - tilt (2.0.8) + tilt (2.0.9) treetop (1.6.10) polyglot (~> 0.3) - trollop (2.1.2) + trollop (2.9.9) tzinfo (1.2.5) thread_safe (~> 0.1) uuidtools (2.1.5) @@ 
-112,7 +112,7 @@ DEPENDENCIES memcache-client mysql2 net-ldap - nokogiri + nokogiri (< 1.7) ox parse-cron public_suffix (< 3.0.0) @@ -128,4 +128,4 @@ DEPENDENCIES zendesk_api BUNDLED WITH - 1.17.1 + 1.11.2 diff --git a/share/install_gems/Debian9/Gemfile.lock b/share/install_gems/Debian9/Gemfile.lock index d7e8260887..4a1f682029 100644 --- a/share/install_gems/Debian9/Gemfile.lock +++ b/share/install_gems/Debian9/Gemfile.lock @@ -1,23 +1,23 @@ GEM remote: https://rubygems.org/ specs: - activesupport (4.2.10) + activesupport (4.2.11) i18n (~> 0.7) minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - addressable (2.5.2) + addressable (2.6.0) public_suffix (>= 2.0.2, < 4.0) amazon-ec2 (0.9.17) xml-simple (>= 1.0.12) - aws-sdk (2.11.39) - aws-sdk-resources (= 2.11.39) - aws-sdk-core (2.11.39) + aws-sdk (2.11.212) + aws-sdk-resources (= 2.11.212) + aws-sdk-core (2.11.212) aws-sigv4 (~> 1.0) jmespath (~> 1.0) - aws-sdk-resources (2.11.39) - aws-sdk-core (= 2.11.39) - aws-sigv4 (1.0.2) + aws-sdk-resources (2.11.212) + aws-sdk-core (= 2.11.212) + aws-sigv4 (1.0.3) azure (0.7.10) addressable (~> 2.3) azure-core (~> 0.1) @@ -27,61 +27,63 @@ GEM nokogiri (~> 1.6) systemu (~> 2.6) thor (~> 0.19) - azure-core (0.1.14) + azure-core (0.1.15) faraday (~> 0.9) faraday_middleware (~> 0.10) nokogiri (~> 1.6) builder (3.2.3) - concurrent-ruby (1.1.3) + concurrent-ruby (1.1.4) configparser (0.1.7) - curb (0.9.4) - daemons (1.2.6) - eventmachine (1.2.5) - faraday (0.15.0) + curb (0.9.8) + daemons (1.3.1) + eventmachine (1.2.7) + faraday (0.15.4) multipart-post (>= 1.2, < 3) - faraday_middleware (0.12.2) + faraday_middleware (0.13.0) faraday (>= 0.7.4, < 1.0) - hashie (3.5.7) + hashie (3.6.0) highline (1.7.10) i18n (0.9.5) concurrent-ruby (~> 1.0) inflection (1.0.0) jmespath (1.4.0) memcache-client (1.8.5) - mime-types (3.1) + mime-types (3.2.2) mime-types-data (~> 3.2015) - mime-types-data (3.2016.0521) - mini_portile2 (2.3.0) + mime-types-data (3.2018.0812) + mini_portile2 
(2.4.0) minitest (5.11.3) multipart-post (2.0.0) - mysql2 (0.5.1) + mustermann (1.0.3) + mysql2 (0.5.2) net-ldap (0.16.1) - nokogiri (1.8.2) - mini_portile2 (~> 2.3.0) - ox (2.9.2) + nokogiri (1.10.1) + mini_portile2 (~> 2.4.0) + ox (2.10.0) parse-cron (0.1.4) polyglot (0.3.5) - public_suffix (3.0.2) - rack (1.6.10) - rack-protection (1.5.5) + public_suffix (3.0.3) + rack (2.0.6) + rack-protection (2.0.5) rack - sequel (5.7.1) - sinatra (1.4.8) - rack (~> 1.5) - rack-protection (~> 1.4) - tilt (>= 1.3, < 3) + sequel (5.17.0) + sinatra (2.0.5) + mustermann (~> 1.0) + rack (~> 2.0) + rack-protection (= 2.0.5) + tilt (~> 2.0) sqlite3 (1.3.13) systemu (2.6.5) thin (1.7.2) daemons (~> 1.0, >= 1.0.9) eventmachine (~> 1.0, >= 1.0.4) rack (>= 1, < 3) - thor (0.20.0) + thor (0.20.3) thread_safe (0.3.6) - tilt (2.0.8) + tilt (2.0.9) treetop (1.6.10) polyglot (~> 0.3) - trollop (2.1.2) + trollop (2.9.9) tzinfo (1.2.5) thread_safe (~> 0.1) uuidtools (2.1.5) @@ -114,7 +116,8 @@ DEPENDENCIES nokogiri ox parse-cron - rack (< 2.0.0) + public_suffix + rack sequel sinatra sqlite3 @@ -125,4 +128,4 @@ DEPENDENCIES zendesk_api BUNDLED WITH - 1.17.1 + 1.11.2 diff --git a/share/install_gems/Gemfile b/share/install_gems/Gemfile index 4890692cf0..e03ab46bb1 100644 --- a/share/install_gems/Gemfile +++ b/share/install_gems/Gemfile @@ -1,17 +1,34 @@ source 'https://rubygems.org' -if RUBY_VERSION < '1.9.0' - gem 'nokogiri', '< 1.6.0' - gem 'net-ldap', '< 0.9' - gem 'zendesk_api', '< 1.5' +if RUBY_VERSION < '2.1.0' + gem 'nokogiri', '< 1.7' +elsif RUBY_VERSION < '2.3.0' + gem 'nokogiri', '< 1.10' else gem 'nokogiri' - gem 'net-ldap' - gem 'zendesk_api' end -if RUBY_VERSION >= '2.0.0' and RUBY_VERSION < '2.1.0' +if RUBY_VERSION < '2.0.0' + gem 'net-ldap', '< 0.13' # auth + gem 'mysql2', '< 0.5.0' # onedb + gem 'mime-types', '< 3.0' # hybrid (azure) +else + gem 'net-ldap' # auth + gem 'mysql2' # onedb +end + +if RUBY_VERSION < '2.0.0' + gem 'public_suffix', '< 1.5.0' +elsif RUBY_VERSION < '2.1.0' 
gem 'public_suffix', '< 3.0.0' +else + gem 'public_suffix' +end + +if RUBY_VERSION < '2.2.0' + gem 'rack', '< 2.0.0' # sunstone, cloud, oneflow +else + gem 'rack' # sunstone, cloud, oneflow end if RUBY_VERSION >= '2.4.0' @@ -27,8 +44,8 @@ gem 'treetop', '>= 1.6.3' # oneflow gem 'sequel' # quota, oneb gem 'sinatra' # sunstone, cloud, oneflow gem 'thin' # sunstone, cloud -gem 'rack', '< 2.0.0' # sunstone, cloud, oneflow gem 'memcache-client' # sunstone +gem 'zendesk_api' # sunstone gem 'amazon-ec2' # cloud gem 'uuidtools' # cloud gem 'curb' # cloud @@ -39,7 +56,6 @@ gem 'trollop' # vmware gem 'parse-cron' # oneflow gem 'aws-sdk', '~> 2.5' # ec2_hybrid gem 'ox' # oca -gem 'mysql2' # onedb gem 'highline', '~> 1.7' # oneprovision gem 'faraday', '~> 0.15' # packethost gem 'faraday_middleware', '~> 0.12' # packethost diff --git a/share/install_gems/Ubuntu1404/Gemfile.lock b/share/install_gems/Ubuntu1404/Gemfile.lock index ded78f8dbf..f6098aedee 100644 --- a/share/install_gems/Ubuntu1404/Gemfile.lock +++ b/share/install_gems/Ubuntu1404/Gemfile.lock @@ -1,7 +1,7 @@ GEM remote: https://rubygems.org/ specs: - activesupport (4.2.10) + activesupport (4.2.11) i18n (~> 0.7) minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) @@ -9,14 +9,14 @@ GEM addressable (2.4.0) amazon-ec2 (0.9.17) xml-simple (>= 1.0.12) - aws-sdk (2.11.39) - aws-sdk-resources (= 2.11.39) - aws-sdk-core (2.11.39) + aws-sdk (2.11.212) + aws-sdk-resources (= 2.11.212) + aws-sdk-core (2.11.212) aws-sigv4 (~> 1.0) jmespath (~> 1.0) - aws-sdk-resources (2.11.39) - aws-sdk-core (= 2.11.39) - aws-sigv4 (1.0.2) + aws-sdk-resources (2.11.212) + aws-sdk-core (= 2.11.212) + aws-sigv4 (1.0.3) azure (0.7.10) addressable (~> 2.3) azure-core (~> 0.1) @@ -26,21 +26,21 @@ GEM nokogiri (~> 1.6) systemu (~> 2.6) thor (~> 0.19) - azure-core (0.1.14) + azure-core (0.1.15) faraday (~> 0.9) faraday_middleware (~> 0.10) nokogiri (~> 1.6) builder (3.2.3) - concurrent-ruby (1.1.3) + concurrent-ruby (1.1.4) configparser (0.1.7) - curb 
(0.9.4) - daemons (1.2.6) - eventmachine (1.2.5) - faraday (0.15.0) + curb (0.9.8) + daemons (1.3.1) + eventmachine (1.2.7) + faraday (0.15.4) multipart-post (>= 1.2, < 3) - faraday_middleware (0.12.2) + faraday_middleware (0.13.0) faraday (>= 0.7.4, < 1.0) - hashie (3.5.7) + hashie (3.6.0) highline (1.7.10) i18n (0.9.5) concurrent-ruby (~> 1.0) @@ -55,14 +55,15 @@ GEM net-ldap (0.12.1) nokogiri (1.6.8.1) mini_portile2 (~> 2.1.0) - ox (2.9.2) + ox (2.10.0) parse-cron (0.1.4) polyglot (0.3.5) - rack (1.6.10) + public_suffix (1.4.6) + rack (1.6.11) rack-protection (1.5.5) rack scrub_rb (1.0.1) - sequel (5.7.1) + sequel (5.17.0) sinatra (1.4.8) rack (~> 1.5) rack-protection (~> 1.4) @@ -73,12 +74,12 @@ GEM daemons (~> 1.0, >= 1.0.9) eventmachine (~> 1.0, >= 1.0.4) rack (>= 1, < 3) - thor (0.20.0) + thor (0.20.3) thread_safe (0.3.6) - tilt (2.0.8) + tilt (2.0.9) treetop (1.6.10) polyglot (~> 0.3) - trollop (2.1.2) + trollop (2.9.9) tzinfo (1.2.5) thread_safe (~> 0.1) uuidtools (2.1.5) @@ -106,11 +107,13 @@ DEPENDENCIES highline (~> 1.7) i18n (~> 0.9) memcache-client - mysql2 - net-ldap - nokogiri + mime-types (< 3.0) + mysql2 (< 0.5.0) + net-ldap (< 0.13) + nokogiri (< 1.7) ox parse-cron + public_suffix (< 1.5.0) rack (< 2.0.0) scrub_rb sequel @@ -123,4 +126,4 @@ DEPENDENCIES zendesk_api BUNDLED WITH - 1.17.1 + 1.11.2 diff --git a/share/install_gems/Ubuntu1604/Gemfile.lock b/share/install_gems/Ubuntu1604/Gemfile.lock index d7e8260887..4a1f682029 100644 --- a/share/install_gems/Ubuntu1604/Gemfile.lock +++ b/share/install_gems/Ubuntu1604/Gemfile.lock @@ -1,23 +1,23 @@ GEM remote: https://rubygems.org/ specs: - activesupport (4.2.10) + activesupport (4.2.11) i18n (~> 0.7) minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - addressable (2.5.2) + addressable (2.6.0) public_suffix (>= 2.0.2, < 4.0) amazon-ec2 (0.9.17) xml-simple (>= 1.0.12) - aws-sdk (2.11.39) - aws-sdk-resources (= 2.11.39) - aws-sdk-core (2.11.39) + aws-sdk (2.11.212) + aws-sdk-resources (= 
2.11.212) + aws-sdk-core (2.11.212) aws-sigv4 (~> 1.0) jmespath (~> 1.0) - aws-sdk-resources (2.11.39) - aws-sdk-core (= 2.11.39) - aws-sigv4 (1.0.2) + aws-sdk-resources (2.11.212) + aws-sdk-core (= 2.11.212) + aws-sigv4 (1.0.3) azure (0.7.10) addressable (~> 2.3) azure-core (~> 0.1) @@ -27,61 +27,63 @@ GEM nokogiri (~> 1.6) systemu (~> 2.6) thor (~> 0.19) - azure-core (0.1.14) + azure-core (0.1.15) faraday (~> 0.9) faraday_middleware (~> 0.10) nokogiri (~> 1.6) builder (3.2.3) - concurrent-ruby (1.1.3) + concurrent-ruby (1.1.4) configparser (0.1.7) - curb (0.9.4) - daemons (1.2.6) - eventmachine (1.2.5) - faraday (0.15.0) + curb (0.9.8) + daemons (1.3.1) + eventmachine (1.2.7) + faraday (0.15.4) multipart-post (>= 1.2, < 3) - faraday_middleware (0.12.2) + faraday_middleware (0.13.0) faraday (>= 0.7.4, < 1.0) - hashie (3.5.7) + hashie (3.6.0) highline (1.7.10) i18n (0.9.5) concurrent-ruby (~> 1.0) inflection (1.0.0) jmespath (1.4.0) memcache-client (1.8.5) - mime-types (3.1) + mime-types (3.2.2) mime-types-data (~> 3.2015) - mime-types-data (3.2016.0521) - mini_portile2 (2.3.0) + mime-types-data (3.2018.0812) + mini_portile2 (2.4.0) minitest (5.11.3) multipart-post (2.0.0) - mysql2 (0.5.1) + mustermann (1.0.3) + mysql2 (0.5.2) net-ldap (0.16.1) - nokogiri (1.8.2) - mini_portile2 (~> 2.3.0) - ox (2.9.2) + nokogiri (1.10.1) + mini_portile2 (~> 2.4.0) + ox (2.10.0) parse-cron (0.1.4) polyglot (0.3.5) - public_suffix (3.0.2) - rack (1.6.10) - rack-protection (1.5.5) + public_suffix (3.0.3) + rack (2.0.6) + rack-protection (2.0.5) rack - sequel (5.7.1) - sinatra (1.4.8) - rack (~> 1.5) - rack-protection (~> 1.4) - tilt (>= 1.3, < 3) + sequel (5.17.0) + sinatra (2.0.5) + mustermann (~> 1.0) + rack (~> 2.0) + rack-protection (= 2.0.5) + tilt (~> 2.0) sqlite3 (1.3.13) systemu (2.6.5) thin (1.7.2) daemons (~> 1.0, >= 1.0.9) eventmachine (~> 1.0, >= 1.0.4) rack (>= 1, < 3) - thor (0.20.0) + thor (0.20.3) thread_safe (0.3.6) - tilt (2.0.8) + tilt (2.0.9) treetop (1.6.10) 
polyglot (~> 0.3) - trollop (2.1.2) + trollop (2.9.9) tzinfo (1.2.5) thread_safe (~> 0.1) uuidtools (2.1.5) @@ -114,7 +116,8 @@ DEPENDENCIES nokogiri ox parse-cron - rack (< 2.0.0) + public_suffix + rack sequel sinatra sqlite3 @@ -125,4 +128,4 @@ DEPENDENCIES zendesk_api BUNDLED WITH - 1.17.1 + 1.11.2 diff --git a/share/install_gems/Ubuntu1804/Gemfile.lock b/share/install_gems/Ubuntu1804/Gemfile.lock index ddecc8bbc8..affde1d116 100644 --- a/share/install_gems/Ubuntu1804/Gemfile.lock +++ b/share/install_gems/Ubuntu1804/Gemfile.lock @@ -1,23 +1,23 @@ GEM remote: https://rubygems.org/ specs: - activesupport (4.2.10) + activesupport (4.2.11) i18n (~> 0.7) minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - addressable (2.5.2) + addressable (2.6.0) public_suffix (>= 2.0.2, < 4.0) amazon-ec2 (0.9.17) xml-simple (>= 1.0.12) - aws-sdk (2.11.39) - aws-sdk-resources (= 2.11.39) - aws-sdk-core (2.11.39) + aws-sdk (2.11.212) + aws-sdk-resources (= 2.11.212) + aws-sdk-core (2.11.212) aws-sigv4 (~> 1.0) jmespath (~> 1.0) - aws-sdk-resources (2.11.39) - aws-sdk-core (= 2.11.39) - aws-sigv4 (1.0.2) + aws-sdk-resources (2.11.212) + aws-sdk-core (= 2.11.212) + aws-sigv4 (1.0.3) azure (0.7.10) addressable (~> 2.3) azure-core (~> 0.1) @@ -27,61 +27,63 @@ GEM nokogiri (~> 1.6) systemu (~> 2.6) thor (~> 0.19) - azure-core (0.1.14) + azure-core (0.1.15) faraday (~> 0.9) faraday_middleware (~> 0.10) nokogiri (~> 1.6) builder (3.2.3) - concurrent-ruby (1.1.3) + concurrent-ruby (1.1.4) configparser (0.1.7) - curb (0.9.4) - daemons (1.2.6) - eventmachine (1.2.5) - faraday (0.15.0) + curb (0.9.8) + daemons (1.3.1) + eventmachine (1.2.7) + faraday (0.15.4) multipart-post (>= 1.2, < 3) - faraday_middleware (0.12.2) + faraday_middleware (0.13.0) faraday (>= 0.7.4, < 1.0) - hashie (3.5.7) + hashie (3.6.0) highline (1.7.10) i18n (0.9.5) concurrent-ruby (~> 1.0) inflection (1.0.0) jmespath (1.4.0) memcache-client (1.8.5) - mime-types (3.1) + mime-types (3.2.2) mime-types-data (~> 
3.2015) - mime-types-data (3.2016.0521) - mini_portile2 (2.3.0) + mime-types-data (3.2018.0812) + mini_portile2 (2.4.0) minitest (5.11.3) multipart-post (2.0.0) - mysql2 (0.5.1) + mustermann (1.0.3) + mysql2 (0.5.2) net-ldap (0.16.1) - nokogiri (1.8.2) - mini_portile2 (~> 2.3.0) - ox (2.9.2) + nokogiri (1.10.1) + mini_portile2 (~> 2.4.0) + ox (2.10.0) parse-cron (0.1.4) polyglot (0.3.5) - public_suffix (3.0.2) - rack (1.6.10) - rack-protection (1.5.5) + public_suffix (3.0.3) + rack (2.0.6) + rack-protection (2.0.5) rack - sequel (5.7.1) - sinatra (1.4.8) - rack (~> 1.5) - rack-protection (~> 1.4) - tilt (>= 1.3, < 3) + sequel (5.17.0) + sinatra (2.0.5) + mustermann (~> 1.0) + rack (~> 2.0) + rack-protection (= 2.0.5) + tilt (~> 2.0) sqlite3 (1.3.13) systemu (2.6.5) thin (1.7.2) daemons (~> 1.0, >= 1.0.9) eventmachine (~> 1.0, >= 1.0.4) rack (>= 1, < 3) - thor (0.20.0) + thor (0.20.3) thread_safe (0.3.6) - tilt (2.0.8) + tilt (2.0.9) treetop (1.6.10) polyglot (~> 0.3) - trollop (2.1.2) + trollop (2.9.9) tzinfo (1.2.5) thread_safe (~> 0.1) uuidtools (2.1.5) @@ -115,7 +117,8 @@ DEPENDENCIES nokogiri ox parse-cron - rack (< 2.0.0) + public_suffix + rack sequel sinatra sqlite3 @@ -127,4 +130,4 @@ DEPENDENCIES zendesk_api BUNDLED WITH - 1.17.1 + 1.11.2 diff --git a/share/install_gems/Ubuntu1810/Gemfile.lock b/share/install_gems/Ubuntu1810/Gemfile.lock index 19a28e5bba..affde1d116 100644 --- a/share/install_gems/Ubuntu1810/Gemfile.lock +++ b/share/install_gems/Ubuntu1810/Gemfile.lock @@ -1,22 +1,22 @@ GEM remote: https://rubygems.org/ specs: - activesupport (4.2.10) + activesupport (4.2.11) i18n (~> 0.7) minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - addressable (2.5.2) + addressable (2.6.0) public_suffix (>= 2.0.2, < 4.0) amazon-ec2 (0.9.17) xml-simple (>= 1.0.12) - aws-sdk (2.11.166) - aws-sdk-resources (= 2.11.166) - aws-sdk-core (2.11.166) + aws-sdk (2.11.212) + aws-sdk-resources (= 2.11.212) + aws-sdk-core (2.11.212) aws-sigv4 (~> 1.0) jmespath 
(~> 1.0) - aws-sdk-resources (2.11.166) - aws-sdk-core (= 2.11.166) + aws-sdk-resources (2.11.212) + aws-sdk-core (= 2.11.212) aws-sigv4 (1.0.3) azure (0.7.10) addressable (~> 2.3) @@ -27,19 +27,19 @@ GEM nokogiri (~> 1.6) systemu (~> 2.6) thor (~> 0.19) - azure-core (0.1.14) + azure-core (0.1.15) faraday (~> 0.9) faraday_middleware (~> 0.10) nokogiri (~> 1.6) builder (3.2.3) - concurrent-ruby (1.1.3) + concurrent-ruby (1.1.4) configparser (0.1.7) - curb (0.9.6) - daemons (1.2.6) + curb (0.9.8) + daemons (1.3.1) eventmachine (1.2.7) - faraday (0.15.3) + faraday (0.15.4) multipart-post (>= 1.2, < 3) - faraday_middleware (0.12.2) + faraday_middleware (0.13.0) faraday (>= 0.7.4, < 1.0) hashie (3.6.0) highline (1.7.10) @@ -51,34 +51,36 @@ GEM mime-types (3.2.2) mime-types-data (~> 3.2015) mime-types-data (3.2018.0812) - mini_portile2 (2.3.0) + mini_portile2 (2.4.0) minitest (5.11.3) multipart-post (2.0.0) + mustermann (1.0.3) mysql2 (0.5.2) net-ldap (0.16.1) - nokogiri (1.8.5) - mini_portile2 (~> 2.3.0) + nokogiri (1.10.1) + mini_portile2 (~> 2.4.0) ox (2.10.0) parse-cron (0.1.4) polyglot (0.3.5) public_suffix (3.0.3) - rack (1.6.11) - rack-protection (1.5.5) + rack (2.0.6) + rack-protection (2.0.5) rack - sequel (5.14.0) - sinatra (1.4.8) - rack (~> 1.5) - rack-protection (~> 1.4) - tilt (>= 1.3, < 3) + sequel (5.17.0) + sinatra (2.0.5) + mustermann (~> 1.0) + rack (~> 2.0) + rack-protection (= 2.0.5) + tilt (~> 2.0) sqlite3 (1.3.13) systemu (2.6.5) thin (1.7.2) daemons (~> 1.0, >= 1.0.9) eventmachine (~> 1.0, >= 1.0.4) rack (>= 1, < 3) - thor (0.20.0) + thor (0.20.3) thread_safe (0.3.6) - tilt (2.0.8) + tilt (2.0.9) treetop (1.6.10) polyglot (~> 0.3) trollop (2.9.9) @@ -115,7 +117,8 @@ DEPENDENCIES nokogiri ox parse-cron - rack (< 2.0.0) + public_suffix + rack sequel sinatra sqlite3 @@ -127,4 +130,4 @@ DEPENDENCIES zendesk_api BUNDLED WITH - 1.17.1 + 1.11.2 diff --git a/share/linters/.rubocop.yml b/share/linters/.rubocop.yml index 68d291e0bc..a88d22710d 100644 --- 
a/share/linters/.rubocop.yml +++ b/share/linters/.rubocop.yml @@ -1,11 +1,613 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + # # Configuration file for rubocop linter # Applies to every ruby file on the OpenNebula repository # AllCops: - Include: - - 'src/cli' + Exclude: + - src/sunstone/public/node_modules/**/* + - src/tm_mad + - share/onegate/onegate + - share/scons/get_xmlrpc_config + - share/rubygems/generate + - share/hooks/raft/follower_cleanup + - share/esx-fw-vnc/Vagrantfile + - share/vendor/ruby/gems/rbvmomi/Rakefile + - share/vendor/ruby/gems/packethost/Rakefile + - share/vendor/ruby/gems/packethost/Gemfile + - share/vendor/ruby/gems/packethost/packethost.gemspec + - share/install_gems/Gemfile + - share/install_gems/install_gems + - src/authm_mad/remotes/dummy/authenticate + - src/authm_mad/remotes/plain/authenticate + - src/authm_mad/remotes/ldap/authenticate + - src/authm_mad/remotes/server_x509/authenticate + - src/authm_mad/remotes/server_cipher/authenticate + - src/authm_mad/remotes/x509/authenticate + - src/authm_mad/remotes/ssh/authenticate + - src/sunstone/bin/novnc-server + - src/sunstone/config.ru + - src/pm_mad/remotes/dummy/cancel + - src/pm_mad/remotes/dummy/shutdown + - 
src/pm_mad/remotes/dummy/reboot + - src/pm_mad/remotes/dummy/deploy + - src/pm_mad/remotes/dummy/reset + - src/pm_mad/remotes/dummy/poll + - src/pm_mad/remotes/packet/cancel + - src/pm_mad/remotes/packet/shutdown + - src/pm_mad/remotes/packet/reboot + - src/pm_mad/remotes/packet/deploy + - src/pm_mad/remotes/packet/reset + - src/pm_mad/remotes/packet/poll + - src/pm_mad/remotes/ec2/cancel + - src/pm_mad/remotes/ec2/shutdown + - src/pm_mad/remotes/ec2/reboot + - src/pm_mad/remotes/ec2/deploy + - src/pm_mad/remotes/ec2/reset + - src/pm_mad/remotes/ec2/poll + - src/onegate/config.ru + - src/datastore_mad/remotes/vcenter/monitor + - src/datastore_mad/remotes/vcenter/mkfs + - src/datastore_mad/remotes/vcenter/stat + - src/datastore_mad/remotes/vcenter/clone + - src/datastore_mad/remotes/vcenter/cp + - src/datastore_mad/remotes/vcenter/export + - src/datastore_mad/remotes/vcenter/rm + - src/im_mad/remotes/lxd-probes.d/pci.rb + - src/im_mad/remotes/kvm-probes.d/pci.rb + - src/im_mad/remotes/kvm.d/collectd-client.rb + - src/im_mad/remotes/lxd.d/collectd-client.rb + - src/im_mad/remotes/vcenter.d/poll + - src/im_mad/remotes/packet.d/poll + - src/im_mad/remotes/ec2.d/poll + - src/im_mad/remotes/one.d/poll + - src/im_mad/remotes/az.d/poll + - src/vnm_mad/remotes/ovswitch/post + - src/vnm_mad/remotes/ovswitch/clean + - src/vnm_mad/remotes/ovswitch/pre + - src/vnm_mad/remotes/802.1Q/post + - src/vnm_mad/remotes/802.1Q/update_sg + - src/vnm_mad/remotes/802.1Q/clean + - src/vnm_mad/remotes/802.1Q/pre + - src/vnm_mad/remotes/ebtables/post + - src/vnm_mad/remotes/ebtables/update_sg + - src/vnm_mad/remotes/ebtables/clean + - src/vnm_mad/remotes/ebtables/pre + - src/vnm_mad/remotes/bridge/clean + - src/vnm_mad/remotes/bridge/pre + - src/vnm_mad/remotes/vxlan/post + - src/vnm_mad/remotes/vxlan/update_sg + - src/vnm_mad/remotes/vxlan/clean + - src/vnm_mad/remotes/vxlan/pre + - src/vnm_mad/remotes/fw/post + - src/vnm_mad/remotes/fw/update_sg + - src/vnm_mad/remotes/fw/clean + - 
src/vnm_mad/remotes/fw/pre + - src/vnm_mad/remotes/ovswitch_vxlan/post + - src/vnm_mad/remotes/ovswitch_vxlan/clean + - src/vnm_mad/remotes/ovswitch_vxlan/pre + - src/vmm_mad/remotes/vcenter/attach_nic + - src/vmm_mad/remotes/vcenter/cancel + - src/vmm_mad/remotes/vcenter/snapshot_revert + - src/vmm_mad/remotes/vcenter/detach_nic + - src/vmm_mad/remotes/vcenter/snapshot_delete + - src/vmm_mad/remotes/vcenter/detach_disk + - src/vmm_mad/remotes/vcenter/shutdown + - src/vmm_mad/remotes/vcenter/attach_disk + - src/vmm_mad/remotes/vcenter/reboot + - src/vmm_mad/remotes/vcenter/deploy + - src/vmm_mad/remotes/vcenter/reset + - src/vmm_mad/remotes/vcenter/reconfigure + - src/vmm_mad/remotes/vcenter/save + - src/vmm_mad/remotes/vcenter/restore + - src/vmm_mad/remotes/vcenter/snapshot_create + - src/vmm_mad/remotes/vcenter/poll + - src/vmm_mad/remotes/lxd/attach_nic + - src/vmm_mad/remotes/lxd/detach_nic + - src/vmm_mad/remotes/lxd/detach_disk + - src/vmm_mad/remotes/lxd/shutdown + - src/vmm_mad/remotes/lxd/attach_disk + - src/vmm_mad/remotes/lxd/reboot + - src/vmm_mad/remotes/lxd/deploy + - src/vmm_mad/remotes/lxd/prereconfigure + - src/vmm_mad/remotes/lxd/reconfigure + - src/vmm_mad/remotes/lxd/poll + - src/vmm_mad/remotes/one/cancel + - src/vmm_mad/remotes/one/shutdown + - src/vmm_mad/remotes/one/reboot + - src/vmm_mad/remotes/one/deploy + - src/vmm_mad/remotes/one/reset + - src/vmm_mad/remotes/one/save + - src/vmm_mad/remotes/one/restore + - src/vmm_mad/remotes/one/poll + - src/vmm_mad/remotes/kvm/poll + - src/vmm_mad/remotes/az/cancel + - src/vmm_mad/remotes/az/shutdown + - src/vmm_mad/remotes/az/reboot + - src/vmm_mad/remotes/az/deploy + - src/vmm_mad/remotes/az/save + - src/vmm_mad/remotes/az/restore + - src/vmm_mad/remotes/az/poll + - src/vmm_mad/remotes/packet/cancel + - src/vmm_mad/remotes/packet/shutdown + - src/vmm_mad/remotes/packet/reboot + - src/vmm_mad/remotes/packet/deploy + - src/vmm_mad/remotes/packet/reset + - src/vmm_mad/remotes/packet/poll + - 
src/vmm_mad/remotes/ec2/cancel + - src/vmm_mad/remotes/ec2/shutdown + - src/vmm_mad/remotes/ec2/reboot + - src/vmm_mad/remotes/ec2/deploy + - src/vmm_mad/remotes/ec2/save + - src/vmm_mad/remotes/ec2/restore + - src/vmm_mad/remotes/ec2/poll + - src/cloud/ec2/bin/econe-detach-volume + - src/cloud/ec2/bin/econe-start-instances + - src/cloud/ec2/bin/econe-associate-address + - src/cloud/ec2/bin/econe-create-volume + - src/cloud/ec2/bin/econe-delete-volume + - src/cloud/ec2/bin/econe-attach-volume + - src/cloud/ec2/bin/econe-stop-instances + - src/cloud/ec2/bin/econe-delete-keypair + - src/cloud/ec2/bin/econe-register + - src/cloud/ec2/bin/econe-release-address + - src/cloud/ec2/bin/econe-describe-images + - src/cloud/ec2/bin/econe-terminate-instances + - src/cloud/ec2/bin/econe-describe-keypairs + - src/cloud/ec2/bin/econe-describe-instances + - src/cloud/ec2/bin/econe-reboot-instances + - src/cloud/ec2/bin/econe-allocate-address + - src/cloud/ec2/bin/econe-upload + - src/cloud/ec2/bin/econe-describe-addresses + - src/cloud/ec2/bin/econe-run-instances + - src/cloud/ec2/bin/econe-disassociate-address + - src/cloud/ec2/bin/econe-create-keypair + - src/cloud/ec2/bin/econe-describe-volumes + - src/onedb/onedb + - src/market_mad/remotes/s3/monitor + - src/market_mad/remotes/s3/delete + - src/market_mad/remotes/s3/import + - src/market_mad/remotes/linuxcontainers/monitor + - src/market_mad/remotes/one/monitor + - src/tm_mad/vcenter/monitor + - src/tm_mad/vcenter/delete + - src/tm_mad/vcenter/mvds + - src/tm_mad/vcenter/mkimage + - src/tm_mad/vcenter/cpds + - src/tm_mad/vcenter/clone + - src/tm_mad/vcenter/mv + - src/tm_mad/vcenter/resize + - src/flow/config.ru + - src/flow/Gemfile + - src/cli/oneprovision + - share/scons/po2json.rb + - share/sudoers/sudo_commands.rb + - share/hooks/vcenter/delete_vcenter_net.rb + - share/hooks/vcenter/create_vcenter_net.rb + - share/hooks/ft/host_error.rb + - share/instance_types/ec2-instance-types.rb + - 
share/instance_types/az-instance-types.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Datastore.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrDataTypeInfo.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/HostSystem.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ManagedObject.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrManagedTypeInfo.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ServiceInstance.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PropertyCollector.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrAllTypeInfo.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ObjectUpdate.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ManagedEntity.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Folder.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ResourcePool.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/OvfManager.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Datacenter.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ObjectContent.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/VirtualMachine.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ComputeResource.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PerfCounterInfo.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PerformanceManager.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Task.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/sms.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/fault.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/trivial_soap.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/sms/SmsStorageManager.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/deserialization.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/connection.rb + - 
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/basic_types.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/perfdump.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/admission_control.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/deploy.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/leases.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/trollop.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/version.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/pbm.rb + - share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/type_loader.rb + - share/vendor/ruby/gems/packethost/spec/spec_helper.rb + - share/vendor/ruby/gems/packethost/spec/support/webmock.rb + - share/vendor/ruby/gems/packethost/spec/support/fake_packet.rb + - share/vendor/ruby/gems/packethost/spec/support/shared/entity.rb + - share/vendor/ruby/gems/packethost/spec/lib/packet_spec.rb + - share/vendor/ruby/gems/packethost/spec/lib/packet/client_spec.rb + - share/vendor/ruby/gems/packethost/spec/lib/packet/project_spec.rb + - share/vendor/ruby/gems/packethost/spec/lib/packet/configuration_spec.rb + - share/vendor/ruby/gems/packethost/spec/lib/packet/device_spec.rb + - share/vendor/ruby/gems/packethost/lib/packet.rb + - share/vendor/ruby/gems/packethost/lib/packet/project.rb + - share/vendor/ruby/gems/packethost/lib/packet/errors.rb + - share/vendor/ruby/gems/packethost/lib/packet/entity/base.rb + - share/vendor/ruby/gems/packethost/lib/packet/entity/serialization.rb + - share/vendor/ruby/gems/packethost/lib/packet/entity/associations.rb + - share/vendor/ruby/gems/packethost/lib/packet/entity/finders.rb + - share/vendor/ruby/gems/packethost/lib/packet/entity/timestamps.rb + - share/vendor/ruby/gems/packethost/lib/packet/entity/persistence.rb + - share/vendor/ruby/gems/packethost/lib/packet/entity.rb + - share/vendor/ruby/gems/packethost/lib/packet/facility.rb + - share/vendor/ruby/gems/packethost/lib/packet/operating_system.rb + - share/vendor/ruby/gems/packethost/lib/packet/device.rb + - 
share/vendor/ruby/gems/packethost/lib/packet/ip_range.rb + - share/vendor/ruby/gems/packethost/lib/packet/client.rb + - share/vendor/ruby/gems/packethost/lib/packet/version.rb + - share/vendor/ruby/gems/packethost/lib/packet/global_id.rb + - share/vendor/ruby/gems/packethost/lib/packet/configuration.rb + - share/vendor/ruby/gems/packethost/lib/packet/client/devices.rb + - share/vendor/ruby/gems/packethost/lib/packet/client/projects.rb + - share/vendor/ruby/gems/packethost/lib/packet/client/plans.rb + - share/vendor/ruby/gems/packethost/lib/packet/client/facilities.rb + - share/vendor/ruby/gems/packethost/lib/packet/client/ip_ranges.rb + - share/vendor/ruby/gems/packethost/lib/packet/client/ssh_keys.rb + - share/vendor/ruby/gems/packethost/lib/packet/client/users.rb + - share/vendor/ruby/gems/packethost/lib/packet/client/operating_systems.rb + - share/vendor/ruby/gems/packethost/lib/packet/ssh_key.rb + - share/vendor/ruby/gems/packethost/lib/packet/plan.rb + - share/vendor/ruby/gems/packethost/lib/packet/user.rb + - share/vendor/ruby/gems/packethost/lib/packethost.rb + - share/router/vmcontext.rb + - src/authm_mad/one_auth_mad.rb + - src/authm_mad/remotes/ldap/test/ldap_auth_spec.rb + - src/authm_mad/remotes/ldap/ldap_auth.rb + - src/authm_mad/remotes/server_x509/server_x509_auth.rb + - src/authm_mad/remotes/server_cipher/server_cipher_auth.rb + - src/authm_mad/remotes/x509/x509_auth.rb + - src/authm_mad/remotes/ssh/ssh_auth.rb + - src/sunstone/sunstone-server.rb + - src/sunstone/test/spec/spec_helper.rb + - src/sunstone/test/spec/vnet_spec.rb + - src/sunstone/test/spec/image_spec.rb + - src/sunstone/test/spec/vm_spec.rb + - src/sunstone/test/spec/host_spec.rb + - src/sunstone/test/spec/user_spec.rb + - src/sunstone/OpenNebulaAddons.rb + - src/sunstone/OpenNebulaVNC.rb + - src/sunstone/models/OpenNebulaJSON.rb + - src/sunstone/models/SunstoneViews.rb + - src/sunstone/models/SunstoneServer.rb + - src/sunstone/models/OpenNebulaJSON/SecurityGroupJSON.rb + - 
src/sunstone/models/OpenNebulaJSON/HostJSON.rb + - src/sunstone/models/OpenNebulaJSON/PoolJSON.rb + - src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb + - src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb + - src/sunstone/models/OpenNebulaJSON/UserJSON.rb + - src/sunstone/models/OpenNebulaJSON/AclJSON.rb + - src/sunstone/models/OpenNebulaJSON/JSONUtils.rb + - src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb + - src/sunstone/models/OpenNebulaJSON/MarketPlaceAppJSON.rb + - src/sunstone/models/OpenNebulaJSON/ImageJSON.rb + - src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb + - src/sunstone/models/OpenNebulaJSON/MarketPlaceJSON.rb + - src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb + - src/sunstone/models/OpenNebulaJSON/VirtualRouterJSON.rb + - src/sunstone/models/OpenNebulaJSON/VirtualNetworkTemplateJSON.rb + - src/sunstone/models/OpenNebulaJSON/GroupJSON.rb + - src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb + - src/sunstone/models/OpenNebulaJSON/VMGroupJSON.rb + - src/sunstone/models/OpenNebulaJSON/VdcJSON.rb + - src/sunstone/public/config.rb + - src/sunstone/routes/oneflow.rb + - src/sunstone/routes/vcenter.rb + - src/sunstone/routes/support.rb + - src/onegate/onegate-server.rb + - src/datastore_mad/remotes/vcenter_downloader.rb + - src/datastore_mad/remotes/vcenter_uploader.rb + - src/datastore_mad/remotes/xpath.rb + - src/datastore_mad/remotes/url.rb + - src/datastore_mad/one_datastore.rb + - src/im_mad/dummy/one_im_dummy.rb + - src/im_mad/im_exec/one_im_exec.rb + - src/im_mad/remotes/lxd-probes.d/lxd.rb + - src/im_mad/remotes/kvm-probes.d/kvm.rb + - src/im_mad/remotes/kvm-probes.d/machines-models.rb + - src/im_mad/remotes/common.d/collectd-client.rb + - src/im_mad/remotes/node-probes.d/pci.rb + - src/hm_mad/one_hm.rb + - src/vnm_mad/one_vnm.rb + - src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb + - src/vnm_mad/remotes/802.1Q/vlan_tag_driver.rb + - src/vnm_mad/remotes/ebtables/Ebtables.rb + - src/vnm_mad/remotes/vxlan/vxlan.rb + - 
src/vnm_mad/remotes/vxlan/vxlan_driver.rb + - src/vnm_mad/remotes/ovswitch_vxlan/OpenvSwitchVXLAN.rb + - src/vnm_mad/remotes/lib/no_vlan.rb + - src/vnm_mad/remotes/lib/nic.rb + - src/vnm_mad/remotes/lib/vnmmad.rb + - src/vnm_mad/remotes/lib/security_groups.rb + - src/vnm_mad/remotes/lib/security_groups_iptables.rb + - src/vnm_mad/remotes/lib/vm.rb + - src/vnm_mad/remotes/lib/vnm_driver.rb + - src/vnm_mad/remotes/lib/sg_driver.rb + - src/vnm_mad/remotes/lib/command.rb + - src/vnm_mad/remotes/lib/vlan.rb + - src/vnm_mad/remotes/lib/address.rb + - src/cli/command_parser.rb + - src/cli/cli_helper.rb + - src/cli/one_helper.rb + - src/cli/one_helper/onevmgroup_helper.rb + - src/cli/one_helper/onemarket_helper.rb + - src/cli/one_helper/onesecgroup_helper.rb + - src/cli/one_helper/onezone_helper.rb + - src/cli/one_helper/onetemplate_helper.rb + - src/cli/one_helper/onevm_helper.rb + - src/cli/one_helper/oneacct_helper.rb + - src/cli/one_helper/onequota_helper.rb + - src/cli/one_helper/oneuser_helper.rb + - src/cli/one_helper/oneimage_helper.rb + - src/cli/one_helper/onemarketapp_helper.rb + - src/cli/one_helper/onegroup_helper.rb + - src/cli/one_helper/onevnet_helper.rb + - src/cli/one_helper/oneacl_helper.rb + - src/cli/one_helper/onevcenter_helper.rb + - src/cli/one_helper/onecluster_helper.rb + - src/cli/one_helper/onevntemplate_helper.rb + - src/cli/one_helper/onevrouter_helper.rb + - src/cli/one_helper/oneprovision_helpers/host_helper.rb + - src/cli/one_helper/oneprovision_helpers/provision_helper.rb + - src/cli/one_helper/oneprovision_helpers/cluster_helper.rb + - src/cli/one_helper/oneprovision_helpers/common_helper.rb + - src/cli/one_helper/oneprovision_helpers/datastore_helper.rb + - src/cli/one_helper/oneprovision_helpers/vnet_helper.rb + - src/cli/one_helper/oneprovision_helpers/ansible_helper.rb + - src/cli/one_helper/onevdc_helper.rb + - src/cli/one_helper/onedatastore_helper.rb + - src/oca/ruby/test/VirtualMachine_spec.rb + - 
src/oca/ruby/test/VirtualMachinePool_spec.rb + - src/oca/ruby/test/XMLUtils_spec.rb + - src/oca/ruby/test/UserPool_spec.rb + - src/oca/ruby/test/Host_spec.rb + - src/oca/ruby/test/User_spec.rb + - src/oca/ruby/test/helpers/MockClient.rb + - src/oca/ruby/test/VirtualNetwork_spec.rb + - src/oca/ruby/test/HostPool_spec.rb + - src/oca/ruby/test/VirtualNetworkPool_spec.rb + - src/oca/ruby/opennebula.rb + - src/oca/ruby/opennebula/image.rb + - src/oca/ruby/opennebula/datastore.rb + - src/oca/ruby/opennebula/group_pool.rb + - src/oca/ruby/opennebula/template_pool.rb + - src/oca/ruby/opennebula/marketplaceapp_pool.rb + - src/oca/ruby/opennebula/acl_pool.rb + - src/oca/ruby/opennebula/virtual_machine_pool.rb + - src/oca/ruby/opennebula/pool.rb + - src/oca/ruby/opennebula/host_pool.rb + - src/oca/ruby/opennebula/security_group.rb + - src/oca/ruby/opennebula/cluster_pool.rb + - src/oca/ruby/opennebula/document.rb + - src/oca/ruby/opennebula/zone.rb + - src/oca/ruby/opennebula/virtual_router_pool.rb + - src/oca/ruby/opennebula/user_pool.rb + - src/oca/ruby/opennebula/xml_utils.rb + - src/oca/ruby/opennebula/virtual_router.rb + - src/oca/ruby/opennebula/document_json.rb + - src/oca/ruby/opennebula/marketplace.rb + - src/oca/ruby/opennebula/virtual_machine.rb + - src/oca/ruby/opennebula/xml_element.rb + - src/oca/ruby/opennebula/template.rb + - src/oca/ruby/opennebula/group.rb + - src/oca/ruby/opennebula/virtual_network.rb + - src/oca/ruby/opennebula/security_group_pool.rb + - src/oca/ruby/opennebula/pool_element.rb + - src/oca/ruby/opennebula/document_pool.rb + - src/oca/ruby/opennebula/vm_group_pool.rb + - src/oca/ruby/opennebula/vntemplate_pool.rb + - src/oca/ruby/opennebula/vdc_pool.rb + - src/oca/ruby/opennebula/datastore_pool.rb + - src/oca/ruby/opennebula/cluster.rb + - src/oca/ruby/opennebula/utils.rb + - src/oca/ruby/opennebula/acl.rb + - src/oca/ruby/opennebula/vntemplate.rb + - src/oca/ruby/opennebula/oneflow_client.rb + - src/oca/ruby/opennebula/host.rb + - 
src/oca/ruby/opennebula/vm_group.rb + - src/oca/ruby/opennebula/xml_pool.rb + - src/oca/ruby/opennebula/client.rb + - src/oca/ruby/opennebula/document_pool_json.rb + - src/oca/ruby/opennebula/zone_pool.rb + - src/oca/ruby/opennebula/error.rb + - src/oca/ruby/opennebula/image_pool.rb + - src/oca/ruby/opennebula/virtual_network_pool.rb + - src/oca/ruby/opennebula/system.rb + - src/oca/ruby/opennebula/marketplaceapp.rb + - src/oca/ruby/opennebula/marketplace_pool.rb + - src/oca/ruby/opennebula/vdc.rb + - src/oca/ruby/opennebula/user.rb + - src/oca/ruby/deprecated/OpenNebula.rb + - src/vmm_mad/dummy/one_vmm_dummy.rb + - src/vmm_mad/remotes/vcenter/vcenter_driver.rb + - src/vmm_mad/remotes/one/opennebula_driver.rb + - src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb + - src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb + - src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb + - src/vmm_mad/remotes/lib/vcenter_driver/network.rb + - src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb + - src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb + - src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb + - src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb + - src/vmm_mad/remotes/lib/vcenter_driver/host.rb + - src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb + - src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb + - src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb + - src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb + - src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb + - src/vmm_mad/remotes/lib/lxd/mapper/raw.rb + - src/vmm_mad/remotes/lib/lxd/container.rb + - src/vmm_mad/remotes/lib/lxd/client.rb + - src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb + - src/vmm_mad/remotes/lib/lxd/command.rb + - src/vmm_mad/remotes/lib/poll_common.rb + - src/vmm_mad/remotes/az/az_driver.rb + - src/vmm_mad/remotes/packet/packet_driver.rb + - src/vmm_mad/remotes/ec2/ec2_driver.rb + - src/vmm_mad/exec/one_vmm_exec.rb + - src/mad/ruby/ssh_stream.rb + - src/mad/ruby/test/MonkeyPatcher.rb + - 
src/mad/ruby/test/OpenNebulaDriver_spec.rb + - src/mad/ruby/VirtualMachineDriver.rb + - src/mad/ruby/ActionManager.rb + - src/mad/ruby/DriverExecHelper.rb + - src/mad/ruby/CommandManager.rb + - src/mad/ruby/test_mad.rb + - src/mad/ruby/OpenNebulaDriver.rb + - src/mad/ruby/scripts_common.rb + - src/ipamm_mad/one_ipam.rb + - src/cloud/common/CloudClient.rb + - src/cloud/common/CloudServer.rb + - src/cloud/common/CloudAuth.rb + - src/cloud/common/CloudAuth/RemoteCloudAuth.rb + - src/cloud/common/CloudAuth/X509CloudAuth.rb + - src/cloud/common/CloudAuth/EC2CloudAuth.rb + - src/cloud/common/CloudAuth/SunstoneCloudAuth.rb + - src/cloud/common/CloudAuth/OneGateCloudAuth.rb + - src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb + - src/cloud/ec2/lib/ebs.rb + - src/cloud/ec2/lib/elastic_ip.rb + - src/cloud/ec2/lib/ImageEC2.rb + - src/cloud/ec2/lib/net_ssh_replacement.rb + - src/cloud/ec2/lib/keypair.rb + - src/cloud/ec2/lib/EC2QueryServer.rb + - src/cloud/ec2/lib/econe-server.rb + - src/cloud/ec2/lib/EC2QueryClient.rb + - src/cloud/ec2/lib/tags.rb + - src/cloud/ec2/lib/instance.rb + - src/cloud/ec2/lib/econe_application.rb + - src/onedb/database_schema.rb + - src/onedb/fsck/image.rb + - src/onedb/fsck/datastore.rb + - src/onedb/fsck/quotas.rb + - src/onedb/fsck/history.rb + - src/onedb/fsck/vrouter.rb + - src/onedb/fsck/pool_control.rb + - src/onedb/fsck/marketplace.rb + - src/onedb/fsck/network.rb + - src/onedb/fsck/template.rb + - src/onedb/fsck/group.rb + - src/onedb/fsck/cluster.rb + - src/onedb/fsck/cluster_vnc_bitmap.rb + - src/onedb/fsck/host.rb + - src/onedb/fsck/vm.rb + - src/onedb/fsck/marketplaceapp.rb + - src/onedb/fsck/user.rb + - src/onedb/fsck.rb + - src/onedb/patches/ip4_ip6_static.rb + - src/onedb/patches/history_times.rb + - src/onedb/patches/marketapps_clean.rb + - src/onedb/patches/4.14_monitoring.rb + - src/onedb/patches/next_snapshot.rb + - src/onedb/vcenter_one54_pre.rb + - src/onedb/vcenter_one54.rb + - src/onedb/onedb.rb + - 
src/onedb/shared/4.2.0_to_4.3.80.rb + - src/onedb/shared/5.3.80_to_5.4.0.rb + - src/onedb/shared/3.0.0_to_3.1.0.rb + - src/onedb/shared/3.2.0_to_3.2.1.rb + - src/onedb/shared/3.6.0_to_3.7.80.rb + - src/onedb/shared/3.5.80_to_3.6.0.rb + - src/onedb/shared/5.6.0_to_5.7.80.rb + - src/onedb/shared/4.4.1_to_4.5.80.rb + - src/onedb/shared/3.1.0_to_3.1.80.rb + - src/onedb/shared/3.8.0_to_3.8.1.rb + - src/onedb/shared/2.0_to_2.9.80.rb + - src/onedb/shared/3.8.5_to_3.9.80.rb + - src/onedb/shared/4.5.80_to_4.6.0.rb + - src/onedb/shared/3.8.2_to_3.8.3.rb + - src/onedb/shared/4.0.1_to_4.1.80.rb + - src/onedb/shared/4.11.80_to_4.90.0.rb + - src/onedb/shared/3.9.80_to_3.9.90.rb + - src/onedb/shared/4.90.0_to_5.2.0.rb + - src/onedb/shared/4.6.0_to_4.11.80.rb + - src/onedb/shared/3.3.0_to_3.3.80.rb + - src/onedb/shared/3.4.1_to_3.5.80.rb + - src/onedb/shared/5.4.1_to_5.5.80.rb + - src/onedb/shared/4.3.85_to_4.3.90.rb + - src/onedb/shared/4.4.0_to_4.4.1.rb + - src/onedb/shared/3.8.1_to_3.8.2.rb + - src/onedb/shared/2.9.90_to_3.0.0.rb + - src/onedb/shared/3.8.3_to_3.8.4.rb + - src/onedb/shared/2.9.80_to_2.9.85.rb + - src/onedb/shared/5.2.0_to_5.3.80.rb + - src/onedb/shared/3.4.0_to_3.4.1.rb + - src/onedb/shared/5.4.0_to_5.4.1.rb + - src/onedb/shared/4.3.80_to_4.3.85.rb + - src/onedb/shared/3.2.1_to_3.3.0.rb + - src/onedb/shared/2.9.85_to_2.9.90.rb + - src/onedb/shared/3.8.4_to_3.8.5.rb + - src/onedb/shared/3.7.80_to_3.8.0.rb + - src/onedb/shared/3.1.80_to_3.2.0.rb + - src/onedb/shared/5.5.80_to_5.6.0.rb + - src/onedb/shared/3.3.80_to_3.4.0.rb + - src/onedb/shared/4.3.90_to_4.4.0.rb + - src/onedb/shared/3.9.90_to_4.0.0.rb + - src/onedb/shared/4.0.0_to_4.0.1.rb + - src/onedb/shared/4.1.80_to_4.2.0.rb + - src/onedb/local/5.3.80_to_5.4.0.rb + - src/onedb/local/4.13.80_to_4.13.85.rb + - src/onedb/local/5.6.0_to_5.7.80.rb + - src/onedb/local/5.4.1_to_5.5.80.rb + - src/onedb/local/4.13.85_to_4.90.0.rb + - src/onedb/local/5.4.0_to_5.4.1.rb + - src/onedb/local/4.11.80_to_4.13.80.rb + - 
src/onedb/local/4.10.3_to_4.11.80.rb + - src/onedb/local/4.90.0_to_5.3.80.rb + - src/onedb/local/4.5.80_to_4.7.80.rb + - src/onedb/local/4.9.80_to_4.10.3.rb + - src/onedb/local/5.5.80_to_5.6.0.rb + - src/onedb/local/4.7.80_to_4.9.80.rb + - src/onedb/onedb_live.rb + - src/onedb/onedb_backend.rb + - src/onedb/sqlite2mysql.rb + - src/market_mad/remotes/s3/S3.rb + - src/market_mad/one_market.rb + - src/tm_mad/one_tm.rb + - src/flow/lib/models/service_template_pool.rb + - src/flow/lib/models/service_pool.rb + - src/flow/lib/models/service_template.rb + - src/flow/lib/models/role.rb + - src/flow/lib/models/service.rb + - src/flow/lib/strategy.rb + - src/flow/lib/grammar.rb + - src/flow/lib/LifeCycleManager.rb + - src/flow/lib/log.rb + - src/flow/lib/models.rb + - src/flow/lib/validator.rb + - src/flow/lib/strategy/straight.rb + - src/flow/oneflow-server.rb + ######## # LAYOUT @@ -169,6 +771,12 @@ Metrics/BlockLength: Metrics/LineLength: Max: 80 +Metrics/ModuleLength: + Enabled: False + +Metrics/ClassLength: + Enabled: False + # Parameter list config: Metrics/ParameterLists: Max: 5 diff --git a/share/onegate/onegate b/share/onegate/onegate index 938c8706c7..822eaa6bf0 100755 --- a/share/onegate/onegate +++ b/share/onegate/onegate @@ -13,7 +13,7 @@ require 'pp' module CloudClient # OpenNebula version - VERSION = '5.7.85' + VERSION = '5.7.90' # ######################################################################### # Default location for the authentication file diff --git a/share/rubygems/generate b/share/rubygems/generate index cb814c6474..22330c6db0 100755 --- a/share/rubygems/generate +++ b/share/rubygems/generate @@ -19,7 +19,7 @@ require 'fileutils' require 'tmpdir' -VERSION = "5.7.85" +VERSION = "5.7.90" def version v = VERSION diff --git a/share/vendor/ruby/gems/packethost/.rubocop.yml b/share/vendor/ruby/gems/packethost/.rubocop.yml deleted file mode 100644 index 854e769306..0000000000 --- a/share/vendor/ruby/gems/packethost/.rubocop.yml +++ /dev/null @@ -1,18 
+0,0 @@ -AllCops: - Include: - - '**/Rakefile' - - lib/**/* - -Documentation: - Enabled: false - -LineLength: - Enabled: false - -Style/PredicateName: - NamePrefix: - - is_ - - have_ - -Style/MethodMissing: - Enabled: false diff --git a/share/websockify/run b/share/websockify/run new file mode 100755 index 0000000000..9ad217c04c --- /dev/null +++ b/share/websockify/run @@ -0,0 +1,5 @@ +#!/usr/bin/env python + +import websockify + +websockify.websocketproxy.websockify_init() diff --git a/share/websockify/websockify b/share/websockify/websockify deleted file mode 120000 index 9558147b09..0000000000 --- a/share/websockify/websockify +++ /dev/null @@ -1 +0,0 @@ -websocketproxy.py \ No newline at end of file diff --git a/share/websockify/websockify/__init__.py b/share/websockify/websockify/__init__.py new file mode 100644 index 0000000000..37a6f47b45 --- /dev/null +++ b/share/websockify/websockify/__init__.py @@ -0,0 +1,2 @@ +from websockify.websocket import * +from websockify.websocketproxy import * diff --git a/share/websockify/websockify/auth_plugins.py b/share/websockify/websockify/auth_plugins.py new file mode 100644 index 0000000000..93f1385504 --- /dev/null +++ b/share/websockify/websockify/auth_plugins.py @@ -0,0 +1,83 @@ +class BasePlugin(object): + def __init__(self, src=None): + self.source = src + + def authenticate(self, headers, target_host, target_port): + pass + + +class AuthenticationError(Exception): + def __init__(self, log_msg=None, response_code=403, response_headers={}, response_msg=None): + self.code = response_code + self.headers = response_headers + self.msg = response_msg + + if log_msg is None: + log_msg = response_msg + + super(AuthenticationError, self).__init__('%s %s' % (self.code, log_msg)) + + +class InvalidOriginError(AuthenticationError): + def __init__(self, expected, actual): + self.expected_origin = expected + self.actual_origin = actual + + super(InvalidOriginError, self).__init__( + response_msg='Invalid Origin', + 
log_msg="Invalid Origin Header: Expected one of " + "%s, got '%s'" % (expected, actual)) + + +class BasicHTTPAuth(object): + """Verifies Basic Auth headers. Specify src as username:password""" + + def __init__(self, src=None): + self.src = src + + def authenticate(self, headers, target_host, target_port): + import base64 + auth_header = headers.get('Authorization') + if auth_header: + if not auth_header.startswith('Basic '): + raise AuthenticationError(response_code=403) + + try: + user_pass_raw = base64.b64decode(auth_header[6:]) + except TypeError: + raise AuthenticationError(response_code=403) + + try: + # http://stackoverflow.com/questions/7242316/what-encoding-should-i-use-for-http-basic-authentication + user_pass_as_text = user_pass_raw.decode('ISO-8859-1') + except UnicodeDecodeError: + raise AuthenticationError(response_code=403) + + user_pass = user_pass_as_text.split(':', 1) + if len(user_pass) != 2: + raise AuthenticationError(response_code=403) + + if not self.validate_creds(*user_pass): + raise AuthenticationError(response_code=403) + + else: + raise AuthenticationError(response_code=401, + response_headers={'WWW-Authenticate': 'Basic realm="Websockify"'}) + + def validate_creds(self, username, password): + if '%s:%s' % (username, password) == self.src: + return True + else: + return False + +class ExpectOrigin(object): + def __init__(self, src=None): + if src is None: + self.source = [] + else: + self.source = src.split() + + def authenticate(self, headers, target_host, target_port): + origin = headers.get('Origin', None) + if origin is None or origin not in self.source: + raise InvalidOriginError(expected=self.source, actual=origin) diff --git a/share/websockify/websockify/token_plugins.py b/share/websockify/websockify/token_plugins.py new file mode 100644 index 0000000000..92494eb03f --- /dev/null +++ b/share/websockify/websockify/token_plugins.py @@ -0,0 +1,83 @@ +import os + +class BasePlugin(object): + def __init__(self, src): + self.source = src 
+ + def lookup(self, token): + return None + + +class ReadOnlyTokenFile(BasePlugin): + # source is a token file with lines like + # token: host:port + # or a directory of such files + def __init__(self, *args, **kwargs): + super(ReadOnlyTokenFile, self).__init__(*args, **kwargs) + self._targets = None + + def _load_targets(self): + if os.path.isdir(self.source): + cfg_files = [os.path.join(self.source, f) for + f in os.listdir(self.source)] + else: + cfg_files = [self.source] + + self._targets = {} + for f in cfg_files: + for line in [l.strip() for l in open(f).readlines()]: + if line and not line.startswith('#'): + tok, target = line.split(': ') + self._targets[tok] = target.strip().rsplit(':', 1) + + def lookup(self, token): + if self._targets is None: + self._load_targets() + + if token in self._targets: + return self._targets[token] + else: + return None + + +# the above one is probably more efficient, but this one is +# more backwards compatible (although in most cases +# ReadOnlyTokenFile should suffice) +class TokenFile(ReadOnlyTokenFile): + # source is a token file with lines like + # token: host:port + # or a directory of such files + def lookup(self, token): + self._load_targets() + + return super(TokenFile, self).lookup(token) + + +class BaseTokenAPI(BasePlugin): + # source is a url with a '%s' in it where the token + # should go + + # we import things on demand so that other plugins + # in this file can be used w/o unecessary dependencies + + def process_result(self, resp): + return resp.text.split(':') + + def lookup(self, token): + import requests + + resp = requests.get(self.source % token) + + if resp.ok: + return self.process_result(resp) + else: + return None + + +class JSONTokenApi(BaseTokenAPI): + # source is a url with a '%s' in it where the token + # should go + + def process_result(self, resp): + resp_json = resp.json() + return (resp_json['host'], resp_json['port']) diff --git a/share/websockify/websocket.py 
b/share/websockify/websockify/websocket.py similarity index 89% rename from share/websockify/websocket.py rename to share/websockify/websockify/websocket.py index d161f648c5..274a0047fc 100644 --- a/share/websockify/websocket.py +++ b/share/websockify/websockify/websocket.py @@ -104,6 +104,8 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): self.handler_id = getattr(server, "handler_id", False) self.file_only = getattr(server, "file_only", False) self.traffic = getattr(server, "traffic", False) + self.auto_pong = getattr(server, "auto_pong", False) + self.strict_mode = getattr(server, "strict_mode", True) self.logger = getattr(server, "logger", None) if self.logger is None: @@ -111,6 +113,9 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): SimpleHTTPRequestHandler.__init__(self, req, addr, server) + def log_message(self, format, *args): + self.logger.info("%s - - [%s] %s" % (self.address_string(), self.log_date_time_string(), format % args)) + @staticmethod def unmask(buf, hlen, plen): pstart = hlen + 4 @@ -118,20 +123,24 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): if numpy: b = c = s2b('') if plen >= 4: - mask = numpy.frombuffer(buf, dtype=numpy.dtype('') + mask = numpy.frombuffer(buf, dtype, offset=hlen, count=1) + data = numpy.frombuffer(buf, dtype, offset=pstart, + count=int(plen / 4)) #b = numpy.bitwise_xor(data, mask).data b = numpy.bitwise_xor(data, mask).tostring() if plen % 4: #self.msg("Partial unmask") - mask = numpy.frombuffer(buf, dtype=numpy.dtype('B'), - offset=hlen, count=(plen % 4)) - data = numpy.frombuffer(buf, dtype=numpy.dtype('B'), - offset=pend - (plen % 4), + dtype=numpy.dtype('B') + if sys.byteorder == 'big': + dtype = dtype.newbyteorder('>') + mask = numpy.frombuffer(buf, dtype, offset=hlen, count=(plen % 4)) + data = numpy.frombuffer(buf, dtype, + offset=pend - (plen % 4), count=(plen % 4)) c = numpy.bitwise_xor(data, mask).tostring() return b + c else: @@ -172,7 +181,7 @@ class 
WebSocketRequestHandler(SimpleHTTPRequestHandler): return header + buf, len(header), 0 @staticmethod - def decode_hybi(buf, base64=False, logger=None): + def decode_hybi(buf, base64=False, logger=None, strict=True): """ Decode HyBi style WebSocket packets. Returns: {'fin' : 0_or_1, @@ -238,6 +247,10 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): f['length']) else: logger.debug("Unmasked frame: %s" % repr(buf)) + + if strict: + raise WebSocketRequestHandler.CClose(1002, "The client sent an unmasked frame.") + f['payload'] = buf[(f['hlen'] + f['masked'] * 4):full_len] if base64 and f['opcode'] in [1, 2]: @@ -346,7 +359,8 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): while buf: frame = self.decode_hybi(buf, base64=self.base64, - logger=self.logger) + logger=self.logger, + strict=self.strict_mode) #self.msg("Received buf: %s, frame: %s", repr(buf), frame) if frame['payload'] == None: @@ -360,6 +374,15 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): closed = {'code': frame['close_code'], 'reason': frame['close_reason']} break + elif self.auto_pong and frame['opcode'] == 0x9: # ping + self.print_traffic("} ping %s\n" % + repr(frame['payload'])) + self.send_pong(frame['payload']) + return [], False + elif frame['opcode'] == 0xA: # pong + self.print_traffic("} pong %s\n" % + repr(frame['payload'])) + return [], False self.print_traffic("}") @@ -388,10 +411,20 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): def send_close(self, code=1000, reason=''): """ Send a WebSocket orderly close frame. """ - msg = pack(">H%ds" % len(reason), code, reason) + msg = pack(">H%ds" % len(reason), code, s2b(reason)) buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False) self.request.send(buf) + def send_pong(self, data=''): + """ Send a WebSocket pong frame. """ + buf, h, t = self.encode_hybi(s2b(data), opcode=0x0A, base64=False) + self.request.send(buf) + + def send_ping(self, data=''): + """ Send a WebSocket ping frame. 
""" + buf, h, t = self.encode_hybi(s2b(data), opcode=0x09, base64=False) + self.request.send(buf) + def do_websocket_handshake(self): h = self.headers @@ -444,9 +477,13 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): """Upgrade a connection to Websocket, if requested. If this succeeds, new_websocket_client() will be called. Otherwise, False is returned. """ + if (self.headers.get('upgrade') and self.headers.get('upgrade').lower() == 'websocket'): + # ensure connection is authorized, and determine the target + self.validate_connection() + if not self.do_websocket_handshake(): return False @@ -519,6 +556,10 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler): """ Do something with a WebSockets client connection. """ raise Exception("WebSocketRequestHandler.new_websocket_client() must be overloaded") + def validate_connection(self): + """ Ensure that the connection is a valid connection, and set the target. """ + pass + def do_HEAD(self): if self.only_upgrade: self.send_error(405, "Method Not Allowed") @@ -567,7 +608,7 @@ class WebSocketServer(object): file_only=False, run_once=False, timeout=0, idle_timeout=0, traffic=False, tcp_keepalive=True, tcp_keepcnt=None, tcp_keepidle=None, - tcp_keepintvl=None): + tcp_keepintvl=None, auto_pong=False, strict_mode=True): # settings self.RequestHandlerClass = RequestHandlerClass @@ -581,6 +622,8 @@ class WebSocketServer(object): self.timeout = timeout self.idle_timeout = idle_timeout self.traffic = traffic + self.file_only = file_only + self.strict_mode = strict_mode self.launch_time = time.time() self.ws_connection = False @@ -592,6 +635,7 @@ class WebSocketServer(object): self.tcp_keepidle = tcp_keepidle self.tcp_keepintvl = tcp_keepintvl + self.auto_pong = auto_pong # Make paths settings absolute self.cert = os.path.abspath(cert) self.key = self.web = self.record = '' @@ -618,7 +662,10 @@ class WebSocketServer(object): self.listen_host, self.listen_port) self.msg(" - Flash security policy server") if 
self.web: - self.msg(" - Web server. Web root: %s", self.web) + if self.file_only: + self.msg(" - Web server (no directory listings). Web root: %s", self.web) + else: + self.msg(" - Web server. Web root: %s", self.web) if ssl: if os.path.exists(self.cert): self.msg(" - SSL/TLS support") @@ -701,6 +748,10 @@ class WebSocketServer(object): @staticmethod def daemonize(keepfd=None, chdir='/'): + + if keepfd is None: + keepfd = [] + os.umask(0) if chdir: os.chdir(chdir) @@ -723,7 +774,7 @@ class WebSocketServer(object): if maxfd == resource.RLIM_INFINITY: maxfd = 256 for fd in reversed(range(maxfd)): try: - if fd != keepfd: + if fd not in keepfd: os.close(fd) except OSError: _, exc, _ = sys.exc_info() @@ -753,7 +804,7 @@ class WebSocketServer(object): """ ready = select.select([sock], [], [], 3)[0] - + if not ready: raise self.EClose("ignoring socket not ready") # Peek, but do not read the data so that we have a opportunity @@ -761,7 +812,7 @@ class WebSocketServer(object): handshake = sock.recv(1024, socket.MSG_PEEK) #self.msg("Handshake [%s]" % handshake) - if handshake == "": + if not handshake: raise self.EClose("ignoring empty handshake") elif handshake.startswith(s2b("")): @@ -844,11 +895,14 @@ class WebSocketServer(object): raise self.Terminate() def multiprocessing_SIGCHLD(self, sig, stack): - self.vmsg('Reaping zombies, active child count is %s', len(multiprocessing.active_children())) + # TODO: figure out a way to actually log this information without + # calling `log` in the signal handlers + multiprocessing.active_children() def fallback_SIGCHLD(self, sig, stack): # Reap zombies when using os.fork() (python 2.4) - self.vmsg("Got SIGCHLD, reaping zombies") + # TODO: figure out a way to actually log this information without + # calling `log` in the signal handlers try: result = os.waitpid(-1, os.WNOHANG) while result[0]: @@ -858,16 +912,18 @@ class WebSocketServer(object): pass def do_SIGINT(self, sig, stack): - self.msg("Got SIGINT, exiting") + # TODO: figure 
out a way to actually log this information without + # calling `log` in the signal handlers self.terminate() def do_SIGTERM(self, sig, stack): - self.msg("Got SIGTERM, exiting") + # TODO: figure out a way to actually log this information without + # calling `log` in the signal handlers self.terminate() def top_new_client(self, startsock, address): """ Do something with a WebSockets client connection. """ - # handler process + # handler process client = None try: try: @@ -890,6 +946,18 @@ class WebSocketServer(object): # Original socket closed by caller client.close() + def get_log_fd(self): + """ + Get file descriptors for the loggers. + They should not be closed when the process is forked. + """ + descriptors = [] + for handler in self.logger.parent.handlers: + if isinstance(handler, logging.FileHandler): + descriptors.append(handler.stream.fileno()) + + return descriptors + def start_server(self): """ Daemonize if requested. Listen for for connections. Run @@ -905,7 +973,9 @@ class WebSocketServer(object): tcp_keepintvl=self.tcp_keepintvl) if self.daemon: - self.daemonize(keepfd=lsock.fileno(), chdir=self.web) + keepfd = self.get_log_fd() + keepfd.append(lsock.fileno()) + self.daemonize(keepfd=keepfd, chdir=self.web) self.started() # Some things need to happen after daemonizing @@ -1009,8 +1079,17 @@ class WebSocketServer(object): except (self.Terminate, SystemExit, KeyboardInterrupt): self.msg("In exit") + # terminate all child processes + if multiprocessing and not self.run_once: + children = multiprocessing.active_children() + + for child in children: + self.msg("Terminating child %s" % child.pid) + child.terminate() + break except Exception: + exc = sys.exc_info()[1] self.msg("handler exception: %s", str(exc)) self.vmsg("exception", exc_info=True) diff --git a/share/websockify/websocketproxy.py b/share/websockify/websockify/websocketproxy.py old mode 100644 new mode 100755 similarity index 72% rename from share/websockify/websocketproxy.py rename to 
share/websockify/websockify/websocketproxy.py index 7b3ec111df..81e2248f21 --- a/share/websockify/websocketproxy.py +++ b/share/websockify/websockify/websocketproxy.py @@ -11,13 +11,14 @@ as taken from http://docs.python.org/dev/library/ssl.html#certificates ''' -import signal, socket, optparse, time, os, sys, subprocess, logging +import signal, socket, optparse, time, os, sys, subprocess, logging, errno try: from socketserver import ForkingMixIn except: from SocketServer import ForkingMixIn try: from http.server import HTTPServer except: from BaseHTTPServer import HTTPServer -from select import select -import websocket +import select +from websockify import websocket +from websockify import auth_plugins as auth try: from urllib.parse import parse_qs, urlparse except: @@ -38,14 +39,33 @@ Traffic Legend: <. - Client send partial """ + def send_auth_error(self, ex): + self.send_response(ex.code, ex.msg) + self.send_header('Content-Type', 'text/html') + for name, val in ex.headers.items(): + self.send_header(name, val) + + self.end_headers() + + def validate_connection(self): + if self.server.token_plugin: + (self.server.target_host, self.server.target_port) = self.get_target(self.server.token_plugin, self.path) + + if self.server.auth_plugin: + try: + self.server.auth_plugin.authenticate( + headers=self.headers, target_host=self.server.target_host, + target_port=self.server.target_port) + except auth.AuthenticationError: + ex = sys.exc_info()[1] + self.send_auth_error(ex) + raise + def new_websocket_client(self): """ Called after a new WebSocket connection has been established. 
""" - # Checks if we receive a token, and look - # for a valid target for it then - if self.server.target_cfg: - (self.server.target_host, self.server.target_port) = self.get_target(self.server.target_cfg, self.path) + # Checking for a token is done in validate_connection() # Connect to the target if self.server.wrap_cmd: @@ -73,15 +93,15 @@ Traffic Legend: if tsock: tsock.shutdown(socket.SHUT_RDWR) tsock.close() - if self.verbose: + if self.verbose: self.log_message("%s:%s: Closed target", self.server.target_host, self.server.target_port) raise - def get_target(self, target_cfg, path): + def get_target(self, target_plugin, path): """ - Parses the path, extracts a token, and looks for a valid - target for that token in the configuration file(s). Sets + Parses the path, extracts a token, and looks up a target + for that token using the token plugin. Sets target_host and target_port if successful """ # The files in targets contain the lines @@ -90,32 +110,17 @@ Traffic Legend: # Extract the token parameter from url args = parse_qs(urlparse(path)[4]) # 4 is the query from url - if not args.has_key('token') or not len(args['token']): - raise self.EClose("Token not present") + if not 'token' in args or not len(args['token']): + raise self.server.EClose("Token not present") token = args['token'][0].rstrip('\n') - # target_cfg can be a single config file or directory of - # config files - if os.path.isdir(target_cfg): - cfg_files = [os.path.join(target_cfg, f) - for f in os.listdir(target_cfg)] + result_pair = target_plugin.lookup(token) + + if result_pair is not None: + return result_pair else: - cfg_files = [target_cfg] - - targets = {} - for f in cfg_files: - for line in [l.strip() for l in file(f).readlines()]: - if line and not line.startswith('#'): - ttoken, target = line.split(': ') - targets[ttoken] = target.strip() - - self.vmsg("Target config: %s" % repr(targets)) - - if targets.has_key(token): - return targets[token].split(':') - else: - raise 
self.EClose("Token '%s' not found" % token) + raise self.server.EClose("Token '%s' not found" % token) def do_proxy(self, target): """ @@ -126,12 +131,37 @@ Traffic Legend: tqueue = [] rlist = [self.request, target] + if self.server.heartbeat: + now = time.time() + self.heartbeat = now + self.server.heartbeat + else: + self.heartbeat = None + while True: wlist = [] + if self.heartbeat is not None: + now = time.time() + if now > self.heartbeat: + self.heartbeat = now + self.server.heartbeat + self.send_ping() + if tqueue: wlist.append(target) if cqueue or c_pend: wlist.append(self.request) - ins, outs, excepts = select(rlist, wlist, [], 1) + try: + ins, outs, excepts = select.select(rlist, wlist, [], 1) + except (select.error, OSError): + exc = sys.exc_info()[1] + if hasattr(exc, 'errno'): + err = exc.errno + else: + err = exc[0] + + if err != errno.EINTR: + raise + else: + continue + if excepts: raise Exception("Socket exception") if self.request in outs: @@ -147,7 +177,7 @@ Traffic Legend: if closed: # TODO: What about blocking on client socket? 
- if self.verbose: + if self.verbose: self.log_message("%s:%s: Client closed connection", self.server.target_host, self.server.target_port) raise self.CClose(closed['code'], closed['reason']) @@ -195,7 +225,11 @@ class WebSocketProxy(websocket.WebSocketServer): self.wrap_mode = kwargs.pop('wrap_mode', None) self.unix_target = kwargs.pop('unix_target', None) self.ssl_target = kwargs.pop('ssl_target', None) - self.target_cfg = kwargs.pop('target_cfg', None) + self.heartbeat = kwargs.pop('heartbeat', None) + + self.token_plugin = kwargs.pop('token_plugin', None) + self.auth_plugin = kwargs.pop('auth_plugin', None) + # Last 3 timestamps command was run self.wrap_times = [0, 0, 0] @@ -251,9 +285,9 @@ class WebSocketProxy(websocket.WebSocketServer): else: dst_string = "%s:%s" % (self.target_host, self.target_port) - if self.target_cfg: - msg = " - proxying from %s:%s to targets in %s" % ( - self.listen_host, self.listen_port, self.target_cfg) + if self.token_plugin: + msg = " - proxying from %s:%s to targets generated by %s" % ( + self.listen_host, self.listen_port, type(self.token_plugin).__name__) else: msg = " - proxying from %s:%s to %s" % ( self.listen_host, self.listen_port, dst_string) @@ -352,20 +386,69 @@ def websockify_init(): parser.add_option("--prefer-ipv6", "-6", action="store_true", dest="source_is_ipv6", help="prefer IPv6 when resolving source_addr") + parser.add_option("--libserver", action="store_true", + help="use Python library SocketServer engine") parser.add_option("--target-config", metavar="FILE", dest="target_cfg", help="Configuration file containing valid targets " "in the form 'token: host:port' or, alternatively, a " - "directory containing configuration files of this form") - parser.add_option("--libserver", action="store_true", - help="use Python library SocketServer engine") + "directory containing configuration files of this form " + "(DEPRECATED: use `--token-plugin TokenFile --token-source " + " path/to/token/file` instead)") + 
parser.add_option("--token-plugin", default=None, metavar="PLUGIN", + help="use the given Python class to process tokens " + "into host:port pairs") + parser.add_option("--token-source", default=None, metavar="ARG", + help="an argument to be passed to the token plugin" + "on instantiation") + parser.add_option("--auth-plugin", default=None, metavar="PLUGIN", + help="use the given Python class to determine if " + "a connection is allowed") + parser.add_option("--auth-source", default=None, metavar="ARG", + help="an argument to be passed to the auth plugin" + "on instantiation") + parser.add_option("--auto-pong", action="store_true", + help="Automatically respond to ping frames with a pong") + parser.add_option("--heartbeat", type=int, default=0, + help="send a ping to the client every HEARTBEAT seconds") + parser.add_option("--log-file", metavar="FILE", + dest="log_file", + help="File where logs will be saved") + + (opts, args) = parser.parse_args() + if opts.log_file: + opts.log_file = os.path.abspath(opts.log_file) + handler = logging.FileHandler(opts.log_file) + handler.setLevel(logging.DEBUG) + handler.setFormatter(logging.Formatter("%(message)s")) + logging.getLogger(WebSocketProxy.log_prefix).addHandler(handler) + + del opts.log_file + if opts.verbose: logging.getLogger(WebSocketProxy.log_prefix).setLevel(logging.DEBUG) + if opts.token_source and not opts.token_plugin: + parser.error("You must use --token-plugin to use --token-source") + + if opts.auth_source and not opts.auth_plugin: + parser.error("You must use --auth-plugin to use --auth-source") + + + # Transform to absolute path as daemon may chdir + if opts.target_cfg: + opts.target_cfg = os.path.abspath(opts.target_cfg) + + if opts.target_cfg: + opts.token_plugin = 'TokenFile' + opts.token_source = opts.target_cfg + + del opts.target_cfg + # Sanity checks - if len(args) < 2 and not (opts.target_cfg or opts.unix_target): + if len(args) < 2 and not (opts.token_plugin or opts.unix_target): 
parser.error("Too few arguments") if sys.argv.count('--'): opts.wrap_cmd = args[1:] @@ -390,7 +473,7 @@ def websockify_init(): try: opts.listen_port = int(opts.listen_port) except: parser.error("Error parsing listen port") - if opts.wrap_cmd or opts.unix_target or opts.target_cfg: + if opts.wrap_cmd or opts.unix_target or opts.token_plugin: opts.target_host = None opts.target_port = None else: @@ -402,9 +485,32 @@ def websockify_init(): try: opts.target_port = int(opts.target_port) except: parser.error("Error parsing target port") - # Transform to absolute path as daemon may chdir - if opts.target_cfg: - opts.target_cfg = os.path.abspath(opts.target_cfg) + if opts.token_plugin is not None: + if '.' not in opts.token_plugin: + opts.token_plugin = ( + 'websockify.token_plugins.%s' % opts.token_plugin) + + token_plugin_module, token_plugin_cls = opts.token_plugin.rsplit('.', 1) + + __import__(token_plugin_module) + token_plugin_cls = getattr(sys.modules[token_plugin_module], token_plugin_cls) + + opts.token_plugin = token_plugin_cls(opts.token_source) + + del opts.token_source + + if opts.auth_plugin is not None: + if '.' 
not in opts.auth_plugin: + opts.auth_plugin = 'websockify.auth_plugins.%s' % opts.auth_plugin + + auth_plugin_module, auth_plugin_cls = opts.auth_plugin.rsplit('.', 1) + + __import__(auth_plugin_module) + auth_plugin_cls = getattr(sys.modules[auth_plugin_module], auth_plugin_cls) + + opts.auth_plugin = auth_plugin_cls(opts.auth_source) + + del opts.auth_source # Create and start the WebSockets proxy libserver = opts.libserver @@ -433,9 +539,13 @@ class LibProxyServer(ForkingMixIn, HTTPServer): self.wrap_mode = kwargs.pop('wrap_mode', None) self.unix_target = kwargs.pop('unix_target', None) self.ssl_target = kwargs.pop('ssl_target', None) - self.target_cfg = kwargs.pop('target_cfg', None) + self.token_plugin = kwargs.pop('token_plugin', None) + self.auth_plugin = kwargs.pop('auth_plugin', None) + self.heartbeat = kwargs.pop('heartbeat', None) + + self.token_plugin = None + self.auth_plugin = None self.daemon = False - self.target_cfg = None # Server configuration listen_host = kwargs.pop('listen_host', '') @@ -456,8 +566,8 @@ class LibProxyServer(ForkingMixIn, HTTPServer): if web: os.chdir(web) - - HTTPServer.__init__(self, (listen_host, listen_port), + + HTTPServer.__init__(self, (listen_host, listen_port), RequestHandlerClass) diff --git a/src/authm_mad/remotes/server_cipher/server_cipher_auth.rb b/src/authm_mad/remotes/server_cipher/server_cipher_auth.rb index ff07f453c0..c3dc0c78c1 100644 --- a/src/authm_mad/remotes/server_cipher/server_cipher_auth.rb +++ b/src/authm_mad/remotes/server_cipher/server_cipher_auth.rb @@ -108,12 +108,15 @@ class OpenNebula::ServerCipherAuth end # auth method for auth_mad - def authenticate(srv_user,srv_pass, signed_text) + def authenticate(srv_user, srv_pass, signed_text) begin # truncate token to 32-bytes for Ruby >= 2.4 @key = srv_pass[0..31] - s_user, t_user, expires = decrypt(signed_text).split(':') + token_array = decrypt(signed_text).split(':') + + s_user = token_array[0] + expires = token_array[-1] return "User name 
missmatch" if s_user != srv_user diff --git a/src/authm_mad/remotes/server_x509/server_x509_auth.rb b/src/authm_mad/remotes/server_x509/server_x509_auth.rb index 66e5048d19..43763b6843 100644 --- a/src/authm_mad/remotes/server_x509/server_x509_auth.rb +++ b/src/authm_mad/remotes/server_x509/server_x509_auth.rb @@ -87,7 +87,10 @@ class OpenNebula::ServerX509Auth < OpenNebula::X509Auth # auth method for auth_mad def authenticate(server_user, server_pass, signed_text) begin - s_user, t_user, expires = decrypt(signed_text).split(':') + token_array = decrypt(signed_text).split(':') + + s_user = token_array[0] + expires = token_array[-1] return "Server password missmatch" if server_pass != password diff --git a/src/cli/cli_helper.rb b/src/cli/cli_helper.rb index 212c2cb989..213442fbdd 100644 --- a/src/cli/cli_helper.rb +++ b/src/cli/cli_helper.rb @@ -303,7 +303,7 @@ module CLIHelper begin if options[:csv] - puts CSV.generate_line(@default_columns) + puts CSV.generate_line(@default_columns) if !options[:noheader] res_data.each {|l| puts CSV.generate_line(l) } else res_data.each{|l| diff --git a/src/cli/one_helper.rb b/src/cli/one_helper.rb index a0d1771cdd..42faae4134 100644 --- a/src/cli/one_helper.rb +++ b/src/cli/one_helper.rb @@ -516,11 +516,13 @@ EOT # List pool functions #----------------------------------------------------------------------- def start_pager - pager = ENV['ONE_PAGER'] || 'less' + pager = ENV['ONE_PAGER'] || 'more' # Start pager, defaults to less p_r, p_w = IO.pipe + Signal.trap('PIPE', 'SIG_IGN') + lpid = fork do $stdin.reopen(p_r) @@ -531,7 +533,7 @@ EOT exec([pager, pager]) end - + # Send listing to pager pipe $stdout.close $stdout = p_w.dup @@ -547,6 +549,9 @@ EOT begin Process.wait(lpid) + rescue Interrupt + Process.kill("TERM", lpid) + Process.wait(lpid) rescue Errno::ECHILD end end @@ -558,12 +563,11 @@ EOT elements = 0 page = "" - pool.each {|e| - elements += 1 + pool.each {|e| + elements += 1 page << e.to_xml(true) << "\n" } else - pname = 
pool.pool_name ename = pool.element_name @@ -585,8 +589,8 @@ EOT # output #----------------------------------------------------------------------- def list_pool_table(table, pool, options, filter_flag) - if $stdout.isatty and (!options.key?:no_pager) - size = $stdout.winsize[0] - 1 + if $stdout.isatty and (!options.key?:no_pager) + size = $stdout.winsize[0] - 1 # ----------- First page, check if pager is needed ------------- rc = pool.get_page(size, 0) @@ -662,8 +666,8 @@ EOT # List pool in XML format, pagination is used in interactive output #----------------------------------------------------------------------- def list_pool_xml(pool, options, filter_flag) - if $stdout.isatty - size = $stdout.winsize[0] - 1 + if $stdout.isatty + size = $stdout.winsize[0] - 1 # ----------- First page, check if pager is needed ------------- rc = pool.get_page(size, 0) diff --git a/src/cli/one_helper/onehost_helper.rb b/src/cli/one_helper/onehost_helper.rb index dcdf41fcb0..889b31175c 100644 --- a/src/cli/one_helper/onehost_helper.rb +++ b/src/cli/one_helper/onehost_helper.rb @@ -18,8 +18,10 @@ require 'one_helper' require 'one_helper/onevm_helper' require 'rubygems' +# implements onehost command class OneHostHelper < OpenNebulaHelper::OneHelper - TEMPLATE_XPATH = '//HOST/TEMPLATE' + + TEMPLATE_XPATH = '//HOST/TEMPLATE' HYBRID = { :ec2 => { :help => <<-EOT.unindent, @@ -41,7 +43,7 @@ class OneHostHelper < OpenNebulaHelper::OneHelper # See your ec2_driver.conf for more information # #----------------------------------------------------------------------- - EOT + EOT }, :az => { :help => <<-EOT.unindent, @@ -67,110 +69,112 @@ class OneHostHelper < OpenNebulaHelper::OneHelper # AZ_ENDPOINT = # #----------------------------------------------------------------------- - EOT + EOT } } - - VERSION_XPATH = "#{TEMPLATE_XPATH}/VERSION" + VERSION_XPATH = "#{TEMPLATE_XPATH}/VERSION" def self.rname - "HOST" + 'HOST' end def self.conf_file - "onehost.yaml" + 'onehost.yaml' end def 
self.state_to_str(id) id = id.to_i state_str = Host::HOST_STATES[id] - return Host::SHORT_HOST_STATES[state_str] + Host::SHORT_HOST_STATES[state_str] end def format_pool(options) config_file = self.class.table_conf table = CLIHelper::ShowTable.new(config_file, self) do - column :ID, "ONE identifier for Host", :size=>4 do |d| - d["ID"] + column :ID, 'ONE identifier for Host', :size => 4 do |d| + d['ID'] end - column :NAME, "Name of the Host", :left, :size=>15 do |d| - d["NAME"] + column :NAME, 'Name of the Host', :left, :size => 15 do |d| + d['NAME'] end - column :CLUSTER, "Name of the Cluster", :left, :size=>9 do |d| - OpenNebulaHelper.cluster_str(d["CLUSTER"]) + column :CLUSTER, 'Name of the Cluster', :left, :size => 9 do |d| + OpenNebulaHelper.cluster_str(d['CLUSTER']) end - column :TVM, "Total Virtual Machines allocated to the Host", :size=>3 do |d| - d["HOST_SHARE"]["RUNNING_VMS"] || 0 + column :TVM, 'Total Virtual Machines allocated to the Host', + :size => 3 do |d| + d['HOST_SHARE']['RUNNING_VMS'] || 0 end - column :ZVM, "Number of Virtual Machine zombies", :size=>3 do |d| - d["TEMPLATE"]["TOTAL_ZOMBIES"] || 0 + column :ZVM, 'Number of Virtual Machine zombies', :size => 3 do |d| + d['TEMPLATE']['TOTAL_ZOMBIES'] || 0 end - column :TCPU, "Total CPU percentage", :size=>4 do |d| - d["HOST_SHARE"]["MAX_CPU"] || 0 + column :TCPU, 'Total CPU percentage', :size => 4 do |d| + d['HOST_SHARE']['MAX_CPU'] || 0 end - column :FCPU, "Free CPU percentage", :size=>4 do |d| - d["HOST_SHARE"]["MAX_CPU"].to_i- - d["HOST_SHARE"]["USED_CPU"].to_i rescue "-" + column :FCPU, 'Free CPU percentage', :size => 4 do |d| + d['HOST_SHARE']['MAX_CPU'].to_i - + d['HOST_SHARE']['USED_CPU'].to_i rescue '-' end - column :ACPU, "Available cpu percentage (not reserved by VMs)", - :size=>4 do |d| - max_cpu=d["HOST_SHARE"]["MAX_CPU"].to_i - max_cpu=100 if max_cpu==0 - max_cpu-d["HOST_SHARE"]["CPU_USAGE"].to_i + column :ACPU, 'Available cpu percentage (not reserved by VMs)', + :size => 4 do |d| + 
max_cpu = d['HOST_SHARE']['MAX_CPU'].to_i + max_cpu = 100 if max_cpu.zero? + max_cpu - d['HOST_SHARE']['CPU_USAGE'].to_i end - column :TMEM, "Total Memory", :size=>7 do |d| + column :TMEM, 'Total Memory', :size => 7 do |d| OpenNebulaHelper.unit_to_str( - d["HOST_SHARE"]["MAX_MEM"].to_i, - options) rescue "-" + d['HOST_SHARE']['MAX_MEM'].to_i, + options + ) rescue '-' end - column :FMEM, "Free Memory", :size=>7 do |d| + column :FMEM, 'Free Memory', :size => 7 do |d| OpenNebulaHelper.unit_to_str( - d["HOST_SHARE"]["FREE_MEM"].to_i, - options) rescue "-" + d['HOST_SHARE']['FREE_MEM'].to_i, + options + ) rescue '-' end - column :AMEM, "Available Memory (not reserved by VMs)", - :size=>7 do |d| - acpu=d["HOST_SHARE"]["MAX_MEM"].to_i- - d["HOST_SHARE"]["MEM_USAGE"].to_i - OpenNebulaHelper.unit_to_str(acpu,options) + column :AMEM, 'Available Memory (not reserved by VMs)', + :size => 7 do |d| + acpu = d['HOST_SHARE']['MAX_MEM'].to_i - + d['HOST_SHARE']['MEM_USAGE'].to_i + OpenNebulaHelper.unit_to_str(acpu, options) end - column :REAL_CPU, "Real CPU", :size=>18 do |d| - max_cpu = d["HOST_SHARE"]["MAX_CPU"].to_i + column :REAL_CPU, 'Real CPU', :size => 18 do |d| + max_cpu = d['HOST_SHARE']['MAX_CPU'].to_i if max_cpu != 0 - used_cpu = d["HOST_SHARE"]["USED_CPU"].to_i - ratio = (used_cpu*100) / max_cpu + used_cpu = d['HOST_SHARE']['USED_CPU'].to_i + ratio = (used_cpu * 100) / max_cpu "#{used_cpu} / #{max_cpu} (#{ratio}%)" else '-' end end - column :ALLOCATED_CPU, "Allocated CPU)", :size=>18 do |d| - max_cpu = d["HOST_SHARE"]["MAX_CPU"].to_i - cpu_usage = d["HOST_SHARE"]["CPU_USAGE"].to_i + column :ALLOCATED_CPU, 'Allocated CPU)', :size => 18 do |d| + max_cpu = d['HOST_SHARE']['MAX_CPU'].to_i + cpu_usage = d['HOST_SHARE']['CPU_USAGE'].to_i - if max_cpu == 0 && cpu_usage == 0 + if max_cpu.zero? && cpu_usage.zero? 
'-' else - cpu_usage = d["HOST_SHARE"]["CPU_USAGE"].to_i + cpu_usage = d['HOST_SHARE']['CPU_USAGE'].to_i if max_cpu != 0 - ratio = (cpu_usage*100) / max_cpu + ratio = (cpu_usage * 100) / max_cpu "#{cpu_usage} / #{max_cpu} (#{ratio}%)" else "#{cpu_usage} / -" @@ -178,43 +182,46 @@ class OneHostHelper < OpenNebulaHelper::OneHelper end end - column :REAL_MEM, "Real MEM", :size=>18 do |d| - max_mem = d["HOST_SHARE"]["MAX_MEM"].to_i + column :REAL_MEM, 'Real MEM', :size => 18 do |d| + max_mem = d['HOST_SHARE']['MAX_MEM'].to_i if max_mem != 0 - used_mem = d["HOST_SHARE"]["USED_MEM"].to_i - ratio = (used_mem*100) / max_mem - "#{OpenNebulaHelper.unit_to_str(used_mem,options)} / #{OpenNebulaHelper.unit_to_str(max_mem,options)} (#{ratio}%)" + used_mem = d['HOST_SHARE']['USED_MEM'].to_i + ratio = (used_mem * 100) / max_mem + "#{OpenNebulaHelper.unit_to_str(used_mem, options)} / "\ + "#{OpenNebulaHelper.unit_to_str(max_mem, options)} "\ + "(#{ratio}%)" else '-' end end - column :ALLOCATED_MEM, "Allocated MEM", :size=>18 do |d| - max_mem = d["HOST_SHARE"]["MAX_MEM"].to_i - mem_usage = d["HOST_SHARE"]["MEM_USAGE"].to_i + column :ALLOCATED_MEM, 'Allocated MEM', :size => 18 do |d| + max_mem = d['HOST_SHARE']['MAX_MEM'].to_i + mem_usage = d['HOST_SHARE']['MEM_USAGE'].to_i - if max_mem == 0 && mem_usage == 0 + if max_mem.zero? && mem_usage.zero? 
'-' + elsif max_mem != 0 + ratio = (mem_usage * 100) / max_mem + "#{OpenNebulaHelper.unit_to_str(mem_usage, options)} / "\ + "#{OpenNebulaHelper.unit_to_str(max_mem, options)} "\ + "(#{ratio}%)" else - if max_mem != 0 - ratio = (mem_usage*100) / max_mem - "#{OpenNebulaHelper.unit_to_str(mem_usage,options)} / #{OpenNebulaHelper.unit_to_str(max_mem,options)} (#{ratio}%)" - else - "#{OpenNebulaHelper.unit_to_str(mem_usage,options)} / -" - end + "#{OpenNebulaHelper.unit_to_str(mem_usage, options)} / -" end end - column :PROVIDER, "Host provider", :left, :size=>6 do |d| + column :PROVIDER, "Host provider", :left, :size => 6 do |d| d['TEMPLATE']['PM_MAD'].nil? ? '-' : d['TEMPLATE']['PM_MAD'] end - column :STAT, "Host status", :left, :size=>6 do |d| + column :STAT, "Host status", :left, :size => 6 do |d| OneHostHelper.state_to_str(d["STATE"]) end - default :ID, :NAME, :CLUSTER, :TVM, :ALLOCATED_CPU, :ALLOCATED_MEM, :STAT + default :ID, :NAME, :CLUSTER, :TVM, + :ALLOCATED_CPU, :ALLOCATED_MEM, :STAT end table @@ -222,34 +229,37 @@ class OneHostHelper < OpenNebulaHelper::OneHelper def set_hybrid(type, path) k = type.to_sym - if HYBRID.key?(k) - str = path.nil? ? OpenNebulaHelper.editor_input(HYBRID[k][:help]): File.read(path) - end + + return unless HYBRID.key?(k) + + return OpenNebulaHelper.editor_input(HYBRID[k][:help]) if path.nil? + + File.read(path) end NUM_THREADS = 15 def sync(host_ids, options) - if `id -u`.to_i == 0 || `id -G`.split.collect{|e| e.to_i}.include?(0) + if Process.uid.zero? || Process.gid.zero? STDERR.puts("Cannot run 'onehost sync' as root") - exit -1 + exit(-1) end begin - current_version = File.read(REMOTES_LOCATION+'/VERSION').strip - rescue + current_version = File.read(REMOTES_LOCATION + '/VERSION').strip + rescue StandardError STDERR.puts("Could not read #{REMOTES_LOCATION}/VERSION") exit(-1) end if current_version.empty? 
- STDERR.puts "Remotes version can not be empty" + STDERR.puts 'Remotes version can not be empty' exit(-1) end begin current_version = Gem::Version.new(current_version) - rescue - STDERR.puts "VERSION file is malformed, use semantic versioning." + rescue StandardError + STDERR.puts 'VERSION file is malformed, use semantic versioning.' end cluster_id = options[:cluster] @@ -263,13 +273,13 @@ class OneHostHelper < OpenNebulaHelper::OneHelper # Verify the existence of REMOTES_LOCATION if !File.directory? REMOTES_LOCATION - error_msg = "'#{REMOTES_LOCATION}' does not exist. " << - "This command must be run in the frontend." - return -1,error_msg + error_msg = "'#{REMOTES_LOCATION}' does not exist. " \ + 'This command must be run in the frontend.' + return -1, error_msg end # Touch the update file - FileUtils.touch(File.join(REMOTES_LOCATION,'.update')) + FileUtils.touch(File.join(REMOTES_LOCATION, '.update')) # Get the Host pool filter_flag ||= OpenNebula::Pool::INFO_ALL @@ -280,18 +290,16 @@ class OneHostHelper < OpenNebulaHelper::OneHelper return -1, rc.message if OpenNebula.is_error?(rc) # Assign hosts to threads - i = 0 - queue = Array.new + queue = [] pool.each do |host| if host_ids - next if !host_ids.include?(host['ID'].to_i) + next unless host_ids.include?(host['ID'].to_i) elsif cluster_id next if host['CLUSTER_ID'].to_i != cluster_id end vm_mad = host['VM_MAD'].downcase - remote_remotes = host['TEMPLATE/REMOTE_REMOTES'] state = host['STATE'] # Skip this host from remote syncing if it's a PUBLIC_CLOUD host @@ -301,20 +309,22 @@ class OneHostHelper < OpenNebulaHelper::OneHelper next if Host::HOST_STATES[state.to_i] == 'OFFLINE' # Skip this host if it is a vCenter cluster - next if vm_mad == "vcenter" + next if vm_mad == 'vcenter' - host_version=host['TEMPLATE/VERSION'] + host_version = host['TEMPLATE/VERSION'] begin host_version = Gem::Version.new(host_version) - rescue + rescue StandardError + nil end if !options[:force] begin next if host_version && 
host_version >= current_version - rescue - STDERR.puts "Error comparing versions for host #{host['NAME']}." + rescue StandardError + STDERR.puts 'Error comparing versions '\ + " for host #{host['NAME']}." end end @@ -324,45 +334,45 @@ class OneHostHelper < OpenNebulaHelper::OneHelper end # Run the jobs in threads - host_errors = Array.new + host_errors = [] queue_lock = Mutex.new error_lock = Mutex.new total = queue.length - if total==0 - puts "No hosts are going to be updated." + if total.zero? + puts 'No hosts are going to be updated.' exit(0) end - ts = (1..NUM_THREADS).map do |t| + ts = (1..NUM_THREADS).map do |_t| Thread.new do - while true do + loop do host = nil size = 0 queue_lock.synchronize do - host=queue.shift - size=queue.length + host = queue.shift + size = queue.length end - break if !host + break unless host - print_update_info(total-size, total, host['NAME']) + print_update_info(total - size, total, host['NAME']) if options[:rsync] - sync_cmd = "rsync -Laz --delete #{REMOTES_LOCATION}" << - " #{host['NAME']}:#{remote_dir}" + sync_cmd = "rsync -Laz --delete #{REMOTES_LOCATION}" \ + " #{host['NAME']}:#{remote_dir}" else - sync_cmd = "scp -rp #{REMOTES_LOCATION}/. " << - "#{host['NAME']}:#{remote_dir} 2> /dev/null" + sync_cmd = "scp -rp #{REMOTES_LOCATION}/. " \ + "#{host['NAME']}:#{remote_dir} 2> /dev/null" end `#{sync_cmd} 2>/dev/null` - if !$?.success? - error_lock.synchronize { + if !$CHILD_STATUS.success? + error_lock.synchronize do host_errors << host['NAME'] - } + end else update_version(host, current_version) end @@ -371,16 +381,108 @@ class OneHostHelper < OpenNebulaHelper::OneHelper end # Wait for threads to finish - ts.each{|t| t.join} + ts.each {|t| t.join } puts if host_errors.empty? - puts "All hosts updated successfully." + puts 'All hosts updated successfully.' 
0 else - STDERR.puts "Failed to update the following hosts:" - host_errors.each{|h| STDERR.puts "* #{h}"} + STDERR.puts 'Failed to update the following hosts:' + host_errors.each {|h| STDERR.puts "* #{h}" } + -1 + end + end + + def forceupdate(host_ids, options) + if Process.uid.zero? || Process.gid.zero? + STDERR.puts("Cannot run 'onehost forceupdate' as root") + exit(-1) + end + + cluster_id = options[:cluster] + + # Get the Host pool + filter_flag ||= OpenNebula::Pool::INFO_ALL + + pool = factory_pool(filter_flag) + + rc = pool.info + return -1, rc.message if OpenNebula.is_error?(rc) + + # Assign hosts to threads + queue = [] + + pool.each do |host| + if host_ids + next unless host_ids.include?(host['ID'].to_i) + elsif cluster_id + next if host['CLUSTER_ID'].to_i != cluster_id + end + + vm_mad = host['VM_MAD'].downcase + state = host['STATE'] + + # Skip this host from remote syncing if it's a PUBLIC_CLOUD host + next if host['TEMPLATE/PUBLIC_CLOUD'] == 'YES' + + # Skip this host from remote syncing if it's OFFLINE + next if Host::HOST_STATES[state.to_i] == 'OFFLINE' + + # Skip this host if it is a vCenter cluster + next if vm_mad == 'vcenter' + + queue << host + end + + # Run the jobs in threads + host_errors = [] + queue_lock = Mutex.new + error_lock = Mutex.new + total = queue.length + + if total.zero? + puts 'No hosts are going to be forced.' + exit(0) + end + + ts = (1..NUM_THREADS).map do |_t| + Thread.new do + loop do + host = nil + size = 0 + + queue_lock.synchronize do + host = queue.shift + size = queue.length + end + + break unless host + + cmd = 'cat /tmp/one-collectd-client.pid | xargs kill -HUP' + system("ssh #{host['NAME']} \"#{cmd}\" 2>/dev/null") + + if !$CHILD_STATUS.success? + error_lock.synchronize do + host_errors << host['NAME'] + end + else + puts "#{host['NAME']} monitoring forced" + end + end + end + end + + # Wait for threads to finish + ts.each {|t| t.join } + + if host_errors.empty? + puts 'All hosts updated successfully.' 
+ 0 + else + STDERR.puts 'Failed to update the following hosts:' + host_errors.each {|h| STDERR.puts "* #{h}" } -1 end end @@ -388,22 +490,22 @@ class OneHostHelper < OpenNebulaHelper::OneHelper private def print_update_info(current, total, host) - bar_length=40 + bar_length = 40 - percentage=current.to_f/total.to_f - done=(percentage*bar_length).floor + percentage = current.to_f / total.to_f + done = (percentage * bar_length).floor - bar="[" - bar+="="*done - bar+="-"*(bar_length-done) - bar+="]" + bar = '[' + bar += '=' * done + bar += '-' * (bar_length - done) + bar += ']' - info="#{current}/#{total}" + info = "#{current}/#{total}" - str="#{bar} #{info} " - name=host[0..(79-str.length)] - str=str+name - str=str+" "*(80-str.length) + str = "#{bar} #{info} " + name = host[0..(79 - str.length)] + str += name + str += ' ' * (80 - str.length) print "#{str}\r" STDOUT.flush @@ -416,54 +518,65 @@ class OneHostHelper < OpenNebulaHelper::OneHelper host.add_element(TEMPLATE_XPATH, 'VERSION' => version) - template=host.template_str + template = host.template_str host.update(template) end - def factory(id=nil) + def factory(id = nil) if id OpenNebula::Host.new_with_id(id, @client) else - xml=OpenNebula::Host.build_xml + xml = OpenNebula::Host.build_xml OpenNebula::Host.new(xml, @client) end end - def factory_pool(user_flag=-2) - #TBD OpenNebula::HostPool.new(@client, user_flag) + def factory_pool(_user_flag = -2) + # TBD OpenNebula::HostPool.new(@client, user_flag) OpenNebula::HostPool.new(@client) end - def format_resource(host, options = {}) - str = "%-22s: %-20s" - str_h1 = "%-80s" + def format_resource(host, _options = {}) + str = '%-22s: %-20s' + str_h1 = '%-80s' CLIHelper.print_header( - str_h1 % "HOST #{host.id.to_s} INFORMATION", true) + str_h1 % "HOST #{host.id} INFORMATION", true + ) - puts str % ["ID", host.id.to_s] - puts str % ["NAME", host.name] - puts str % ["CLUSTER", OpenNebulaHelper.cluster_str(host['CLUSTER'])] - puts str % ["STATE", host.state_str] - puts 
str % ["IM_MAD", host['IM_MAD']] - puts str % ["VM_MAD", host['VM_MAD']] - puts str % ["LAST MONITORING TIME", OpenNebulaHelper.time_to_str(host['LAST_MON_TIME'])] + puts format(str, 'ID', host.id.to_s) + puts format(str, 'NAME', host.name) + puts format(str, 'CLUSTER', + OpenNebulaHelper.cluster_str(host['CLUSTER'])) + puts format(str, 'STATE', host.state_str) + puts format(str, 'IM_MAD', host['IM_MAD']) + puts format(str, 'VM_MAD', host['VM_MAD']) + puts format(str, 'LAST MONITORING TIME', + OpenNebulaHelper.time_to_str(host['LAST_MON_TIME'])) puts - CLIHelper.print_header(str_h1 % "HOST SHARES", false) - puts str % ["RUNNING VMS", host['HOST_SHARE/RUNNING_VMS']] + CLIHelper.print_header(str_h1 % 'HOST SHARES', false) + puts format(str, 'RUNNING VMS', host['HOST_SHARE/RUNNING_VMS']) - CLIHelper.print_header(str_h1 % "MEMORY", false) - puts str % [" TOTAL", OpenNebulaHelper.unit_to_str(host['HOST_SHARE/TOTAL_MEM'].to_i, {})] - puts str % [" TOTAL +/- RESERVED", OpenNebulaHelper.unit_to_str(host['HOST_SHARE/MAX_MEM'].to_i, {})] - puts str % [" USED (REAL)", OpenNebulaHelper.unit_to_str(host['HOST_SHARE/USED_MEM'].to_i, {})] - puts str % [" USED (ALLOCATED)", OpenNebulaHelper.unit_to_str(host['HOST_SHARE/MEM_USAGE'].to_i, {})] + CLIHelper.print_header(str_h1 % 'MEMORY', false) + puts format(str, ' TOTAL', + OpenNebulaHelper.unit_to_str(host['HOST_SHARE/TOTAL_MEM'] + .to_i, {})) + puts format(str, ' TOTAL +/- RESERVED', + OpenNebulaHelper.unit_to_str(host['HOST_SHARE/MAX_MEM'] + .to_i, {})) + puts format(str, ' USED (REAL)', + OpenNebulaHelper.unit_to_str(host['HOST_SHARE/USED_MEM'] + .to_i, {})) + puts format(str, ' USED (ALLOCATED)', + OpenNebulaHelper.unit_to_str(host['HOST_SHARE/MEM_USAGE'] + .to_i, {})) - CLIHelper.print_header(str_h1 % "CPU", false) - puts str % [" TOTAL", host['HOST_SHARE/TOTAL_CPU']] - puts str % [" TOTAL +/- RESERVED", host['HOST_SHARE/MAX_CPU']] - puts str % [" USED (REAL)", host['HOST_SHARE/USED_CPU']] - puts str % [" USED (ALLOCATED)", 
host['HOST_SHARE/CPU_USAGE']] + CLIHelper.print_header(str_h1 % 'CPU', false) + puts format(str, ' TOTAL', host['HOST_SHARE/TOTAL_CPU']) + puts format(str, ' TOTAL +/- RESERVED', host['HOST_SHARE/MAX_CPU']) + puts format(str, ' USED (REAL)', host['HOST_SHARE/USED_CPU']) + puts format(str, ' USED (ALLOCATED)', host['HOST_SHARE/CPU_USAGE']) puts datastores = host.to_hash['HOST']['HOST_SHARE']['DATASTORES']['DS'] @@ -475,26 +588,34 @@ class OneHostHelper < OpenNebulaHelper::OneHelper end datastores.each do |datastore| - CLIHelper.print_header(str_h1 % "LOCAL SYSTEM DATASTORE ##{datastore['ID']} CAPACITY", false) - puts str % ["TOTAL:", OpenNebulaHelper.unit_to_str(datastore['TOTAL_MB'].to_i, {},'M')] - puts str % ["USED: ", OpenNebulaHelper.unit_to_str(datastore['USED_MB'].to_i, {},'M')] - puts str % ["FREE:", OpenNebulaHelper.unit_to_str(datastore['FREE_MB'].to_i, {},'M')] + CLIHelper.print_header(str_h1 % + 'LOCAL SYSTEM DATASTORE '\ + "##{datastore['ID']} CAPACITY", false) + puts format(str, 'TOTAL:', + OpenNebulaHelper.unit_to_str(datastore['TOTAL_MB'] + .to_i, {}, 'M')) + puts format(str, 'USED: ', + OpenNebulaHelper.unit_to_str(datastore['USED_MB'] + .to_i, {}, 'M')) + puts format(str, 'FREE:', + OpenNebulaHelper.unit_to_str(datastore['FREE_MB'] + .to_i, {}, 'M')) puts end - CLIHelper.print_header(str_h1 % "MONITORING INFORMATION", false) + CLIHelper.print_header(str_h1 % 'MONITORING INFORMATION', false) wilds = host.wilds begin pcis = [host.to_hash['HOST']['HOST_SHARE']['PCI_DEVICES']['PCI']] pcis = pcis.flatten.compact - rescue + rescue StandardError pcis = nil end - host.delete_element("TEMPLATE/VM") - host.delete_element("TEMPLATE_WILDS") + host.delete_element('TEMPLATE/VM') + host.delete_element('TEMPLATE_WILDS') puts host.template_str @@ -503,81 +624,85 @@ class OneHostHelper < OpenNebulaHelper::OneHelper end puts - CLIHelper.print_header("WILD VIRTUAL MACHINES", false) + CLIHelper.print_header('WILD VIRTUAL MACHINES', false) puts - format = "%-30.30s %36s 
%4s %10s" - CLIHelper.print_header(format % ["NAME", "IMPORT_ID", "CPU", "MEMORY"], + format = '%-30.30s %36s %4s %10s' + CLIHelper.print_header(format(format, 'NAME', + 'IMPORT_ID', 'CPU', 'MEMORY'), true) wilds.each do |wild| - if wild['IMPORT_TEMPLATE'] - wild_tmplt = Base64::decode64(wild['IMPORT_TEMPLATE']).split("\n") - name = wild['VM_NAME'] - import = wild_tmplt.select { |line| - line[/^IMPORT_VM_ID/] - }[0].split("=")[1].gsub("\"", " ").strip - memory = wild_tmplt.select { |line| - line[/^MEMORY/] - }[0].split("=")[1].gsub("\"", " ").strip - cpu = wild_tmplt.select { |line| - line[/^CPU/] - }[0].split("=")[1].gsub("\"", " ").strip - else - name = wild['DEPLOY_ID'] - import = memory = cpu = "-" - end + if wild['IMPORT_TEMPLATE'] + wild_tmplt = Base64.decode64(wild['IMPORT_TEMPLATE']) + .split("\n") + name = wild['VM_NAME'] + import = wild_tmplt.select do |line| + line[/^IMPORT_VM_ID/] + end[0].split('=')[1].tr('"', ' ').strip + memory = wild_tmplt.select do |line| + line[/^MEMORY/] + end[0].split('=')[1].tr('"', ' ').strip + cpu = wild_tmplt.select do |line| + line[/^CPU/] + end[0].split('=')[1].tr('"', ' ').strip + else + name = wild['DEPLOY_ID'] + import = memory = cpu = '-' + end - puts format % [name, import, cpu, memory] + puts format(format, name, import, cpu, memory) end puts - CLIHelper.print_header("VIRTUAL MACHINES", false) + CLIHelper.print_header('VIRTUAL MACHINES', false) puts - onevm_helper=OneVMHelper.new - onevm_helper.client=@client - onevm_helper.list_pool({:filter=>["HOST=#{host.name}"], :no_pager => true}, false) + onevm_helper = OneVMHelper.new + onevm_helper.client = @client + onevm_helper.list_pool({ :filter => ["HOST=#{host.name}"], + :no_pager => true }, + false) end def print_pcis(pcis) puts - CLIHelper.print_header("PCI DEVICES", false) + CLIHelper.print_header('PCI DEVICES', false) puts - table=CLIHelper::ShowTable.new(nil, self) do - column :VM, "Used by VM", :size => 5, :left => false do |d| - if d["VMID"] == "-1" - "" + table 
= CLIHelper::ShowTable.new(nil, self) do + column :VM, 'Used by VM', :size => 5, :left => false do |d| + if d['VMID'] == '-1' + '' else - d["VMID"] + d['VMID'] end end - column :ADDR, "PCI Address", :size => 7, :left => true do |d| - d["SHORT_ADDRESS"] + column :ADDR, 'PCI Address', :size => 7, :left => true do |d| + d['SHORT_ADDRESS'] end - column :TYPE, "Type", :size => 14, :left => true do |d| - d["TYPE"] + column :TYPE, 'Type', :size => 14, :left => true do |d| + d['TYPE'] end - column :CLASS, "Class", :size => 12, :left => true do |d| - d["CLASS_NAME"] + column :CLASS, 'Class', :size => 12, :left => true do |d| + d['CLASS_NAME'] end - column :NAME, "Name", :size => 50, :left => true do |d| - d["DEVICE_NAME"] + column :NAME, 'Name', :size => 50, :left => true do |d| + d['DEVICE_NAME'] end - column :VENDOR, "Vendor", :size => 8, :left => true do |d| - d["VENDOR_NAME"] + column :VENDOR, 'Vendor', :size => 8, :left => true do |d| + d['VENDOR_NAME'] end default :VM, :ADDR, :TYPE, :NAME - end table.show(pcis) end + end diff --git a/src/cli/one_helper/onevm_helper.rb b/src/cli/one_helper/onevm_helper.rb index 625fc25cc9..e66fcf7c37 100644 --- a/src/cli/one_helper/onevm_helper.rb +++ b/src/cli/one_helper/onevm_helper.rb @@ -198,6 +198,13 @@ class OneVMHelper < OpenNebulaHelper::OneHelper :format => String } + SEARCH = { + :name => "search", + :large => "--search search", + :description=> "query in KEY=VALUE format", + :format => String + } + def self.rname "VM" end diff --git a/src/cli/onehost b/src/cli/onehost index 5e6f3a09e0..3ab77000d1 100755 --- a/src/cli/onehost +++ b/src/cli/onehost @@ -285,4 +285,17 @@ CommandParser::CmdParser.new(ARGV) do o.import_wild(args[1]) end end + + forceupdate_desc = <<-EOT.unindent + Forces host monitoring update + Examples: + onehost forceupdate host01 + onehost forceupdate host01,host02,host03 + onehost forceupdate -c myCluster + EOT + + command :forceupdate, forceupdate_desc, [:range, :hostid_list, nil], + :options => 
[OneClusterHelper::CLUSTER] do + helper.forceupdate(args[0], options) + end end diff --git a/src/cli/onevm b/src/cli/onevm index 7b62b22164..b6603ec1dd 100755 --- a/src/cli/onevm +++ b/src/cli/onevm @@ -557,8 +557,10 @@ CommandParser::CmdParser.new(ARGV) do EOT command :migrate, migrate_desc, [:range, :vmid_list], :hostid, - [:datastoreid, nil], :options => [ENFORCE, OneVMHelper::LIVE, - OneVMHelper::POFF, OneVMHelper::POFFHARD] do + [:datastoreid, nil], :options => [ENFORCE, + OneVMHelper::LIVE, + OneVMHelper::POFF, + OneVMHelper::POFFHARD] do host_id = args[1] verbose = "migrating to #{host_id}" @@ -795,7 +797,8 @@ CommandParser::CmdParser.new(ARGV) do if ip if !nic_alias && !nic_name - template = "NIC = [ NETWORK_ID = #{network_id}, IP = #{ip} ]" + template = "NIC = [ NETWORK_ID = #{network_id}, \ + IP = #{ip} ]" elsif !nic_alias && nic_name template = "NIC = [ NETWORK_ID = #{network_id}, IP = #{ip}, @@ -1044,8 +1047,28 @@ CommandParser::CmdParser.new(ARGV) do command :list, list_desc, [:filterflag, nil], :options => CLIHelper::OPTIONS + OpenNebulaHelper::OPTIONS + - [OpenNebulaHelper::DESCRIBE] do - helper.list_pool(options, false, args[0]) + [OpenNebulaHelper::DESCRIBE] + [OneVMHelper::SEARCH] do + if !options[:search] + helper.list_pool(options, false, args[0]) + else + table = helper.format_pool(options) + pool = OpenNebula::VirtualMachinePool.new(OneVMHelper.get_client) + + rc = pool.info_search(:query => options[:search]) + + if !rc.nil? 
+ puts rc.message + exit(-1) + end + + if options[:xml] + puts pool.to_xml + else + table.show(pool.to_hash, options) + end + + 0 + end end show_desc = <<-EOT.unindent @@ -1128,11 +1151,12 @@ CommandParser::CmdParser.new(ARGV) do This command accepts a template file or opens an editor, the full list of configuration attributes are: - OS = ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT"] - FEATURES = ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"] - INPUT = ["TYPE", "BUS"] - GRAPHICS = ["TYPE", "LISTEN", "PASSWD", "KEYMAP" ] - RAW = ["DATA", "DATA_VMX", "TYPE"] + OS = ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT"] + FEATURES = ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"] + INPUT = ["TYPE", "BUS"] + GRAPHICS = ["TYPE", "LISTEN", "PASSWD", "KEYMAP" ] + RAW = ["DATA", "DATA_VMX", "TYPE"] + CPU_MODEL = ["MODEL"] CONTEXT (any value, **variable substitution will be made**) EOT @@ -1153,7 +1177,8 @@ CommandParser::CmdParser.new(ARGV) do template = vm.template_like_str('TEMPLATE', true, 'OS | FEATURES | INPUT | '\ - 'GRAPHICS | RAW | CONTEXT') + 'GRAPHICS | RAW | CONTEXT | '\ + 'CPU_MODEL') template = OpenNebulaHelper.editor_input(template) end diff --git a/src/cli/onevntemplate b/src/cli/onevntemplate index 2d2a6faf79..647320d78e 100755 --- a/src/cli/onevntemplate +++ b/src/cli/onevntemplate @@ -30,7 +30,6 @@ $LOAD_PATH << RUBY_LIB_LOCATION + '/cli' require 'command_parser' require 'one_helper/onevntemplate_helper' require 'one_helper/onevnet_helper' -require 'pry' CommandParser::CmdParser.new(ARGV) do usage '`onevntemplate` [] []' diff --git a/src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb b/src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb index 92433c44eb..0fd5114a0e 100644 --- a/src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb +++ b/src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb @@ -31,6 +31,7 @@ module OpenNebulaCloudAuth # def do_auth(env, params={}) auth = Rack::Auth::Basic::Request.new(env) + if 
auth.provided? && auth.basic? username, password = auth.credentials authenticated = false @@ -63,11 +64,14 @@ module OpenNebulaCloudAuth end username = parser.escape(username) - password = parser.escape(password) - client = OpenNebula::Client.new("#{username}:#{password}", @conf[:one_xmlrpc]) + epassword = parser.escape(password) + + client = OpenNebula::Client.new("#{username}:#{epassword}", @conf[:one_xmlrpc]) user = OpenNebula::User.new_with_id(OpenNebula::User::SELF, client) + rc = user.info end + if OpenNebula.is_error?(rc) if logger logger.error{ "User #{username} could not be authenticated"} @@ -77,7 +81,14 @@ module OpenNebulaCloudAuth return nil end - return user.name + # Check if the user authenticated with a scoped token. In this case + # encode the EGID in the username as "user:egid" + egid = user["//LOGIN_TOKEN [ TOKEN = \"#{password}\" ]/EGID"] + + auth_name = user.name + auth_name = "#{auth_name}:#{egid}" if egid + + return auth_name end return nil diff --git a/src/cloud/common/CloudClient.rb b/src/cloud/common/CloudClient.rb index ce53022955..4e9ef3e385 100644 --- a/src/cloud/common/CloudClient.rb +++ b/src/cloud/common/CloudClient.rb @@ -50,7 +50,7 @@ end module CloudClient # OpenNebula version - VERSION = '5.7.85' + VERSION = '5.7.90' # ######################################################################### # Default location for the authentication file diff --git a/src/common/Attribute.cc b/src/common/Attribute.cc index 6b6790a7be..bcc2d3f38b 100644 --- a/src/common/Attribute.cc +++ b/src/common/Attribute.cc @@ -65,25 +65,9 @@ string * VectorAttribute::marshall(const char * _sep) const /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -string * VectorAttribute::to_xml() const -{ - ostringstream oss; - - to_xml(oss); - - string * xml = new string; - - *xml = oss.str(); - - return xml; -} - -/* 
-------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - void VectorAttribute::to_xml(ostringstream &oss) const { - map::const_iterator it; + map::const_iterator it; oss << "<" << name() << ">"; @@ -102,6 +86,60 @@ void VectorAttribute::to_xml(ostringstream &oss) const oss << ""<< name() << ">"; } +void VectorAttribute::to_json(std::ostringstream& s) const +{ + if ( attribute_value.empty() ) + { + s << "{}"; + return; + } + + map::const_iterator it = attribute_value.begin(); + bool is_first = true; + + s << "{"; + + for (++it; it!=attribute_value.end(); it++) + { + if ( it->first.empty() ) + { + continue; + } + + if ( !is_first ) + { + s << ","; + } + else + { + is_first = false; + } + + s << "\"" << it->first << "\": "; + one_util::escape_json(it->second, s); + } + + s << "}"; +} + +void VectorAttribute::to_token(std::ostringstream& s) const +{ + map::const_iterator it; + + for (it=attribute_value.begin(); it!=attribute_value.end(); it++) + { + if (it->first.empty() || it->second.empty()) + { + continue; + } + + one_util::escape_token(it->first, s); + s << "="; + one_util::escape_token(it->second, s); + s << std::endl; + } +} + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/src/common/NebulaUtil.cc b/src/common/NebulaUtil.cc index f572b0595f..39ad93fdf7 100644 --- a/src/common/NebulaUtil.cc +++ b/src/common/NebulaUtil.cc @@ -380,6 +380,57 @@ std::string one_util::gsub(const std::string& st, const std::string& sfind, /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +void one_util::escape_json(const std::string& str, std::ostringstream& s) +{ + std::string::const_iterator it; + + s << "\""; + + for (it = str.begin(); it != 
str.end(); ++it) + { + switch (*it) + { + case '\\': s << "\\\\"; break; + case '"' : s << "\\\""; break; + case '/' : s << "\\/"; break; + case '\b': s << "\\b"; break; + case '\f': s << "\\f"; break; + case '\n': s << "\\n"; break; + case '\r': s << "\\r"; break; + case '\t': s << "\\t"; break; + default : s << *it; + } + } + + s << "\""; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +void one_util::escape_token(const std::string& str, std::ostringstream& s) +{ + std::string::const_iterator it; + + for (it = str.begin(); it != str.end(); ++it) + { + switch (*it) + { + case '-': + case '_': + case '.': + case ':': + s << '_'; + break; + default : + s << *it; + } + } +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + namespace one_util { template<> diff --git a/src/datastore_mad/remotes/libfs.sh b/src/datastore_mad/remotes/libfs.sh index 8986bb9fdc..eb10478907 100644 --- a/src/datastore_mad/remotes/libfs.sh +++ b/src/datastore_mad/remotes/libfs.sh @@ -451,22 +451,24 @@ function check_restricted { } #------------------------------------------------------------------------------- -# Filter out hosts which are not ON +# Filter out hosts which are OFF, ERROR or DISABLED # @param $1 - space separated list of hosts -# @return - space separated list of hosts which are in ON state +# @return - space separated list of hosts which are not in OFF, ERROR or +# DISABLED sate #------------------------------------------------------------------------------- -function get_only_on_hosts { - INPUT_ARRAY=($1) +function remove_off_hosts { + ALL_HOSTS_ARRAY=($1) + OFF_HOSTS_STR=$(onehost list --no-pager --csv \ + --filter="STAT=off,STAT=err,STAT=dsbl" --list=NAME,STAT 2>/dev/null) - ONEHOST_LIST_ON_CMD='onehost list --no-pager --csv 
--filter="STAT=on" --list=NAME,STAT' - ON_HOSTS_STR=$(eval "$ONEHOST_LIST_ON_CMD 2>/dev/null") - - if [[ $? = 0 ]]; then - ON_HOSTS_ARRAY=($( echo "$ON_HOSTS_STR" | $AWK -F, '{ if (NR>1) print $1 }')) - for A in "${INPUT_ARRAY[@]}"; do - for B in "${ON_HOSTS_ARRAY[@]}"; do - [[ $A == $B ]] && { echo $A; break; } + if [ $? -eq 0 ]; then + OFF_HOSTS_ARRAY=($( echo "$OFF_HOSTS_STR" | awk -F, '{ if (NR>1) print $1 }')) + for HOST in "${ALL_HOSTS_ARRAY[@]}"; do + OFF=false + for OFF_HOST in "${OFF_HOSTS_ARRAY[@]}"; do + [ $HOST = $OFF_HOST ] && { OFF=true; break; } done + $OFF || echo -ne "$HOST " done else # onehost cmd failed, can't filter anything, better return unchanged @@ -483,8 +485,14 @@ function get_only_on_hosts { # @return host to be used as bridge #------------------------------------------------------------------------------- function get_destination_host { - BRIDGE_LIST=$(get_only_on_hosts "$BRIDGE_LIST") - HOSTS_ARRAY=($BRIDGE_LIST) + REDUCED_LIST=$(remove_off_hosts "$BRIDGE_LIST") + + if [ -z "$REDUCED_LIST" -a -n "$BRIDGE_LIST" ]; then + error_message "All hosts from 'BRIDGE_LIST' are offline, error or disabled" + exit -1 + fi + + HOSTS_ARRAY=($REDUCED_LIST) N_HOSTS=${#HOSTS_ARRAY[@]} if [ -n "$1" ]; then diff --git a/src/datastore_mad/remotes/vcenter/clone b/src/datastore_mad/remotes/vcenter/clone index 1fa69dfe99..762e98db82 100755 --- a/src/datastore_mad/remotes/vcenter/clone +++ b/src/datastore_mad/remotes/vcenter/clone @@ -73,7 +73,9 @@ begin rescue Exception => e message = "Error cloning img #{src_path} to #{target_ds_name}"\ " Reason: \"#{e.message}\"\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 ensure vi_client.close_connection if vi_client diff --git a/src/datastore_mad/remotes/vcenter/mkfs b/src/datastore_mad/remotes/vcenter/mkfs index de07dee2b9..09e48c441f 100755 --- 
a/src/datastore_mad/remotes/vcenter/mkfs +++ b/src/datastore_mad/remotes/vcenter/mkfs @@ -75,7 +75,9 @@ begin rescue Exception => e message = "Error creating virtual disk #{img_name}."\ " Reason: \"#{e.message}\"\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 ensure vi_client.close_connection if vi_client diff --git a/src/datastore_mad/remotes/vcenter/monitor b/src/datastore_mad/remotes/vcenter/monitor index 1dae1e5107..ad2eec3b5c 100755 --- a/src/datastore_mad/remotes/vcenter/monitor +++ b/src/datastore_mad/remotes/vcenter/monitor @@ -61,7 +61,9 @@ begin rescue Exception => e message = "Error monitoring datastore #{id}."\ " Reason: \"#{e.message}\"\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 ensure vi_client.close_connection if vi_client diff --git a/src/datastore_mad/remotes/vcenter/rm b/src/datastore_mad/remotes/vcenter/rm index 72fdf0080e..8e849adb73 100755 --- a/src/datastore_mad/remotes/vcenter/rm +++ b/src/datastore_mad/remotes/vcenter/rm @@ -88,7 +88,9 @@ rescue Exception => e if !e.message.start_with?('FileNotFound') message = "Error deleting virtual disk #{img_src}."\ " Reason: \"#{e.message}\"\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 end ensure diff --git a/src/dm/DispatchManagerStates.cc b/src/dm/DispatchManagerStates.cc index 698e98fc97..cb6f33edb0 100644 --- a/src/dm/DispatchManagerStates.cc +++ b/src/dm/DispatchManagerStates.cc @@ -314,6 +314,8 @@ void DispatchManager::resubmit_action(int vid) vm->set_state(VirtualMachine::PENDING); + vm->set_deploy_id(""); //reset the deploy-id + vmpool->update(vm); vm->unlock(); diff --git 
a/src/docker_machine/src/docker_machine/opennebula.go b/src/docker_machine/src/docker_machine/opennebula.go index 1ccc88fe23..9de6b407e7 100644 --- a/src/docker_machine/src/docker_machine/opennebula.go +++ b/src/docker_machine/src/docker_machine/opennebula.go @@ -521,8 +521,10 @@ func (d *Driver) GetIP() (string, error) { return "", err } - if ip, ok := vm.XPath("/VM/TEMPLATE/NIC/IP"); ok { - d.IPAddress = ip + if len(vm.Template.NIC) > 0 { + if vm.Template.NIC[0].IP != "" { + d.IPAddress = vm.Template.NIC[0].IP + } } if d.IPAddress == "" { diff --git a/src/im_mad/remotes/VERSION b/src/im_mad/remotes/VERSION index 2837ff5135..1099325e99 100644 --- a/src/im_mad/remotes/VERSION +++ b/src/im_mad/remotes/VERSION @@ -1 +1 @@ -5.7.85 \ No newline at end of file +5.7.90 \ No newline at end of file diff --git a/src/im_mad/remotes/common.d/collectd-client.rb b/src/im_mad/remotes/common.d/collectd-client.rb index 9830aca97b..b2d218f35c 100644 --- a/src/im_mad/remotes/common.d/collectd-client.rb +++ b/src/im_mad/remotes/common.d/collectd-client.rb @@ -88,13 +88,12 @@ class CollectdClient # Collect the Data ts = Time.now - data = run_probes + + # Send signal to itself to run probes and send the data + Process.kill('HUP', $$) run_probes_time = (Time.now - ts).to_i - # Send the Data - send data - # Sleep during the Cycle sleep_time = @monitor_push_period - run_probes_time sleep_time = 0 if sleep_time < 0 @@ -130,4 +129,16 @@ sleep rand monitor_push_period # Start push monitorization client = CollectdClient.new(hypervisor, number, host, port, probes_args, monitor_push_period) + +Signal.trap('HUP') do + # ignore another HUP until we handle this one + this_handler = Signal.trap('HUP', 'IGNORE') + + data = client.run_probes + client.send data + + # set the handler back + Signal.trap('HUP', this_handler) +end + client.monitor diff --git a/src/im_mad/remotes/kvm-probes.d/machines-models.rb b/src/im_mad/remotes/kvm-probes.d/machines-models.rb index a36e3a6643..ad559b565f 100755 --- 
a/src/im_mad/remotes/kvm-probes.d/machines-models.rb +++ b/src/im_mad/remotes/kvm-probes.d/machines-models.rb @@ -30,14 +30,9 @@ begin machines = [] models = [] - Open3.popen3("virsh -r -c qemu:///system capabilities") {|i, o, e, t| - if t.value.exitstatus != 0 - exit -1 - end - - capabilities = o.read - } - + cmd = 'virsh -r -c qemu:///system capabilities' + capabilities, e, s = Open3.capture3(cmd) + exit(-1) unless s.success? cap_xml = REXML::Document.new(capabilities) cap_xml = cap_xml.root @@ -94,12 +89,9 @@ begin end } - cpu_models = "" - Open3.popen3("virsh -r -c qemu:///system cpu-models #{a}") {|i, o, e, t| - break if t.value.exitstatus != 0 - - cpu_models = o.read - } + cmd = "virsh -r -c qemu:///system cpu-models #{a}" + cpu_models, e, s = Open3.capture3(cmd) + break unless s.success? cpu_models.each_line { |l| l.chomp! diff --git a/src/im_mad/remotes/lxd-probes.d/profiles.sh b/src/im_mad/remotes/lxd-probes.d/profiles.sh new file mode 100755 index 0000000000..860b0b8819 --- /dev/null +++ b/src/im_mad/remotes/lxd-probes.d/profiles.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +cmd='lxc profile list' + +profiles=$($cmd | grep -v -- -+- | grep -v NAME | grep -v default | awk '{print $2}') + +if [ "$?" 
-ne "0" ]; then + profiles=$(sudo $cmd | grep -v -- -+- | grep -v NAME | grep -v default | awk '{print $2}') +fi + +tmpfile=$(mktemp /tmp/lxd_probe.XXXXXX) + +echo "$profiles" > "$tmpfile" +out=$(tr '\n' ' ' < "$tmpfile") +out=${out::-1} + +echo -e LXD_PROFILES=\""$out"\" + +rm "$tmpfile" diff --git a/src/lcm/LifeCycleStates.cc b/src/lcm/LifeCycleStates.cc index e56fd8b05d..f1c20effdc 100644 --- a/src/lcm/LifeCycleStates.cc +++ b/src/lcm/LifeCycleStates.cc @@ -20,7 +20,7 @@ #include "VirtualMachineManager.h" #include "ImageManager.h" -void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm, int vid) +void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm) { int cpu, mem, disk; vector pci; @@ -64,7 +64,63 @@ void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm, int vid) //---------------------------------------------------- - tm->trigger(TMAction::PROLOG_MIGR,vid); + tm->trigger(TMAction::PROLOG_MIGR,vm->get_oid()); +} + +void LifeCycleManager::revert_migrate_after_failure(VirtualMachine* vm) +{ + int cpu, mem, disk; + vector pci; + + time_t the_time = time(0); + + //---------------------------------------------------- + // RUNNING STATE FROM SAVE_MIGRATE + //---------------------------------------------------- + + vm->set_state(VirtualMachine::RUNNING); + + vm->set_etime(the_time); + + vm->set_vm_info(); + + vmpool->update_history(vm); + + vm->get_requirements(cpu, mem, disk, pci); + + if ( vm->get_hid() != vm->get_previous_hid() ) + { + hpool->del_capacity(vm->get_hid(), vm->get_oid(), cpu, mem, disk, pci); + } + + vm->set_previous_etime(the_time); + + vm->set_previous_vm_info(); + + vm->set_previous_running_etime(the_time); + + vmpool->update_previous_history(vm); + + // --- Add new record by copying the previous one + + vm->cp_previous_history(); + + vm->set_stime(the_time); + + vm->set_running_stime(the_time); + + vm->set_last_poll(0); + + vmpool->update_history(vm); + + vmpool->update(vm); + + vm->log("LCM", Log::INFO, "Fail to save VM 
state while migrating." + " Assuming that the VM is still RUNNING (will poll VM)."); + + //---------------------------------------------------- + + vmm->trigger(VMMAction::POLL,vm->get_oid()); } /* -------------------------------------------------------------------------- */ @@ -84,7 +140,7 @@ void LifeCycleManager::save_success_action(int vid) if ( vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE ) { - start_prolog_migrate(vm, vid); + start_prolog_migrate(vm); } else if (vm->get_lcm_state() == VirtualMachine::SAVE_SUSPEND) { @@ -168,58 +224,7 @@ void LifeCycleManager::save_failure_action(int vid) if ( vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE ) { - int cpu, mem, disk; - vector pci; - - time_t the_time = time(0); - - //---------------------------------------------------- - // RUNNING STATE FROM SAVE_MIGRATE - //---------------------------------------------------- - - vm->set_state(VirtualMachine::RUNNING); - - vm->set_etime(the_time); - - vm->set_vm_info(); - - vmpool->update_history(vm); - - vm->get_requirements(cpu, mem, disk, pci); - - if ( vm->get_hid() != vm->get_previous_hid() ) - { - hpool->del_capacity(vm->get_hid(), vm->get_oid(), cpu, mem, disk, pci); - } - - vm->set_previous_etime(the_time); - - vm->set_previous_vm_info(); - - vm->set_previous_running_etime(the_time); - - vmpool->update_previous_history(vm); - - // --- Add new record by copying the previous one - - vm->cp_previous_history(); - - vm->set_stime(the_time); - - vm->set_running_stime(the_time); - - vm->set_last_poll(0); - - vmpool->update_history(vm); - - vmpool->update(vm); - - vm->log("LCM", Log::INFO, "Fail to save VM state while migrating." 
- " Assuming that the VM is still RUNNING (will poll VM)."); - - //---------------------------------------------------- - - vmm->trigger(VMMAction::POLL,vid); + revert_migrate_after_failure(vm); } else if ( vm->get_lcm_state() == VirtualMachine::SAVE_SUSPEND || vm->get_lcm_state() == VirtualMachine::SAVE_STOP ) @@ -561,7 +566,7 @@ void LifeCycleManager::shutdown_success_action(int vid) } else if (vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE) { - start_prolog_migrate(vm, vid); + start_prolog_migrate(vm); } else { @@ -608,6 +613,10 @@ void LifeCycleManager::shutdown_failure_action(int vid) vmm->trigger(VMMAction::POLL,vid); } + else if (vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE) + { + revert_migrate_after_failure(vm); + } else { vm->log("LCM",Log::ERROR,"shutdown_failure_action, VM in a wrong state"); diff --git a/src/mad/ruby/scripts_common.rb b/src/mad/ruby/scripts_common.rb index 29642ffd25..4381299a9b 100644 --- a/src/mad/ruby/scripts_common.rb +++ b/src/mad/ruby/scripts_common.rb @@ -75,11 +75,11 @@ module OpenNebula # Executes a command, if it fails returns error message and exits # If a second parameter is present it is used as the error message when # the command fails - def self.exec_and_log(command, message=nil) + def self.exec_and_log(command, message=nil, allowed_return_code=0) output=`#{command} 2>&1 1>/dev/null` code=$?.exitstatus - if code!=0 + if code!=0 && code!=allowed_return_code log_error "Command \"#{command}\" failed." log_error output if !message diff --git a/src/mad/sh/scripts_common.sh b/src/mad/sh/scripts_common.sh index e44b7d5d75..2124ea5383 100644 --- a/src/mad/sh/scripts_common.sh +++ b/src/mad/sh/scripts_common.sh @@ -460,12 +460,23 @@ function ssh_make_path { SSH_EXEC_ERR=`$SSH $1 bash -s 2>&1 1>/dev/null < "\$(dirname $2)/.monitor" - fi +if [ ! 
-d $2 ]; then + mkdir -p $2 +fi + +# create or update .monitor content +if [ -n "$3" ]; then + MONITOR_FN="\$(dirname $2)/.monitor" + + MONITOR='' + if [ -f "\\${MONITOR_FN}" ]; then + MONITOR="\\$(cat "\\${MONITOR_FN}" 2>/dev/null)" + fi + + if [ "x\\${MONITOR}" != "x$3" ]; then + echo "$3" > "\\${MONITOR_FN}" + fi fi EOF` SSH_EXEC_RC=$? @@ -994,3 +1005,9 @@ function get_nic_information { OUTBOUND_PEAK_KB="${XPATH_ELEMENTS[j++]}" ORDER="${XPATH_ELEMENTS[j++]}" } + +function hup_collectd +{ + SEND_HUP='kill -HUP `cat /tmp/one-collectd-client.pid` || true' + ssh_exec_and_log_no_error $1 "$SEND_HUP" +} diff --git a/src/market/MarketPlacePool.cc b/src/market/MarketPlacePool.cc index 3fa5a86257..4f57f4ef00 100644 --- a/src/market/MarketPlacePool.cc +++ b/src/market/MarketPlacePool.cc @@ -33,12 +33,18 @@ MarketPlacePool::MarketPlacePool(SqlDB * db, bool is_federation_slave) //lastOID is set in PoolSQL::init_cb if (get_lastOID() == -1) { - // Build the default default security group + // Build the template for the OpenNebula Systems MarketPlace string default_market = "NAME=\"OpenNebula Public\"\n" "MARKET_MAD=one\n" "DESCRIPTION=\"OpenNebula Systems MarketPlace\""; + string lxc_market = + "NAME=\"Linux Containers\"\n" + "MARKET_MAD=linuxcontainers\n" + "DESCRIPTION=\"MarketPlace for the public image server fo LXC &" + " LXD hosted at linuxcontainers.org\""; + Nebula& nd = Nebula::instance(); UserPool * upool = nd.get_upool(); User * oneadmin = upool->get_ro(0); @@ -46,9 +52,12 @@ MarketPlacePool::MarketPlacePool(SqlDB * db, bool is_federation_slave) string error; MarketPlaceTemplate * default_tmpl = new MarketPlaceTemplate; + MarketPlaceTemplate * lxc_tmpl = new MarketPlaceTemplate; + char * error_parse; default_tmpl->parse(default_market, &error_parse); + lxc_tmpl->parse(lxc_market, &error_parse); MarketPlace * marketplace = new MarketPlace( oneadmin->get_uid(), @@ -58,19 +67,33 @@ MarketPlacePool::MarketPlacePool(SqlDB * db, bool is_federation_slave) 
oneadmin->get_umask(), default_tmpl); + MarketPlace * lxc_marketplace = new MarketPlace( + oneadmin->get_uid(), + oneadmin->get_gid(), + oneadmin->get_uname(), + oneadmin->get_gname(), + oneadmin->get_umask(), + lxc_tmpl); + oneadmin->unlock(); marketplace->set_permissions(1,1,1, 1,0,0, 1,0,0, error); + lxc_marketplace->set_permissions(1,1,1, 1,0,0, 1,0,0, error); marketplace->zone_id = Nebula::instance().get_zone_id(); + lxc_marketplace->zone_id = Nebula::instance().get_zone_id(); marketplace->parse_template(error); + lxc_marketplace->parse_template(error); - if (PoolSQL::allocate(marketplace, error) < 0) + int rc = PoolSQL::allocate(marketplace, error); + + rc += PoolSQL::allocate(lxc_marketplace, error); + + if (rc < 0) { ostringstream oss; - oss << "Error trying to create default " - << "OpenNebula Systems MarketPlace: " << error; + oss << "Error trying to create default marketplaces: " << error; NebulaLog::log("MKP", Log::ERROR, oss); throw runtime_error(oss.str()); diff --git a/src/market_mad/remotes/linuxcontainers/monitor b/src/market_mad/remotes/linuxcontainers/monitor index 246968045d..0d86f378a8 100755 --- a/src/market_mad/remotes/linuxcontainers/monitor +++ b/src/market_mad/remotes/linuxcontainers/monitor @@ -31,7 +31,7 @@ class LinuxContainersMarket #--------------------------------------------------------------------------- DEFAULTS = { :url => 'https://images.linuxcontainers.org', - :sizemb => 5120, + :sizemb => 2560, :fs => 'ext4', :format => 'raw', :agent => 'OpenNebula' diff --git a/src/nebula/Nebula.cc b/src/nebula/Nebula.cc index 9478b94bcd..291d42a52f 100644 --- a/src/nebula/Nebula.cc +++ b/src/nebula/Nebula.cc @@ -252,8 +252,6 @@ void Nebula::start(bool bootstrap_only) // ----------------------------------------------------------- try { - bool db_is_sqlite = true; - string server; int port; string user; @@ -265,12 +263,7 @@ void Nebula::start(bool bootstrap_only) if ( _db != 0 ) { - string value = _db->vector_value("BACKEND"); - - if (value == 
"mysql") - { - db_is_sqlite = false; - } + db_backend_type = _db->vector_value("BACKEND"); if (_db->vector_value("SERVER", server) == -1) { @@ -303,7 +296,7 @@ void Nebula::start(bool bootstrap_only) } } - if ( db_is_sqlite ) + if ( db_backend_type == "sqlite" ) { db_backend = new SqliteDB(var_location + "one.db"); } diff --git a/src/oca/go/src/goca/acl.go b/src/oca/go/src/goca/acl.go index e53a661b30..69a6469db3 100644 --- a/src/oca/go/src/goca/acl.go +++ b/src/oca/go/src/goca/acl.go @@ -1,8 +1,15 @@ package goca +import "encoding/xml" + // ACLPool represents an OpenNebula ACL list pool type ACLPool struct { - XMLResource + ID uint `xml:"ID"` + User int `xml:"USER"` + Resource int `xml:"RESOURCE"` + Rights int `xml:"RIGHTS"` + Zone int `xml:"ZONE"` + String string `xml:"STRING"` } // NewACLPool returns an acl pool. A connection to OpenNebula is @@ -13,9 +20,13 @@ func NewACLPool() (*ACLPool, error) { return nil, err } - aclpool := &ACLPool{XMLResource{body: response.Body()}} + aclPool := &ACLPool{} + err = xml.Unmarshal([]byte(response.Body()), aclPool) + if err != nil { + return nil, err + } - return aclpool, err + return aclPool, nil } // CreateACLRule adds a new ACL rule. 
diff --git a/src/oca/go/src/goca/cluster.go b/src/oca/go/src/goca/cluster.go index 94ac676f12..a2a5d45276 100644 --- a/src/oca/go/src/goca/cluster.go +++ b/src/oca/go/src/goca/cluster.go @@ -1,15 +1,30 @@ package goca -// Cluster represents an OpenNebula Cluster -type Cluster struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // ClusterPool represents an OpenNebula ClusterPool type ClusterPool struct { - XMLResource + Clusters []Cluster `xml:"CLUSTER"` +} + +// Cluster represents an OpenNebula Cluster +type Cluster struct { + ID uint `xml:"ID"` + Name string `xml:"NAME"` + HostsID []int `xml:"HOSTS>ID"` + DatastoresID []int `xml:"DATASTORES>ID"` + VnetsID []int `xml:"VNETS>ID"` + Template clusterTemplate `xml:"TEMPLATE"` +} + +type clusterTemplate struct { + // Example of reservation: https://github.com/OpenNebula/addon-storpool/blob/ba9dd3462b369440cf618c4396c266f02e50f36f/misc/reserved.sh + ReservedMem string `xml:"RESERVED_MEM"` + ReservedCpu string `xml:"RESERVED_CPU"` + Dynamic unmatchedTagsSlice `xml:",any"` } // NewClusterPool returns a cluster pool. A connection to OpenNebula is @@ -20,9 +35,13 @@ func NewClusterPool() (*ClusterPool, error) { return nil, err } - clusterpool := &ClusterPool{XMLResource{body: response.Body()}} + clusterPool := &ClusterPool{} + err = xml.Unmarshal([]byte(response.Body()), clusterPool) + if err != nil { + return nil, err + } - return clusterpool, err + return clusterPool, nil } @@ -35,14 +54,26 @@ func NewCluster(id uint) *Cluster { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the cluster. 
func NewClusterFromName(name string) (*Cluster, error) { + var id uint + clusterPool, err := NewClusterPool() if err != nil { return nil, err } - id, err := clusterPool.GetIDFromName(name, "/CLUSTER_POOL/CLUSTER") - if err != nil { - return nil, err + match := false + for i := 0; i < len(clusterPool.Clusters); i++ { + if clusterPool.Clusters[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = clusterPool.Clusters[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewCluster(id), nil @@ -129,6 +160,5 @@ func (cluster *Cluster) Info() error { if err != nil { return err } - cluster.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), cluster) } diff --git a/src/oca/go/src/goca/datastore.go b/src/oca/go/src/goca/datastore.go index b662d37cc7..328d4746f6 100644 --- a/src/oca/go/src/goca/datastore.go +++ b/src/oca/go/src/goca/datastore.go @@ -1,15 +1,66 @@ package goca -// Datastore represents an OpenNebula Datastore -type Datastore struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" + "fmt" +) // DatastorePool represents an OpenNebula DatastorePool type DatastorePool struct { - XMLResource + Datastores []Datastore `xml:"DATASTORE"` +} + +// Datastore represents an OpenNebula Datastore +type Datastore struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + Permissions *Permissions `xml:"PERMISSIONS"` + DSMad string `xml:"DS_MAD"` + TMMad string `xml:"TM_MAD"` + BasePath string `xml:"BASE_PATH"` + Type string `xml:"TYPE"` + DiskType string `xml:"DISK_TYPE"` + StateRaw int `xml:"STATE"` + ClustersID []int `xml:"CLUSTERS>ID"` + TotalMB int `xml:"TOTAL_MB"` + FreeMB int `xml:"FREE_MB"` + UsedMB int `xml:"USED_MB"` + ImagesID []int `xml:"IMAGES>ID"` + Template datastoreTemplate `xml:"TEMPLATE"` +} 
+ +type datastoreTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` +} + +// DatastoreState is the state of an OpenNebula datastore +type DatastoreState int + +const ( + // DatastoreReady datastore is ready + DatastoreReady = iota + + // DatastoreDisable datastore is disabled + DatastoreDisable +) + +func (st DatastoreState) isValid() bool { + if st >= DatastoreReady && st <= DatastoreDisable { + return true + } + return false +} + +func (st DatastoreState) String() string { + return [...]string{ + "READY", + "DISABLE", + }[st] } // NewDatastorePool returns a datastore pool. A connection to OpenNebula is @@ -20,9 +71,13 @@ func NewDatastorePool() (*DatastorePool, error) { return nil, err } - datastorepool := &DatastorePool{XMLResource{body: response.Body()}} + datastorePool := &DatastorePool{} + err = xml.Unmarshal([]byte(response.Body()), datastorePool) + if err != nil { + return nil, err + } - return datastorepool, err + return datastorePool, nil } // NewDatastore finds a datastore object by ID. No connection to OpenNebula. @@ -34,14 +89,26 @@ func NewDatastore(id uint) *Datastore { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the datastore. 
func NewDatastoreFromName(name string) (*Datastore, error) { + var id uint + datastorePool, err := NewDatastorePool() if err != nil { return nil, err } - id, err := datastorePool.GetIDFromName(name, "/DATASTORE_POOL/DATASTORE") - if err != nil { - return nil, err + match := false + for i := 0; i < len(datastorePool.Datastores); i++ { + if datastorePool.Datastores[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = datastorePool.Datastores[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewDatastore(id), nil @@ -116,6 +183,23 @@ func (datastore *Datastore) Info() error { if err != nil { return err } - datastore.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), datastore) +} + +// State looks up the state of the datastore and returns the DatastoreState +func (datastore *Datastore) State() (DatastoreState, error) { + state := DatastoreState(datastore.StateRaw) + if !state.isValid() { + return -1, fmt.Errorf("Datastore State: this state value is not currently handled: %d\n", datastore.StateRaw) + } + return state, nil +} + +// StateString returns the state in string format +func (datastore *Datastore) StateString() (string, error) { + state := DatastoreState(datastore.StateRaw) + if !state.isValid() { + return "", fmt.Errorf("Datastore StateString: this state value is not currently handled: %d\n", datastore.StateRaw) + } + return state.String(), nil } diff --git a/src/oca/go/src/goca/document.go b/src/oca/go/src/goca/document.go index 241806dab9..57b460a196 100644 --- a/src/oca/go/src/goca/document.go +++ b/src/oca/go/src/goca/document.go @@ -1,17 +1,31 @@ package goca -import "errors" - -// Document represents an OpenNebula Document -type Document struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // DocumentPool represents an OpenNebula DocumentPool type DocumentPool struct { - 
XMLResource + Documents []Document `xml:"DOCUMENT"` +} + +// Document represents an OpenNebula Document +type Document struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + Type string `xml:"TYPE"` + Permissions *Permissions `xml:"PERMISSIONS"` + LockInfos *Lock `xml:"LOCK"` + Template documentTemplate `xml:"TEMPLATE"` +} + +type documentTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` } // NewDocumentPool returns a document pool. A connection to OpenNebula is @@ -41,9 +55,13 @@ func NewDocumentPool(documentType int, args ...int) (*DocumentPool, error) { return nil, err } - documentpool := &DocumentPool{XMLResource{body: response.Body()}} + documentPool := &DocumentPool{} + err = xml.Unmarshal([]byte(response.Body()), documentPool) + if err != nil { + return nil, err + } - return documentpool, err + return documentPool, nil } // NewDocument finds a document object by ID. No connection to OpenNebula. @@ -55,14 +73,26 @@ func NewDocument(id uint) *Document { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the document. 
func NewDocumentFromName(name string, documentType int) (*Document, error) { + var id uint + documentPool, err := NewDocumentPool(documentType) if err != nil { return nil, err } - id, err := documentPool.GetIDFromName(name, "/DOCUMENT_POOL/DOCUMENT") - if err != nil { - return nil, err + match := false + for i := 0; i < len(documentPool.Documents); i++ { + if documentPool.Documents[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = documentPool.Documents[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewDocument(id), nil diff --git a/src/oca/go/src/goca/goca.go b/src/oca/go/src/goca/goca.go index 222ab5a8cc..b1bc568f67 100644 --- a/src/oca/go/src/goca/goca.go +++ b/src/oca/go/src/goca/goca.go @@ -38,15 +38,6 @@ type response struct { bodyBool bool } -// Resource implements an OpenNebula Resource methods. *XMLResource implements -// all these methods -type Resource interface { - Body() string - XPath(string) (string, bool) - XPathIter(string) *XMLIter - GetIDFromName(string, string) (uint, error) -} - // Initializes the client variable, used as a singleton func init() { SetClient(NewConfig("", "", "")) @@ -123,6 +114,10 @@ func SystemConfig() (string, error) { // Call is an XML-RPC wrapper. It returns a pointer to response and an error. func (c *oneClient) Call(method string, args ...interface{}) (*response, error) { + return c.endpointCall(c.url, method, args...) 
+} + +func (c *oneClient) endpointCall(url string, method string, args ...interface{}) (*response, error) { var ( ok bool @@ -144,7 +139,7 @@ func (c *oneClient) Call(method string, args ...interface{}) (*response, error) &ClientError{Code: ClientReqBuild, msg: "xmlrpc request encoding", err: err} } - req, err := http.NewRequest("POST", c.url, bytes.NewBuffer(buf)) + req, err := http.NewRequest("POST", url, bytes.NewBuffer(buf)) if err != nil { return nil, &ClientError{Code: ClientReqBuild, msg: "http request build", err: err} diff --git a/src/oca/go/src/goca/group.go b/src/oca/go/src/goca/group.go index 06378c3c67..e811a20fb0 100644 --- a/src/oca/go/src/goca/group.go +++ b/src/oca/go/src/goca/group.go @@ -1,15 +1,34 @@ package goca -// Group represents an OpenNebula Group -type Group struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // GroupPool represents an OpenNebula GroupPool type GroupPool struct { - XMLResource + Groups []groupBase `xml:"GROUP"` + Quotas []quotas `xml:"QUOTAS"` + DefaultUserQuotas quotasList `xml:"DEFAULT_USER_QUOTAS"` +} + +// Group represents an OpenNebula Group +type Group struct { + groupBase + quotasList + DefaultUserQuotas quotasList `xml:"DEFAULT_USER_QUOTAS"` +} + +type groupBase struct { + ID uint `xml:"ID"` + Name string `xml:"NAME"` + Users []int `xml:"USERS>ID"` + Admins []int `xml:"ADMINS>ID"` + Template groupTemplate `xml:"TEMPLATE"` +} + +type groupTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` } // NewGroupPool returns a group pool. A connection to OpenNebula is @@ -20,28 +39,44 @@ func NewGroupPool() (*GroupPool, error) { return nil, err } - grouppool := &GroupPool{XMLResource{body: response.Body()}} + groupPool := &GroupPool{} + err = xml.Unmarshal([]byte(response.Body()), groupPool) + if err != nil { + return nil, err + } - return grouppool, err + return groupPool, nil } // NewGroup finds a group object by ID. No connection to OpenNebula. 
func NewGroup(id uint) *Group { - return &Group{ID: id} + return &Group{groupBase: groupBase{ID: id}} } // NewGroupFromName finds a group object by name. It connects to // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the group. func NewGroupFromName(name string) (*Group, error) { + var id uint + groupPool, err := NewGroupPool() if err != nil { return nil, err } - id, err := groupPool.GetIDFromName(name, "/GROUP_POOL/GROUP") - if err != nil { - return nil, err + match := false + for i := 0; i < len(groupPool.Groups); i++ { + if groupPool.Groups[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = groupPool.Groups[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewGroup(id), nil @@ -69,8 +104,7 @@ func (group *Group) Info() error { if err != nil { return err } - group.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), group) } // Update replaces the group template contents. 
diff --git a/src/oca/go/src/goca/helper_test.go b/src/oca/go/src/goca/helper_test.go index 8a5adadeb9..b7dd31965d 100644 --- a/src/oca/go/src/goca/helper_test.go +++ b/src/oca/go/src/goca/helper_test.go @@ -8,23 +8,6 @@ import ( "time" ) -// Extracts the ID of a resource -func GetID(t *testing.T, r Resource, s string) (uint, error) { - path := fmt.Sprintf("/%s/ID", s) - - sIDFromXML, ok := r.XPath(path) - if !ok { - t.Error("Could not find ID") - } - - idFromXML, err := strconv.ParseUint(sIDFromXML, 10, strconv.IntSize) - if err != nil { - t.Error(err) - } - - return uint(idFromXML), nil -} - // Appends a random string to a name func GenName(name string) string { t := strconv.FormatInt(time.Now().UnixNano(), 10) @@ -57,11 +40,6 @@ func GetUserGroup(t *testing.T, user string) (string, error){ t.Error("Cannot retreive caller user Info") } - // Get Caller Group - ugroup, ok := u.XPath("/USER/GNAME") - if !ok { - t.Errorf("Could not get caller group name") - } + return u.GName, nil - return ugroup, nil } diff --git a/src/oca/go/src/goca/host.go b/src/oca/go/src/goca/host.go index 9c67dcfe65..f049a6e071 100644 --- a/src/oca/go/src/goca/host.go +++ b/src/oca/go/src/goca/host.go @@ -1,20 +1,71 @@ package goca import ( + "encoding/xml" "errors" - "strconv" + "fmt" ) -// Host represents an OpenNebula Host -type Host struct { - XMLResource - ID uint - Name string -} - // HostPool represents an OpenNebula HostPool type HostPool struct { - XMLResource + Hosts []Host `xml:"HOST"` +} + +// Host represents an OpenNebula Host +type Host struct { + ID uint `xml:"ID"` + Name string `xml:"NAME"` + StateRaw int `xml:"STATE"` + IMMAD string `xml:"IM_MAD"` + VMMAD string `xml:"VM_MAD"` + LastMonTime int `xml:"LAST_MON_TIME"` + ClusterID int `xml:"CLUSTER_ID"` + Cluster string `xml:"CLUSTER"` + Share hostShare `xml:"HOST_SHARE"` + VMsID []int `xml:"VMS>ID"` + Template hostTemplate `xml:"TEMPLATE"` +} + +type hostShare struct { + DiskUsage int `xml:"DISK_USAGE"` + MemUsage int 
`xml:"MEM_USAGE"` + CPUUsage int `xml:"CPU_USAGE"` + TotalMem int `xml:"TOTAL_MEM"` + TotalCPU int `xml:"TOTAL_CPU"` + + MaxDisk int `xml:"MAX_DISK"` + MaxMem int `xml:"MAX_MEM"` + MaxCPU int `xml:"MAX_CPU"` + + FreeDisk int `xml:"FREE_DISK"` + FreeMem int `xml:"FREE_MEM"` + FreeCPU int `xml:"FREE_CPU"` + + UsedDisk int `xml:"USED_DISK"` + UsedMem int `xml:"USED_MEM"` + UsedCPU int `xml:"USED_CPU"` + + RunningVMs int `xml:"RUNNING_VMS"` + Stores hostDataStores `xml:"DATASTORES"` + PCIDevices interface{} `xml:"PCI_DEVICES>PCI"` +} + +type hostDataStores struct { + DSs []hostDS `xml:"DS"` +} + +type hostDS struct { + ID int `xml:"ID"` + UsedMB int `xml:"USED_MB"` + FreeMB int `xml:"FREE_MB"` + TotalMB int `xml:"TOTAL_MB"` +} + +type hostTemplate struct { + // Example of reservation: https://github.com/OpenNebula/addon-storpool/blob/ba9dd3462b369440cf618c4396c266f02e50f36f/misc/reserved.sh + ReservedMem int `xml:"RESERVED_MEM"` + ReservedCpu int `xml:"RESERVED_CPU"` + Dynamic unmatchedTagsSlice `xml:",any"` } // HostState is the state of an OpenNebula Host @@ -49,6 +100,13 @@ const ( HostOffline ) +func (st HostState) isValid() bool { + if st >= HostInit && st <= HostOffline { + return true + } + return false +} + func (st HostState) String() string { return [...]string{ "INIT", @@ -71,9 +129,12 @@ func NewHostPool() (*HostPool, error) { return nil, err } - hostpool := &HostPool{XMLResource{body: response.Body()}} - - return hostpool, err + hostPool := &HostPool{} + err = xml.Unmarshal([]byte(response.Body()), &hostPool) + if err != nil { + return nil, err + } + return hostPool, nil } // NewHost finds a host object by ID. No connection to OpenNebula. @@ -85,14 +146,26 @@ func NewHost(id uint) *Host { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the host. 
func NewHostFromName(name string) (*Host, error) { + var id uint + hostPool, err := NewHostPool() if err != nil { return nil, err } - id, err := hostPool.GetIDFromName(name, "/HOST_POOL/HOST") - if err != nil { - return nil, err + match := false + for i := 0; i < len(hostPool.Hosts); i++ { + if hostPool.Hosts[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = hostPool.Hosts[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewHost(id), nil @@ -146,8 +219,7 @@ func (host *Host) Info() error { if err != nil { return err } - host.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), host) } // Monitoring returns the host monitoring records. @@ -156,23 +228,20 @@ func (host *Host) Monitoring() error { return err } -// State returns the HostState +// State looks up the state of the host and returns the HostState func (host *Host) State() (HostState, error) { - stateString, ok := host.XPath("/HOST/STATE") - if ok != true { - return -1, errors.New("Unable to parse host State") + state := HostState(host.StateRaw) + if !state.isValid() { + return -1, fmt.Errorf("Host State: this state value is not currently handled: %d\n", host.StateRaw) } - - state, _ := strconv.Atoi(stateString) - - return HostState(state), nil + return state, nil } -// StateString returns the HostState as string +// StateString returns the state in string format func (host *Host) StateString() (string, error) { - state, err := host.State() - if err != nil { - return "", err + state := HostState(host.StateRaw) + if !state.isValid() { + return "", fmt.Errorf("Host StateString: this state value is not currently handled: %d\n", host.StateRaw) } - return HostState(state).String(), nil + return state.String(), nil } diff --git a/src/oca/go/src/goca/image.go b/src/oca/go/src/goca/image.go index f1af00f876..7ab12cf746 100644 --- a/src/oca/go/src/goca/image.go +++ 
b/src/oca/go/src/goca/image.go @@ -1,20 +1,50 @@ package goca import ( + "encoding/xml" "errors" - "strconv" + "fmt" ) -// Image represents an OpenNebula Image -type Image struct { - XMLResource - ID uint - Name string -} - // ImagePool represents an OpenNebula Image pool type ImagePool struct { - XMLResource + Images []Image `xml:"IMAGE"` +} + +// Image represents an OpenNebula Image +type Image struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + LockInfos *Lock `xml:"LOCK"` + Permissions *Permissions `xml:"PERMISSIONS"` + Type int `xml:"TYPE"` + DiskType int `xml:"DISK_TYPE"` + PersistentValue int `xml:"PERSISTENT"` + RegTime int `xml:"REGTIME"` + Source string `xml:"SOURCE"` + Path string `xml:"PATH"` + FsType string `xml:"FSTYPE"` + Size int `xml:"SIZE"` + StateRaw int `xml:"STATE"` + RunningVMs int `xml:"RUNNING_VMS"` + CloningOps int `xml:"CLONING_OPS"` + CloningID int `xml:"CLONING_ID"` + TargetSnapshot int `xml:"TARGET_SNAPSHOT"` + DatastoreID int `xml:"DATASTORE_ID"` + Datastore string `xml:"DATASTORE"` + VMsID []int `xml:"VMS>ID"` + ClonesID []int `xml:"CLONES>ID"` + AppClonesID []int `xml:"APP_CLONES>ID"` + Snapshots ImageSnapshot `xml:"SNAPSHOTS"` + Template imageTemplate `xml:"TEMPLATE"` +} + +type imageTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` } // ImageState is the state of the Image @@ -55,6 +85,13 @@ const ( ImageLockUsedPers ) +func (st ImageState) isValid() bool { + if st >= ImageInit && st <= ImageLockUsedPers { + return true + } + return false +} + // String returns the string version of the ImageState func (s ImageState) String() string { return [...]string{ @@ -84,7 +121,7 @@ func CreateImage(template string, dsid uint) (uint, error) { } // NewImagePool returns a new image pool. It accepts the scope of the query. It -// performs an OpenNebula connectio to fetch the information. 
+// performs an OpenNebula connection to fetch the information. func NewImagePool(args ...int) (*ImagePool, error) { var who, start, end int @@ -106,10 +143,13 @@ func NewImagePool(args ...int) (*ImagePool, error) { return nil, err } - imagepool := &ImagePool{XMLResource{body: response.Body()}} - - return imagepool, err + imagePool := &ImagePool{} + err = xml.Unmarshal([]byte(response.Body()), imagePool) + if err != nil { + return nil, err + } + return imagePool, nil } // NewImage finds an image by ID returns a new Image object. At this stage no @@ -122,14 +162,26 @@ func NewImage(id uint) *Image { // to OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the image. func NewImageFromName(name string) (*Image, error) { + var id uint + imagePool, err := NewImagePool() if err != nil { return nil, err } - id, err := imagePool.GetIDFromName(name, "/IMAGE_POOL/IMAGE") - if err != nil { - return nil, err + match := false + for i := 0; i < len(imagePool.Images); i++ { + if imagePool.Images[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = imagePool.Images[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewImage(id), nil @@ -141,29 +193,25 @@ func (image *Image) Info() error { if err != nil { return err } - image.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), image) } // State looks up the state of the image and returns the ImageState func (image *Image) State() (ImageState, error) { - stateString, ok := image.XPath("/IMAGE/STATE") - if ok != true { - return -1, errors.New("Unable to parse Image State") + state := ImageState(image.StateRaw) + if !state.isValid() { + return -1, fmt.Errorf("Image State: this state value is not currently handled: %d\n", image.StateRaw) } - - state, _ := strconv.Atoi(stateString) - - return ImageState(state), nil + return state, nil } // 
StateString returns the state in string format func (image *Image) StateString() (string, error) { - state, err := image.State() - if err != nil { - return "", err + state := ImageState(image.StateRaw) + if !state.isValid() { + return "", fmt.Errorf("Image State: this state value is not currently handled: %d\n", image.StateRaw) } - return ImageState(state).String(), nil + return state.String(), nil } // Clone clones an existing image. It returns the clone ID diff --git a/src/oca/go/src/goca/image_test.go b/src/oca/go/src/goca/image_test.go index 184305b5c2..661b7a43d8 100644 --- a/src/oca/go/src/goca/image_test.go +++ b/src/oca/go/src/goca/image_test.go @@ -33,7 +33,7 @@ func ImageExpectState(image *Image, state string) func() bool { } // Helper to create a Image -func createImage(t *testing.T) *Image { +func createImage(t *testing.T) (*Image, uint) { // Datastore ID 1 means default for image id, err := CreateImage(imageTpl, 1) if err != nil { @@ -48,26 +48,21 @@ func createImage(t *testing.T) *Image { t.Error(err) } - return image + return image, id } func TestImage(t *testing.T) { - image := createImage(t) + var err error - idParse, err := GetID(t, image, "IMAGE") - if err != nil { - t.Error(err) - } + image, idOrig := createImage(t) - if idParse != image.ID { + idParse := image.ID + if idParse != idOrig { t.Errorf("Image ID does not match") } // Get image by Name - name, ok := image.XPath("/IMAGE/NAME") - if !ok { - t.Errorf("Could not get name") - } + name := image.Name image, err = NewImageFromName(name) if err != nil { @@ -79,9 +74,8 @@ func TestImage(t *testing.T) { t.Error(err) } - idParse, err = GetID(t, image, "IMAGE") - - if idParse != image.ID { + idParse = image.ID + if idParse != idOrig { t.Errorf("Image ID does not match") } @@ -104,16 +98,10 @@ func TestImage(t *testing.T) { } // Get Image Owner Name - uname, ok := image.XPath("/IMAGE/UNAME") - if !ok { - t.Errorf("Could not get user name") - } + uname := image.UName // Get Image owner group Name - 
gname, ok := image.XPath("/IMAGE/GNAME") - if !ok { - t.Errorf("Could not get group name") - } + gname := image.GName // Compare with caller username caller := strings.Split(client.token, ":")[0] @@ -143,16 +131,10 @@ func TestImage(t *testing.T) { } // Get Image Owner Name - uname, ok = image.XPath("/IMAGE/UNAME") - if !ok { - t.Errorf("Could not get user name") - } + uname = image.UName - // Get Image Owner Name - gname, ok = image.XPath("/IMAGE/GNAME") - if !ok { - t.Errorf("Could not get user name") - } + // Get Image owner group Name + gname = image.GName if "serveradmin" != uname { t.Error("Image owner is not oneadmin") diff --git a/src/oca/go/src/goca/lock.go b/src/oca/go/src/goca/lock.go new file mode 100644 index 0000000000..c2ae86ecef --- /dev/null +++ b/src/oca/go/src/goca/lock.go @@ -0,0 +1,8 @@ +package goca + +type Lock struct { + Locked int `xml:"LOCKED"` + Owner int `xml:"OWNER"` + Time int `xml:"TIME"` + ReqID int `xml:"REQ_ID"` +} diff --git a/src/oca/go/src/goca/marketplace.go b/src/oca/go/src/goca/marketplace.go index 17548f42f1..74bb37a017 100644 --- a/src/oca/go/src/goca/marketplace.go +++ b/src/oca/go/src/goca/marketplace.go @@ -1,17 +1,36 @@ package goca -import "errors" - -// MarketPlace represents an OpenNebula MarketPlace -type MarketPlace struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // MarketPlacePool represents an OpenNebula MarketPlacePool type MarketPlacePool struct { - XMLResource + MarketPlaces []MarketPlace `xml:"MARKETPLACE"` +} + +// MarketPlace represents an OpenNebula MarketPlace +type MarketPlace struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + MarketMad string `xml:"MARKET_MAD"` + ZoneID string `xml:"ZONE_ID"` + TotalMB int `xml:"TOTAL_MB"` + FreeMB int `xml:"FREE_MB"` + UsedMB int `xml:"USED_MB"` + MarketPlaceAppsIDs []int `xml:"MARKETPLACEAPPS>ID"` + Permissions 
*Permissions `xml:"PERMISSIONS"` + Template marketPlaceTemplate `xml:"TEMPLATE"` +} + +// MarketPlaceTemplate represent the template part of the MarketPlace +type marketPlaceTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` } // NewMarketPlacePool returns a marketplace pool. A connection to OpenNebula is @@ -41,9 +60,13 @@ func NewMarketPlacePool(args ...int) (*MarketPlacePool, error) { return nil, err } - marketpool := &MarketPlacePool{XMLResource{body: response.Body()}} + marketPool := &MarketPlacePool{} + err = xml.Unmarshal([]byte(response.Body()), marketPool) + if err != nil { + return nil, err + } - return marketpool, err + return marketPool, nil } // NewMarketPlace finds a marketplace object by ID. No connection to OpenNebula. @@ -55,14 +78,26 @@ func NewMarketPlace(id uint) *MarketPlace { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the marketplace. func NewMarketPlaceFromName(name string) (*MarketPlace, error) { + var id uint + marketPool, err := NewMarketPlacePool() if err != nil { return nil, err } - id, err := marketPool.GetIDFromName(name, "/MARKETPLACE_POOL/MARKETPLACE") - if err != nil { - return nil, err + match := false + for i := 0; i < len(marketPool.MarketPlaces); i++ { + if marketPool.MarketPlaces[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = marketPool.MarketPlaces[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewMarketPlace(id), nil @@ -129,6 +164,5 @@ func (market *MarketPlace) Info() error { if err != nil { return err } - market.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), market) } diff --git a/src/oca/go/src/goca/marketplace_test.go b/src/oca/go/src/goca/marketplace_test.go index 8563ae1be2..e3607fa351 100644 --- a/src/oca/go/src/goca/marketplace_test.go +++ b/src/oca/go/src/goca/marketplace_test.go @@ -4,7 +4,6 
@@ import ( "testing" ) - func TestMarketplace(t *testing.T){ var mkt_name string = "marketplace_test_go" @@ -25,7 +24,7 @@ func TestMarketplace(t *testing.T){ market = NewMarketPlace(market_id) market.Info() - actual, _:= market.XMLResource.XPath("/MARKETPLACE/NAME") + actual := market.Name if actual != mkt_name { t.Errorf("Test failed, expected: '%s', got: '%s'", mkt_name, actual) @@ -42,17 +41,20 @@ func TestMarketplace(t *testing.T){ market.Info() - actual_mm, _ := market.XMLResource.XPath("/MARKETPLACE/TEMPLATE/MARKET_MAD") - actual_1, _ := market.XMLResource.XPath("/MARKETPLACE/TEMPLATE/ATT1") + actual_mm := market.MarketMad + actual_1, err := market.Template.Dynamic.GetContentByName("ATT1") + if err != nil { + t.Errorf("Test failed, can't retrieve '%s', error: %s", "ATT1", err.Error()) + } else { + if actual_1 != "VAL1" { + t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1) + } + } if actual_mm != "http" { t.Errorf("Test failed, expected: '%s', got: '%s'", "http", actual_mm) } - if actual_1 != "VAL1" { - t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1) - } - //Change permissions for Marketpkace err = market.Chmod(1,1,1,1,1,1,1,1,1) @@ -62,11 +64,11 @@ func TestMarketplace(t *testing.T){ market.Info() - expected := "111111111" - actual, _ = market.XMLResource.XPath("/MARKETPLACE/PERMISSIONS") + expected_perm := Permissions{ 1, 1, 1, 1, 1, 1, 1, 1, 1 } + actual_perm := *market.Permissions - if actual != expected { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected, actual) + if actual_perm != expected_perm { + t.Errorf("Test failed, expected: '%s', got: '%s'", expected_perm.String(), actual_perm.String()) } //Change owner of Marketpkace @@ -78,17 +80,17 @@ func TestMarketplace(t *testing.T){ market.Info() - expected_usr := "1" - expected_grp := "1" - actual_usr, _ :=market.XMLResource.XPath("/MARKETPLACE/UID") - actual_grp, _ :=market.XMLResource.XPath("/MARKETPLACE/GID") + expected_usr := 1 + expected_grp := 1 
+ actual_usr := market.UID + actual_grp := market.GID if actual_usr != expected_usr { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected_usr, actual_usr) + t.Errorf("Test failed, expected: '%d', got: '%d'", expected_usr, actual_usr) } if actual_grp != expected_grp { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected_grp, actual_grp) + t.Errorf("Test failed, expected: '%d', got: '%d'", expected_grp, actual_grp) } rename := mkt_name + "-renamed" @@ -102,7 +104,7 @@ func TestMarketplace(t *testing.T){ market.Info() - actual, _ = market.XMLResource.XPath("/MARKETPLACE/NAME") + actual = market.Name if actual != rename { t.Errorf("Test failed, expected: '%s', got: '%s'", rename, actual) diff --git a/src/oca/go/src/goca/marketplaceapp.go b/src/oca/go/src/goca/marketplaceapp.go index b003227dec..87675cbac5 100644 --- a/src/oca/go/src/goca/marketplaceapp.go +++ b/src/oca/go/src/goca/marketplaceapp.go @@ -1,17 +1,44 @@ package goca -import "errors" - -// MarketPlaceApp represents an OpenNebula MarketPlaceApp -type MarketPlaceApp struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // MarketPlaceAppPool represents an OpenNebula MarketPlaceAppPool type MarketPlaceAppPool struct { - XMLResource + MarketPlaceApps []MarketPlaceApp `xml:"MARKETPLACEAPP"` +} + +// MarketPlaceApp represents an OpenNebula MarketPlaceApp +type MarketPlaceApp struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + LockInfos *Lock `xml:"LOCK"` + Permissions *Permissions `xml:"PERMISSIONS"` + RegTime int `xml:"REGTIME"` + Name string `xml:"NAME"` + ZoneId string `xml:"ZONE_ID"` + OriginId string `xml:"ORIGIN_ID"` + Source string `xml:"SOURCE"` + MD5 string `xml:"MD5"` + Size int `xml:"SIZE"` + Description string `xml:"DESCRIPTION"` + Version string `xml:"VERSION"` + Format string `xml:"FORMAT"` + AppTemplate64 string `xml:"APPTEMPLATE64"` + MarketPlaceID int 
`xml:"MARKETPLACEID"` + MarketPlace string `xml:"MARKETPLACE"` + State int `xml:"STATE"` + Type int `xml:"TYPE"` + Template marketPlaceAppTemplate `xml:"TEMPLATE"` +} + +type marketPlaceAppTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` } // NewMarketPlaceAppPool returns a marketplace app pool. A connection to OpenNebula is @@ -41,9 +68,13 @@ func NewMarketPlaceAppPool(args ...int) (*MarketPlaceAppPool, error) { return nil, err } - marketapppool := &MarketPlaceAppPool{XMLResource{body: response.Body()}} + marketappPool := &MarketPlaceAppPool{} + err = xml.Unmarshal([]byte(response.Body()), marketappPool) + if err != nil { + return nil, err + } - return marketapppool, err + return marketappPool, nil } // NewMarketPlaceApp finds a marketplace app object by ID. No connection to OpenNebula. @@ -55,14 +86,26 @@ func NewMarketPlaceApp(id uint) *MarketPlaceApp { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the marketplace app. func NewMarketPlaceAppFromName(name string) (*MarketPlaceApp, error) { + var id uint + marketAppPool, err := NewMarketPlaceAppPool() if err != nil { return nil, err } - id, err := marketAppPool.GetIDFromName(name, "/MARKETPLACEAPP_POOL/MARKETPLACEAPP") - if err != nil { - return nil, err + match := false + for i := 0; i < len(marketAppPool.MarketPlaceApps); i++ { + if marketAppPool.MarketPlaceApps[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = marketAppPool.MarketPlaceApps[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewMarketPlaceApp(id), nil @@ -137,8 +180,7 @@ func (marketApp *MarketPlaceApp) Info() error { if err != nil { return err } - marketApp.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), marketApp) } // Lock locks the marketplace app depending on blocking level. 
diff --git a/src/oca/go/src/goca/marketplaceapp_test.go b/src/oca/go/src/goca/marketplaceapp_test.go index 62093a0cff..bf077bb82c 100644 --- a/src/oca/go/src/goca/marketplaceapp_test.go +++ b/src/oca/go/src/goca/marketplaceapp_test.go @@ -53,7 +53,7 @@ func TestMarketplaceApp(t *testing.T){ mkt_app = NewMarketPlaceApp(app_id) mkt_app.Info() - actual, _:= mkt_app.XMLResource.XPath("/MARKETPLACEAPP/NAME") + actual := mkt_app.Name if actual != mkt_app_name { t.Errorf("Test failed, expected: '%s', got: '%s'", mkt_app_name, actual) diff --git a/src/oca/go/src/goca/permissions.go b/src/oca/go/src/goca/permissions.go new file mode 100644 index 0000000000..8185339a5a --- /dev/null +++ b/src/oca/go/src/goca/permissions.go @@ -0,0 +1,23 @@ +package goca + +//import "fmt" + +type Permissions struct { + OwnerU int `xml:"OWNER_U"` + OwnerM int `xml:"OWNER_M"` + OwnerA int `xml:"OWNER_A"` + GroupU int `xml:"GROUP_U"` + GroupM int `xml:"GROUP_M"` + GroupA int `xml:"GROUP_A"` + OtherU int `xml:"OTHER_U"` + OtherM int `xml:"OTHER_M"` + OtherA int `xml:"OTHER_A"` +} + +func (p *Permissions) String() string { + permStr := [8]string{"---", "--a", "-m-", "-ma", "u--", "u-a", "um-", "uma"} + owner := permStr[p.OwnerU<<2|p.OwnerM<<1|p.OwnerA] + group := permStr[p.GroupU<<2|p.GroupM<<1|p.GroupA] + other := permStr[p.OtherU<<2|p.OtherM<<1|p.OtherA] + return owner + group + other +} diff --git a/src/oca/go/src/goca/quota.go b/src/oca/go/src/goca/quota.go new file mode 100644 index 0000000000..293746862a --- /dev/null +++ b/src/oca/go/src/goca/quota.go @@ -0,0 +1,50 @@ +package goca + +type quotas struct { + ID uint `xml:"ID"` + quotasList +} + +type quotasList struct { + DatastoreQuotas []datastoreQuota `xml:"DATASTORE_QUOTA>DATASTORE"` + NetworkQuotas []networkQuota `xml:"NETWORK_QUOTA>NETWORK"` + VMQuotas []vmQuota `xml:"VM_QUOTA>VM"` + ImageQuotas []imageQuota `xml:"IMAGE_QUOTA>IMAGE"` +} + +type datastoreQuota struct { + ID string `xml:"ID"` + Images string `xml:"IMAGES"` + ImagesUsed 
string `xml:"IMAGES_USED"` + Size string `xml:"SIZE"` + SizeUsed string `xml:"SIZE_USED"` +} + +type networkQuota struct { + ID string `xml:"ID"` + Leases string `xml:"LEASES"` + LeasesUsed string `xml:"LEASES_USED"` +} + +type vmQuota struct { + CPU string `xml:"CPU"` + CPUUsed string `xml:"CPU_USED"` + Memory string `xml:"MEMORY"` + MemoryUsed string `xml:"MEMORY_USED"` + RunningCpu string `xml:"RUNNING_CPU"` + RunningCpuUsed string `xml:"RUNNING_CPU_USED"` + RunningMemory string `xml:"RUNNING_MEMORY"` + RunningMemoryUsed string `xml:"RUNNING_MEMORY_USED"` + RunningVMs string `xml:"RUNNING_VMS"` + RunningVMsUsed string `xml:"RUNNING_VMS_USED"` + SystemDiskSize string `xml:"SYSTEM_DISK_SIZE"` + SystemDiskSizeUsed string `xml:"SYSTEM_DISK_SIZE_USED"` + VMs string `xml:"VMS"` + VMsUsed string `xml:"VMS_USED"` +} + +type imageQuota struct { + ID string `xml:"ID"` + RVMs string `xml:"RVMS"` + RVMsUsed string `xml:"RVMS_USED"` +} diff --git a/src/oca/go/src/goca/securitygroup.go b/src/oca/go/src/goca/securitygroup.go index fedb6ebf68..f2ef3db58f 100644 --- a/src/oca/go/src/goca/securitygroup.go +++ b/src/oca/go/src/goca/securitygroup.go @@ -1,17 +1,41 @@ package goca -import "errors" - -// SecurityGroup represents an OpenNebula SecurityGroup -type SecurityGroup struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // SecurityGroupPool represents an OpenNebula SecurityGroupPool type SecurityGroupPool struct { - XMLResource + SecurityGroups []SecurityGroup `xml:"SECURITY_GROUP"` +} + +// SecurityGroup represents an OpenNebula SecurityGroup +type SecurityGroup struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + Permissions *Permissions `xml:"PERMISSIONS"` + UpdatedVMs []int `xml:"UPDATED_VMS>ID"` + OutdatedVMs []int `xml:"OUTDATED_VMS>ID"` + UpdatingVMs []int `xml:"UPDATING_VMS>ID"` + ErrorVMs []int `xml:"ERROR_VMS>ID"` + 
Template securityGroupTemplate `xml:"TEMPLATE"` +} + +// securityGroupTemplate represents the template part of the OpenNebula SecurityGroup +type securityGroupTemplate struct { + Description string `xml:"DESCRIPTION"` + Rules []securityGroupRule `xml:"RULE"` + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type securityGroupRule struct { + Protocol string `xml:"PROTOCOL"` + RuleType string `xml:"RULE_TYPE"` } // NewSecurityGroupPool returns a security group pool. A connection to OpenNebula is @@ -41,9 +65,13 @@ func NewSecurityGroupPool(args ...int) (*SecurityGroupPool, error) { return nil, err } - secgrouppool := &SecurityGroupPool{XMLResource{body: response.Body()}} + secgroupPool := &SecurityGroupPool{} + err = xml.Unmarshal([]byte(response.Body()), secgroupPool) + if err != nil { + return nil, err + } - return secgrouppool, err + return secgroupPool, nil } // NewSecurityGroup finds a security group object by ID. No connection to OpenNebula. @@ -55,14 +83,26 @@ func NewSecurityGroup(id uint) *SecurityGroup { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the security group. 
func NewSecurityGroupFromName(name string) (*SecurityGroup, error) { + var id uint + secgroupPool, err := NewSecurityGroupPool() if err != nil { return nil, err } - id, err := secgroupPool.GetIDFromName(name, "/SECURITY_GROUP_POOL/SECURITY_GROUP") - if err != nil { - return nil, err + match := false + for i := 0; i < len(secgroupPool.SecurityGroups); i++ { + if secgroupPool.SecurityGroups[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = secgroupPool.SecurityGroups[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewSecurityGroup(id), nil @@ -146,6 +186,5 @@ func (sg *SecurityGroup) Info() error { if err != nil { return err } - sg.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), sg) } diff --git a/src/oca/go/src/goca/securitygroup_test.go b/src/oca/go/src/goca/securitygroup_test.go index b1f3cbde06..110a184f5d 100644 --- a/src/oca/go/src/goca/securitygroup_test.go +++ b/src/oca/go/src/goca/securitygroup_test.go @@ -22,7 +22,7 @@ func TestSGAllocate(t *testing.T){ sg = NewSecurityGroup(sg_id) sg.Info() - actual, _:= sg.XMLResource.XPath("/SECURITY_GROUP/NAME") + actual := sg.Name if actual != sg_name { t.Errorf("Test failed, expected: '%s', got: '%s'", sg_name, actual) @@ -39,15 +39,22 @@ func TestSGAllocate(t *testing.T){ sg.Info() - actual_1, _ := sg.XMLResource.XPath("/SECURITY_GROUP/TEMPLATE/ATT1") - actual_3, _ := sg.XMLResource.XPath("/SECURITY_GROUP/TEMPLATE/ATT3") - - if actual_1 != "VAL1" { - t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1) + actual_1, err := sg.Template.Dynamic.GetContentByName("ATT1") + if err != nil { + t.Errorf("Test failed, can't retrieve '%s', error: %s", "ATT1", err.Error()) + } else { + if actual_1 != "VAL1" { + t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1) + } } - if actual_3 != "VAL3" { - t.Errorf("Test failed, expected: '%s', got: '%s'", 
"VAL3", actual_3) + actual_3, err := sg.Template.Dynamic.GetContentByName("ATT3") + if err != nil { + t.Errorf("Test failed, can't retrieve '%s', error: %s", "ATT3", err.Error()) + } else { + if actual_3 != "VAL3" { + t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL3", actual_3) + } } clone_name := sg_name + "-cloned" @@ -62,7 +69,7 @@ func TestSGAllocate(t *testing.T){ clone := NewSecurityGroup(clone_id) clone.Info() - actual, _ = clone.XMLResource.XPath("/SECURITY_GROUP/NAME") + actual = clone.Name if actual != clone_name { t.Errorf("Test failed, expected: '%s', got: '%s'", clone_name, actual) @@ -79,11 +86,11 @@ func TestSGAllocate(t *testing.T){ sg.Info() - expected := "111111111" - actual, _ = sg.XMLResource.XPath("/SECURITY_GROUP/PERMISSIONS") + expected_perm := Permissions{1, 1, 1, 1, 1, 1, 1, 1, 1} + actual_perm := sg.Permissions - if actual != expected { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected, actual) + if actual_perm == nil || *actual_perm != expected_perm { + t.Errorf("Test failed, expected: '%s', got: '%s'", expected_perm.String(), actual_perm.String()) } //Change owner of SG @@ -95,17 +102,17 @@ func TestSGAllocate(t *testing.T){ sg.Info() - expected_usr := "1" - expected_grp := "1" - actual_usr, _ := sg.XMLResource.XPath("/SECURITY_GROUP/UID") - actual_grp, _ := sg.XMLResource.XPath("/SECURITY_GROUP/GID") + expected_usr := 1 + expected_grp := 1 + actual_usr := sg.UID + actual_grp := sg.GID if actual_usr != expected_usr { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected_usr, actual_usr) + t.Errorf("Test failed, expected: '%d', got: '%d'", expected_usr, actual_usr) } if actual_grp != expected_grp { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected_grp, actual_grp) + t.Errorf("Test failed, expected: '%d', got: '%d'", expected_grp, actual_grp) } //Rename SG @@ -118,7 +125,7 @@ func TestSGAllocate(t *testing.T){ sg.Info() - actual, _ = sg.XMLResource.XPath("/SECURITY_GROUP/NAME") + actual = sg.Name if 
actual != rename { t.Errorf("Test failed, expected: '%s', got: '%s'", rename, actual) diff --git a/src/oca/go/src/goca/snapshot.go b/src/oca/go/src/goca/snapshot.go new file mode 100644 index 0000000000..cfc562cfe0 --- /dev/null +++ b/src/oca/go/src/goca/snapshot.go @@ -0,0 +1,40 @@ +package goca + +// An user can take snapshot on VM, or on VM disks + +// Common part +type snapshot struct { + Children string `xml:"CHILDREN"` //minOccur=0 + Active string `xml:"ACTIVE"` //minOccur=0 + Date int `xml:"DATE"` + ID int `xml:"ID"` + Name string `xml:"NAME"` //minOccur=0 + Parent int `xml:"PARENT"` + Size int `xml:"SIZE"` +} + +// Image entity related +type ImageSnapshot struct { + AllowOrphans string `xml:"ALLOW_ORPHANS"` + CurrentBase int `xml:"CURRENT_BASE"` + NextSnapshot int `xml:"NEXT_SNAPSHOT"` + Snapshots []snapshot `xml:"SNAPSHOT"` +} + +// VM entity related +type VMSnapshot struct { + HypervisorID string `xml:"HYPERVISOR_ID"` + Name string `xml:"NAME"` + ID int `xml:"SNAPSHOT_ID"` + Time string `xml:"TIME"` +} + +type vmHistoryRecordSnapshot struct { + ImageSnapshot + DiskID int `xml:"DISK_ID"` +} + +type vmMonitoringSnapshotSize struct { + DiskID int `xml:"DISK_ID"` + Size int `xml:"SIZE"` +} diff --git a/src/oca/go/src/goca/template.go b/src/oca/go/src/goca/template.go index 1972ff049f..7f2e2438d4 100644 --- a/src/oca/go/src/goca/template.go +++ b/src/oca/go/src/goca/template.go @@ -1,19 +1,65 @@ package goca import ( + "encoding/xml" "errors" ) -// Template represents an OpenNebula Template -type Template struct { - XMLResource - ID uint - Name string -} - // TemplatePool represents an OpenNebula TemplatePool type TemplatePool struct { - XMLResource + Templates []Template `xml:"VMTEMPLATE"` +} + +// Template represents an OpenNebula Template +type Template struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + LockInfos *Lock `xml:"LOCK"` + Permissions 
*Permissions `xml:"PERMISSIONS"` + RegTime int `xml:"REGTIME"` + Template templateTemplate `xml:"TEMPLATE"` +} + +// templateTemplate represent the template part of the OpenNebula Template +type templateTemplate struct { + CPU float64 `xml:"CPU"` + Memory int `xml:"MEMORY"` + Context *templateContext `xml:"CONTEXT"` + Disk []templateDisk `xml:"DISK"` + Graphics *templateGraphics `xml:"GRAPHICS"` + NICDefault *templateNicDefault `xml:"NIC_DEFAULT"` + OS *templateOS `xml:"OS"` + UserInputs templateUserInputs `xml:"USER_INPUTS"` + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type templateContext struct { + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type templateDisk struct { + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type templateGraphics struct { + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type templateUserInputs struct { + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type templateNicDefault struct { + Model string `xml:"MODEL"` +} + +type templateOS struct { + Arch string `xml:"ARCH"` + Boot string `xml:"BOOT"` } // NewTemplatePool returns a template pool. A connection to OpenNebula is @@ -39,10 +85,13 @@ func NewTemplatePool(args ...int) (*TemplatePool, error) { return nil, err } - templatepool := &TemplatePool{XMLResource{body: response.Body()}} - - return templatepool, err + templatePool := &TemplatePool{} + err = xml.Unmarshal([]byte(response.Body()), templatePool) + if err != nil { + return nil, err + } + return templatePool, nil } // NewTemplate finds a template object by ID. No connection to OpenNebula. @@ -54,14 +103,26 @@ func NewTemplate(id uint) *Template { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the template. 
func NewTemplateFromName(name string) (*Template, error) { + var id uint + templatePool, err := NewTemplatePool() if err != nil { return nil, err } - id, err := templatePool.GetIDFromName(name, "/VMTEMPLATE_POOL/VMTEMPLATE") - if err != nil { - return nil, err + match := false + for i := 0; i < len(templatePool.Templates); i++ { + if templatePool.Templates[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = templatePool.Templates[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewTemplate(id), nil @@ -83,8 +144,7 @@ func (template *Template) Info() error { if err != nil { return err } - template.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), template) } // Update will modify the template. If appendTemplate is 0, it will diff --git a/src/oca/go/src/goca/template_test.go b/src/oca/go/src/goca/template_test.go index 58b1471399..16840fb587 100644 --- a/src/oca/go/src/goca/template_test.go +++ b/src/oca/go/src/goca/template_test.go @@ -5,7 +5,7 @@ import ( ) // Helper to create a template -func createTemplate(t *testing.T) *Template { +func createTemplate(t *testing.T) (*Template, uint) { templateName := GenName("template") // Create template @@ -28,28 +28,22 @@ func createTemplate(t *testing.T) *Template { t.Error(err) } - return template + return template, id } func TestTemplateCreateAndDelete(t *testing.T) { - template := createTemplate(t) + var err error - idParse, err := GetID(t, template, "VMTEMPLATE") - if err != nil { - t.Error(err) - } + template, idOrig := createTemplate(t) - if idParse != template.ID { + idParse := template.ID + if idParse != idOrig { t.Errorf("Template ID does not match") } // Get template by Name - templateName, ok := template.XPath("/VMTEMPLATE/NAME") - if !ok { - t.Errorf("Could not get name") - } - - template, err = NewTemplateFromName(templateName) + name := template.Name + template, err 
= NewTemplateFromName(name) if err != nil { t.Fatal(err) } @@ -59,9 +53,8 @@ func TestTemplateCreateAndDelete(t *testing.T) { t.Error(err) } - idParse, err = GetID(t, template, "VMTEMPLATE") - - if idParse != template.ID { + idParse = template.ID + if idParse != idOrig { t.Errorf("Template ID does not match") } @@ -107,7 +100,7 @@ func TestTemplateInstantiate(t *testing.T) { } func TestTemplateUpdate(t *testing.T) { - template := createTemplate(t) + template, _ := createTemplate(t) tpl := NewTemplateBuilder() tpl.AddValue("A", "B") @@ -120,8 +113,13 @@ func TestTemplateUpdate(t *testing.T) { t.Error(err) } - if val, ok := template.XPath("/VMTEMPLATE/TEMPLATE/A"); !ok || val != "B" { - t.Errorf("Expecting A=B") + val, err := template.Template.Dynamic.GetContentByName("A") + if err != nil { + t.Errorf("Test failed, can't retrieve '%s', error: %s", "A", err.Error()) + } else { + if val != "B" { + t.Errorf("Expecting A=B") + } } // Delete template diff --git a/src/oca/go/src/goca/unmatched_tags.go b/src/oca/go/src/goca/unmatched_tags.go new file mode 100644 index 0000000000..2098cd13ee --- /dev/null +++ b/src/oca/go/src/goca/unmatched_tags.go @@ -0,0 +1,101 @@ +package goca + +import ( + "encoding/xml" + "fmt" +) + +// Common part + +// UnmatchedTag contains the tag informations +type UnmatchedTag struct { + XMLName xml.Name + Content string `xml:",chardata"` + //FullContent string `xml:",innerxml"` // for debug purpose, allow to see what's inside some tags +} + +// Store unmatched tags in a map +// Inspired from: https://stackoverflow.com/questions/30928770/marshall-map-to-xml-in-go/33110881 + +// NOTE: to be used in flat xml part with distinct tag names +// If it's not flat: the hash will contains key with empty values +// If there is several tags with the same name : only the last value will be stored + +// UnmatchedTagsMap store tags not handled by Unmarshal in a map, it should be labelled with `xml",any"` +type unmatchedTagsMap map[string]string + +func (m 
*unmatchedTagsMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + if *m == nil { + *m = unmatchedTagsMap{} + } + + e := UnmatchedTag{} + err := d.DecodeElement(&e, &start) + if err != nil { + return err + } + + // Fail the parsing of the whole xml + //if _, ok := (*m)[e.XMLName.Local]; ok { + // return fmt.Errorf("UnmatchedTagsMap: UnmarshalXML: Tag %s: multiple entries with the same name", e.XMLName.Local) + //} + (*m)[e.XMLName.Local] = e.Content + + return nil +} + +func (u *unmatchedTagsMap) GetContentByName(name string) string { + return ((map[string]string)(*u))[name] +} + +// Store unmatched tags in a slice + +// NOTE: to be used in flat xml part + +// UnmatchedTagsSlice store tags not handled by Unmarshal in a slice, it should be labelled with `xml",any"` +type unmatchedTagsSlice struct { + Tags []UnmatchedTag +} + +func (u *unmatchedTagsSlice) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var e UnmatchedTag + err := d.DecodeElement(&e, &start) + if err != nil { + return err + } + + u.Tags = append(u.Tags, e) + return nil +} + +// Retrieve slice of tags with given name +func (u *unmatchedTagsSlice) GetContentSliceByName(name string) []string { + content := make([]string, 0, 1) + for _, t := range u.Tags { + if t.XMLName.Local != name { + continue + } + content = append(content, t.Content) + } + return content +} + +// Retrieve a tag with given name, fail if not present or present more than once +func (u *unmatchedTagsSlice) GetContentByName(name string) (string, error) { + var content string + match := false + for _, t := range u.Tags { + if t.XMLName.Local != name { + continue + } + if match == true { + return "", fmt.Errorf("GetContentByName: multiple entries with the name %s", name) + } + content = t.Content + match = true + } + if match == false { + return "", fmt.Errorf("GetContentByName: tag %s not found", name) + } + return content, nil +} diff --git a/src/oca/go/src/goca/user.go b/src/oca/go/src/goca/user.go index 
58d98540ab..69dc6bc4a6 100644 --- a/src/oca/go/src/goca/user.go +++ b/src/oca/go/src/goca/user.go @@ -1,15 +1,46 @@ package goca -// User represents an OpenNebula User -type User struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // UserPool represents an OpenNebula UserPool type UserPool struct { - XMLResource + Users []userBase `xml:"USER"` + Quotas []quotas `xml:"QUOTAS"` + DefaultUserQuotas quotasList `xml:"DEFAULT_USER_QUOTAS"` +} + +// User represents an OpenNebula user +type User struct { + userBase + quotasList + DefaultUserQuotas quotasList `xml:"DEFAULT_USER_QUOTAS"` +} + +// User represents an OpenNebula User +type userBase struct { + ID uint `xml:"ID"` + GID int `xml:"GID"` + GroupsID []int `xml:"GROUPS>ID"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + Password string `xml:"PASSWORD"` + AuthDriver string `xml:"AUTH_DRIVER"` + Enabled int `xml:"ENABLED"` + LoginTokens []loginToken `xml:"LOGIN_TOKEN"` + Template userTemplate `xml:"TEMPLATE"` +} + +type userTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type loginToken struct { + Token string `xml:"TOKEN"` + ExpirationTime int `xml:"EXPIRATION_TIME"` + EGID int `xml:"EGID"` } // NewUserPool returns a user pool. A connection to OpenNebula is @@ -20,28 +51,44 @@ func NewUserPool() (*UserPool, error) { return nil, err } - userpool := &UserPool{XMLResource{body: response.Body()}} + userpool := &UserPool{} + err = xml.Unmarshal([]byte(response.Body()), userpool) + if err != nil { + return nil, err + } - return userpool, err + return userpool, nil } // NewUser finds a user object by ID. No connection to OpenNebula. func NewUser(id uint) *User { - return &User{ID: id} + return &User{userBase: userBase{ID: id}} } // NewUserFromName finds a user object by name. It connects to // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the user. 
func NewUserFromName(name string) (*User, error) { + var id uint + userPool, err := NewUserPool() if err != nil { return nil, err } - id, err := userPool.GetIDFromName(name, "/USER_POOL/USER") - if err != nil { - return nil, err + match := false + for i := 0; i < len(userPool.Users); i++ { + if userPool.Users[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = userPool.Users[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewUser(id), nil @@ -133,6 +180,5 @@ func (user *User) Info() error { if err != nil { return err } - user.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), user) } diff --git a/src/oca/go/src/goca/vdc.go b/src/oca/go/src/goca/vdc.go index bfc627f4ba..940eb7d7de 100644 --- a/src/oca/go/src/goca/vdc.go +++ b/src/oca/go/src/goca/vdc.go @@ -1,15 +1,49 @@ package goca -// Vdc represents an OpenNebula Vdc -type Vdc struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // VdcPool represents an OpenNebula VdcPool type VdcPool struct { - XMLResource + Vdcs []Vdc `xml:"VDC"` +} + +// Vdc represents an OpenNebula Vdc +type Vdc struct { + ID uint `xml:"ID"` + Name string `xml:"NAME"` + GroupsID []int `xml:"GROUPS>ID"` + Clusters []vdcCluster `xml:"CLUSTERS>CLUSTER"` + Hosts []vdcHost `xml:"HOSTS>HOST"` + Datastores []vdcDatastore `xml:"DATASTORES>DATASTORE"` + VNets []vdcVNet `xml:"VNETS>VNET"` + Template vdcTemplate `xml:"TEMPLATE"` +} + +type vdcTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type vdcCluster struct { + ZoneID int `xml:"ZONE_ID"` + ClusterID int `xml:"CLUSTER_ID"` +} + +type vdcHost struct { + ZoneID int `xml:"ZONE_ID"` + HostID int `xml:"HOST_ID"` +} + +type vdcDatastore struct { + ZoneID int `xml:"ZONE_ID"` + DatastoreID int `xml:"DATASTORE_ID"` +} + +type vdcVNet struct { + ZoneID int `xml:"ZONE_ID"` + VnetID int `xml:"VNET_ID"` } // 
NewVdcPool returns a vdc pool. A connection to OpenNebula is @@ -20,9 +54,13 @@ func NewVdcPool() (*VdcPool, error) { return nil, err } - vdcpool := &VdcPool{XMLResource{body: response.Body()}} + vdcPool := &VdcPool{} + err = xml.Unmarshal([]byte(response.Body()), vdcPool) + if err != nil { + return nil, err + } - return vdcpool, err + return vdcPool, nil } // NewVdc finds a vdc object by ID. No connection to OpenNebula. @@ -34,14 +72,26 @@ func NewVdc(id uint) *Vdc { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the vdc. func NewVdcFromName(name string) (*Vdc, error) { + var id uint + vdcPool, err := NewVdcPool() if err != nil { return nil, err } - id, err := vdcPool.GetIDFromName(name, "/VDC_POOL/VDC") - if err != nil { - return nil, err + match := false + for i := 0; i < len(vdcPool.Vdcs); i++ { + if vdcPool.Vdcs[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = vdcPool.Vdcs[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewVdc(id), nil @@ -88,8 +138,7 @@ func (vdc *Vdc) Info() error { if err != nil { return err } - vdc.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), vdc) } // AddGroup adds a group to the VDC diff --git a/src/oca/go/src/goca/virtualnetwork.go b/src/oca/go/src/goca/virtualnetwork.go index 416d014c9a..db1016e7ee 100644 --- a/src/oca/go/src/goca/virtualnetwork.go +++ b/src/oca/go/src/goca/virtualnetwork.go @@ -1,17 +1,97 @@ package goca -import "errors" - -// VirtualNetwork represents an OpenNebula VirtualNetwork -type VirtualNetwork struct { - XMLResource - ID uint - Name string -} +import ( + "encoding/xml" + "errors" +) // VirtualNetworkPool represents an OpenNebula VirtualNetworkPool type VirtualNetworkPool struct { - XMLResource + VirtualNetworks []virtualNetworkPoolBase `xml:"VNET"` +} + +// VirtualNetwork represents an OpenNebula 
VirtualNetwork +type VirtualNetwork struct { + virtualNetworkBase + Lock *Lock `xml:"LOCK"` + ARs []virtualNetworkAR `xml:"AR_POOL>AR"` +} + +// VirtualNetworkBase represents common attributes for parts of VirtualNetworkPool and VirtualNetwork +type virtualNetworkBase struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + Permissions *Permissions `xml:"PERMISSIONS"` + Clusters []int `xml:"CLUSTERS>ID"` + Bridge string `xml:"BRIDGE"` + BridgeType string `xml:"BRIDGE_TYPE"` // minOccurs=0 + ParentNetworkID string `xml:"PARENT_NETWORK_ID"` + VNMad string `xml:"VN_MAD"` + PhyDev string `xml:"PHYDEV"` + VlanID string `xml:"VLAN_ID"` // minOccurs=0 + OuterVlanID string `xml:"OUTER_VLAN_ID"` // minOccurs=0 + VlanIDAutomatic string `xml:"VLAN_ID_AUTOMATIC"` + OuterVlanIDAutomatic string `xml:"OUTER_VLAN_ID_AUTOMATIC"` + UsedLeases int `xml:"USED_LEASES"` + VRouters []int `xml:"VROUTERS>ID"` + Template virtualNetworkTemplate `xml:"TEMPLATE"` +} + +type virtualNetworkTemplate struct { + Dynamic unmatchedTagsSlice `xml:",any"` +} + +// virtualNetworkPoolBase is the VNET entry of the VirtualNetworkPool; its ARs carry allocation info +type virtualNetworkPoolBase struct { + virtualNetworkBase + ARs []virtualNetworkARPool `xml:"AR_POOL>AR"` +} + +type virtualNetworkARBase struct { + ARID string `xml:"AR_ID"` + GlobalPrefix string `xml:"GLOBAL_PREFIX"` // minOccurs=0 + IP string `xml:"IP"` // minOccurs=0 + MAC string `xml:"MAC"` + ParentNetworkARID string `xml:"PARENT_NETWORK_AR_ID"` // minOccurs=0 + Size int `xml:"SIZE"` + Type string `xml:"TYPE"` + ULAPrefix string `xml:"ULA_PREFIX"` // minOccurs=0 + VNMAD string `xml:"VN_MAD"` // minOccurs=0 +} + +type virtualNetworkARPool struct { + virtualNetworkARBase + Allocated string `xml:"ALLOCATED"` +} + +type virtualNetworkAR struct { + virtualNetworkARBase + MACEnd string `xml:"MAC_END"` + IPEnd string `xml:"IP_END"` + IP6ULA string `xml:"IP6_ULA"` + IP6ULAEnd string `xml:"IP6_ULA_END"` + IP6Global 
string `xml:"IP6_GLOBAL"` + IP6GlobalEnd string `xml:"IP6_GLOBAL_END"` + IP6 string `xml:"IP6"` + IP6End string `xml:"IP6_END"` + UsedLeases string `xml:"USED_LEASES"` + Leases []lease `xml:"LEASES>LEASE"` +} + +type lease struct { + IP string `xml:"IP"` + IP6 string `xml:"IP6"` + IP6Global string `xml:"IP6GLOBAL"` + IP6Link string `xml:"IP6LINK"` + IP6ULA string `xml:"IP6ULA"` + MAC string `xml:"MAC"` + VM int `xml:"VM"` + VNet int `xml:"VNET"` + VRouter int `xml:"VROUTER"` } // NewVirtualNetworkPool returns a virtualnetwork pool. A connection to OpenNebula is @@ -41,28 +121,44 @@ func NewVirtualNetworkPool(args ...int) (*VirtualNetworkPool, error) { return nil, err } - vnpool := &VirtualNetworkPool{XMLResource{body: response.Body()}} + vnPool := &VirtualNetworkPool{} + err = xml.Unmarshal([]byte(response.Body()), &vnPool) + if err != nil { + return nil, err + } - return vnpool, err + return vnPool, nil } // NewVirtualNetwork finds a virtualnetwork object by ID. No connection to OpenNebula. func NewVirtualNetwork(id uint) *VirtualNetwork { - return &VirtualNetwork{ID: id} + return &VirtualNetwork{virtualNetworkBase: virtualNetworkBase{ID: id}} } // NewVirtualNetworkFromName finds a virtualnetwork object by name. It connects to // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the virtualnetwork. 
func NewVirtualNetworkFromName(name string) (*VirtualNetwork, error) { - virtualnetworkPool, err := NewVirtualNetworkPool() + var id uint + + virtualNetworkPool, err := NewVirtualNetworkPool() if err != nil { return nil, err } - id, err := virtualnetworkPool.GetIDFromName(name, "/VNET_POOL/VNET") - if err != nil { - return nil, err + match := false + for i := 0; i < len(virtualNetworkPool.VirtualNetworks); i++ { + if virtualNetworkPool.VirtualNetworks[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = virtualNetworkPool.VirtualNetworks[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewVirtualNetwork(id), nil @@ -179,6 +275,5 @@ func (vn *VirtualNetwork) Info() error { if err != nil { return err } - vn.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), vn) } diff --git a/src/oca/go/src/goca/virtualnetwork_test.go b/src/oca/go/src/goca/virtualnetwork_test.go index cf475f4bcc..8e5985a0b9 100644 --- a/src/oca/go/src/goca/virtualnetwork_test.go +++ b/src/oca/go/src/goca/virtualnetwork_test.go @@ -15,7 +15,7 @@ VN_MAD = "vxlan" ` // Helper to create a Virtual Network -func createVirtualNetwork(t *testing.T) *VirtualNetwork { +func createVirtualNetwork(t *testing.T) (*VirtualNetwork, uint) { id, err := CreateVirtualNetwork(vnTpl, -1) if err != nil { t.Error(err) @@ -29,26 +29,21 @@ func createVirtualNetwork(t *testing.T) *VirtualNetwork { t.Error(err) } - return vnet + return vnet, id } func TestVirtualNetwork(t *testing.T) { - vnet := createVirtualNetwork(t) + var err error - idParse, err := GetID(t, vnet, "VNET") - if err != nil { - t.Error(err) - } + vnet, idOrig := createVirtualNetwork(t) - if idParse != vnet.ID { + idParse := vnet.ID + if idParse != idOrig { t.Errorf("Virtual Network ID does not match") } // Get virtual network by Name - name, ok := vnet.XPath("/VNET/NAME") - if !ok { - t.Errorf("Could not get name") - } 
+ name := vnet.Name vnet, err = NewVirtualNetworkFromName(name) if err != nil { @@ -60,9 +55,8 @@ func TestVirtualNetwork(t *testing.T) { t.Error(err) } - idParse, err = GetID(t, vnet, "VNET") - - if idParse != vnet.ID { + idParse = vnet.ID + if idParse != idOrig { t.Errorf("Virtual Network ID does not match") } @@ -78,16 +72,10 @@ func TestVirtualNetwork(t *testing.T) { } // Get Virtual Network Owner Name - uname, ok := vnet.XPath("/VNET/UNAME") - if !ok { - t.Errorf("Could not get user name") - } + uname := vnet.UName - // Get Virtual Network owner group Name - gname, ok := vnet.XPath("/VNET/GNAME") - if !ok { - t.Errorf("Could not get group name") - } + // Get Image owner group Name + gname := vnet.GName // Compare with caller username caller := strings.Split(client.token, ":")[0] @@ -117,16 +105,10 @@ func TestVirtualNetwork(t *testing.T) { } // Get Virtual Network Owner Name - uname, ok = vnet.XPath("/VNET/UNAME") - if !ok { - t.Errorf("Could not get user name") - } + uname = vnet.UName - // Get Virtual Network Owner Name - gname, ok = vnet.XPath("/VNET/GNAME") - if !ok { - t.Errorf("Could not get user name") - } + // Get Image owner group Name + gname = vnet.GName if "serveradmin" != uname { t.Error("Virtual network owner is not oenadmin") diff --git a/src/oca/go/src/goca/virtualrouter.go b/src/oca/go/src/goca/virtualrouter.go index c21ac33752..3ed42283cd 100644 --- a/src/oca/go/src/goca/virtualrouter.go +++ b/src/oca/go/src/goca/virtualrouter.go @@ -1,19 +1,41 @@ package goca import ( + "encoding/xml" "errors" ) -// VirtualRouter represents an OpenNebula VirtualRouter -type VirtualRouter struct { - XMLResource - ID uint - Name string -} - // VirtualRouterPool represents an OpenNebula VirtualRouterPool type VirtualRouterPool struct { - XMLResource + VirtualRouters []VirtualRouter `xml:"VROUTER"` +} + +// VirtualRouter represents an OpenNebula VirtualRouter +type VirtualRouter struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName 
string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + LockInfos *Lock `xml:"LOCK"` + Permissions *Permissions `xml:"PERMISSIONS"` + Type int `xml:"TYPE"` + DiskType int `xml:"DISK_TYPE"` + Persistent int `xml:"PERSISTENT"` + VMsID []int `xml:"VMS>ID"` + Template virtualRouterTemplate `xml:"TEMPLATE"` +} + +// VirtualRouterTemplate represent the template part of the OpenNebula VirtualRouter +type virtualRouterTemplate struct { + NIC []virtualRouterNIC `xml:"NIC"` + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type virtualRouterNIC struct { + NICID int `xml:"NIC_ID"` + Dynamic unmatchedTagsSlice `xml:",any"` } // NewVirtualRouterPool returns a virtual router pool. A connection to OpenNebula is @@ -39,10 +61,14 @@ func NewVirtualRouterPool(args ...int) (*VirtualRouterPool, error) { return nil, err } - vrouterpool := &VirtualRouterPool{XMLResource{body: response.Body()}} + vrouterPool := &VirtualRouterPool{} - return vrouterpool, err + err = xml.Unmarshal([]byte(response.Body()), vrouterPool) + if err != nil { + return nil, err + } + return vrouterPool, nil } // NewVirtualRouter finds a virtual router object by ID. No connection to OpenNebula. @@ -54,14 +80,25 @@ func NewVirtualRouter(id uint) *VirtualRouter { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the virtual router. 
func NewVirtualRouterFromName(name string) (*VirtualRouter, error) { + var id uint + vrouterPool, err := NewVirtualRouterPool() if err != nil { return nil, err } - id, err := vrouterPool.GetIDFromName(name, "/VROUTER_POOL/VROUTER") - if err != nil { - return nil, err + match := false + for i := 0; i < len(vrouterPool.VirtualRouters); i++ { + if vrouterPool.VirtualRouters[i].Name == name { + if match { + return nil, errors.New("multiple resources with that name") + } + id = vrouterPool.VirtualRouters[i].ID + match = true + } + } + if !match { + return nil, errors.New("resource not found") } return NewVirtualRouter(id), nil @@ -84,8 +121,7 @@ func (vr *VirtualRouter) Info() error { if err != nil { return err } - vr.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), vr) } // Update will modify the virtual router. If appendVirtualRouter is 0, it will diff --git a/src/oca/go/src/goca/virtualrouter_test.go b/src/oca/go/src/goca/virtualrouter_test.go index 209f065d4a..7bc0299a29 100644 --- a/src/oca/go/src/goca/virtualrouter_test.go +++ b/src/oca/go/src/goca/virtualrouter_test.go @@ -24,7 +24,7 @@ func TestVirtualRouter(t *testing.T){ vr = NewVirtualRouter(vr_id) vr.Info() - actual, _:= vr.XMLResource.XPath("/VROUTER/NAME") + actual := vr.Name if actual != vr_name { t.Errorf("Test failed, expected: '%s', got: '%s'", vr_name, actual) @@ -41,15 +41,22 @@ func TestVirtualRouter(t *testing.T){ vr.Info() - actual_1, _ := vr.XMLResource.XPath("/VROUTER/TEMPLATE/ATT1") - actual_3, _ := vr.XMLResource.XPath("/VROUTER/TEMPLATE/ATT3") - - if actual_1 != "VAL1" { - t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1) + actual_1, err := vr.Template.Dynamic.GetContentByName("ATT1") + if err != nil { + t.Errorf("Test failed, can't retrieve '%s', error: %s", "ATT1", err.Error()) + } else { + if actual_1 != "VAL1" { + t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1) + } } - if actual_3 != "VAL3" { - t.Errorf("Test 
failed, expected: '%s', got: '%s'", "VAL3", actual_3) + actual_3, err := vr.Template.Dynamic.GetContentByName("ATT3") + if err != nil { + t.Errorf("Test failed, can't retrieve '%s', error: %s", "ATT3", err.Error()) + } else { + if actual_3 != "VAL3" { + t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL3", actual_3) + } } //Change permissions of VirtualRouter @@ -61,11 +68,11 @@ func TestVirtualRouter(t *testing.T){ vr.Info() - expected := "111111111" - actual, _ = vr.XMLResource.XPath("/VROUTER/PERMISSIONS") + expected_perm := Permissions{1, 1, 1, 1, 1, 1, 1, 1, 1} + actual_perm := vr.Permissions - if actual != expected { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected, actual) + if actual_perm == nil || *actual_perm != expected_perm { + t.Errorf("Test failed, expected: '%s', got: '%s'", expected_perm.String(), actual_perm.String()) } //Change owner of VirtualRouter @@ -77,17 +84,17 @@ func TestVirtualRouter(t *testing.T){ vr.Info() - expected_usr := "1" - expected_grp := "1" - actual_usr, _ := vr.XMLResource.XPath("/VROUTER/UID") - actual_grp, _ := vr.XMLResource.XPath("/VROUTER/GID") + expected_usr := 1 + expected_grp := 1 + actual_usr := vr.UID + actual_grp := vr.GID if actual_usr != expected_usr { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected_usr, actual_usr) + t.Errorf("Test failed, expected: '%d', got: '%d'", expected_usr, actual_usr) } if actual_grp != expected_grp { - t.Errorf("Test failed, expected: '%s', got: '%s'", expected_grp, actual_grp) + t.Errorf("Test failed, expected: '%d', got: '%d'", expected_grp, actual_grp) } rename := vr_name + "-renamed" @@ -101,7 +108,7 @@ func TestVirtualRouter(t *testing.T){ vr.Info() - actual, _ = vr.XMLResource.XPath("/VROUTER/NAME") + actual = vr.Name if actual != rename { t.Errorf("Test failed, expected: '%s', got: '%s'", rename, actual) @@ -149,10 +156,13 @@ func TestVirtualRouter(t *testing.T){ vr.Info() - actual, _ = vr.XMLResource.XPath("/VROUTER/TEMPLATE/NIC/NETWORK") - - if 
actual != "go-net" { - t.Errorf("Test failed, expected: '%s', got: '%s'", "go-net", actual) + actualNet, err := vr.Template.Dynamic.GetContentByName("NETWORK") + if err != nil { + t.Errorf("Test failed, can't retrieve '%s', error: %s", "NETWORK", err.Error()) + } else { + if actualNet != "go-net" { + t.Errorf("Test failed, expected: '%s', got: '%s'", "go-net", actualNet) + } } vnet := NewVirtualNetwork(vnet_id) @@ -174,9 +184,12 @@ func TestVirtualRouter(t *testing.T){ vr.Info() - actual, _ = vr.XMLResource.XPath("/VROUTER/LOCK/LOCKED") - if actual != "4" { - t.Errorf("Test failed, expected: '%s', got: '%s'", "4", actual) + actualLock := vr.LockInfos + if actualLock == nil { + t.Errorf("Test failed, expected: '%s', got: '%s'", "LockInfos", "nil") + } + if actualLock.Locked != 4 { + t.Errorf("Test failed, expected: '%d', got: '%d'", 4, actualLock.Locked) } //Unlock VirtualRouter @@ -188,9 +201,13 @@ func TestVirtualRouter(t *testing.T){ vr.Info() - actual, _= vr.XMLResource.XPath("/VROUTER/LOCK/LOCKED") - if actual != "" { - t.Errorf("Test failed, expected: '%s', got: '%s'", "", actual) + actualLock = vr.LockInfos + if actualLock == nil { + t.Errorf("Test failed, expected: '%s', got: '%s'", "LockInfos", "nil") + } + // XXX is it useful ? fail at previous part ? 
+ if actualLock.Locked != 0 { + t.Errorf("Test failed, expected: '%d', got: '%d'", 0, actualLock.Locked) } //Delete VirtualRouter diff --git a/src/oca/go/src/goca/vm.go b/src/oca/go/src/goca/vm.go index 94c20034ce..0afb1e760c 100644 --- a/src/oca/go/src/goca/vm.go +++ b/src/oca/go/src/goca/vm.go @@ -1,20 +1,146 @@ package goca import ( + "encoding/xml" "errors" - "strconv" + "fmt" ) -// VM represents an OpenNebula Virtual Machine -type VM struct { - XMLResource - ID uint - Name string -} - // VMPool represents an OpenNebula Virtual Machine pool type VMPool struct { - XMLResource + VMs []vmBase `xml:"VM"` +} + +// VM represents an OpenNebula Virtual Machine +type VM struct { + vmBase + LockInfos *Lock `xml:"LOCK"` +} + +type vmBase struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + Permissions *Permissions `xml:"PERMISSIONS"` + LastPoll int `xml:"LAST_POLL"` + StateRaw int `xml:"STATE"` + LCMStateRaw int `xml:"LCM_STATE"` + PrevStateRaw int `xml:"PREV_STATE"` + PrevLCMStateRaw int `xml:"PREV_LCM_STATE"` + ReschedValue int `xml:"RESCHED"` + STime int `xml:"STIME"` + ETime int `xml:"ETIME"` + DeployID string `xml:"DEPLOY_ID"` + Monitoring vmMonitoring `xml:"MONITORING"` + Template vmTemplate `xml:"TEMPLATE"` + UserTemplate *vmUserTemplate `xml:"USER_TEMPLATE"` + HistoryRecords []vmHistoryRecord `xml:"HISTORY_RECORDS>HISTORY"` +} + +type vmMonitoring struct { + DiskSize []vmMonitoringDiskSize `xml:"DISK_SIZE"` + SnapshotSize []vmMonitoringSnapshotSize `xml:"SNAPSHOT_SIZE"` + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type vmMonitoringDiskSize struct { + ID int `xml:"ID"` + Size int `xml:"SIZE"` +} + +// History records +type vmHistoryRecord struct { + OID int `xml:"OID"` + SEQ int `xml:"SEQ"` + Hostname string `xml:"HOSTNAME"` + HID int `xml:"HID"` + CID int `xml:"CID"` + DSID int `xml:"DS_ID"` + Action int `xml:"ACTION"` + UID int `xml:"UID"` + GID int 
`xml:"GID"` + RequestID string `xml:"REQUEST_ID"` + PSTime int `xml:"PSTIME"` + PETime int `xml:"PETIME"` + RSTime int `xml:"RSTIME"` + RETime int `xml:"RETIME"` + ESTime int `xml:"ESTIME"` + EETime int `xml:"EETIME"` + STime int `xml:"STIME"` + ETime int `xml:"ETIME"` + VMMad string `xml:"VM_MAD"` + TMMad string `xml:"TM_MAD"` + Snapshots []vmHistoryRecordSnapshot `xml:"SNAPSHOTS"` +} + +// VMUserTemplate contain custom attributes +type vmUserTemplate struct { + Error string `xml:"ERROR"` + SchedMessage string `xml:"SCHED_MESSAGE"` + Dynamic unmatchedTagsMap `xml:",any"` +} + +type vmTemplate struct { + CPU float64 `xml:"CPU"` + Memory int `xml:"MEMORY"` + NIC []vmNic `xml:"NIC"` + NICAlias []vmNicAlias `xml:"NIC_ALIAS"` + Context *vmContext `xml:"CONTEXT"` + Disk []vmDisk `xml:"DISK"` + Graphics *vmGraphics `xml:"GRAPHICS"` + OS *vmOS `xml:"OS"` + Snapshot []VMSnapshot `xml:"SNAPSHOT"` + SecurityGroupRule []vmSecurityGroupRule `xml:"SECURITY_GROUP_RULE"` + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type vmContext struct { + Dynamic unmatchedTagsMap `xml:",any"` +} + +type vmNic struct { + ID int `xml:"NIC_ID"` + Network string `xml:"NETWORK"` + IP string `xml:"IP"` + MAC string `xml:"MAC"` + PhyDev string `xml:"PHYDEV"` + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type vmNicAlias struct { + ID int `xml:"NIC_ID"` // minOccurs=1 + Parent string `xml:"PARENT"` // minOccurs=1 + ParentId string `xml:"PARENT_ID"` // minOccurs=1 +} + +type vmGraphics struct { + Listen string `xml:"LISTEN"` + Port string `xml:"PORT"` + Type string `xml:"TYPE"` +} + +type vmDisk struct { + ID int `xml:"DISK_ID"` + Datastore string `xml:"DATASTORE"` + DiskType string `xml:"DISK_TYPE"` + Image string `xml:"IMAGE"` + Driver string `xml:"DRIVER"` + OriginalSize int `xml:"ORIGINAL_SIZE"` + Size int `xml:"SIZE"` + Dynamic unmatchedTagsSlice `xml:",any"` +} + +type vmOS struct { + Arch string `xml:"ARCH"` + Boot string `xml:"BOOT"` +} + +type vmSecurityGroupRule struct { + 
securityGroupRule + SecurityGroup string `xml:"SECURITY_GROUP_NAME"` } // VMState is the state of the Virtual Machine @@ -58,6 +184,14 @@ const ( CloningFailure VMState = 11 ) +func (st VMState) isValid() bool { + if (st >= Init && st <= Done) || + (st >= Poweroff && st <= CloningFailure) { + return true + } + return false +} + func (s VMState) String() string { switch s { case Init: @@ -286,6 +420,15 @@ const ( DiskResizeUndeployed LCMState = 64 ) +func (st LCMState) isValid() bool { + if (st >= LcmInit && st <= Shutdown) || + (st >= CleanupResubmit && st <= DiskSnapshot) || + (st >= DiskSnapshotDelete && st <= DiskResizeUndeployed) { + return true + } + return false +} + func (l LCMState) String() string { switch l { case LcmInit: @@ -301,7 +444,7 @@ func (l LCMState) String() string { case SaveStop: return "SAVE_STOP" case SaveSuspend: - return "SAVESuspend" + return "SAVE_SUSPEND" case SaveMigrate: return "SAVE_MIGRATE" case PrologMigrate: @@ -327,7 +470,7 @@ func (l LCMState) String() string { case BootPoweroff: return "BOOT_POWEROFF" case BootSuspended: - return "BOOTSuspendED" + return "BOOT_SUSPENDED" case BootStopped: return "BOOT_STOPPED" case CleanupDelete: @@ -341,7 +484,7 @@ func (l LCMState) String() string { case HotplugSaveasPoweroff: return "HOTPLUG_SAVEAS_POWEROFF" case HotplugSaveasSuspended: - return "HOTPLUG_SAVEASSuspendED" + return "HOTPLUG_SAVEAS_SUSPENDED" case ShutdownUndeploy: return "SHUTDOWN_UNDEPLOY" case EpilogUndeploy: @@ -375,9 +518,9 @@ func (l LCMState) String() string { case PrologMigratePoweroffFailure: return "PROLOG_MIGRATE_POWEROFF_FAILURE" case PrologMigrateSuspend: - return "PROLOG_MIGRATESuspend" + return "PROLOG_MIGRATE_SUSPEND" case PrologMigrateSuspendFailure: - return "PROLOG_MIGRATESuspend_FAILURE" + return "PROLOG_MIGRATE_SUSPEND_FAILURE" case BootUndeployFailure: return "BOOT_UNDEPLOY_FAILURE" case BootStoppedFailure: @@ -393,11 +536,11 @@ func (l LCMState) String() string { case DiskSnapshotDeletePoweroff: return 
"DISK_SNAPSHOT_DELETE_POWEROFF" case DiskSnapshotSuspended: - return "DISK_SNAPSHOTSuspendED" + return "DISK_SNAPSHOT_SUSPENDED" case DiskSnapshotRevertSuspended: - return "DISK_SNAPSHOT_REVERTSuspendED" + return "DISK_SNAPSHOT_REVERT_SUSPENDED" case DiskSnapshotDeleteSuspended: - return "DISK_SNAPSHOT_DELETESuspendED" + return "DISK_SNAPSHOT_DELETE_SUSPENDED" case DiskSnapshot: return "DISK_SNAPSHOT" case DiskSnapshotDelete: @@ -451,13 +594,16 @@ func NewVMPool(args ...int) (*VMPool, error) { return nil, err } - vmpool := &VMPool{XMLResource{body: response.Body()}} - - return vmpool, err + vmPool := &VMPool{} + err = xml.Unmarshal([]byte(response.Body()), vmPool) + if err != nil { + return nil, err + } + return vmPool, nil } -// Monitoring returns all the virtual machine monitorin records +// Monitoring returns all the virtual machine monitoring records // filter flag: // -4: Resources belonging to the user's primary group // -3: Resources belonging to the user @@ -529,51 +675,62 @@ func CreateVM(template string, pending bool) (uint, error) { // NewVM finds an VM by ID returns a new VM object. At this stage no // connection to OpenNebula is performed. func NewVM(id uint) *VM { - return &VM{ID: id} + return &VM{vmBase: vmBase{ID: id}} } // NewVMFromName finds the VM by name and returns a VM object. It connects to // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the VM. 
func NewVMFromName(name string) (*VM, error) { - vmpool, err := NewVMPool() + var id uint + + vmPool, err := NewVMPool() if err != nil { return nil, err } - id, err := vmpool.GetIDFromName(name, "/VM_POOL/VM") - if err != nil { - return nil, err + match := false + for i := 0; i < len(vmPool.VMs); i++ { + if vmPool.VMs[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = vmPool.VMs[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewVM(id), nil } // State returns the VMState and LCMState -func (vm *VM) State() (VMState, LCMState, error) { - vmStateString, ok := vm.XPath("/VM/STATE") - if ok != true { - return -1, -1, errors.New("Unable to parse VM State") +func (vm *vmBase) State() (VMState, LCMState, error) { + state := VMState(vm.StateRaw) + if !state.isValid() { + return -1, -1, fmt.Errorf("VM State: this state value is not currently handled: %d\n", vm.StateRaw) } - - lcmStateString, ok := vm.XPath("/VM/LCM_STATE") - if ok != true { - return -1, -1, errors.New("Unable to parse LCM State") + lcmState := LCMState(vm.LCMStateRaw) + if !lcmState.isValid() { + return state, -1, fmt.Errorf("VM LCMState: this state value is not currently handled: %d\n", vm.LCMStateRaw) } - - vmState, _ := strconv.Atoi(vmStateString) - lcmState, _ := strconv.Atoi(lcmStateString) - - return VMState(vmState), LCMState(lcmState), nil + return state, lcmState, nil } // StateString returns the VMState and LCMState as strings -func (vm *VM) StateString() (string, string, error) { - vmState, lcmState, err := vm.State() - if err != nil { - return "", "", err +func (vm *vmBase) StateString() (string, string, error) { + state := VMState(vm.StateRaw) + if !state.isValid() { + return "", "", fmt.Errorf("VM State: this state value is not currently handled: %d\n", vm.StateRaw) } - return VMState(vmState).String(), LCMState(lcmState).String(), nil + lcmState := LCMState(vm.LCMStateRaw) + if 
!lcmState.isValid() { + return state.String(), "", fmt.Errorf("VM LCMState: this state value is not currently handled: %d\n", vm.LCMStateRaw) + } + return state.String(), lcmState.String(), nil } // Action is the generic method to run any action on the VM @@ -588,8 +745,7 @@ func (vm *VM) Info() error { if err != nil { return err } - vm.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), vm) } // Update will modify the VM's template. If appendTemplate is 0, it will diff --git a/src/oca/go/src/goca/vm_test.go b/src/oca/go/src/goca/vm_test.go index 736de28d5f..799aa58a7f 100644 --- a/src/oca/go/src/goca/vm_test.go +++ b/src/oca/go/src/goca/vm_test.go @@ -104,8 +104,7 @@ func (s *VMSuite) TestVMUpdate(c *C) { err = vm.Info() c.Assert(err, IsNil) - val, ok := vm.XPath("/VM/USER_TEMPLATE/A") - c.Assert(ok, Equals, true) + val := vm.UserTemplate.Dynamic.GetContentByName("A") c.Assert(val, Equals, "B") } @@ -194,11 +193,7 @@ func (s *VMSuite) TestVMResize(c *C) { err = vm.Info() c.Assert(err, IsNil) - cpu, ok := vm.XPath("/VM/TEMPLATE/CPU") - c.Assert(ok, Equals, true) - c.Assert(cpu, Equals, "2.5") + c.Assert(vm.Template.CPU, Equals, "2.5") - memory, ok := vm.XPath("/VM/TEMPLATE/MEMORY") - c.Assert(ok, Equals, true) - c.Assert(memory, Equals, "512") + c.Assert(vm.Template.Memory, Equals, "512") } diff --git a/src/oca/go/src/goca/vntemplate.go b/src/oca/go/src/goca/vntemplate.go index 104dd86e0e..7b26a9e84a 100644 --- a/src/oca/go/src/goca/vntemplate.go +++ b/src/oca/go/src/goca/vntemplate.go @@ -1,19 +1,34 @@ package goca +// Since version 5.8 of OpenNebula + import ( + "encoding/xml" "errors" ) -// VNTemplate represents an OpenNebula Virtual Network Template -type VNTemplate struct { - XMLResource - ID uint - Name string -} - // VNTemplatePool represents an OpenNebula Virtual Network TemplatePool type VNTemplatePool struct { - XMLResource + VNTemplates []VNTemplate `xml:"VNTEMPLATE"` +} + +// VNTemplate represents an OpenNebula Virtual 
Network Template +type VNTemplate struct { + ID uint `xml:"ID"` + UID int `xml:"UID"` + GID int `xml:"GID"` + UName string `xml:"UNAME"` + GName string `xml:"GNAME"` + Name string `xml:"NAME"` + LockInfos *Lock `xml:"LOCK"` + Permissions Permissions `xml:"PERMISSIONS"` + RegTime string `xml:"REGTIME"` + Template vnTemplateTemplate `xml:"TEMPLATE"` +} + +type vnTemplateTemplate struct { + VNMad string `xml:"VN_MAD"` + Dynamic unmatchedTagsSlice `xml:",any"` } // NewVNTemplatePool returns a vntemplate pool. A connection to OpenNebula is @@ -39,9 +54,13 @@ func NewVNTemplatePool(args ...int) (*VNTemplatePool, error) { return nil, err } - vntemplatepool := &VNTemplatePool{XMLResource{body: response.Body()}} + vnTemplatePool := &VNTemplatePool{} + err = xml.Unmarshal([]byte(response.Body()), vnTemplatePool) + if err != nil { + return nil, err + } - return vntemplatepool, err + return vnTemplatePool, nil } @@ -54,14 +73,26 @@ func NewVNTemplate(id uint) *VNTemplate { // OpenNebula to retrieve the pool, but doesn't perform the Info() call to // retrieve the attributes of the vntemplate. 
func NewVNTemplateFromName(name string) (*VNTemplate, error) { - vntemplatePool, err := NewVNTemplatePool() + var id uint + + vnTemplatePool, err := NewVNTemplatePool() if err != nil { return nil, err } - id, err := vntemplatePool.GetIDFromName(name, "/VNTEMPLATE_POOL/VNTEMPLATE") - if err != nil { - return nil, err + match := false + for i := 0; i < len(vnTemplatePool.VNTemplates); i++ { + if vnTemplatePool.VNTemplates[i].Name != name { + continue + } + if match { + return nil, errors.New("multiple resources with that name") + } + id = vnTemplatePool.VNTemplates[i].ID + match = true + } + if !match { + return nil, errors.New("resource not found") } return NewVNTemplate(id), nil @@ -80,8 +111,10 @@ func CreateVNTemplate(vntemplate string) (uint, error) { // Info connects to OpenNebula and fetches the information of the VNTemplate func (vntemplate *VNTemplate) Info() error { response, err := client.Call("one.vntemplate.info", vntemplate.ID) - vntemplate.body = response.Body() - return err + if err != nil { + return err + } + return xml.Unmarshal([]byte(response.Body()), vntemplate) } // Update will modify the vntemplate. If appendTemplate is 0, it will diff --git a/src/oca/go/src/goca/xmlresource.go b/src/oca/go/src/goca/xmlresource.go index 2a908f4fbf..66608223ee 100644 --- a/src/oca/go/src/goca/xmlresource.go +++ b/src/oca/go/src/goca/xmlresource.go @@ -1,13 +1,5 @@ package goca -import ( - "bytes" - "errors" - "strconv" - - "gopkg.in/xmlpath.v2" -) - const ( // PoolWhoPrimaryGroup resources belonging to the user’s primary group. PoolWhoPrimaryGroup = -4 @@ -24,89 +16,3 @@ const ( // the query. PoolWhoGroup = -1 ) - -// XMLResource contains an XML body field. All the resources in OpenNebula are -// of this kind. -type XMLResource struct { - body string -} - -// XMLIter is used to iterate over XML xpaths in an object. -type XMLIter struct { - iter *xmlpath.Iter -} - -// XMLNode represent an XML node. 
-type XMLNode struct { - node *xmlpath.Node -} - -// Body accesses the body of an XMLResource -func (r *XMLResource) Body() string { - return r.body -} - -// XPath returns the string pointed at by xpath, for an XMLResource -func (r *XMLResource) XPath(xpath string) (string, bool) { - path := xmlpath.MustCompile(xpath) - b := bytes.NewBufferString(r.Body()) - - root, _ := xmlpath.Parse(b) - - return path.String(root) -} - -// XPathIter returns an XMLIter object pointed at by the xpath -func (r *XMLResource) XPathIter(xpath string) *XMLIter { - path := xmlpath.MustCompile(xpath) - b := bytes.NewBufferString(string(r.Body())) - - root, _ := xmlpath.Parse(b) - - return &XMLIter{iter: path.Iter(root)} -} - -// GetIDFromName finds the a resource by ID by looking at an xpath contained -// in that resource -func (r *XMLResource) GetIDFromName(name string, xpath string) (uint, error) { - var id int - var match = false - - iter := r.XPathIter(xpath) - for iter.Next() { - node := iter.Node() - - n, _ := node.XPath("NAME") - if n == name { - if match { - return 0, errors.New("multiple resources with that name") - } - - idString, _ := node.XPath("ID") - id, _ = strconv.Atoi(idString) - match = true - } - } - - if !match { - return 0, errors.New("resource not found") - } - - return uint(id), nil -} - -// Next moves on to the next resource -func (i *XMLIter) Next() bool { - return i.iter.Next() -} - -// Node returns the XMLNode -func (i *XMLIter) Node() *XMLNode { - return &XMLNode{node: i.iter.Node()} -} - -// XPath returns an XMLNode pointed at by xpath -func (n *XMLNode) XPath(xpath string) (string, bool) { - path := xmlpath.MustCompile(xpath) - return path.String(n.node) -} diff --git a/src/oca/go/src/goca/zone.go b/src/oca/go/src/goca/zone.go index 9728c3171e..834130aea1 100644 --- a/src/oca/go/src/goca/zone.go +++ b/src/oca/go/src/goca/zone.go @@ -1,17 +1,83 @@ package goca -// Zone represents an OpenNebula Zone -type Zone struct { - XMLResource - ID uint - Name string -} 
+import ( + "encoding/xml" + "errors" + "fmt" +) // ZonePool represents an OpenNebula ZonePool type ZonePool struct { - XMLResource + ID uint `xml:"ZONE>ID"` + Name string `xml:"ZONE>NAME"` + Template zoneTemplate `xml:"ZONE>TEMPLATE"` + ServerPool []zoneServer `xml:"ZONE>SERVER_POOL>SERVER"` } +// Zone represents an OpenNebula Zone +type Zone struct { + ID uint `xml:"ID"` + Name string `xml:"NAME"` + Template zoneTemplate `xml:"TEMPLATE"` + ServerPool []zoneServer `xml:"SERVER_POOL>SERVER"` +} + +type zoneServer struct { + ID int `xml:"ID"` + Name string `xml:"NAME"` + Endpoint string `xml:"ENDPOINT"` +} + +type zoneTemplate struct { + Endpoint string `xml:"ENDPOINT"` +} + +// ZoneServerRaftStatus contains the raft status datas of a server +type ZoneServerRaftStatus struct { + ID int `xml:"SERVER_ID"` + StateRaw int `xml:"STATE"` + Term int `xml:"TERM"` + Votedfor int `xml:"VOTEDFOR"` + Commit int `xml:"COMMIT"` + LogIndex int `xml:"LOG_INDEX"` + FedlogIndex int `xml:"FEDLOG_INDEX"` +} + +// ZoneServerRaftState is the state of an OpenNebula server from a zone (See HA and Raft) +type ZoneServerRaftState int + +const ( + // ZoneServerRaftSolo is the initial leader + ZoneServerRaftSolo ZoneServerRaftState = 0 + + // ZoneServerRaftCandidate when the server is candidate to election + ZoneServerRaftCandidate = 1 + + // ZoneServerRaftFollower when the server is a follower + ZoneServerRaftFollower = 2 + + // ZoneServerRaftLeader when the server is the leader + ZoneServerRaftLeader = 3 +) + +func (st ZoneServerRaftState) isValid() bool { + if st >= ZoneServerRaftSolo && st <= ZoneServerRaftLeader { + return true + } + return false +} + +func (st ZoneServerRaftState) String() string { + return [...]string{ + "SOLO", + "CANDIDATE", + "FOLLOWER", + "LEADER", + }[st] +} + +// NewZonePool returns a zone pool. A connection to OpenNebula is + // NewZonePool returns a zone pool. A connection to OpenNebula is // performed. 
func NewZonePool() (*ZonePool, error) { @@ -20,9 +86,13 @@ func NewZonePool() (*ZonePool, error) { return nil, err } - zonepool := &ZonePool{XMLResource{body: response.Body()}} + zonePool := &ZonePool{} + err = xml.Unmarshal([]byte(response.Body()), zonePool) + if err != nil { + return nil, err + } - return zonepool, err + return zonePool, err } // NewZone finds a zone object by ID. No connection to OpenNebula. @@ -39,12 +109,11 @@ func NewZoneFromName(name string) (*Zone, error) { return nil, err } - id, err := zonePool.GetIDFromName(name, "/ZONE_POOL/ZONE") - if err != nil { - return nil, err + if zonePool.Name != name { + return nil, errors.New("resource not found") } - return NewZone(id), nil + return NewZone(zonePool.ID), nil } // CreateZone allocates a new zone. It returns the new zone ID. @@ -87,6 +156,37 @@ func (zone *Zone) Info() error { if err != nil { return err } - zone.body = response.Body() - return nil + return xml.Unmarshal([]byte(response.Body()), zone) +} + +//GetRaftStatus give the raft status of the server behind the current RPC endpoint. To get endpoints make an info call. 
+func GetRaftStatus(serverUrl string) (*ZoneServerRaftStatus, error) { + response, err := client.endpointCall(serverUrl, "one.zone.raftstatus") + if err != nil { + return nil, err + } + s := &ZoneServerRaftStatus{} + err = xml.Unmarshal([]byte(response.Body()), s) + if err != nil { + return nil, err + } + return s, nil +} + +// State looks up the state of the zone server and returns the ZoneServerRaftState +func (server *ZoneServerRaftStatus) State() (ZoneServerRaftState, error) { + state := ZoneServerRaftState(server.StateRaw) + if !state.isValid() { + return -1, fmt.Errorf("Zone server State: this state value is not currently handled: %d\n", server.StateRaw) + } + return state, nil +} + +// StateString returns the state in string format +func (server *ZoneServerRaftStatus) StateString() (string, error) { + state := ZoneServerRaftState(server.StateRaw) + if !state.isValid() { + return "", fmt.Errorf("Zone server StateString: this state value is not currently handled: %d\n", server.StateRaw) + } + return state.String(), nil } diff --git a/src/oca/java/src/org/opennebula/client/OneSystem.java b/src/oca/java/src/org/opennebula/client/OneSystem.java index 87770200a0..23f28d7e9e 100644 --- a/src/oca/java/src/org/opennebula/client/OneSystem.java +++ b/src/oca/java/src/org/opennebula/client/OneSystem.java @@ -32,7 +32,7 @@ public class OneSystem private static final String GROUP_QUOTA_INFO = "groupquota.info"; private static final String GROUP_QUOTA_UPDATE = "groupquota.update"; - public static final String VERSION = "5.7.85"; + public static final String VERSION = "5.7.90"; public OneSystem(Client client) { diff --git a/src/oca/java/src/org/opennebula/client/vm/VirtualMachinePool.java b/src/oca/java/src/org/opennebula/client/vm/VirtualMachinePool.java index 3ceeacce6d..a0491ebbde 100644 --- a/src/oca/java/src/org/opennebula/client/vm/VirtualMachinePool.java +++ b/src/oca/java/src/org/opennebula/client/vm/VirtualMachinePool.java @@ -177,6 +177,36 @@ public class 
VirtualMachinePool extends Pool implements Iterable return client.call(INFO_METHOD, filter, startId, endId, state); } + /** + * Retrieves all or part of the Virtual Machines in the pool. The + * Virtual Machines to retrieve can be also filtered by Id, specifying the + * first and last Id to include; and by state. + * + * @param client XML-RPC Client. + * @param filter Filter flag to use. Possible values: + * + * {@link Pool#ALL}: All Virtual Machines + * {@link Pool#MINE}: Connected user's Virtual Machines + * {@link Pool#MINE_GROUP}: Connected user's Virtual Machines, and the ones in + * his group + * {@link Pool#GROUP}: User's primary group Virtual Machines + * >= 0 UID User's Virtual Machines + * + * @param startId Lowest Id to retrieve + * @param endId Biggest Id to retrieve + * @param state Numeric state of the Virtual Machines wanted, or one + * of {@link VirtualMachinePool#ALL_VM} or + * {@link VirtualMachinePool#NOT_DONE} + * @param query query for FTS + * @return If successful the message contains the string + * with the information returned by OpenNebula. + */ + public static OneResponse info_search(Client client, int filter, + int startId, int endId, int state, String query) + { + return client.call(INFO_METHOD, filter, startId, endId, state, query); + } + /** * Retrieves the monitoring data for all or part of the Virtual * Machines in the pool. diff --git a/src/oca/python/Makefile b/src/oca/python/Makefile index cbf243391d..9baeb9e9f1 100644 --- a/src/oca/python/Makefile +++ b/src/oca/python/Makefile @@ -14,7 +14,7 @@ # limitations under the License. 
# Use full path to ensure virtualenv compatibility -PYTHON = $(shell which python) +PYTHON = $(shell which python) GDS = $(shell which generateDS) PWD = $(shell pwd) @@ -35,12 +35,12 @@ pyone/bindings/__init__.py pyone/bindings/supbind.py: $(schemas) sed -i "s/import sys/import sys\nfrom pyone.util import TemplatedType/" pyone/bindings/__init__.py sed -i "s/(supermod\./(TemplatedType, supermod\./g" pyone/bindings/__init__.py -.PHONY: clean +.PHONY: clean dist clean: rm -rf build dist pyone/bindings *.egg-info doc dist: - ${PYTHON} setup.py bdist_wheel + ${PYTHON} setup.py sdist bdist_wheel install: ${PYTHON} setup.py install ${root} diff --git a/src/oca/python/setup.py b/src/oca/python/setup.py index bddf09694c..a12dc5a341 100644 --- a/src/oca/python/setup.py +++ b/src/oca/python/setup.py @@ -41,7 +41,7 @@ if sys.version_info[0] < 3: setup( name='pyone', - version='5.7.80', + version='5.7.90', description='Python Bindings for OpenNebula XML-RPC API', long_description=long_description, diff --git a/src/oca/ruby/opennebula.rb b/src/oca/ruby/opennebula.rb index b12bfed5bf..1a33799905 100644 --- a/src/oca/ruby/opennebula.rb +++ b/src/oca/ruby/opennebula.rb @@ -71,5 +71,5 @@ require 'opennebula/vntemplate_pool' module OpenNebula # OpenNebula version - VERSION = '5.7.85' + VERSION = '5.7.90' end diff --git a/src/oca/ruby/opennebula/virtual_machine.rb b/src/oca/ruby/opennebula/virtual_machine.rb index c330baa109..2a46b1bbf0 100644 --- a/src/oca/ruby/opennebula/virtual_machine.rb +++ b/src/oca/ruby/opennebula/virtual_machine.rb @@ -782,6 +782,12 @@ module OpenNebula REMOVE_VNET_ATTRS = %w{AR_ID BRIDGE CLUSTER_ID IP MAC TARGET NIC_ID NETWORK_ID VN_MAD SECURITY_GROUPS VLAN_ID} + REMOVE_IMAGE_ATTRS = %w{DEV_PREFIX SOURCE ORIGINAL_SIZE SIZE + DISK_SNAPSHOT_TOTAL_SIZE DRIVER IMAGE_STATE SAVE CLONE READONLY + PERSISTENT TARGET ALLOW_ORPHANS CLONE_TARGET CLUSTER_ID DATASTORE + DATASTORE_ID DISK_ID DISK_TYPE IMAGE_ID IMAGE IMAGE_UNAME IMAGE_UID + LN_TARGET TM_MAD TYPE 
OPENNEBULA_MANAGED} + def save_as_template(name,description, persistent=nil) img_ids = [] new_tid = nil @@ -858,13 +864,19 @@ module OpenNebula raise end - image_id = disk["IMAGE_ID"] + image_id = disk["IMAGE_ID"] + opennebula_managed = disk["OPENNEBULA_MANAGED"] + type = disk["TYPE"] + + REMOVE_IMAGE_ATTRS.each do |attr| + disk.delete_element(attr) + end if !image_id.nil? && !image_id.empty? - if disk['TYPE'] == 'CDROM' + if type == 'CDROM' replace << "DISK = [ IMAGE_ID = #{image_id}" - if disk["OPENNEBULA_MANAGED"] - replace << ", OPENNEBULA_MANAGED=#{disk["OPENNEBULA_MANAGED"]}" + if opennebula_managed + replace << ", OPENNEBULA_MANAGED=#{opennebula_managed}" end replace << " ]\n" else @@ -878,7 +890,14 @@ module OpenNebula img_ids << rc.to_i - replace << "DISK = [ IMAGE_ID = #{rc} ]\n" + disk_template = disk.template_like_str(".").tr("\n", ",\n") + + if disk_template.empty? + replace << "DISK = [ IMAGE_ID = #{rc} ] \n" + else + replace << "DISK = [ IMAGE_ID = #{rc}, " << + disk_template << " ] \n" + end end else # Volatile disks cannot be saved, so the definition is copied diff --git a/src/oca/ruby/opennebula/virtual_machine_pool.rb b/src/oca/ruby/opennebula/virtual_machine_pool.rb index c3f54635b6..aba1228e4f 100644 --- a/src/oca/ruby/opennebula/virtual_machine_pool.rb +++ b/src/oca/ruby/opennebula/virtual_machine_pool.rb @@ -115,6 +115,23 @@ module OpenNebula INFO_NOT_DONE) end + def info_search(args = {}) + default_args = { + :who => INFO_ALL, + :start_id => -1, + :end_id => -1, + :state => INFO_NOT_DONE, + :query => "" + }.merge!(args) + + return info_filter(VM_POOL_METHODS[:info], + default_args[:who], + default_args[:start_id], + default_args[:end_id], + default_args[:state], + default_args[:query]) + end + alias_method :info!, :info alias_method :info_all!, :info_all alias_method :info_mine!, :info_mine @@ -458,8 +475,8 @@ module OpenNebula data_hash end - def info_filter(xml_method, who, start_id, end_id, state) - return xmlrpc_info(xml_method, who, 
start_id, end_id, state) + def info_filter(xml_method, who, start_id, end_id, state, query="") + return xmlrpc_info(xml_method, who, start_id, end_id, state, query) end end end diff --git a/src/onedb/database_schema.rb b/src/onedb/database_schema.rb index a106263571..2c9f8649a3 100644 --- a/src/onedb/database_schema.rb +++ b/src/onedb/database_schema.rb @@ -91,8 +91,15 @@ class OneDBBacKEnd vm_pool: "oid INTEGER PRIMARY KEY, name VARCHAR(128), " << "body MEDIUMTEXT, uid INTEGER, gid INTEGER, " << "last_poll INTEGER, state INTEGER, lcm_state INTEGER, " << - "owner_u INTEGER, group_u INTEGER, other_u INTEGER, short_body MEDIUMTEXT", - + "owner_u INTEGER, group_u INTEGER, other_u INTEGER, short_body MEDIUMTEXT, " << + "search_token MEDIUMTEXT, FULLTEXT ftidx(search_token)", + + vm_pool_sqlite: "oid INTEGER PRIMARY KEY, name VARCHAR(128), " << + "body MEDIUMTEXT, uid INTEGER, gid INTEGER, " << + "last_poll INTEGER, state INTEGER, lcm_state INTEGER, " << + "owner_u INTEGER, group_u INTEGER, other_u INTEGER, short_body MEDIUMTEXT, " << + "search_token MEDIUMTEXT", + vn_template_pool: "oid INTEGER PRIMARY KEY, name VARCHAR(128), " << "body MEDIUMTEXT, uid INTEGER, gid INTEGER," << "owner_u INTEGER, group_u INTEGER, other_u INTEGER" diff --git a/src/onedb/fsck.rb b/src/onedb/fsck.rb index de579d63c3..d366be2e11 100644 --- a/src/onedb/fsck.rb +++ b/src/onedb/fsck.rb @@ -52,7 +52,7 @@ require 'fsck/template' require 'fsck/quotas' module OneDBFsck - VERSION = "5.7.80" + VERSION = "5.6.0" LOCAL_VERSION = "5.7.80" def db_version diff --git a/src/onedb/local/5.6.0_to_5.7.80.rb b/src/onedb/local/5.6.0_to_5.7.80.rb index 64ad8ebd92..1e2fdef924 100644 --- a/src/onedb/local/5.6.0_to_5.7.80.rb +++ b/src/onedb/local/5.6.0_to_5.7.80.rb @@ -38,7 +38,7 @@ module Migrator def up bug_2687 # MUST be run before 2489, which generates short body feature_2253 - feature_2489 + feature_2489_2671 feature_826 true end @@ -74,7 +74,12 @@ module Migrator @db.run 'DROP TABLE IF EXISTS old_vm_pool;' 
@db.run 'ALTER TABLE vm_pool RENAME TO old_vm_pool;' - create_table(:vm_pool) + + if !is_fts_available + create_table(:vm_pool_sqlite, "vm_pool", db_version) + else + create_table(:vm_pool, nil, db_version) + end @db.transaction do # updates VM's nics @@ -104,10 +109,15 @@ module Migrator @db.run 'DROP TABLE old_vm_pool;' end - def feature_2489 + def feature_2489_2671 @db.run 'DROP TABLE IF EXISTS old_vm_pool;' @db.run 'ALTER TABLE vm_pool RENAME TO old_vm_pool;' - create_table(:vm_pool, nil, db_version) + + if !is_fts_available + create_table(:vm_pool_sqlite, "vm_pool", db_version) + else + create_table(:vm_pool, nil, db_version) + end @db.transaction do @db.fetch('SELECT * FROM old_vm_pool') do |row| @@ -116,6 +126,7 @@ module Migrator end row[:short_body] = gen_short_body(doc) + row[:search_token] = gen_search_body(doc) @db[:vm_pool].insert(row) end @@ -135,6 +146,79 @@ module Migrator end end + def gen_search_body(body) + + search_body = "UNAME=" + escape_token(body.root.xpath('UNAME').text) + "\n" + + "GNAME=" + escape_token(body.root.xpath('GNAME').text) + "\n" + + "NAME=" + escape_token(body.root.xpath('NAME').text) + "\n" + + "LAST_POLL=" + escape_token(body.root.xpath('LAST_POLL').text) + "\n" + + "PREV_STATE=" + escape_token(body.root.xpath('PREV_STATE').text) + "\n" + + "PREV_LCM_STATE=" + escape_token(body.root.xpath('PREV_LCM_STATE').text) + "\n" + + "RESCHED=" + escape_token(body.root.xpath('RESCHED').text) + "\n" + + "STIME=" + escape_token(body.root.xpath('STIME').text) + "\n" + + "ETIME=" + escape_token(body.root.xpath('ETIME').text) + "\n" + + "DEPLOY_ID=" + escape_token(body.root.xpath('DEPLOY_ID').text) + "\n" + + body.root.xpath("//TEMPLATE/*").each do |node| + search_body += to_token(node) + end + + node = Nokogiri::XML(body.root.xpath("//HISTORY_RECORDS/HISTORY[last()]").to_s) + + if !node.root.nil? 
+ search_body += history_to_token(node) + end + + return search_body + end + + def to_token(node) + search_body = "" + if node.children.size > 1 + node.children.each do |child| + search_body += to_token(child) + end + elsif + search_body += node.name + "=" + escape_token(node.children.text) + "\n" + end + + return search_body + end + + def history_to_token(hr) + hr_token = "HOSTNAME=" + escape_token(hr.xpath("//HOSTNAME").text) + "\n" + + "HID=" + hr.xpath("//HID").text + "\n" + + "CID=" + hr.xpath("//CID").text + "\n" + + "DS_ID=" + hr.xpath("//DS_ID").text + "\n" + end + + def escape_token(str) + str_scaped = "" + + str.split("").each do |c| + case c + when '-', '_', '.', ':' + str_scaped += '_' + else + str_scaped += c + end + end + + return str_scaped + end + + def is_fts_available() + if @db.adapter_scheme == :sqlite + return false + else + if @db.server_version >= 50600 + return true + else + return false + end + end + end + def gen_short_body(body) short_body = Nokogiri::XML::Builder.new(:encoding => 'UTF-8') do |xml| xml.VM{ @@ -282,7 +366,11 @@ module Migrator @db.run "DROP TABLE IF EXISTS old_vm_pool;" @db.run "ALTER TABLE vm_pool RENAME TO old_vm_pool;" - create_table(:vm_pool) + if !is_fts_available + create_table(:vm_pool_sqlite, "vm_pool", db_version) + else + create_table(:vm_pool, nil, db_version) + end @db.transaction do @db.fetch("SELECT * FROM old_vm_pool") do |row| diff --git a/src/pool/PoolSQL.cc b/src/pool/PoolSQL.cc index 0b89478a2d..392cf6e4f8 100644 --- a/src/pool/PoolSQL.cc +++ b/src/pool/PoolSQL.cc @@ -148,7 +148,14 @@ int PoolSQL::allocate(PoolObjectSQL *objsql, string& error_str) if( rc == -1 ) { - _set_lastOID(--lastOID, db, table); + lastOID = lastOID - 1; + + if ( lastOID < 0 ) + { + lastOID = 0; + } + + _set_lastOID(lastOID, db, table); } unlock(); diff --git a/src/rm/RequestManagerLock.cc b/src/rm/RequestManagerLock.cc index 3963c8bc78..039efc1ba9 100644 --- a/src/rm/RequestManagerLock.cc +++ b/src/rm/RequestManagerLock.cc @@ -54,7 
+54,15 @@ void RequestManagerLock::request_execute(xmlrpc_c::paramList const& paramList, object->unlock(); - success_response((rc == 0), att); + if (rc != 0) + { + att.resp_msg = "Error trying to lock the resource."; + failure_response(ACTION, att); + } + else + { + success_response(oid, att); + } } else { diff --git a/src/rm/RequestManagerPoolInfoFilter.cc b/src/rm/RequestManagerPoolInfoFilter.cc index 3228a4eac8..821f0f9583 100644 --- a/src/rm/RequestManagerPoolInfoFilter.cc +++ b/src/rm/RequestManagerPoolInfoFilter.cc @@ -96,12 +96,28 @@ void VirtualMachinePoolInfo::request_execute( int end_id = xmlrpc_c::value_int(paramList.getInt(3)); int state = xmlrpc_c::value_int(paramList.getInt(4)); - ostringstream state_filter; + std::string fts_query; + + if (paramList.size() > 5) + { + fts_query = xmlrpc_c::value_string(paramList.getString(5)); + + if (!fts_query.empty() && !pool->is_fts_available()) + { + att.resp_msg = "Full text search is not supported by the SQL backend"; + + failure_response(INTERNAL, att); + return; + } + } + + ostringstream and_filter; if (( state < VirtualMachinePoolInfo::ALL_VM ) || ( state > VirtualMachine::CLONING_FAILURE )) { att.resp_msg = "Incorrect filter_flag, state"; + failure_response(XML_RPC_API, att); return; } @@ -112,15 +128,39 @@ void VirtualMachinePoolInfo::request_execute( break; case VirtualMachinePoolInfo::NOT_DONE: - state_filter << "state <> " << VirtualMachine::DONE; + and_filter << "state <> " << VirtualMachine::DONE; break; default: - state_filter << "state = " << state; + and_filter << "state = " << state; break; } - dump(att, filter_flag, start_id, end_id, state_filter.str(), ""); + if (!fts_query.empty()) + { + char * _fts_query = pool->escape_str(fts_query); + + if ( _fts_query == 0 ) + { + att.resp_msg = "Error building search query"; + + failure_response(INTERNAL, att); + return; + } + + if (!and_filter.str().empty()) + { + and_filter << " AND "; + } + + and_filter << "MATCH(search_token) AGAINST ('+\""; + 
one_util::escape_token(_fts_query, and_filter); + and_filter << "\"' in boolean mode)"; + + pool->free_str(_fts_query); + } + + dump(att, filter_flag, start_id, end_id, and_filter.str(), ""); } /* ------------------------------------------------------------------------- */ @@ -471,7 +511,7 @@ void RequestManagerPoolInfoFilter::dump( limit_clause = oss.str(); } - Nebula::instance().get_configuration_attribute(att.uid, att.gid, + Nebula::instance().get_configuration_attribute(att.uid, att.gid, "API_LIST_ORDER", desc); rc = pool->dump(str, where_string, limit_clause, diff --git a/src/rm/RequestManagerUser.cc b/src/rm/RequestManagerUser.cc index fd09a36a54..8f0bc6a701 100644 --- a/src/rm/RequestManagerUser.cc +++ b/src/rm/RequestManagerUser.cc @@ -493,7 +493,14 @@ void UserLogin::request_execute(xmlrpc_c::paramList const& paramList, } else if (valid > 0 || valid == -1) { - if ( egid != -1 && (!user->is_in_group(egid) || att.group_ids.count(egid) == 0) ) + /** + * Scoped token checks + * 1. user is in the target group + * 2. Authenticated groups for the user include the target group + * 3. 
user is not oneadmin or admin group + */ + if ( egid != -1 && !att.is_admin() && ( !user->is_in_group(egid) || + att.group_ids.count(egid) == 0) ) { att.resp_msg = "EGID is not in user group list"; failure_response(XML_RPC_API, att); diff --git a/src/rm/RequestManagerVirtualMachine.cc b/src/rm/RequestManagerVirtualMachine.cc index f5180ca8a3..ba740290f0 100644 --- a/src/rm/RequestManagerVirtualMachine.cc +++ b/src/rm/RequestManagerVirtualMachine.cc @@ -1277,7 +1277,17 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList if (ds_id != -1) { - if ( c_ds_id != ds_id && live ) + VirtualMachineManager * vmm = Nebula::instance().get_vmm(); + const VirtualMachineManagerDriver * vmmd = vmm->get(vmm_mad); + + if ( vmmd == 0 ) + { + att.resp_msg = "Cannot find vmm driver: " + vmm_mad; + failure_response(ACTION, att); + return; + } + + if ( c_ds_id != ds_id && live && !vmmd->is_ds_live_migration()) { att.resp_msg = "A migration to a different system datastore " "cannot be performed live."; diff --git a/src/scheduler/etc/sched.conf b/src/scheduler/etc/sched.conf index a7d97656a8..eca73381c4 100644 --- a/src/scheduler/etc/sched.conf +++ b/src/scheduler/etc/sched.conf @@ -16,10 +16,10 @@ # MAX_VM: Maximum number of Virtual Machines scheduled in each scheduling # action. Use 0 to schedule all pending VMs each time. # -# MAX_DISPATCH: Maximum number of Virtual Machines actually dispatched to a -# host in each scheduling action +# MAX_DISPATCH: Maximum number of Virtual Machines dispatched in each +# scheduling action # -# MAX_HOST: Maximum number of Virtual Machines dispatched to a given host in +# MAX_HOST: Maximum number of Virtual Machines dispatched to each host in # each scheduling action # # LIVE_RESCHEDS: Perform live (1) or cold migrations (0) when rescheduling a VM @@ -45,7 +45,7 @@ # 1 = Striping. Tries to optimize I/O by distributing the VMs across # datastores. # 2 = Custom. 
-# - rank: Custom arithmetic exprission to rank suitable datastores based +# - rank: Custom arithmetic expression to rank suitable datastores based # on their attributes # 3 = Fixed. Datastores will be ranked according to the PRIORITY attribute # found in the Datastore template. @@ -56,13 +56,13 @@ # less free leases # 1 = Striping. Tries to distribute address usage across VNETs. # 2 = Custom. -# - rank: Custom arithmetic exprission to rank suitable datastores based +# - rank: Custom arithmetic expression to rank suitable datastores based # on their attributes -# 3 = Fixed. Virtual Networks will be ranked according to the PRIORITY +# 3 = Fixed. Virtual Networks will be ranked according to the PRIORITY # attribute found in the Virtual Network template. # -# DIFFERENT_VNETS: When set (YES) the NICs of a VM will be forced to be in -# different Virtual Networks. +# DIFFERENT_VNETS: When set (YES) the NICs of a VM will be forced to be in +# different Virtual Networks. # # LOG: Configuration for the logging system # - system: defines the logging system: @@ -86,7 +86,7 @@ #******************************************************************************* MESSAGE_SIZE = 1073741824 -TIMEOUT = 60 +TIMEOUT = 60 ONE_XMLRPC = "http://localhost:2633/RPC2" diff --git a/src/sql/MySqlDB.cc b/src/sql/MySqlDB.cc index 1fcc638b8b..d1771764e8 100644 --- a/src/sql/MySqlDB.cc +++ b/src/sql/MySqlDB.cc @@ -345,3 +345,21 @@ void MySqlDB::free_db_connection(MYSQL * db) } /* -------------------------------------------------------------------------- */ + +bool MySqlDB::fts_available() +{ + unsigned long version; + + version = mysql_get_server_version(db_escape_connect); + + if (version >= 50600) + { + return true; + } + else + { + return false; + } +} + +/* -------------------------------------------------------------------------- */ \ No newline at end of file diff --git a/src/sunstone/OpenNebulaVNC.rb b/src/sunstone/OpenNebulaVNC.rb index f3908f6b2e..120f2a1745 100644 --- 
a/src/sunstone/OpenNebulaVNC.rb +++ b/src/sunstone/OpenNebulaVNC.rb @@ -109,7 +109,7 @@ class OpenNebulaVNC @pipe = nil @token_folder = File.join(VAR_LOCATION, opts[:token_folder_name]) - @proxy_path = File.join(SHARE_LOCATION, "websockify/websocketproxy.py") + @proxy_path = File.join(SHARE_LOCATION, "websockify/run") @proxy_port = config[:vnc_proxy_port] @proxy_ipv6 = config[:vnc_proxy_ipv6] @@ -151,10 +151,7 @@ class OpenNebulaVNC proxy_options << " -6" end - system("which python2 >/dev/null 2>&1") - python = $?.success? ? "python2" : "python" - - cmd ="#{python} #{@proxy_path} #{proxy_options} #{@proxy_port}" + cmd ="python #{@proxy_path} #{proxy_options} #{@proxy_port}" begin @logger.info { "Starting VNC proxy: #{cmd}" } diff --git a/src/sunstone/public/app/opennebula/host.js b/src/sunstone/public/app/opennebula/host.js index a455a30fad..93582c4ba0 100644 --- a/src/sunstone/public/app/opennebula/host.js +++ b/src/sunstone/public/app/opennebula/host.js @@ -26,6 +26,7 @@ define(function(require) { var pcisCallbacks = []; var customizationsCallbacks = []; var kvmInfoCallbacks = []; + var lxdProfilesInfoCallbacks = []; var CACHE_EXPIRE = 300000; //ms @@ -183,6 +184,25 @@ define(function(require) { _infrastructure(); }, + "lxdProfilesInfo": function(params){ + var callback = params.success; + var callbackError = params.error; + var request = OpenNebulaHelper.request(RESOURCE, "infrastructure"); + + if (infrastructureCache && + infrastructureCache["timestamp"] + CACHE_EXPIRE > new Date().getTime()) { + + return callback ? 
+ callback(request, infrastructureCache["lxd_profiles"]) : null; + } + + lxdProfilesInfoCallbacks.push({ + success : callback, + error : callbackError + }); + + _infrastructure(); + }, "kvmInfo": function(params){ var callback = params.success; var callbackError = params.error; @@ -240,6 +260,16 @@ define(function(require) { customizations = [customizations]; } + var lxd_profiles = response.lxd_profiles; + + if (lxd_profiles == undefined){ + lxd_profiles = []; + } + + if (!$.isArray(lxd_profiles)){ // If only 1 convert to array + lxd_profiles = [lxd_profiles]; + } + var kvm_info = response.kvm_info; if (kvm_info == undefined){ @@ -254,7 +284,8 @@ define(function(require) { timestamp : new Date().getTime(), pcis : pcis, customizations : customizations, - kvm_info : kvm_info + kvm_info : kvm_info, + lxd_profiles : lxd_profiles }; infrastructureWaiting = false; @@ -291,6 +322,17 @@ define(function(require) { kvmInfoCallbacks = []; + + for (var i = 0; i < lxdProfilesInfoCallbacks.length; i++) { + var callback = lxdProfilesInfoCallbacks[i].success; + + if (callback) { + callback(request, lxd_profiles); + } + } + + lxdProfilesInfoCallbacks = []; + return; }, error: function(response) { diff --git a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general.js b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general.js index e58c21032a..765c6b70b5 100644 --- a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general.js +++ b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general.js @@ -29,6 +29,7 @@ define(function(require) { var OpenNebula = require('opennebula'); var UsersTable = require("tabs/users-tab/datatable"); var GroupTable = require("tabs/groups-tab/datatable"); + var OpenNebulaHost = require("opennebula/host"); /* TEMPLATES @@ -198,6 +199,7 @@ define(function(require) { $("#vcenter_ccr_ref", context).attr("required", ""); $("#MEMORY", context).attr("pattern", 
"^([048]|\\d*[13579][26]|\\d*[24680][048])$"); $('.only_kvm').hide(); + $('.only_lxd').hide(); $('.only_vcenter').show(); } else { $("#vcenter_template_ref", context).removeAttr("required"); @@ -206,8 +208,20 @@ define(function(require) { $("#MEMORY", context).removeAttr("pattern"); $('.only_kvm').show(); $('.only_vcenter').hide(); + if (this.value != "lxd") + { + $('.only_lxd').hide(); + } } // There is another listener in context.js setup + + // Needs proper LXD view, this is just a workaround + // All KVM settings are available in LXD plus + // Privileged, Profile and Security Nesting + + if (this.value == "lxd"){ + $('.only_lxd').show(); + } }); CapacityCreate.setup($("div.capacityCreate", context)); @@ -229,6 +243,33 @@ define(function(require) { $('.only_kvm').hide(); $('.only_vcenter').show(); } + + fillLXDProfiles(context) + } + + function fillLXDProfiles(context){ + OpenNebulaHost.lxdProfilesInfo({ + data : {}, + timeout: true, + success: function (request, lxdProfilesInfo){ + if ($("#lxd_profile", context).html() === undefined){ + lxdprofiles = lxdProfilesInfo; + + var html = ""; + html += "" + " " + ""; + $.each(lxdprofiles, function(i, lxdprofile){ + html += "" + lxdprofile + ""; + }); + html += ""; + $("#lxd_profile_label", context).append(html); + } + + }, + error: function(request, error_json){ + console.error("There was an error requesting lxd info: " + + error_json.error.message); + } + }); } function _retrieve(context) { @@ -243,6 +284,12 @@ define(function(require) { } } + if (templateJSON["HYPERVISOR"] == 'lxd') { + templateJSON["LXD_SECURITY_PRIVILEGED"] = WizardFields.retrieveInput($("#lxd_security_privileged", context)); + templateJSON["LXD_PROFILE"] = WizardFields.retrieveInput($("#lxd_profile", context)); + templateJSON["LXD_SECURITY_NESTING"] = WizardFields.retrieveInput($("#lxd_security_nesting", context)); + } + var sunstone_template = {}; if ($('#sunstone_network_select:checked', context).length > 0) { @@ -351,6 +398,11 @@ 
define(function(require) { } } + // LXD specific attributes + if (templateJSON["HYPERVISOR"] == 'lxd') { + fillLXD(context, templateJSON) + } + if (templateJSON["HYPERVISOR"]) { $("input[name='hypervisor'][value='"+templateJSON["HYPERVISOR"]+"']", context).trigger("click") delete templateJSON["HYPERVISOR"]; @@ -412,4 +464,22 @@ define(function(require) { WizardFields.fill(context, templateJSON); } + + function fillLXD(context, templateJSON) { + if (templateJSON["LXD_SECURITY_PRIVILEGED"]){ + WizardFields.fillInput($("#lxd_security_privileged", context), templateJSON["LXD_SECURITY_PRIVILEGED"]); + delete templateJSON["LXD_SECURITY_PRIVILEGED"]; + } + + if (templateJSON["LXD_PROFILE"]){ + WizardFields.fillInput($("#lxd_profile", context), templateJSON["LXD_PROFILE"]); + delete templateJSON["LXD_PROFILE"]; + } + + if (templateJSON["LXD_SECURITY_NESTING"]){ + WizardFields.fillInput($("#lxd_security_nesting", context), templateJSON["LXD_SECURITY_NESTING"]); + delete templateJSON["LXD_SECURITY_NESTING"]; + } + } + }); diff --git a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general/html.hbs b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general/html.hbs index 623acece5e..0ad1ab87ea 100644 --- a/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general/html.hbs +++ b/src/sunstone/public/app/tabs/templates-tab/form-panels/create/wizard-tabs/general/html.hbs @@ -26,6 +26,8 @@ {{tr "KVM"}} {{tr "vCenter"}} + + {{tr "LXD"}} @@ -109,6 +111,38 @@ + + {{tr "LXD"}} + + + + {{tr "Security Privileged"}} + + + Yes + No + + + + + + + {{tr "Security Nesting"}} + + + Yes + No + + + + + + + {{tr "Profile"}} + + + + {{{capacityCreateHTML}}} diff --git a/src/sunstone/public/app/tabs/vms-tab/panels/conf.js b/src/sunstone/public/app/tabs/vms-tab/panels/conf.js index f852ba2e5c..25f01d144c 100644 --- a/src/sunstone/public/app/tabs/vms-tab/panels/conf.js +++ b/src/sunstone/public/app/tabs/vms-tab/panels/conf.js 
@@ -56,7 +56,7 @@ define(function(require) { var conf = {}; var template = this.element.TEMPLATE; - $.each(["OS", "FEATURES", "INPUT", "GRAPHICS", "RAW", "CONTEXT"], function(){ + $.each(["OS", "FEATURES", "INPUT", "GRAPHICS", "RAW", "CONTEXT", "CPU_MODEL"], function(){ if(template[this] != undefined){ conf[this] = template[this]; } diff --git a/src/sunstone/sunstone-server.rb b/src/sunstone/sunstone-server.rb index d8ecd1a328..b0504226e3 100755 --- a/src/sunstone/sunstone-server.rb +++ b/src/sunstone/sunstone-server.rb @@ -44,10 +44,10 @@ LOGOS_CONFIGURATION_FILE = ETC_LOCATION + "/sunstone-logos.yaml" SUNSTONE_ROOT_DIR = File.dirname(__FILE__) -$: << RUBY_LIB_LOCATION -$: << RUBY_LIB_LOCATION+'/cloud' -$: << SUNSTONE_ROOT_DIR -$: << SUNSTONE_ROOT_DIR+'/models' +$LOAD_PATH << RUBY_LIB_LOCATION +$LOAD_PATH << RUBY_LIB_LOCATION + '/cloud' +$LOAD_PATH << SUNSTONE_ROOT_DIR +$LOAD_PATH << SUNSTONE_ROOT_DIR + '/models' DISPLAY_NAME_XPATH = 'TEMPLATE/SUNSTONE/DISPLAY_NAME' TABLE_ORDER_XPATH = 'TEMPLATE/SUNSTONE/TABLE_ORDER' @@ -283,7 +283,7 @@ helpers do csrftoken_plain = Time.now.to_f.to_s + SecureRandom.base64 session[:csrftoken] = Digest::MD5.hexdigest(csrftoken_plain) - group = OpenNebula::Group.new_with_id(user['GID'], client) + group = OpenNebula::Group.new_with_id(OpenNebula::Group::SELF, client) rc = group.info if OpenNebula.is_error?(rc) logger.error { rc.message } @@ -663,6 +663,14 @@ get '/infrastructure' do infrastructure[:kvm_info] = { :set_cpu_models => set_cpu_models.to_a, :set_kvm_machines => set_kvm_machines.to_a } + set_lxd_profiles = Set.new + + xml.each('HOST/TEMPLATE/LXD_PROFILES') do |lxd_profiles| + set_lxd_profiles += lxd_profiles.text.split(" ") + end + + infrastructure[:lxd_profiles] = set_lxd_profiles.to_a + [200, infrastructure.to_json] end diff --git a/src/template/Template.cc b/src/template/Template.cc index e824e3fdae..b14995a817 100644 --- a/src/template/Template.cc +++ b/src/template/Template.cc @@ -386,19 +386,14 @@ bool 
Template::get(const string& name, bool& value) const string& Template::to_xml(string& xml) const { - multimap::const_iterator it; - ostringstream oss; - string * s; + multimap::const_iterator it; + ostringstream oss; oss << "<" << xml_root << ">"; for ( it = attributes.begin(); it!=attributes.end(); it++) { - s = it->second->to_xml(); - - oss << *s; - - delete s; + it->second->to_xml(oss); } oss << "" << xml_root << ">"; @@ -407,6 +402,81 @@ string& Template::to_xml(string& xml) const return xml; } + +string& Template::to_json(string& json) const +{ + multimap::const_iterator it; + ostringstream oss; + + bool is_first = true; + + oss << "\"" << xml_root << "\": {"; + + for ( it = attributes.begin(); it!=attributes.end(); ) + { + if (!is_first) + { + oss << ","; + } + else + { + is_first = false; + } + + oss << "\"" << it->first << "\": "; + + if ( attributes.count(it->first) == 1 ) + { + it->second->to_json(oss); + + ++it; + } + else + { + std::string jelem = it->first; + bool is_array_first = true; + + oss << "[ "; + + for ( ; it->first == jelem && it != attributes.end() ; ++it ) + { + if ( !is_array_first ) + { + oss << ","; + } + else + { + is_array_first = false; + } + + it->second->to_json(oss); + } + + oss << "]"; + } + } + + oss << "}"; + + json = oss.str(); + + return json; +} + +string& Template::to_token(string& str) const +{ + ostringstream os; + multimap::const_iterator it; + + for ( it = attributes.begin(); it!=attributes.end(); it++) + { + it->second->to_token(os); + } + + str = os.str(); + return str; +} + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/src/tm_mad/ceph/clone b/src/tm_mad/ceph/clone index 6c4705926f..888059eb2a 100755 --- a/src/tm_mad/ceph/clone +++ b/src/tm_mad/ceph/clone @@ -118,7 +118,7 @@ if [ -n "$EC_POOL_NAME" ]; then fi if [ "${TYPE}" = 'FILE' ]; then - ssh_make_path $DST_HOST $DST_DIR + ssh_make_path 
$DST_HOST $DST_DIR 'ssh' CLONE_CMD=$(cat <"${DST_DIR}/.host" || : # zero trailing space + if [ "${ZERO_LVM_ON_CREATE}" = "yes" ]; then LVSIZE=\$(${SUDO} ${LVS} --nosuffix --noheadings --units B -o lv_size "${DEV}" | tr -d '[:blank:]') ${DD} if=/dev/zero of="${DEV}" bs=64k \ oflag=seek_bytes iflag=count_bytes \ seek="${ZERO_SEEK_BYTES}" count="\$(( LVSIZE - ${ZERO_SEEK_BYTES} ))" + fi $QEMU_IMG convert -O raw "$SRC_PATH" "$DEV" rm -f "$DST_PATH" @@ -142,4 +145,5 @@ EOF ssh_exec_and_log "$DST_HOST" "$CLONE_CMD" \ "Error cloning $SRC_PATH to $LV_NAME" +hup_collectd $DST_HOST exit 0 diff --git a/src/tm_mad/fs_lvm/cpds b/src/tm_mad/fs_lvm/cpds index e6791fd4b5..2730bd1d8c 100755 --- a/src/tm_mad/fs_lvm/cpds +++ b/src/tm_mad/fs_lvm/cpds @@ -66,5 +66,6 @@ log "Dumping $SRC to $DST" ssh_exec_and_log "$SRC_HOST" "$DUMP_CMD" \ "Error dumping $SRC to $DST" +hup_collectd $SRC_HOST exit 0 diff --git a/src/tm_mad/fs_lvm/delete b/src/tm_mad/fs_lvm/delete index b2969d7ae7..70075ee183 100755 --- a/src/tm_mad/fs_lvm/delete +++ b/src/tm_mad/fs_lvm/delete @@ -30,7 +30,10 @@ else TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh fi +DRIVER_PATH=$(dirname $0) + source $TMCOMMON +source ${DRIVER_PATH}/../../etc/tm/fs_lvm/fs_lvm.conf #------------------------------------------------------------------------------- # Return if deleting a disk, we will delete them when removing the @@ -74,13 +77,16 @@ EOF ) if [ -n "${DS_SYS_ID}" ]; then - ssh_exec_and_log "$DST_HOST" "$ZERO_CMD" \ - "Error cleaning $DST_PATH" + + if [ "${ZERO_LVM_ON_DELETE}" = "yes" ]; then + ssh_exec_and_log "$DST_HOST" "$ZERO_CMD" "Error cleaning $DST_PATH" + fi LOCK="tm-fs_lvm-${DS_SYS_ID}.lock" exclusive "${LOCK}" 120 ssh_exec_and_log "$DST_HOST" "$DELETE_CMD" \ "Error deleting $DST_PATH" else - ssh_exec_and_log "$DST_HOST" "$DELETE_CMD" \ - "Error deleting $DST_PATH" + ssh_exec_and_log "$DST_HOST" "$DELETE_CMD" "Error deleting $DST_PATH" fi + +hup_collectd $DST_HOST diff --git a/src/tm_mad/fs_lvm/fs_lvm.conf 
b/src/tm_mad/fs_lvm/fs_lvm.conf new file mode 100644 index 0000000000..1ebab245e1 --- /dev/null +++ b/src/tm_mad/fs_lvm/fs_lvm.conf @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# Default ZERO'ing behavior for the driver. When these variables are set the driver +# will zero LVM space. This prevents data from a previous allocation to be exposed +# in new volumes. However this will increase the VM allocation time. 
+# +# Zero LVM volumes on creation or resizing +ZERO_LVM_ON_CREATE=yes + +# Zero LVM volumes on delete, when the VM disks are disposed +ZERO_LVM_ON_DELETE=yes diff --git a/src/tm_mad/fs_lvm/mkimage b/src/tm_mad/fs_lvm/mkimage deleted file mode 120000 index abfcd36c18..0000000000 --- a/src/tm_mad/fs_lvm/mkimage +++ /dev/null @@ -1 +0,0 @@ -../shared/mkimage \ No newline at end of file diff --git a/src/tm_mad/fs_lvm/mkimage b/src/tm_mad/fs_lvm/mkimage new file mode 100755 index 0000000000..2a5d032c7b --- /dev/null +++ b/src/tm_mad/fs_lvm/mkimage @@ -0,0 +1,99 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +# mkimage size format host:remote_system_ds/disk.i vmid dsid +# - size in MB of the image +# - format for the image +# - remote_system_ds is the path for the system datastore in the host +# - vmid is the id of the VM +# - dsid is the target datastore (0 is the system datastore) + +SIZE=$1 +FSTYPE=$2 +DST=$3 +VM_ID=$4 + +if [ -z "${ONE_LOCATION}" ]; then + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh +else + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh +fi + +DRIVER_PATH=$(dirname $0) + +source $TMCOMMON +source ${DRIVER_PATH}/../../datastore/libfs.sh +source ${DRIVER_PATH}/../../etc/tm/fs_lvm/fs_lvm.conf + +#------------------------------------------------------------------------------- +# Set dst path and dir +#------------------------------------------------------------------------------- + +DST_PATH=`arg_path $DST` +DST_HOST=`arg_host $DST` +DST_DIR=`dirname $DST_PATH` + +DS_SYS_ID=$(basename $(dirname $DST_DIR)) +DST_DS_PATH="$(dirname $(dirname $(dirname $DST_PATH)))" +DISK_ID=$(basename ${DST_PATH} | cut -d. 
-f2) + + +#------------------------------------------------------------------------------- +# Create the logical volume and link it +#------------------------------------------------------------------------------- + +LV_NAME="lv-one-$VM_ID-$DISK_ID" +VG_NAME="vg-one-$DS_SYS_ID" +DEV="/dev/${VG_NAME}/${LV_NAME}" + +# Execute lvcreate with a lock in the frontend +CREATE_CMD=$(cat <"${DST_DIR}/.host" || : + + # zero the volume + if [ "${ZERO_LVM_ON_CREATE}" = "yes" ]; then + ${DD} if=/dev/zero of="${DEV}" bs=64k || : + fi + + [ "$FSTYPE" = "swap" ] && mkswap ${DEV} + + rm -f "$DST_PATH" + ln -s "$DEV" "$DST_PATH" +EOF +) + +ssh_exec_and_log "$DST_HOST" "$MKIMAGE_CMD" \ + "Could not create image $DST_PATH" +exit 0 diff --git a/src/tm_mad/fs_lvm/mv b/src/tm_mad/fs_lvm/mv index e7e9b0556e..3276be55eb 100755 --- a/src/tm_mad/fs_lvm/mv +++ b/src/tm_mad/fs_lvm/mv @@ -154,3 +154,6 @@ fi ssh_exec_and_log "$DST_HOST" "mv $SRC_PATH $DST_PATH" \ "Error moving VM files to another System DS: $SRC_PATH to $DST_PATH in $DST_HOST" + +hup_collectd $DST_HOST +hup_collectd $SRC_HOST diff --git a/src/tm_mad/fs_lvm/mvds b/src/tm_mad/fs_lvm/mvds index 627eca734e..e471e9d959 100755 --- a/src/tm_mad/fs_lvm/mvds +++ b/src/tm_mad/fs_lvm/mvds @@ -75,5 +75,6 @@ ssh_exec_and_log "$SRC_HOST" "$DUMP_CMD" \ LOCK="tm-fs_lvm-${DS_SYS_ID}.lock" exclusive "${LOCK}" 120 ssh_exec_and_log "$SRC_HOST" "$DELETE_CMD" \ "Error dumping $SRC to $DST" +hup_collectd $SRC_HOST exit 0 diff --git a/src/tm_mad/fs_lvm/resize b/src/tm_mad/fs_lvm/resize index 5a712b4ea2..6b7fb95b20 100755 --- a/src/tm_mad/fs_lvm/resize +++ b/src/tm_mad/fs_lvm/resize @@ -32,6 +32,7 @@ DRIVER_PATH=$(dirname $0) source $TMCOMMON source ${DRIVER_PATH}/../../datastore/libfs.sh +source ${DRIVER_PATH}/../../etc/tm/fs_lvm/fs_lvm.conf #------------------------------------------------------------------------------- # Set dst path and dir @@ -58,6 +59,8 @@ VG_NAME="vg-one-$DS_SYS_ID" DEV="/dev/${VG_NAME}/${LV_NAME}" # Get current LV size +if [ 
"${ZERO_LVM_ON_CREATE}" = "yes" ]; then + LVSIZE_CMD=$(cat </dev/null || : +fi +EOF +) + +MONITOR=$(ssh_monitor_and_log "$SRC_HOST" "$MONITOR_CMD" 'Get .monitor') + +ssh_make_path "$DST_HOST" "$DST_PATH" "$MONITOR" log "Moving $SRC to $DST" @@ -85,5 +96,7 @@ EOF ) ssh_exec_and_log "$SRC_HOST" "$TAR_SSH" "Error copying disk directory to target host" +hup_collectd $DST_HOST +hup_collectd $SRC_HOST exit 0 diff --git a/src/tm_mad/ssh/mvds b/src/tm_mad/ssh/mvds index ae3bdff892..88f97424f6 100755 --- a/src/tm_mad/ssh/mvds +++ b/src/tm_mad/ssh/mvds @@ -53,6 +53,7 @@ exec_and_log "$SCP -r $SRC $DST" "Error copying $SRC to $DST" if $SSH $SRC_HOST ls ${SRC_PATH_SNAP} >/dev/null 2>&1; then exec_and_log "rsync -r --delete ${SRC_HOST}:${SRC_PATH_SNAP}/ ${DST_SNAP}" + hup_collectd $SRC_HOST fi exit 0 diff --git a/src/tm_mad/ssh/snap_create b/src/tm_mad/ssh/snap_create index 4d58faafc7..97a9abde31 100755 --- a/src/tm_mad/ssh/snap_create +++ b/src/tm_mad/ssh/snap_create @@ -82,4 +82,5 @@ EOT ssh_exec_and_log "${SRC_HOST}" "${CMD}" \ "Error creating snapshot ${SNAP_PATH}" +hup_collectd $SRC_HOST diff --git a/src/tm_mad/ssh/snap_delete b/src/tm_mad/ssh/snap_delete index 931d976759..f57d539def 100755 --- a/src/tm_mad/ssh/snap_delete +++ b/src/tm_mad/ssh/snap_delete @@ -70,4 +70,5 @@ CURRENT_PATH=${DISK_PATH} ssh_exec_and_log "${SRC_HOST}" "rm ${SNAP_PATH}" \ "Error deleting snapshot ${SNAP_PATH}" +hup_collectd $SRC_HOST diff --git a/src/tm_mad/ssh/snap_revert b/src/tm_mad/ssh/snap_revert index 506f9e3e19..8ac296cdfb 100755 --- a/src/tm_mad/ssh/snap_revert +++ b/src/tm_mad/ssh/snap_revert @@ -77,4 +77,5 @@ EOF ssh_exec_and_log "${SRC_HOST}" "${CMD}" \ "Error reverting snapshot to ${SNAP_PATH}" +hup_collectd $SRC_HOST diff --git a/src/tm_mad/vcenter/clone b/src/tm_mad/vcenter/clone index c41a7ab4b9..3182493066 100755 --- a/src/tm_mad/vcenter/clone +++ b/src/tm_mad/vcenter/clone @@ -110,7 +110,9 @@ rescue Exception => e message = "Error clone virtual disk #{src_path} in "\ "datastore 
#{target_ds_name_vc}. "\ "Reason: #{e.message}\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 ensure vi_client.close_connection if vi_client diff --git a/src/tm_mad/vcenter/cpds b/src/tm_mad/vcenter/cpds index 6463c7cd92..e29474ab14 100755 --- a/src/tm_mad/vcenter/cpds +++ b/src/tm_mad/vcenter/cpds @@ -92,7 +92,9 @@ begin rescue Exception => e message = "Error copying img #{src_path} to #{target_ds_name_vc} "\ "Reason: #{e.message}\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 ensure vi_client.close_connection if vi_client diff --git a/src/tm_mad/vcenter/delete b/src/tm_mad/vcenter/delete index 38b8a42123..a0f017d440 100755 --- a/src/tm_mad/vcenter/delete +++ b/src/tm_mad/vcenter/delete @@ -104,6 +104,9 @@ begin rescue Exception => e vi_client.close_connection if vi_client - STDERR.puts "#{@error_message}. Reason: #{e.message}\n#{e.backtrace}" + message = "#{@error_message}. 
Reason: #{e.message}\n#{e.backtrace}" + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 end diff --git a/src/tm_mad/vcenter/mkimage b/src/tm_mad/vcenter/mkimage index eb8192dc50..a06a5246f3 100755 --- a/src/tm_mad/vcenter/mkimage +++ b/src/tm_mad/vcenter/mkimage @@ -88,7 +88,9 @@ begin rescue Exception => e message = "Error creating virtual disk in #{ds_vc['name']}."\ " Reason: #{e.message}\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 ensure vi_client.close_connection if vi_client diff --git a/src/tm_mad/vcenter/mv b/src/tm_mad/vcenter/mv index 832e92200a..ab7e416d2b 100755 --- a/src/tm_mad/vcenter/mv +++ b/src/tm_mad/vcenter/mv @@ -45,18 +45,21 @@ begin one_client = OpenNebula::Client.new vm = OpenNebula::VirtualMachine.new_with_id(vmid, one_client) vm.info + src_ds = vm.retrieve_elements("HISTORY_RECORDS/HISTORY/DS_ID")[-2] if src_ds == dsid VCenterDriver::VirtualMachine.migrate_routine(vmid, host_orig, host_dest) else - VCenterDriver::VirtualMachine.migrate_routine(vmid, host_orig, host_dest, dsid) + VCenterDriver::VirtualMachine.migrate_routine(vmid, host_orig, host_dest, false, dsid) end rescue StandardError => e - message = "Cannot migrate for VM #{vmid}"\ - 'failed due to '\ + message = "Cannot migrate for VM #{vmid}. 
"\ + 'Failed due to '\ "\"#{e.message}\"\n" OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit(-1) end diff --git a/src/tm_mad/vcenter/mvds b/src/tm_mad/vcenter/mvds index c324c7c0f9..d4cc5b8e74 100755 --- a/src/tm_mad/vcenter/mvds +++ b/src/tm_mad/vcenter/mvds @@ -80,7 +80,9 @@ begin rescue Exception => e message = "Error detaching virtual disk #{disk_id} from vm #{vmid}."\ " Reason: #{e.message}\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 ensure vi_client.close_connection if vi_client diff --git a/src/tm_mad/vcenter/resize b/src/tm_mad/vcenter/resize index 349738ab1e..3c30a2062f 100755 --- a/src/tm_mad/vcenter/resize +++ b/src/tm_mad/vcenter/resize @@ -72,7 +72,9 @@ begin rescue Exception => e message = "Error resizing disk #{disk_id} for VM #{one_vm["NAME"]} "\ "Reason: #{e.message}\n#{e.backtrace}" - STDERR.puts error_message(message) + OpenNebula.log_error(message) + STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + exit -1 ensure vi_client.close_connection if vi_client diff --git a/src/um/UserPool.cc b/src/um/UserPool.cc index e35862b398..960efc5b28 100644 --- a/src/um/UserPool.cc +++ b/src/um/UserPool.cc @@ -871,16 +871,16 @@ auth_failure_driver: NebulaLog::log("AuM",Log::ERROR,oss); goto auth_failure; - + auth_failure_token: NebulaLog::log("AuM", Log::ERROR, "Token has expired."); goto auth_failure; - + auth_failure_nodriver: NebulaLog::log("AuM",Log::ERROR, "Auth Error: Authentication driver not enabled. 
" "Check AUTH_MAD in oned.conf"); - + auth_failure: user_id = -1; group_id = -1; @@ -920,9 +920,15 @@ bool UserPool::authenticate_server(User * user, string target_username; string second_token; + string egid; + + istringstream iss; + + int egid_i = -1; Nebula& nd = Nebula::instance(); AuthManager* authm = nd.get_authm(); + GroupPool* gpool = nd.get_gpool(); server_username = user->name; server_password = user->password; @@ -934,13 +940,34 @@ bool UserPool::authenticate_server(User * user, user->unlock(); // token = target_username:second_token - int rc = User::split_secret(token,target_username,second_token); + int rc = User::split_secret(token, target_username, second_token); if ( rc != 0 ) { goto wrong_server_token; } + // Look for a EGID in the user token. The second token can be: + // second_token = egid:server_admin_auth + // second_token = server_admin_auth + rc = User::split_secret(second_token, egid, second_token); + + if ( rc == -1 ) //No EGID found + { + egid_i = -1; + } + else + { + iss.str(egid); + + iss >> egid_i; + + if (iss.fail() || !iss.eof()) + { + goto wrong_server_token; + } + } + user = get_ro(target_username); if ( user == 0 ) @@ -964,6 +991,16 @@ bool UserPool::authenticate_server(User * user, user->unlock(); + //server_admin token set a EGID, update auth info + if ( egid_i != - 1 ) + { + group_id = egid_i; + gname = gpool->get_name(egid_i); + + group_ids.clear(); + group_ids.insert(egid_i); + } + if (result) { return true; diff --git a/src/vm/History.cc b/src/vm/History.cc index 0975ce90c4..0668094436 100644 --- a/src/vm/History.cc +++ b/src/vm/History.cc @@ -341,6 +341,62 @@ string& History::to_xml(string& xml, bool database) const /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +string& History::to_json(string& json) const +{ + ostringstream oss; + + oss << "\"HISTORY\": {" << + "\"OID\": \"" << oid << "\"," << + "\"SEQ\": \"" 
<< seq << "\"," << + "\"HOSTNAME\": \"" << hostname << "\"," << + "\"HID\": \"" << hid << "\"," << + "\"CID\": \"" << cid << "\"," << + "\"STIME\": \"" << stime << "\"," << + "\"ETIME\": \"" << etime << "\"," << + "\"VM_MAD\": \"" << vmm_mad_name << "\"," << + "\"TM_MAD\": \"" << tm_mad_name << "\"," << + "\"DS_ID\": \"" << ds_id << "\"," << + "\"PSTIME\": \"" << prolog_stime << "\"," << + "\"PETIME\": \"" << prolog_etime << "\"," << + "\"RSTIME\": \"" << running_stime << "\"," << + "\"RETIME\": \"" << running_etime << "\"," << + "\"ESTIME\": \"" << epilog_stime << "\"," << + "\"EETIME\": \"" << epilog_etime << "\"," << + "\"ACTION\": \"" << action << "\"," << + "\"UID\": \"" << uid << "\"," << + "\"GID\": \"" << gid << "\"," << + "\"REQUEST_ID\": \"" << req_id << "\","; + + oss << "}"; + + json = oss.str(); + + return json; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +string& History::to_token(string& text) const +{ + ostringstream oss; + + oss << "HOSTNAME="; + one_util::escape_token(hostname, oss); + oss << "\n"; + + oss << "HID=" << hid << "\n" << + "CID=" << cid << "\n" << + "DS_ID=" << ds_id << "\n"; + + text = oss.str(); + + return text; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + string& History::to_xml_short(string& xml) const { ostringstream oss; diff --git a/src/vm/VirtualMachine.cc b/src/vm/VirtualMachine.cc index f149c3f97c..727f9868f1 100644 --- a/src/vm/VirtualMachine.cc +++ b/src/vm/VirtualMachine.cc @@ -457,13 +457,13 @@ const char * VirtualMachine::table = "vm_pool"; const char * VirtualMachine::db_names = "oid, name, body, uid, gid, last_poll, state, lcm_state, " - "owner_u, group_u, other_u, short_body"; + "owner_u, group_u, other_u, short_body, search_token"; const char * 
VirtualMachine::db_bootstrap = "CREATE TABLE IF NOT EXISTS " - "vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, uid INTEGER, " - "gid INTEGER, last_poll INTEGER, state INTEGER, lcm_state INTEGER, " - "owner_u INTEGER, group_u INTEGER, other_u INTEGER, short_body MEDIUMTEXT)"; - + "vm_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body MEDIUMTEXT, " + "uid INTEGER, gid INTEGER, last_poll INTEGER, state INTEGER, " + "lcm_state INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, " + "short_body MEDIUMTEXT, search_token MEDIUMTEXT"; const char * VirtualMachine::monit_table = "vm_monitoring"; @@ -489,7 +489,19 @@ int VirtualMachine::bootstrap(SqlDB * db) { int rc; - ostringstream oss_vm(VirtualMachine::db_bootstrap); + ostringstream oss_vm; + + oss_vm << VirtualMachine::db_bootstrap; + + if (db->fts_available()) + { + oss_vm << ", FULLTEXT ftidx(search_token))"; + } + else + { + oss_vm << ")"; + } + ostringstream oss_monit(VirtualMachine::monit_db_bootstrap); ostringstream oss_hist(History::db_bootstrap); ostringstream oss_showback(VirtualMachine::showback_db_bootstrap); @@ -1662,10 +1674,12 @@ int VirtualMachine::insert_replace(SqlDB *db, bool replace, string& error_str) ostringstream oss; int rc; - string xml_body, short_xml_body; + string xml_body, short_xml_body, text; + char * sql_name; char * sql_xml; char * sql_short_xml; + char * sql_text; sql_name = db->escape_str(name.c_str()); @@ -1698,6 +1712,13 @@ int VirtualMachine::insert_replace(SqlDB *db, bool replace, string& error_str) goto error_xml_short; } + sql_text = db->escape_str(to_token(text).c_str()); + + if ( sql_text == 0 ) + { + goto error_text; + } + if(replace) { oss << "REPLACE"; @@ -1719,17 +1740,21 @@ int VirtualMachine::insert_replace(SqlDB *db, bool replace, string& error_str) << owner_u << "," << group_u << "," << other_u << "," - << "'" << sql_short_xml << "'" + << "'" << sql_short_xml << "'," + << "'" << sql_text << "'" << ")"; db->free_str(sql_name); 
db->free_str(sql_xml); db->free_str(sql_short_xml); + db->free_str(sql_text); rc = db->exec_wr(oss); return rc; +error_text: + db->free_str(sql_text); error_xml_short: db->free_str(sql_short_xml); error_xml: @@ -2145,7 +2170,7 @@ string& VirtualMachine::to_xml_extended(string& xml, int n_history) const string snap_xml; string lock_str; - ostringstream oss; + ostringstream oss; oss << "" << "" << oid << "" @@ -2215,6 +2240,91 @@ string& VirtualMachine::to_xml_extended(string& xml, int n_history) const /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +string& VirtualMachine::to_json(string& json) const +{ + string template_json; + string user_template_json; + string history_json; + + ostringstream oss; + + oss << "{\"VM\":{" + << "\"ID\": \""<< oid << "\"," + << "\"UID\": \""<< uid << "\"," + << "\"GID\": \""<< gid << "\"," + << "\"UNAME\": \""<< uname << "\"," + << "\"GNAME\": \""<< gname << "\"," + << "\"NAME\": \""<< name << "\"," + << "\"LAST_POLL\": \""<< last_poll << "\"," + << "\"STATE\": \""<< state << "\"," + << "\"LCM_STATE\": \""<< lcm_state << "\"," + << "\"PREV_STATE\": \""<< prev_state << "\"," + << "\"PREV_LCM_STATE\": \""<< prev_lcm_state << "\"," + << "\"RESCHED\": \""<< resched << "\"," + << "\"STIME\": \""<< stime << "\"," + << "\"ETIME\": \""<< etime << "\"," + << "\"DEPLOY_ID\": \""<< deploy_id << "\"," + << obj_template->to_json(template_json) << "," + << user_obj_template->to_json(user_template_json); + + if ( hasHistory() ) + { + oss << ",\"HISTORY_RECORDS\": ["; + + oss << history->to_json(history_json); + + oss << "]"; + } + + oss << "}}"; + + + json = oss.str(); + + return json; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +string& VirtualMachine::to_token(string& text) const +{ + string template_text; + 
string user_template_text; + string history_text; + + ostringstream oss; + + oss << "UNAME="<< uname << "\n" + << "GNAME="<< gname << "\n"; + + oss << "NAME="; + one_util::escape_token(name, oss); + oss << "\n"; + + oss << "LAST_POLL="<< last_poll << "\n" + << "PREV_STATE="<< prev_state << "\n" + << "PREV_LCM_STATE="<< prev_lcm_state << "\n" + << "RESCHED="<< resched << "\n" + << "STIME="<< stime << "\n" + << "ETIME="<< etime << "\n" + << "DEPLOY_ID="<< deploy_id << "\n" + << obj_template->to_token(template_text) << "\n" + << user_obj_template->to_token(user_template_text); + + if ( hasHistory() ) + { + oss << "\n" << history->to_token(history_text); + } + + text = oss.str(); + + return text; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + string& VirtualMachine::to_xml_short(string& xml) { string disks_xml, monitoring_xml, user_template_xml, history_xml, nics_xml; @@ -2744,7 +2854,8 @@ static std::map> UPDATECONF_ATTRS = { "GUEST_AGENT"} }, {"INPUT", {"TYPE", "BUS"} }, {"GRAPHICS", {"TYPE", "LISTEN", "PASSWD", "KEYMAP"} }, - {"RAW", {"TYPE", "DATA", "DATA_VMX"} } + {"RAW", {"TYPE", "DATA", "DATA_VMX"} }, + {"CPU_MODEL", {"MODEL"} } }; /** @@ -2884,7 +2995,7 @@ int VirtualMachine::updateconf(VirtualMachineTemplate& tmpl, string &err) } // ------------------------------------------------------------------------- - // Update OS, FEATURES, INPUT, GRAPHICS, RAW + // Update OS, FEATURES, INPUT, GRAPHICS, RAW, CPU_MODEL // ------------------------------------------------------------------------- replace_vector_values(obj_template, &tmpl, "OS"); @@ -2901,6 +3012,8 @@ int VirtualMachine::updateconf(VirtualMachineTemplate& tmpl, string &err) replace_vector_values(obj_template, &tmpl, "RAW"); + replace_vector_values(obj_template, &tmpl, "CPU_MODEL"); + // ------------------------------------------------------------------------- // Update CONTEXT: any 
value // ------------------------------------------------------------------------- diff --git a/src/vmm/LibVirtDriverKVM.cc b/src/vmm/LibVirtDriverKVM.cc index 04220efdd6..881b937eb1 100644 --- a/src/vmm/LibVirtDriverKVM.cc +++ b/src/vmm/LibVirtDriverKVM.cc @@ -353,6 +353,12 @@ int LibVirtDriver::deployment_description_kvm( file << "\tone-" << vm->get_oid() << "" << endl; + // ------------------------------------------------------------------------ + // Title name + // ------------------------------------------------------------------------ + + file << "\t" << vm->get_name() << "" << endl; + // ------------------------------------------------------------------------ // CPU & Memory // ------------------------------------------------------------------------ @@ -1472,9 +1478,35 @@ int LibVirtDriver::deployment_description_kvm( // Metadata used by drivers // ------------------------------------------------------------------------ file << "\t\n" - << "\t\t" + << "\t\t\n" + << "\t\t\t" << one_util::escape_xml(vm->get_system_dir()) - << "\n" + << "\n" + << "" + << one_util::escape_xml(vm->get_name()) + << "\n" + << "\t\t\t" + << one_util::escape_xml(vm->get_uname()) + << "\n" + << "\t\t\t" + << vm->get_uid() + << "\n" + << "\t\t\t" + << one_util::escape_xml(vm->get_gname()) + << "\n" + << "\t\t\t" + << vm->get_gid() + << "\n" + << "\t\t\t" + << Nebula::instance().code_version() + << "\n" + << "\t\t\t" + << vm->get_stime() + << "\n" + << "\t\t\t" + << time(0) + << "\n" + << "\t\t\n" // << "\t\t\n" << vm->to_xml(vm_xml) << "\t\t\n" << "\t\n"; diff --git a/src/vmm/VirtualMachineManager.cc b/src/vmm/VirtualMachineManager.cc index 716c9fbcf7..9d275ebc18 100644 --- a/src/vmm/VirtualMachineManager.cc +++ b/src/vmm/VirtualMachineManager.cc @@ -2420,6 +2420,19 @@ void VirtualMachineManager::detach_nic_action( return; } + int uid = vm->get_created_by_uid(); + int owner_id = vm->get_uid(); + vm->unlock(); + + password = Nebula::instance().get_upool()->get_token_password(uid, 
owner_id); + + vm = vmpool->get(vid); + + if (vm == 0) + { + return; + } + if (!vm->hasHistory()) { goto error_history; diff --git a/src/vmm/VirtualMachineManagerDriver.cc b/src/vmm/VirtualMachineManagerDriver.cc index 54edfe30ce..b6fb8e5cea 100644 --- a/src/vmm/VirtualMachineManagerDriver.cc +++ b/src/vmm/VirtualMachineManagerDriver.cc @@ -39,7 +39,7 @@ VirtualMachineManagerDriver::VirtualMachineManagerDriver( bool sudo, VirtualMachinePool * pool): Mad(userid,attrs,sudo), driver_conf(true), keep_snapshots(false), - vmpool(pool) + ds_live_migration(false), vmpool(pool) { map::const_iterator it; char * error_msg = 0; @@ -99,6 +99,11 @@ VirtualMachineManagerDriver::VirtualMachineManagerDriver( // ------------------------------------------------------------------------- driver_conf.get("KEEP_SNAPSHOTS", keep_snapshots); + // ------------------------------------------------------------------------- + // Parse KEEP_SNAPSHOTS + // ------------------------------------------------------------------------- + driver_conf.get("DS_LIVE_MIGRATION", ds_live_migration); + // ------------------------------------------------------------------------- // Parse IMPORTED_VMS_ACTIONS string and init the action set // ------------------------------------------------------------------------- diff --git a/src/vmm_mad/exec/vmm_exec_vcenter.conf b/src/vmm_mad/exec/vmm_exec_vcenter.conf index 3cc83a2e78..484f0b3af1 100644 --- a/src/vmm_mad/exec/vmm_exec_vcenter.conf +++ b/src/vmm_mad/exec/vmm_exec_vcenter.conf @@ -16,8 +16,8 @@ # Default configuration attributes for the vCenter driver # (all domains will use these values as defaults) -# Valid atributes: +# Valid attributes: # - nic[model] -NIC=[MODEL="VirtualE1000"] - +# WARNING: Do not use!! 
+# Not honored due to https://github.com/OpenNebula/one/issues/2855 diff --git a/src/vmm_mad/remotes/kvm/deploy b/src/vmm_mad/remotes/kvm/deploy index c05d425875..488b601653 100755 --- a/src/vmm_mad/remotes/kvm/deploy +++ b/src/vmm_mad/remotes/kvm/deploy @@ -19,16 +19,33 @@ source $(dirname $0)/../../etc/vmm/kvm/kvmrc source $(dirname $0)/../../scripts_common.sh -domain=$1 +DEP_FILE=$1 +DEP_FILE_LOCATION=$(dirname $DEP_FILE) -mkdir -p `dirname $domain` -cat > $domain +mkdir -p $DEP_FILE_LOCATION +cat > $DEP_FILE -data=`virsh --connect $LIBVIRT_URI create $domain` +DATA=`virsh --connect $LIBVIRT_URI create $DEP_FILE` if [ "x$?" = "x0" ]; then - echo $data | sed 's/Domain //' | sed 's/ created from .*$//' + + DOMAIN_ID=$(echo $DATA | sed 's/Domain //' | sed 's/ created from .*$//') + echo $DOMAIN_ID + + # redefine potential snapshots + for SNAPSHOT_MD_XML in $(ls ${DEP_FILE_LOCATION}/snap-*.xml 2>/dev/null); do + + # query UUID, but only once + UUID=${UUID:-$(virsh --connect $LIBVIRT_URI dominfo $DOMAIN_ID | grep UUID: | awk '{print $2}')} + + # replace uuid in the snapshot metadata xml + sed -i "s%[[:alnum:]-]*%$UUID%" $SNAPSHOT_MD_XML + + # redefine the snapshot using the xml metadata file + virsh --connect $LIBVIRT_URI snapshot-create $DOMAIN_ID $SNAPSHOT_MD_XML --redefine > /dev/null || true + done + else - error_message "Could not create domain from $domain" + error_message "Could not create domain from $DEP_FILE" exit -1 fi diff --git a/src/vmm_mad/remotes/kvm/kvmrc b/src/vmm_mad/remotes/kvm/kvmrc index cfecfb6650..8a643324c4 100644 --- a/src/vmm_mad/remotes/kvm/kvmrc +++ b/src/vmm_mad/remotes/kvm/kvmrc @@ -20,6 +20,9 @@ export LIBVIRT_URI=qemu:///system export QEMU_PROTOCOL=qemu+ssh +export LIBVIRT_MD_URI=http://opennebula.org/xmlns/libvirt/1.0 +export LIBVIRT_MD_KEY=one + # Seconds to wait after shutdown until timeout export SHUTDOWN_TIMEOUT=300 diff --git a/src/vmm_mad/remotes/kvm/snapshot_create b/src/vmm_mad/remotes/kvm/snapshot_create index 
ac2a445c2e..96357babb9 100755 --- a/src/vmm_mad/remotes/kvm/snapshot_create +++ b/src/vmm_mad/remotes/kvm/snapshot_create @@ -22,10 +22,38 @@ source $(dirname $0)/../../scripts_common.sh DOMAIN="$1" SNAP_ID="$2" -data=`virsh --connect $LIBVIRT_URI snapshot-create-as $DOMAIN --name "snap-${SNAP_ID}"` +# -------- Get datastore location from libvirt metadata ------------ + +DRIVER_PATH=$(dirname $0) +XPATH="${DRIVER_PATH}/../../datastore/xpath.rb --stdin" + +METADATA_XML=`virsh --connect $LIBVIRT_URI metadata $DOMAIN $LIBVIRT_MD_URI $LIBVIRT_MD_KEY` + +unset i XPATH_ELEMENTS + +while IFS= read -r -d '' element; do + XPATH_ELEMENTS[i++]="$element" +done < <(echo "$METADATA_XML" | $XPATH /vm/system_datastore/) + +unset i + +DATASTORE_PATH="${XPATH_ELEMENTS[i++]}" + + +# -------- Create the snapshot and dump its metadata to xml file ------------ + +SNAP_NAME="snap-${SNAP_ID}" +data=`virsh --connect $LIBVIRT_URI snapshot-create-as $DOMAIN --name "$SNAP_NAME"` if [ "$?" = "0" ]; then echo "$data" | awk '{print $3}' + + if [ -n "$DATASTORE_PATH" ]; then + SNAP_XML_PATH="${DATASTORE_PATH}/${SNAP_NAME}.xml" + + # dump snapshot metadata xml to the VM location + virsh --connect $LIBVIRT_URI snapshot-dumpxml $DOMAIN $SNAP_NAME > $SNAP_XML_PATH || true + fi else error_message "Could not create snapshot $NAME for domain $DOMAIN." 
exit -1 diff --git a/src/vmm_mad/remotes/kvm/snapshot_delete b/src/vmm_mad/remotes/kvm/snapshot_delete index 7e82641277..7ea19d53d6 100755 --- a/src/vmm_mad/remotes/kvm/snapshot_delete +++ b/src/vmm_mad/remotes/kvm/snapshot_delete @@ -22,6 +22,28 @@ source $(dirname $0)/../../scripts_common.sh DOMAIN="$1" NAME="$2" +# -------- Get datastore location from libvirt metadata ------------ + +DRIVER_PATH=$(dirname $0) +XPATH="${DRIVER_PATH}/../../datastore/xpath.rb --stdin" + +METADATA_XML=`virsh --connect $LIBVIRT_URI metadata $DOMAIN $LIBVIRT_MD_URI $LIBVIRT_MD_KEY` + +unset i XPATH_ELEMENTS + +while IFS= read -r -d '' element; do + XPATH_ELEMENTS[i++]="$element" +done < <(echo "$METADATA_XML" | $XPATH /vm/system_datastore/) + +unset i + +DATASTORE_PATH="${XPATH_ELEMENTS[i++]}" + + +# ------ Delete snapshot metadata and the snapshot itself ---------- + +[ -n "$DATASTORE_PATH" ] && rm ${DATASTORE_PATH}/${NAME}.xml 2>/dev/null || true + exec_and_log \ "virsh --connect $LIBVIRT_URI snapshot-delete $DOMAIN $NAME" \ "Could not delete snapshot $NAME for domain $DOMAIN." 
diff --git a/src/vmm_mad/remotes/lib/lxd/client.rb b/src/vmm_mad/remotes/lib/lxd/client.rb index c26cccad24..0f500d15d0 100644 --- a/src/vmm_mad/remotes/lib/lxd/client.rb +++ b/src/vmm_mad/remotes/lib/lxd/client.rb @@ -135,10 +135,15 @@ end # Error used for raising LXDClient exception when response is error return value class LXDError < StandardError - attr_reader :body + + attr_reader :body, :error, :error_code, :type def initialize(msg = 'LXD API error') @body = msg + @error = @body['error'] + @error_code = @body['error_code'] + @type = @body['type'] super end + end diff --git a/src/vmm_mad/remotes/lib/lxd/command.rb b/src/vmm_mad/remotes/lib/lxd/command.rb index 1f64a843db..9437144bfb 100644 --- a/src/vmm_mad/remotes/lib/lxd/command.rb +++ b/src/vmm_mad/remotes/lib/lxd/command.rb @@ -32,21 +32,12 @@ module Command begin fd = lock if block - Open3.popen3(cmd) {|i, o, e, t| - rc = t.value.exitstatus - - stdout = o.read - stderr = e.read - - o.close - e.close - } - rescue + stdout, stderr, s = Open3.capture3(cmd) ensure unlock(fd) if block end - [rc, stdout, stderr] + [s.exitstatus, stdout, stderr] end def self.execute_once(cmd, lock) diff --git a/src/vmm_mad/remotes/lib/lxd/container.rb b/src/vmm_mad/remotes/lib/lxd/container.rb index 99cf724adc..5738a7f1dd 100644 --- a/src/vmm_mad/remotes/lib/lxd/container.rb +++ b/src/vmm_mad/remotes/lib/lxd/container.rb @@ -169,9 +169,8 @@ class Container err = 'cannot create user data directory:' rc, o, e = Command.execute("sudo #{cmd}", true) if e.include?(err) - return [rc, o, e] unless rc != 0 - - OpenNebula.log_error("#{__method__}: Failed to run command #{cmd}: #{e}") + log = "Failed to run command #{cmd}: #{e}" + OpenNebula.log_error("#{__method__}: #{log}") unless rc.zero? 
[rc, o, e] end @@ -232,11 +231,17 @@ class Container return unless @one @one.get_disks.each do |disk| + if @one.volatile?(disk) + e = "disk #{disk['DISK_ID']} type #{disk['TYPE']} not supported" + OpenNebula.log_error e + next + end + status = setup_disk(disk, operation) return nil unless status end - return unless @one.has_context? + return 'no context' unless @one.has_context? csrc = @lxc['devices']['context']['source'].clone @@ -273,7 +278,7 @@ class Container # Removes the context section from the LXD configuration and unmap the # context device def detach_context - return unless @one.has_context? + return 'no context' unless @one.has_context? csrc = @lxc['devices']['context']['source'].clone diff --git a/src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb b/src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb index 15ca6745a0..bbc5446e13 100644 --- a/src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb +++ b/src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb @@ -268,6 +268,8 @@ class Mapper # Look for fstab and mount rootfs in path. First partition with # a /etc/fstab file is used as rootfs and it is kept mounted partitions.each do |p| + OpenNebula.log("Looking for fstab on #{p['path']}") + rc = mount_dev(p['path'], path) return false if !rc @@ -277,7 +279,7 @@ class Mapper cmd = "#{bin} #{path}/etc/fstab" - rc, fstab, e = Command.execute(cmd, false) + _rc, fstab, _e = Command.execute(cmd, false) if fstab.empty? return false unless umount_dev(p['path']) @@ -285,11 +287,12 @@ class Mapper next end + OpenNebula.log("Found fstab on #{p['path']}") break end if fstab.empty? 
- OpenNebula.log_error("mount: No fstab file found in disk partitions") + OpenNebula.log_error('No fstab file found') return false end @@ -310,7 +313,7 @@ class Mapper next end - next if mount_point == '/' || mount_point == 'swap' + next if %w[/ swap].include?(mount_point) partitions.each { |p| next if p[key] != value @@ -339,30 +342,9 @@ class Mapper def mount_dev(dev, path) OpenNebula.log_info "Mounting #{dev} at #{path}" - rc, out, err = Command.execute("#{COMMANDS[:lsblk]} -J", false) + return false if mount_on?(path) - if rc != 0 || out.empty? - OpenNebula.log_error("mount_dev: #{err}") - return false - end - - if out.include?(path) - OpenNebula.log_error("mount_dev: Mount detected in #{path}") - return false - end - - if path =~ /.*\/rootfs/ - cmd = COMMANDS[:su_mkdir] - else - cmd = COMMANDS[:mkdir] - end - - rc, _out, err = Command.execute("#{cmd} #{path}", false) - - if rc != 0 - OpenNebula.log_error("mount_dev: #{err}") - return false - end + mkdir_safe(path) rc, _out, err = Command.execute("#{COMMANDS[:mount]} #{dev} #{path}", true) @@ -381,7 +363,7 @@ class Mapper rc, _o, e = Command.execute("#{COMMANDS[:umount]} #{dev}", true) - return true if rc.zero? + return true if rc.zero? || e.include?('not mounted') OpenNebula.log_error("umount_dev: #{e}") nil @@ -446,4 +428,56 @@ class Mapper end end + # Returns true if device has mapped partitions + def parts_on?(device) + partitions = lsblk(device) + return true if partitions[0]['type'] == 'part' + + false + end + + def show_parts(device) + action_parts(device, '-s -av') + end + + def hide_parts(device) + action_parts(device, '-d') + end + + # Runs kpartx vs a device with required flags as arguments + def action_parts(device, action) + cmd = "#{COMMANDS[:kpartx]} #{action} #{device}" + rc, _out, err = Command.execute(cmd, false) + + return true if rc.zero? 
+ + OpenNebula.log_error("#{__method__}: #{err}") + false + end + + def mount_on?(path) + _rc, out, _err = Command.execute("#{COMMANDS[:lsblk]} -J", false) + + if out.include?(path) + OpenNebula.log_error("mount_dev: Mount detected in #{path}") + return true + end + false + end + + def mkdir_safe(path) + if path =~ /.*\/rootfs/ + cmd = COMMANDS[:su_mkdir] + else + cmd = COMMANDS[:mkdir] + end + + rc, _out, err = Command.execute("#{cmd} #{path}", false) + + return true if rc.zero? + + OpenNebula.log_error("#{__method__}: #{err}") + false + end + end diff --git a/src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb b/src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb index 5a3f390027..51e4dc7aba 100644 --- a/src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb +++ b/src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb @@ -20,11 +20,11 @@ $LOAD_PATH.unshift File.dirname(__FILE__) require 'mapper' -class Qcow2Mapper < Mapper +class Qcow2Mapper < Mapper # Max number of block devices. This should be set to the parameter used # to load the nbd kernel module (default in kernel is 16) - NBDS_MAX = 256 + NBDS_MAX = 256 # TODO: Read system config file def do_map(one_vm, disk, _directory) device = nbd_device @@ -32,26 +32,34 @@ class Qcow2Mapper < Mapper return if device.empty? dsrc = one_vm.disk_source(disk) - cmd = "#{COMMANDS[:nbd]} -c #{device} #{dsrc}" + File.chmod(0o664, dsrc) if File.symlink?(one_vm.sysds_path) - File.chmod(0664, dsrc) if File.symlink?(one_vm.sysds_path) + map = "#{COMMANDS[:nbd]} -c #{device} #{dsrc}" + rc, _out, err = Command.execute(map, true) - rc, _out, err = Command.execute(cmd, true) - - if rc != 0 + unless rc.zero? 
OpenNebula.log_error("#{__method__}: #{err}") return end - sleep 0.5 # TODO: improve settledown, lsblk -f fails + # TODO: improve wait condition + sleep 1 # wait for parts to come out + + show_parts(device) unless parts_on?(device) device end def do_unmap(device, _one_vm, _disk, _directory) + #After mapping and unmapping a qcow2 disk the next mapped qcow2 may collide with the previous one. + #The use of kpartx before unmapping seems to prevent this behavior on the nbd module used with + #the kernel versions in ubuntu 16.04 + # + # TODO: avoid using if kpartx was not used + hide_parts(device) cmd = "#{COMMANDS[:nbd]} -d #{device}" - rc, _out, err = Command.execute(cmd, true) + rc, _out, err = Command.execute(cmd, false) return true if rc.zero? @@ -61,21 +69,20 @@ class Qcow2Mapper < Mapper private - def nbd_device() + def nbd_device sys_parts = lsblk('') - device_id = -1 nbds = [] - sys_parts.each { |p| + sys_parts.each do |p| m = p['name'].match(/nbd(\d+)/) - next if !m + next unless m nbds << m[1].to_i - } + end - NBDS_MAX.times { |i| + NBDS_MAX.times do |i| return "/dev/nbd#{i}" unless nbds.include?(i) - } + end OpenNebula.log_error("#{__method__}: Cannot find free nbd device") diff --git a/src/vmm_mad/remotes/lib/lxd/mapper/raw.rb b/src/vmm_mad/remotes/lib/lxd/mapper/raw.rb index 0ca9621c2a..cecb386365 100644 --- a/src/vmm_mad/remotes/lib/lxd/mapper/raw.rb +++ b/src/vmm_mad/remotes/lib/lxd/mapper/raw.rb @@ -44,7 +44,7 @@ class FSRawMapper < Mapper return true if rc.zero? 
- OpenNebula.log_error("#{__method__}: #{err}") if rc != 0 + OpenNebula.log_error("#{__method__}: #{err}") nil end diff --git a/src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb b/src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb index b3fbd55f89..5689110cbb 100644 --- a/src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb +++ b/src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb @@ -19,6 +19,7 @@ $LOAD_PATH.unshift File.dirname(__FILE__) require 'mapper' +require 'tmpdir' # Ceph RBD mapper class RBDMapper < Mapper @@ -32,12 +33,20 @@ class RBDMapper < Mapper cmd = "#{COMMANDS[:rbd]} #{@ceph_user} map #{dsrc}" - rc, out, err = Command.execute(cmd, false) + rc, out, err = Command.execute(cmd, true) - return out.chomp if rc.zero? + unless rc.zero? - OpenNebula.log_error("#{__method__}: #{err}") - nil + OpenNebula.log_error("#{__method__}: #{err}") + return + end + + # TODO: improve wait condition + sleep 1 # wait for partition table + + device = out.chomp + try_mount(device) + device end def do_unmap(device, _one_vm, _disk, _directory) @@ -51,4 +60,13 @@ class RBDMapper < Mapper nil end + private + + # This function tries to mount mapped devices to force update of partition + # tables + def try_mount(dev) + cmd = "#{COMMANDS[:mount]} --fake #{dev} /mnt" + Command.execute(cmd, false) + end + end diff --git a/src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb b/src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb index 97d7d7da39..50a87f32f9 100644 --- a/src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb +++ b/src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb @@ -196,6 +196,8 @@ class OpenNebulaVM disks = @xml.elements('//TEMPLATE/DISK') disks.each do |n| + next if volatile?(n) + hash.update(disk(n, nil, nil)) end @@ -305,6 +307,13 @@ class OpenNebulaVM { disk_name => disk } end + # Return true if disk if volatile + def volatile?(disk) + return true if %w[fs swap].include? 
disk['TYPE'] + + false + end + #--------------------------------------------------------------------------- # Container Mapping: Extra Configuration & Profiles #--------------------------------------------------------------------------- @@ -340,7 +349,19 @@ class OpenNebulaVM def profile(hash) profile = @xml['//USER_TEMPLATE/LXD_PROFILE'] - profile = 'default' if profile.empty? + + if profile.empty? + profile = 'default' + else + begin + LXDClient.new.get("profiles/#{profile}") + rescue LXDError => e + raise e unless e.error_code == 404 + + OpenNebula.log_error "Profile \"#{profile}\" not found\n#{e}" + profile = 'default' + end + end hash['profiles'] = [profile] end diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb index fe25ff470b..afba5df09c 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/host.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/host.rb @@ -243,9 +243,21 @@ class ClusterComputeResource return rp_info end - def monitor_host_systems - host_info = "" + def hostname_to_moref(hostname) + result = filter_hosts + moref = "" + result.each do |r| + if r.obj.name == hostname + moref = r.obj._ref + break + end + end + raise "Host #{hostname} was not found" if moref.empty? 
+ return moref + end + + def filter_hosts view = @vi_client.vim.serviceContent.viewManager.CreateContainerView({ container: @item, #View for Hosts inside this cluster type: ['HostSystem'], @@ -284,7 +296,13 @@ class ClusterComputeResource ) result = pc.RetrieveProperties(:specSet => [filterSpec]) + view.DestroyView # Destroy the view + return result + end + def monitor_host_systems + host_info = "" + result = filter_hosts hosts = {} result.each do |r| hashed_properties = r.to_hash @@ -319,8 +337,6 @@ class ClusterComputeResource host_info << "]" end - view.DestroyView # Destroy the view - return host_info end diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb index 020b8a164f..b037e29fac 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb @@ -670,10 +670,9 @@ class VirtualMachine < VCenterDriver::Template # Create and reconfigure VM related methods ############################################################################ - # This function creates a new VM from the @one_item XML and returns the + # This function creates a new VM from the driver_action XML and returns the # VMware ref - # @param one_item OpenNebula::VirtualMachine - # @param vi_client VCenterDriver::VIClient + # @param drv_action XML representing the deploy action # @return String vmware ref def clone_vm(drv_action) vcenter_name = get_vcenter_name @@ -1050,7 +1049,7 @@ class VirtualMachine < VCenterDriver::Template key = backing.port.portgroupKey end - @nics[key] = Nic.vc_nic(d) + @nics["#{key}#{d.key}"] = Nic.vc_nic(d) end @nics.reject{|k| k == :macs} @@ -1163,8 +1162,8 @@ class VirtualMachine < VCenterDriver::Template return RbVmomi::VIM::VirtualVmxnet2 when 'virtualvmxnet3', 'vmxnet3' return RbVmomi::VIM::VirtualVmxnet3 - else # If none matches, use VirtualE1000 - return RbVmomi::VIM::VirtualE1000 + else # If none matches, use vmxnet3 
+ return RbVmomi::VIM::VirtualVmxnet3 end end @@ -1217,10 +1216,17 @@ class VirtualMachine < VCenterDriver::Template if !unmanaged_nics.empty? nics = get_vcenter_nics - select = ->(name){ + select_net =->(ref){ device = nil nics.each do |nic| - next unless nic.deviceInfo.summary == name + type = nic.backing.class + if type == NET_CARD + nref = nic.backing.network._ref + else + nref = nic.backing.port.portgroupKey + end + + next unless nref == ref device = nic break end @@ -1231,20 +1237,20 @@ class VirtualMachine < VCenterDriver::Template } unmanaged_nics.each do |unic| - vnic = select.call(unic['BRIDGE']) - vcenter_nic_class = vnic.class - new_model = unic['MODEL'] && !unic['MODEL'].empty? && !unic['MODEL'].nil? - opennebula_nic_class = nic_model_class(unic['MODEL']) if new_model + vnic = select_net.call(unic['VCENTER_NET_REF']) + nic_class = vnic.class + new_model = nic_model_class(unic['MODEL']) if unic['MODEL'] - if new_model && opennebula_nic_class != vcenter_nic_class - # delete actual nic and update the new one. + # delete actual nic and update the new one. + if new_model && new_model != nic_class device_change << { :device => vnic, :operation => :remove } - device_change << calculate_add_nic_spec(unic) + device_change << calculate_add_nic_spec(unic, vnic.unitNumber) else vnic.macAddress = unic['MAC'] device_change << { :device => vnic, :operation => :edit } end end + end rescue Exception => e raise "There is a problem with your vm NICS, make sure that they are working properly. Error: #{e.message}" @@ -1363,14 +1369,21 @@ class VirtualMachine < VCenterDriver::Template device_change = [] if option == :all + dchange = [] + # detached? condition indicates that the nic exists in OpeNebula but not # in vCenter nics_each(:detached?) do |nic| - device_change << { + dchange << { :operation => :remove, :device => nic.vc_item } end + if !dchange.empty? 
+ dspec_hash = { :deviceChange => dchange } + dspec = RbVmomi::VIM.VirtualMachineConfigSpec(dspec_hash) + @item.ReconfigVM_Task(:spec => dspec).wait_for_completion + end end # no_exits? condition indicates that the nic does not exist in vCenter @@ -1492,21 +1505,15 @@ class VirtualMachine < VCenterDriver::Template end # Returns an array of actions to be included in :deviceChange - def calculate_add_nic_spec(nic) + def calculate_add_nic_spec(nic, unumber = nil) mac = nic["MAC"] pg_name = nic["BRIDGE"] - model = '' + default = VCenterDriver::VIHelper.get_default('VM/TEMPLATE/NIC/MODEL') + tmodel = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL'] + + model = nic['MODEL'] || tmodel || default + raise 'nic model cannot be empty!' if model == '' - if !one_item.retrieve_xmlelements('TEMPLATE/NIC_DEFAULT/MODEL').nil? && - !one_item.retrieve_xmlelements('TEMPLATE/NIC_DEFAULT/MODEL').empty? - model = one_item['TEMPLATE/NIC_DEFAULT/MODEL'] - elsif (model.nil? || model.empty?) && - !nic['MODEL'].nil? && - !nic['MODEL'].empty? 
- model = nic['MODEL'] - else - model = VCenterDriver::VIHelper.get_default('VM/TEMPLATE/NIC/MODEL') - end vnet_ref = nic["VCENTER_NET_REF"] backing = nil @@ -1539,24 +1546,7 @@ class VirtualMachine < VCenterDriver::Template card_num += 1 if is_nic?(dv) end - nic_card = case model - when "virtuale1000", "e1000" - RbVmomi::VIM::VirtualE1000 - when "virtuale1000e", "e1000e" - RbVmomi::VIM::VirtualE1000e - when "virtualpcnet32", "pcnet32" - RbVmomi::VIM::VirtualPCNet32 - when "virtualsriovethernetcard", "sriovethernetcard" - RbVmomi::VIM::VirtualSriovEthernetCard - when "virtualvmxnetm", "vmxnetm" - RbVmomi::VIM::VirtualVmxnetm - when "virtualvmxnet2", "vmnet2" - RbVmomi::VIM::VirtualVmxnet2 - when "virtualvmxnet3", "vmxnet3" - RbVmomi::VIM::VirtualVmxnet3 - else # If none matches, use VirtualE1000 - RbVmomi::VIM::VirtualE1000 - end + nic_card = nic_model_class(model) if network.class == RbVmomi::VIM::Network backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo( @@ -1573,7 +1563,13 @@ class VirtualMachine < VCenterDriver::Template end # grab the last unitNumber to ensure the nic to be added at the end - @unic = @unic || get_vcenter_nics.map{|d| d.unitNumber}.max rescue 0 + if !unumber + @unic = @unic || get_vcenter_nics.map{|d| d.unitNumber}.max || 0 + unumber = @unic += 1 + else + @unic = unumber + end + card_spec = { :key => 0, :deviceInfo => { @@ -1583,7 +1579,7 @@ class VirtualMachine < VCenterDriver::Template :backing => backing, :addressType => mac ? 
'manual' : 'generated', :macAddress => mac, - :unitNumber => @unic+=1 + :unitNumber => unumber } if (limit || rsrv) && (limit > 0) @@ -1609,17 +1605,11 @@ class VirtualMachine < VCenterDriver::Template def calculate_add_nic_spec_autogenerate_mac(nic) pg_name = nic["BRIDGE"] model = '' + default = VCenterDriver::VIHelper.get_default('VM/TEMPLATE/NIC/MODEL') + tmodel = one_item['USER_TEMPLATE/NIC_DEFAULT/MODEL'] + + model = nic['MODEL'] || tmodel || default - if !one_item.retrieve_xmlelements('TEMPLATE/NIC_DEFAULT/MODEL').nil? && - !one_item.retrieve_xmlelements('TEMPLATE/NIC_DEFAULT/MODEL').empty? - model = one_item['TEMPLATE/NIC_DEFAULT/MODEL'] - elsif (model.nil? || model.empty?) && - !nic['MODEL'].nil? && - !nic['MODEL'].empty? - model = nic['MODEL'] - else - model = VCenterDriver::VIHelper.get_default('VM/TEMPLATE/NIC/MODEL') - end vnet_ref = nic["VCENTER_NET_REF"] backing = nil @@ -1651,24 +1641,7 @@ class VirtualMachine < VCenterDriver::Template card_num += 1 if is_nic?(dv) end - nic_card = case model - when "virtuale1000", "e1000" - RbVmomi::VIM::VirtualE1000 - when "virtuale1000e", "e1000e" - RbVmomi::VIM::VirtualE1000e - when "virtualpcnet32", "pcnet32" - RbVmomi::VIM::VirtualPCNet32 - when "virtualsriovethernetcard", "sriovethernetcard" - RbVmomi::VIM::VirtualSriovEthernetCard - when "virtualvmxnetm", "vmxnetm" - RbVmomi::VIM::VirtualVmxnetm - when "virtualvmxnet2", "vmnet2" - RbVmomi::VIM::VirtualVmxnet2 - when "virtualvmxnet3", "vmxnet3" - RbVmomi::VIM::VirtualVmxnet3 - else # If none matches, use VirtualE1000 - RbVmomi::VIM::VirtualE1000 - end + nic_card = nic_model_class(model) if network.class == RbVmomi::VIM::Network backing = RbVmomi::VIM.VirtualEthernetCardNetworkBackingInfo( @@ -1714,17 +1687,13 @@ class VirtualMachine < VCenterDriver::Template end # Add NIC to VM - def attach_nic + def attach_nic(one_nic) spec_hash = {} - nic = nil - - # Extract nic from driver action - nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first begin 
# A new NIC requires a vcenter spec attach_nic_array = [] - attach_nic_array << calculate_add_nic_spec(nic) + attach_nic_array << calculate_add_nic_spec(one_nic) spec_hash[:deviceChange] = attach_nic_array if !attach_nic_array.empty? # Reconfigure VM @@ -1738,12 +1707,9 @@ class VirtualMachine < VCenterDriver::Template end # Detach NIC from VM - def detach_nic + def detach_nic(mac) spec_hash = {} - # Extract nic from driver action - one_nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first - mac = one_nic["MAC"] nic = nic(mac) rescue nil return if !nic || nic.no_exists? @@ -2452,7 +2418,8 @@ class VirtualMachine < VCenterDriver::Template begin # retrieve host from DRS - resourcepool = config[:cluster].resourcePool + one_cluster = config[:cluster] + resourcepool = one_cluster.item.resourcePool datastore = config[:datastore] if datastore @@ -2461,6 +2428,18 @@ class VirtualMachine < VCenterDriver::Template datastore: datastore, } + if config[:esx_migration_list].is_a?(String) + if config[:esx_migration_list]=="" + relocate_spec_params[:host] = config[:cluster].item.host.sample + elsif config[:esx_migration_list]!="Selected_by_DRS" + hostnames = config[:esx_migration_list].split(' ') + hostname = hostnames.sample + host_moref = one_cluster.hostname_to_moref(hostname) + relocate_spec_params[:host] = host_moref + end + end + + relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(relocate_spec_params) @item.RelocateVM_Task(spec: relocate_spec, priority: "defaultPriority").wait_for_completion else @@ -2914,7 +2893,13 @@ class VirtualMachine < VCenterDriver::Template return one_vm end - def self.migrate_routine(vm_id, src_host, dst_host, ds = nil) + # Migrate a VM to another cluster and/or datastore + # @params [int] vm_id ID of the VM to be migrated + # params [String] src_host Name of the source cluster + # params [String] dst_host Name of the target cluster + # params [Bool] hot_ds Wether this is a DS migration with the VM running or not + # params 
[int] ds Destination datastore ID + def self.migrate_routine(vm_id, src_host, dst_host, hot_ds = false, ds = nil) one_client = OpenNebula::Client.new pool = OpenNebula::HostPool.new(one_client) pool.info @@ -2939,14 +2924,29 @@ class VirtualMachine < VCenterDriver::Template vm.info dst_host.info + esx_migration_list = dst_host['/HOST/TEMPLATE/ESX_MIGRATION_LIST'] + # required vcenter objects vc_vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm['/VM/DEPLOY_ID']) + + vc_vm.vm_id = vm_id + error = !vc_vm.disks_each(:managed?).empty? && !ds.nil? + # We know this comes from a migration from poweroff state (not a poweroff migration) + # since all the other cases are treated in vmm drivers: save, migrate and shutdown + raise 'datastore migration from poweroff state with managed disks is not supported' if error + ccr_ref = dst_host['/HOST/TEMPLATE/VCENTER_CCR_REF'] - vc_host = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client).item + vc_host = VCenterDriver::ClusterComputeResource.new_from_ref(ccr_ref, vi_client) config = { :cluster => vc_host } config[:datastore] = datastore if datastore + if hot_ds + config[:esx_migration_list] = esx_migration_list if esx_migration_list + else + config[:esx_migration_list] = "Selected_by_DRS" + end + vc_vm.migrate(config) vm.replace({ 'VCENTER_CCR_REF' => ccr_ref}) @@ -3016,7 +3016,9 @@ class VirtualMachine < VCenterDriver::Template id = one_item["ID"] || one_item["VM/ID"] rescue -1 self.new(vi_client, ref, id).tap do |vm| - vm.one_item = one_item + if one_item.instance_of?(OpenNebula::VirtualMachine) + vm.one_item = one_item + end end end diff --git a/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb b/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb index a0dc9c31d8..160493d8c9 100644 --- a/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb +++ b/src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb @@ -110,7 +110,7 @@ class Template :name => template_name, :spec => 
clone_spec).wait_for_completion template_ref = template._ref - rescue Exception => e + rescue StandardError => e if !e.message.start_with?('DuplicateName') error = "Could not create the template clone. Reason: #{e.message}" return error, nil @@ -159,7 +159,7 @@ class Template if self['config.template'] @item.MarkAsVirtualMachine(:pool => get_rp, :host => self['runtime.host']) end - rescue Exception => e + rescue StandardError => e @item.MarkAsTemplate() error = "Cannot mark the template as a VirtualMachine. Not using linked clones. Reason: #{e.message}/#{e.backtrace}" use_linked_clones = false @@ -186,7 +186,7 @@ class Template end @item.ReconfigVM_Task(:spec => spec).wait_for_completion if !spec[:deviceChange].empty? - rescue Exception => e + rescue StandardError => e error = "Cannot create the delta disks on top of the template. Reason: #{e.message}." use_linked_clones = false return error, use_linked_clones @@ -299,7 +299,7 @@ class Template end end - rescue Exception => e + rescue StandardError => e error = "\n There was an error trying to create an image for disk in vcenter template. Reason: #{e.message}\n#{e.backtrace}" ensure unlock @@ -529,7 +529,7 @@ class Template npool.info_all end end - rescue Exception => e + rescue StandardError => e error = "\n There was an error trying to create a virtual network to repesent a vCenter network for a VM or VM Template. Reason: #{e.message}" ensure unlock @@ -805,7 +805,6 @@ class Template end def vm_to_one(vm_name) - str = "NAME = \"#{vm_name}\"\n"\ "CPU = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\ "vCPU = \"#{@vm_info["config.hardware.numCPU"]}\"\n"\ @@ -826,8 +825,9 @@ class Template if !@vm_info["datastore"].nil? !@vm_info["datastore"].last.nil? && !@vm_info["datastore"].last._ref.nil? 
- str << "VCENTER_DS_REF = \"#{@vm_info["datastore"].last._ref}\"\n" - end + ds_ref = vm_template_ds_ref + str << "VCENTER_DS_REF = \"#{ds_ref}\"\n" + end vnc_port = nil keymap = VCenterDriver::VIHelper.get_default("VM/TEMPLATE/GRAPHICS/KEYMAP") @@ -879,6 +879,40 @@ class Template return str end + #Gets MOREF from Datastore used by the VM. It validates + #the selected DS is not only used to host swap. + def vm_template_ds_ref + begin + ds_ref = nil + if @vm_info["datastore"].length > 1 + swap_path = "" + @vm_info["config.extraConfig"].each do |element| + if element.key == "sched.swap.derivedName" + swap_path = element.value + end + end + @vm_info["datastore"].each do |datastore| + path = datastore.summary.url.sub(/ds:\/\/\/*/, "") + if !swap_path.include? path && !datastore._ref.nil? + ds_ref = datastore._ref + break + end + end + elsif @vm_info["datastore"].length == 1 + if !@vm_info["datastore"].first._ref.nil? + ds_ref = @vm_info["datastore"].first._ref + end + end + + return ds_ref + rescue StandardError => e + error = "Could not find DATASTORE for this VM. 
Reason: #{e.message}" + + return error + end + end + + def self.template_to_one(template, vc_uuid, ccr_ref, ccr_name, import_name, host_id) num_cpu, memory, annotation, guest_fullname = template.item.collect("config.hardware.numCPU","config.hardware.memoryMB","config.annotation","guest.guestFullName") @@ -1021,7 +1055,7 @@ class Template # Get the OpenNebula's template hash one_tmp[:one] = template_to_one(template, vcenter_uuid, template_ccr_ref, template_ccr_name, import_name, host_id) return one_tmp - rescue Exception => e + rescue StandardError => e return nil end end diff --git a/src/vmm_mad/remotes/lxd/lxdrc b/src/vmm_mad/remotes/lxd/lxdrc index eb0ea19d34..2c54353d97 100644 --- a/src/vmm_mad/remotes/lxd/lxdrc +++ b/src/vmm_mad/remotes/lxd/lxdrc @@ -24,7 +24,7 @@ # - :height: of the terminal # - :timeout: seconds to close the terminal if no input has been received :vnc: - :command: /bin/bash + :command: /bin/login :width: 800 :height: 600 :timeout: 300 diff --git a/src/vmm_mad/remotes/lxd/poll b/src/vmm_mad/remotes/lxd/poll index 1ef4408b5b..e51202a439 100755 --- a/src/vmm_mad/remotes/lxd/poll +++ b/src/vmm_mad/remotes/lxd/poll @@ -130,9 +130,16 @@ module LXD state end + def lxc_path(vm_name) + path = 'lxc/' + vm_name + path = "#{ENV['LXC_CGROUP_PREFIX']}#{path}" if ENV['LXC_CGROUP_PREFIX'] + end + def get_memory(vm_name) - stat = File.read('/sys/fs/cgroup/memory/lxc/' + vm_name + '/memory.usage_in_bytes').to_i + stat = File.read('/sys/fs/cgroup/memory/' + lxc_path(vm_name) + '/memory.usage_in_bytes').to_i stat / 1024 + rescue StandardError + 0 end def get_net_statistics(vmd) @@ -167,7 +174,7 @@ module LXD cpu_jiffies = get_cpu_jiffies - start_cpu_jiffies vm_names.each do |vm_name| - cpu_used[vm_name] = (get_process_jiffies(vm_name).to_f - + cpu_used[vm_name] = (get_process_jiffies(vm_name).to_f - cpu_used[vm_name]) / cpu_jiffies cpu_used[vm_name] = (cpu_used[vm_name] * multiplier).round(2) @@ -196,7 +203,8 @@ module LXD def get_process_jiffies(vm_name) begin 
jiffies = 0 - stat = File.read('/sys/fs/cgroup/cpu,cpuacct/lxc/' + vm_name + '/cpuacct.stat') + + stat = File.read('/sys/fs/cgroup/cpu,cpuacct/' + lxc_path(vm_name) + '/cpuacct.stat') stat.lines.each {|line| jiffies += line.split(' ')[1] } rescue StandardError return 0 @@ -222,20 +230,19 @@ module LXD arch = container.architecture capacity = container.expanded_config - cpu = "" - vcpu= "" - mem = "" + cpu = '' + vcpu = '' + mem = '' if capacity cpu = capacity['limits.cpu.allowance'] vcpu = capacity['limits.cpu'] - mem = capacity['limits.memory'] + mem = capacity['limits.memory'] end - cpu = "50%" if !cpu || cpu.empty? - vcpu = "1" if !vcpu || vcpu.empty? - mem = "512M" if !mem || mem.empty? - + cpu = '50%' if !cpu || cpu.empty? + vcpu = '1' if !vcpu || vcpu.empty? + mem = '512MB' if !mem || mem.empty? cpu = cpu.chomp('%').to_f / 100 mem = parse_memory(mem) diff --git a/src/vmm_mad/remotes/lxd/reconfigure b/src/vmm_mad/remotes/lxd/reconfigure index 9948d113ce..b3b83745a1 100755 --- a/src/vmm_mad/remotes/lxd/reconfigure +++ b/src/vmm_mad/remotes/lxd/reconfigure @@ -29,17 +29,17 @@ vm_name = ARGV[0] iso_path = ARGV[2] vm_id = ARGV[3] -xml = STDIN.read +exit 0 if iso_path == '' # ------------------------------------------------------------------------------ # Setup Context for the container # ------------------------------------------------------------------------------ -if iso_path != '' - client = LXDClient.new - container = Container.get(vm_name, xml, client) +xml = STDIN.read +client = LXDClient.new +container = Container.get(vm_name, xml, client) - raise 'Failed to attach context' unless container.attach_context +raise 'Failed to attach context' unless container.attach_context - rc, _out, err = container.exec('one-contextd local') - OpenNebula.log_error "Failed to run contextualization\n#{err}" unless rc.zero? 
-end +rc, _out, err = container.exec('service one-context-reconfigure restart') + +OpenNebula.log_error "Failed to run recontextualization service \n#{err}" unless rc.zero? diff --git a/src/vmm_mad/remotes/vcenter/attach_nic b/src/vmm_mad/remotes/vcenter/attach_nic index b34b28e203..c848861b02 100755 --- a/src/vmm_mad/remotes/vcenter/attach_nic +++ b/src/vmm_mad/remotes/vcenter/attach_nic @@ -47,7 +47,10 @@ begin vm = VCenterDriver::VirtualMachine.new_one(vi_client, vm_ref, one_item) - vm.attach_nic + # Extract nic from driver action + nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first + + vm.attach_nic(nic) rescue StandardError => e message = "Attach NIC for VM #{vm_ref} on vCenter cluster "\ "#{vc_cluster_name} failed due to \"#{e.message}\"\n" diff --git a/src/vmm_mad/remotes/vcenter/cancel b/src/vmm_mad/remotes/vcenter/cancel index 06e0134612..1595620cbb 100755 --- a/src/vmm_mad/remotes/vcenter/cancel +++ b/src/vmm_mad/remotes/vcenter/cancel @@ -36,6 +36,10 @@ vm_id = ARGV[-2] drv_action = OpenNebula::XMLElement.new drv_action.initialize_xml(Base64.decode64(STDIN.read), 'VMM_DRIVER_ACTION_DATA') +lcm_state = drv_action['/VMM_DRIVER_ACTION_DATA/VM/LCM_STATE'] +check_valid(lcm_state, 'lcm_state') +lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i] + host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, host) host_id = host['ID'] @@ -44,7 +48,15 @@ begin vm = VCenterDriver::VirtualMachine.new(vi_client, vm_ref, vm_id) - vm.one_item = drv_action.retrieve_xmlelements('VM').first + if (%{'SAVE_MIGRATE'}).include?(lcm_state_str) + dst_ds = drv_action['VM/HISTORY_RECORDS/HISTORY/DS_ID'] + src_ds = drv_action['DATASTORE/ID'] + + new_ds = dst_ds != src_ds + + error = !vm.disks_each(:managed?).empty? 
&& new_ds + raise 'cold datastore migration in poweroff hard with managed disk(s) is not supported' if error + end vm.poweroff_hard rescue StandardError => e diff --git a/src/vmm_mad/remotes/vcenter/detach_nic b/src/vmm_mad/remotes/vcenter/detach_nic index fd225cc958..7ddf3becd2 100755 --- a/src/vmm_mad/remotes/vcenter/detach_nic +++ b/src/vmm_mad/remotes/vcenter/detach_nic @@ -46,7 +46,11 @@ begin one_item = drv_action.retrieve_xmlelements('VM').first vm = VCenterDriver::VirtualMachine.new_one(vi_client, vm_ref, one_item) - vm.detach_nic + # Extract nic from driver action + one_nic = one_item.retrieve_xmlelements("TEMPLATE/NIC[ATTACH='YES']").first + mac = one_nic["MAC"] + + vm.detach_nic(mac) rescue StandardError => e message = "Detach NIC for VM #{vm_ref} on vCenter cluster " \ "#{vc_cluster_name} failed due to \"#{e.message}\"." diff --git a/src/vmm_mad/remotes/vcenter/migrate b/src/vmm_mad/remotes/vcenter/migrate index 175ed1e22c..2c01e95421 100755 --- a/src/vmm_mad/remotes/vcenter/migrate +++ b/src/vmm_mad/remotes/vcenter/migrate @@ -19,9 +19,9 @@ ONE_LOCATION = ENV['ONE_LOCATION'] unless defined?(ONE_LOCATION) if !ONE_LOCATION - RUBY_LIB_LOCATION = '/usr/lib/one/ruby' unless defined?(RUBY_LIB_LOCATION) + RUBY_LIB_LOCATION ||= '/usr/lib/one/ruby' else - RUBY_LIB_LOCATION = ONE_LOCATION + '/lib/ruby' unless defined?(RUBY_LIB_LOCATION) + RUBY_LIB_LOCATION ||= ONE_LOCATION + '/lib/ruby' end $LOAD_PATH << RUBY_LIB_LOCATION @@ -29,21 +29,40 @@ $LOAD_PATH << File.dirname(__FILE__) require 'vcenter_driver' +vm_ref = ARGV[0] vm_id = ARGV[-2] src_host = ARGV[-3] dst_host = ARGV[-4] +drv_action = OpenNebula::XMLElement.new +drv_action.initialize_xml(Base64.decode64(STDIN.read), 'VMM_DRIVER_ACTION_DATA') + +dst_ds = drv_action['VM/HISTORY_RECORDS/HISTORY/DS_ID'] +src_ds = drv_action['DATASTORE/ID'] + begin - # TODO: grab destination ds - VCenterDriver::VirtualMachine.migrate_routine(vm_id, - src_host, - dst_host) + vi_client = 
VCenterDriver::VIClient.new_from_host(src_host) + vm = VCenterDriver::VirtualMachine.new(vi_client, vm_ref, vm_id) + new_ds = dst_ds != src_ds + + error = !vm.disks_each(:managed?).empty? && new_ds + raise 'live datastore migration with managed disk is not supported' if error + + if new_ds + VCenterDriver::VirtualMachine + .migrate_routine(vm_id, src_host, dst_host, true, dst_ds) + else + VCenterDriver::VirtualMachine.migrate_routine(vm_id, src_host, dst_host) + end rescue StandardError => e - message = "Cannot migrate for VM #{vm_id}"\ - 'failed due to '\ + message = "Cannot migrate for VM #{vm_id}. "\ + 'Failed due to '\ "\"#{e.message}\"." OpenNebula.log_error(message) - STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information] + + if VCenterDriver::CONFIG[:debug_information] + STDERR.puts "#{message} #{e.backtrace}" + end exit(-1) end diff --git a/src/vmm_mad/remotes/vcenter/save b/src/vmm_mad/remotes/vcenter/save index c0ac3c6264..7bbb13712f 100755 --- a/src/vmm_mad/remotes/vcenter/save +++ b/src/vmm_mad/remotes/vcenter/save @@ -30,6 +30,7 @@ $LOAD_PATH << File.dirname(__FILE__) require 'vcenter_driver' vm_ref = ARGV[0] +vm_id = ARGV[3] vc_cluster_name = ARGV[-1] host = VCenterDriver::VIHelper.find_by_name(OpenNebula::HostPool, vc_cluster_name) @@ -45,7 +46,7 @@ check_valid(lcm_state, 'lcm_state') lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i] if !(%{'SAVE_SUSPEND', 'SAVE_STOP', 'SAVE_MIGRATE'}).include?(lcm_state_str) - STDERR.puts "Wrong lcm state #{lcm_state_str} }when saving VM" + STDERR.puts "Wrong lcm state #{lcm_state_str} when saving VM" exit(-1) end @@ -54,6 +55,17 @@ begin vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm_ref) + if (%{'SAVE_MIGRATE'}).include?(lcm_state_str) + vm.vm_id = vm_id + dst_ds = drv_action['VM/HISTORY_RECORDS/HISTORY/DS_ID'] + src_ds = drv_action['DATASTORE/ID'] + + new_ds = dst_ds != src_ds + + error = !vm.disks_each(:managed?).empty? 
&& new_ds + raise 'cold datastore migration with managed disk(s) is not supported' if error + end + vm.suspend rescue StandardError => e diff --git a/src/vmm_mad/remotes/vcenter/shutdown b/src/vmm_mad/remotes/vcenter/shutdown index 80f347f777..810795f9dc 100755 --- a/src/vmm_mad/remotes/vcenter/shutdown +++ b/src/vmm_mad/remotes/vcenter/shutdown @@ -45,7 +45,7 @@ lcm_state = drv_action['/VMM_DRIVER_ACTION_DATA/VM/LCM_STATE'] check_valid(lcm_state, 'lcm_state') lcm_state_str = OpenNebula::VirtualMachine::LCM_STATE[lcm_state.to_i] -if !(%{'SHUTDOWN', 'SHUTDOWN_POWEROFF', 'SHUTDOWN_UNDEPLOY'}).include?(lcm_state_str) +if !(%{'SAVE_MIGRATE', 'SHUTDOWN', 'SHUTDOWN_POWEROFF', 'SHUTDOWN_UNDEPLOY'}).include?(lcm_state_str) STDERR.puts "Wrong lcm state #{lcm_state_str} when shutting down VM" exit(-1) end @@ -62,6 +62,19 @@ begin vm = VCenterDriver::VirtualMachine.new_without_id(vi_client, vm_ref) + + if (%{'SAVE_MIGRATE'}).include?(lcm_state_str) + vm.vm_id = vm_id + dst_ds = drv_action['VM/HISTORY_RECORDS/HISTORY/DS_ID'] + src_ds = drv_action['DATASTORE/ID'] + + new_ds = dst_ds != src_ds + + error = !vm.disks_each(:managed?).empty? && new_ds + raise 'cold datastore migration in poweroff with managed disk(s) is not supported' if error + end + + vm.shutdown # Undeploy, Poweroff or Terminate rescue StandardError => e message = "Shutdown of VM #{vm_ref} on vCenter cluster "\ diff --git a/src/vmm_mad/remotes/vcenter/vcenterrc b/src/vmm_mad/remotes/vcenter/vcenterrc index 1b5469bd81..ab04548245 100644 --- a/src/vmm_mad/remotes/vcenter/vcenterrc +++ b/src/vmm_mad/remotes/vcenter/vcenterrc @@ -14,10 +14,10 @@ # limitations under the License. # #--------------------------------------------------------------------------- # -# No : VCENTER_IMPORTED attribute will be set on imported images +# no : VCENTER_IMPORTED attribute will be set on imported images # this attribute prevents the image to be deleted. -# Yes : You can delete the images using OpenNebula. 
-:delete_images: No +# yes : You can delete the images using OpenNebula. +:delete_images: no # Default timeout to complete deploy :vm_poweron_wait_default: 300 diff --git a/src/vnm/AddressRangeIPAM.cc b/src/vnm/AddressRangeIPAM.cc index 34a404eee9..a5b95c7c17 100644 --- a/src/vnm/AddressRangeIPAM.cc +++ b/src/vnm/AddressRangeIPAM.cc @@ -26,13 +26,13 @@ int AddressRangeIPAM::from_vattr(VectorAttribute * attr, std::string& error_msg) { + std::ostringstream oss; + IPAMManager * ipamm = Nebula::instance().get_ipamm(); - std::string * ar_xml = attr->to_xml(); + attr->to_xml(oss); - IPAMRequest ir(*ar_xml); - - free(ar_xml); + IPAMRequest ir(oss.str()); ipamm->trigger(IPMAction::REGISTER_ADDRESS_RANGE, &ir); diff --git a/src/vnm_mad/remotes/lib/nic.rb b/src/vnm_mad/remotes/lib/nic.rb index e15e8c03a7..edc9e84f64 100644 --- a/src/vnm_mad/remotes/lib/nic.rb +++ b/src/vnm_mad/remotes/lib/nic.rb @@ -110,7 +110,12 @@ module VNMMAD end if deploy_id && vm.vm_info[:dumpxml].nil? - vm.vm_info[:dumpxml] = YAML.safe_load(`lxc config show #{deploy_id} 2>/dev/null`) + cmd = "lxc config show #{deploy_id} 2>/dev/null" + + config = YAML.safe_load(`#{cmd}`) + config = YAML.safe_load(`sudo #{cmd}`) if config.nil? + + vm.vm_info[:dumpxml] = config vm.vm_info.each_key do |k| vm.vm_info[k] = nil if vm.vm_info[k].to_s.strip.empty? diff --git a/src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb b/src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb index 33ed662ff3..1bd32f253e 100644 --- a/src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb +++ b/src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb @@ -391,7 +391,7 @@ private def create_bridge return if @bridges.keys.include? 
@nic[:bridge] - OpenNebula.exec_and_log("#{command(:ovs_vsctl)} add-br #{@nic[:bridge]}") + OpenNebula.exec_and_log("#{command(:ovs_vsctl)} --may-exist add-br #{@nic[:bridge]}") set_bridge_options diff --git a/src/vnm_mad/remotes/vxlan/vxlan.rb b/src/vnm_mad/remotes/vxlan/vxlan.rb index a74f026d6c..1c67d467e4 100644 --- a/src/vnm_mad/remotes/vxlan/vxlan.rb +++ b/src/vnm_mad/remotes/vxlan/vxlan.rb @@ -65,9 +65,11 @@ module VXLAN ip_link_conf << "#{option} #{value} " end + # `ip link add ...` returns 2 when vxlan device already exists + # allow it to prevent race conditions OpenNebula.exec_and_log("#{command(:ip)} link add #{@nic[@attr_vlan_dev]}"\ " #{mtu} type vxlan id #{@nic[@attr_vlan_id]} #{group} #{ttl}"\ - " #{tep} #{ip_link_conf}") + " #{tep} #{ip_link_conf}", nil, 2) OpenNebula.exec_and_log("#{command(:ip)} link set #{@nic[@attr_vlan_dev]} up") end