1
0
mirror of https://github.com/OpenNebula/one.git synced 2025-01-03 01:17:41 +03:00

Merge branch 'master' into provision_refactor

This commit is contained in:
Tino Vazquez 2019-02-08 17:42:06 +01:00
commit d998145ea1
No known key found for this signature in database
GPG Key ID: 2FE9C32E94AEABBE
199 changed files with 5614 additions and 1674 deletions

2
.gitignore vendored
View File

@ -28,7 +28,7 @@ src/vmm_mad/remotes/lxd/tests/
src/oca/python/pyone/bindings
src/oca/python/build/
src/oca/python/dist/
src/oca/python/opennebula.egg-info/
src/oca/python/pyone.egg-info/
src/oca/python/doc/
src/docker_machine/pkg

27
.travis.yml Normal file
View File

@ -0,0 +1,27 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
dist: xenial
language: generic
before_install:
- sudo apt-get install -y libsystemd-dev bash-completion bison debhelper default-jdk flex javahelper libmysql++-dev libsqlite3-dev libssl-dev libws-commons-util-java libxml2-dev libxmlrpc3-client-java libxmlrpc3-common-java libxslt1-dev libcurl4-openssl-dev ruby scons libxmlrpc-c++8-dev npm libvncserver-dev
- gem install rubocop
- sudo npm install -g bower
- sudo npm install -g grunt
- sudo npm install -g grunt-cli
- (cd src/sunstone/public && npm install && bower install)
script:
- set -o errexit; source .travis/smoke_tests.sh

10
.travis/README.md Normal file
View File

@ -0,0 +1,10 @@
## Travis smoke tests
The `.travis/tests` directory contains scripts for each smoke test.
The smoke_test.sh script is called which iterates on each script, and it exits and logs on any failure. To add more tests, simply create a new file on `.travis/tests`.
Each test should:
- have a number as prefix to define the order. Renaming is allowed, the rule is to execute the less costly tests (in time) first
- return 0 on success, other number on error

50
.travis/smoke_tests.sh Executable file
View File

@ -0,0 +1,50 @@
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# Smoke tests for OpenNebula, to be triggered by travis or manually
# It executes all scripts in 'tests' folder and expects 0 exit code
#-------------------------------------------------------------------------------
# default parameters values
LOG_FILE='smoke_tests.results'
check_test() {
local TEST=$1
echo "Executing test $TEST" >> ${LOG_FILE}
eval $TEST >> ${LOG_FILE} 2>&1
RC=$?
echo "RC for $TEST is $RC"
return $RC
}
for smoke_test in .travis/tests/*.sh; do
check_test "$smoke_test" || break
done
if [ $RC == 0 ]; then
echo "All tests OK!"
else
echo "Test failed: "$smoke_test
echo "Log follows:"
cat $LOG_FILE
fi
exit $RC

19
.travis/tests/01-rubocop.sh Executable file
View File

@ -0,0 +1,19 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# lint ruby code
ln -s share/linters/.rubocop.yml . && rubocop

19
.travis/tests/02-scons.sh Executable file
View File

@ -0,0 +1,19 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# check that OpenNebula compiles
scons sunstone=yes mysql=yes systemd=yes new_xmlrpc=yes

33
.travis/tests/03-oned.sh Executable file
View File

@ -0,0 +1,33 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# install oned system wide
sudo ./install.sh -u travis
# Set credentials
mkdir $HOME/.one
echo "oneadmin:opennebula" > $HOME/.one/one_auth
# Install gems
/usr/share/one/install_gems --yes
# start oned
one start
# check it's up
timeout 60 sh -c 'until nc -z $0 $1; do sleep 1; done' localhost 2633

30
.travis/tests/04-sunstone.sh Executable file
View File

@ -0,0 +1,30 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
# start sunstone
sunstone-server start
# check it's up
RC=`timeout 60 sh -c 'until nc -z $0 $1; do sleep 1; done' localhost 9869`
echo "Sunstone log"
cat /var/log/one/sunstone.log
echo
echo "Sunstone error log"
cat /var/log/one/sunstone.error
echo "---------"
exit $RC

View File

@ -83,7 +83,11 @@ public:
* by the calling function.
* @return a string (allocated in the heap) holding the attribute value.
*/
virtual string * to_xml() const = 0;
virtual void to_xml(std::ostringstream& s) const = 0;
virtual void to_json(std::ostringstream& s) const = 0;
virtual void to_token(std::ostringstream& s) const = 0;
/**
* Builds a new attribute from a string.
@ -158,17 +162,31 @@ public:
*
* <attribute_name>attribute_value</attribute_name>
*
* The string MUST be freed by the calling function.
* @return a string (allocated in the heap) holding the attribute value.
* @paran s the stream to write the attribute.
*/
string * to_xml() const
void to_xml(std::ostringstream& s) const
{
string * xml = new string;
s << "<" << attribute_name << ">" << one_util::escape_xml(attribute_value)
<< "</"<< attribute_name << ">";
*xml = "<" + name() + ">" + one_util::escape_xml(attribute_value) +
"</"+ name() + ">";
}
return xml;
void to_json(std::ostringstream& s) const
{
one_util::escape_json(attribute_value, s);
}
void to_token(std::ostringstream& s) const
{
if (attribute_name.empty() || attribute_value.empty())
{
return;
}
one_util::escape_token(attribute_name, s);
s << "=";
one_util::escape_token(attribute_value, s);
s << std::endl;
}
/**
@ -350,12 +368,11 @@ public:
* The string MUST be freed by the calling function.
* @return a string (allocated in the heap) holding the attribute value.
*/
string * to_xml() const;
void to_xml(std::ostringstream& s) const;
/**
* Same as above but the attribute is written in an string stream;
*/
void to_xml(ostringstream &oss) const;
void to_json(std::ostringstream& s) const;
void to_token(std::ostringstream& s) const;
/**
* Builds a new attribute from a string of the form:

View File

@ -92,9 +92,19 @@ protected:
return va->marshall(_sep);
};
string * to_xml() const
void to_xml(std::ostringstream& s) const
{
return va->to_xml();
return va->to_xml(s);
};
void to_json(std::ostringstream& s) const
{
return va->to_json(s);
};
void to_token(std::ostringstream& s) const
{
return va->to_token(s);
};
void unmarshall(const std::string& sattr, const char * _sep = 0)

View File

@ -258,6 +258,10 @@ private:
*/
string& to_xml(string& xml, bool database) const;
string& to_json(string& json) const;
string& to_token(string& text) const;
/**
* Rebuilds the object from an xml node
* @param node The xml node pointer

View File

@ -310,7 +310,9 @@ private:
// -------------------------------------------------------------------------
// Internal Actions, triggered by OpenNebula components & drivers
// -------------------------------------------------------------------------
void start_prolog_migrate(VirtualMachine* vm, int vid);
void start_prolog_migrate(VirtualMachine* vm);
void revert_migrate_after_failure(VirtualMachine* vm);
void save_success_action(int vid);
void save_failure_action(int vid);

View File

@ -251,6 +251,11 @@ public:
int next_federated(int index);
bool fts_available()
{
return db->fts_available();
}
protected:
int exec(std::ostringstream& cmd, Callbackable* obj, bool quiet)
{
@ -416,6 +421,11 @@ public:
return _logdb->limit_support();
}
bool fts_available()
{
return _logdb->fts_available();
}
protected:
int exec(std::ostringstream& cmd, Callbackable* obj, bool quiet)
{

View File

@ -84,6 +84,11 @@ public:
*/
bool limit_support();
/**
* Return true if the backend allows FTS index
*/
bool fts_available();
protected:
/**
* Wraps the mysql_query function call
@ -171,6 +176,8 @@ public:
bool limit_support(){return true;};
bool fts_available(){return false;};
protected:
int exec(ostringstream& cmd, Callbackable* obj, bool quiet){return -1;};
};

View File

@ -362,7 +362,7 @@ public:
*/
static string code_version()
{
return "5.7.85"; // bump version
return "5.7.90"; // bump version
}
/**
@ -371,7 +371,7 @@ public:
*/
static string shared_db_version()
{
return "5.7.80";
return "5.6.0";
}
/**
@ -578,6 +578,15 @@ public:
return nebula_configuration->to_xml(xml);
};
/**
* Gets the database backend type
* @return database backend type
*/
string get_db_backend() const
{
return db_backend_type;
}
// -----------------------------------------------------------------------
// Default Quotas
// -----------------------------------------------------------------------
@ -695,7 +704,7 @@ private:
"/DEFAULT_GROUP_QUOTAS/NETWORK_QUOTA",
"/DEFAULT_GROUP_QUOTAS/IMAGE_QUOTA",
"/DEFAULT_GROUP_QUOTAS/VM_QUOTA"),
system_db(0), logdb(0), fed_logdb(0),
system_db(0), db_backend_type("sqlite"), logdb(0), fed_logdb(0),
vmpool(0), hpool(0), vnpool(0), upool(0), ipool(0), gpool(0), tpool(0),
dspool(0), clpool(0), docpool(0), zonepool(0), secgrouppool(0),
vdcpool(0), vrouterpool(0), marketpool(0), apppool(0), vmgrouppool(0),
@ -819,6 +828,7 @@ private:
// ---------------------------------------------------------------
SystemDB * system_db;
string db_backend_type;
// ---------------------------------------------------------------
// Nebula Pools

View File

@ -208,6 +208,11 @@ namespace one_util
{
return escape(v, "'", "'");
}
void escape_json(const std::string& str, std::ostringstream& s);
void escape_token(const std::string& str, std::ostringstream& s);
/**
* Checks if a strings matches a regular expression
*
@ -236,7 +241,8 @@ namespace one_util
const std::string& replacement);
template <class T>
std::set<T> set_intersection(const std::set<T> &first, const std::set<T> &second)
std::set<T> set_intersection(const std::set<T> &first, const std::set<T>
&second)
{
std::set<T> output;
@ -247,48 +253,48 @@ namespace one_util
return output;
}
/**
/**
* Compress the input string unsing zlib
* @param in input string
* @param bool64 true to base64 encode output
* @return pointer to the compressed sting (must be freed) or 0 in case
* of error
*/
std::string * zlib_compress(const std::string& in, bool base64);
std::string * zlib_compress(const std::string& in, bool base64);
/**
/**
* Decompress the input string unsing zlib
* @param in input string
* @param base64 true if the input is base64 encoded
* @return pointer to the decompressed sting (must be freed) or 0 in case
* of error
*/
std::string * zlib_decompress(const std::string& in, bool base64);
std::string * zlib_decompress(const std::string& in, bool base64);
extern "C" void sslmutex_lock_callback(int mode, int type, char *file,
int line);
extern "C" void sslmutex_lock_callback(int mode, int type, char *file,
int line);
extern "C" unsigned long sslmutex_id_callback();
extern "C" unsigned long sslmutex_id_callback();
class SSLMutex
{
public:
static void initialize();
class SSLMutex
{
public:
static void initialize();
static void finalize();
static void finalize();
private:
friend void sslmutex_lock_callback(int mode, int type, char *file,
int line);
private:
friend void sslmutex_lock_callback(int mode, int type, char *file,
int line);
SSLMutex();
SSLMutex();
~SSLMutex();
~SSLMutex();
static SSLMutex * ssl_mutex;
static SSLMutex * ssl_mutex;
static std::vector<pthread_mutex_t *> vmutex;
};
static std::vector<pthread_mutex_t *> vmutex;
};
};
#endif /* _NEBULA_UTIL_H_ */

View File

@ -251,6 +251,35 @@ public:
static void oid_filter(int start_id,
int end_id,
string& filter);
/**
* This function returns a legal SQL string that can be used in an SQL
* statement. The string is encoded to an escaped SQL string, taking into
* account the current character set of the connection.
* @param str the string to be escaped
* @return a valid SQL string or NULL in case of failure
*/
char * escape_str(const string& str)
{
return db->escape_str(str);
}
/**
* Frees a previously scaped string
* @param str pointer to the str
*/
void free_str(char * str)
{
db->free_str(str);
}
/**
* Return true if FTS is available.
*/
bool is_fts_available()
{
return db->fts_available();
}
protected:
/**

View File

@ -103,7 +103,7 @@ class VirtualNetworkTemplateChmod : public RequestManagerChmod
public:
VirtualNetworkTemplateChmod():
RequestManagerChmod("one.vntemplate.chmod", "Changes permission bits of a "
"virtual network template", "A:siiiiiiiiiib")
"virtual network template")
{
Nebula& nd = Nebula::instance();
pool = nd.get_vntpool();

View File

@ -132,7 +132,7 @@ class VNTemplateClone : public RequestManagerClone
public:
VNTemplateClone():
RequestManagerClone("one.vntemplate.clone",
"Clone a virtual network template", "A:sisb")
"Clone a virtual network template", "A:sis")
{
Nebula& nd = Nebula::instance();
pool = nd.get_vntpool();
@ -144,9 +144,9 @@ public:
~VNTemplateClone(){};
ErrorCode request_execute(int source_id, const string &name, int &new_id,
bool recursive, const string& s_uattrs, RequestAttributes& att)
const string& s_uattrs, RequestAttributes& att)
{
return clone(source_id, name, new_id, recursive, s_uattrs, att);
return clone(source_id, name, new_id, false, s_uattrs, att);
};
protected:

View File

@ -121,7 +121,7 @@ class VirtualNetworkTemplateDelete : public RequestManagerDelete
public:
VirtualNetworkTemplateDelete():
RequestManagerDelete("one.vntemplate.delete",
"A:sib",
"A:si",
"Deletes a virtual network template")
{
Nebula& nd = Nebula::instance();
@ -133,7 +133,7 @@ public:
ErrorCode request_execute(int oid, bool recursive, RequestAttributes& att)
{
return delete_object(oid, recursive, att, auth_op);
return delete_object(oid, false, att, auth_op);
}
};

View File

@ -113,7 +113,7 @@ public:
VirtualMachinePoolInfo():
RequestManagerPoolInfoFilter("one.vmpool.info",
"Returns the virtual machine instances pool",
"A:siiii")
"A:siiiis")
{
Nebula& nd = Nebula::instance();
pool = nd.get_vmpool();

View File

@ -96,6 +96,11 @@ public:
*/
virtual bool limit_support() = 0;
/**
* Return true if the backend allows FTS index
*/
virtual bool fts_available() = 0;
protected:
/**
* Performs a DB transaction

View File

@ -79,6 +79,10 @@ public:
*/
bool limit_support();
bool fts_available()
{
return false;
}
protected:
/**
* Wraps the sqlite3_exec function call, and locks the DB mutex.

View File

@ -170,6 +170,10 @@ public:
*/
string& to_xml(string& xml) const;
string& to_json(string& xml) const;
string& to_token(string& xml) const;
/**
* Writes the template in a plain text string
* @param str string that hold the template representation

View File

@ -984,6 +984,14 @@ public:
// ------------------------------------------------------------------------
// Timers & Requirements
// ------------------------------------------------------------------------
/**
* @return time when the VM was created (in epoch)
*/
time_t get_stime() const
{
return stime;
};
/**
* Gets time from last information polling.
* @return time of last poll (epoch) or 0 if never polled
@ -1274,8 +1282,7 @@ public:
* @param uid for template owner
* @param ar the AuthRequest object
* @param tmpl the virtual machine template
* @param
* lock for check if the resource is lock or not
* @param check_lock for check if the resource is lock or not
*/
static void set_auth_request(int uid, AuthRequest& ar,
VirtualMachineTemplate *tmpl, bool check_lock);
@ -1884,6 +1891,10 @@ private:
*/
string& to_xml_extended(string& xml, int n_history) const;
string& to_json(string& json) const;
string& to_token(string& text) const;
// -------------------------------------------------------------------------
// Attribute Parser
// -------------------------------------------------------------------------

View File

@ -188,6 +188,21 @@ public:
return vmd->is_keep_snapshots();
}
/**
* Returns a pointer to a Virtual Machine Manager driver. The driver is
* searched by its name.
* @param name the name of the driver
* @return the VM driver owned by uid with attribute name equal to value
* or 0 in not found
*/
const VirtualMachineManagerDriver * get(
const string& name)
{
string _name("NAME");
return static_cast<const VirtualMachineManagerDriver *>
(MadManager::get(0,_name,name));
};
private:
/**
* Thread id for the Virtual Machine Manager
@ -256,21 +271,6 @@ private:
(MadManager::get(0,name,value));
};
/**
* Returns a pointer to a Virtual Machine Manager driver. The driver is
* searched by its name.
* @param name the name of the driver
* @return the VM driver owned by uid with attribute name equal to value
* or 0 in not found
*/
const VirtualMachineManagerDriver * get(
const string& name)
{
string _name("NAME");
return static_cast<const VirtualMachineManagerDriver *>
(MadManager::get(0,_name,name));
};
// -------------------------------------------------------------------------
// Action Listener interface
// -------------------------------------------------------------------------

View File

@ -103,6 +103,14 @@ public:
return keep_snapshots;
}
/**
* @return true if datastore live migration
*/
bool is_ds_live_migration() const
{
return ds_live_migration;
}
protected:
/**
* Gets a configuration attr from driver configuration file (single
@ -159,6 +167,11 @@ private:
*/
bool keep_snapshots;
/**
* Set to true if live migration between datastores is allowed.
*/
bool ds_live_migration;
/**
* Pointer to the Virtual Machine Pool, to access VMs
*/

View File

@ -219,6 +219,7 @@ fi
SHARE_DIRS="$SHARE_LOCATION/examples \
$SHARE_LOCATION/websockify \
$SHARE_LOCATION/websockify/websockify \
$SHARE_LOCATION/esx-fw-vnc \
$SHARE_LOCATION/oneprovision"
@ -253,6 +254,7 @@ LIB_DIRS="$LIB_LOCATION/ruby \
VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/etc \
$VAR_LOCATION/remotes/etc/tm/fs_lvm \
$VAR_LOCATION/remotes/etc/datastore/ceph \
$VAR_LOCATION/remotes/etc/im/kvm-probes.d \
$VAR_LOCATION/remotes/etc/im/lxd-probes.d \
@ -445,6 +447,7 @@ INSTALL_FILES=(
TM_FILES:$VAR_LOCATION/remotes/tm
TM_SHARED_FILES:$VAR_LOCATION/remotes/tm/shared
TM_FS_LVM_FILES:$VAR_LOCATION/remotes/tm/fs_lvm
TM_FS_LVM_ETC_FILES:$VAR_LOCATION/remotes/etc/tm/fs_lvm/fs_lvm.conf
TM_QCOW2_FILES:$VAR_LOCATION/remotes/tm/qcow2
TM_SSH_FILES:$VAR_LOCATION/remotes/tm/ssh
TM_CEPH_FILES:$VAR_LOCATION/remotes/tm/ceph
@ -477,7 +480,8 @@ INSTALL_FILES=(
NETWORK_OVSWITCH_VXLAN_FILES:$VAR_LOCATION/remotes/vnm/ovswitch_vxlan
NETWORK_VCENTER_FILES:$VAR_LOCATION/remotes/vnm/vcenter
EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples
WEBSOCKIFY_SHARE_FILES:$SHARE_LOCATION/websockify
WEBSOCKIFY_SHARE_RUN_FILES:$SHARE_LOCATION/websockify
WEBSOCKIFY_SHARE_MODULE_FILES:$SHARE_LOCATION/websockify/websockify
ESX_FW_VNC_SHARE_FILES:$SHARE_LOCATION/esx-fw-vnc
INSTALL_GEMS_SHARE_FILES:$SHARE_LOCATION
ONETOKEN_SHARE_FILE:$SHARE_LOCATION
@ -999,6 +1003,7 @@ IM_PROBES_LXD_PROBES_FILES="src/im_mad/remotes/lxd-probes.d/lxd.rb \
src/im_mad/remotes/lxd-probes.d/pci.rb \
src/im_mad/remotes/lxd-probes.d/monitor_ds.sh \
src/im_mad/remotes/lxd-probes.d/version.sh \
src/im_mad/remotes/lxd-probes.d/profiles.sh \
src/im_mad/remotes/lxd-probes.d/collectd-client-shepherd.sh"
IM_PROBES_LXD_FILES="src/im_mad/remotes/lxd.d/collectd-client_control.sh \
@ -1171,6 +1176,9 @@ TM_FS_LVM_FILES="src/tm_mad/fs_lvm/activate \
src/tm_mad/fs_lvm/delete \
src/tm_mad/fs_lvm/resize"
TM_FS_LVM_ETC_FILES="src/tm_mad/fs_lvm/fs_lvm.conf"
TM_QCOW2_FILES="src/tm_mad/qcow2/clone \
src/tm_mad/qcow2/delete \
src/tm_mad/qcow2/ln \
@ -1457,8 +1465,7 @@ ONEDB_SHARED_MIGRATOR_FILES="src/onedb/shared/2.0_to_2.9.80.rb \
src/onedb/shared/5.3.80_to_5.4.0.rb \
src/onedb/shared/5.4.0_to_5.4.1.rb \
src/onedb/shared/5.4.1_to_5.5.80.rb \
src/onedb/shared/5.5.80_to_5.6.0.rb \
src/onedb/shared/5.6.0_to_5.7.80.rb"
src/onedb/shared/5.5.80_to_5.6.0.rb"
ONEDB_LOCAL_MIGRATOR_FILES="src/onedb/local/4.5.80_to_4.7.80.rb \
src/onedb/local/4.7.80_to_4.9.80.rb \
@ -1531,9 +1538,12 @@ EXAMPLE_SHARE_FILES="share/examples/vm.template \
# Files required to interact with the websockify server
#-------------------------------------------------------------------------------
WEBSOCKIFY_SHARE_FILES="share/websockify/websocketproxy.py \
share/websockify/websocket.py \
share/websockify/websockify"
WEBSOCKIFY_SHARE_RUN_FILES="share/websockify/run"
WEBSOCKIFY_SHARE_MODULE_FILES="share/websockify/websockify/__init__.py \
share/websockify/websockify/auth_plugins.py \
share/websockify/websockify/token_plugins.py \
share/websockify/websockify/websocket.py \
share/websockify/websockify/websocketproxy.py"
#-------------------------------------------------------------------------------
# Installation packages for ESX hosts to enable VNC ports

View File

@ -27,15 +27,10 @@
</xs:element>
<xs:element name="LAST_POLL" type="xs:integer"/>
<!-- STATE values,
see http://docs.opennebula.org/stable/user/references/vm_states.html
<!-- STATE and LCM_STATE values,
see http://docs.opennebula.org/5.6/operation/references/vm_states.html#list-of-states
-->
<xs:element name="STATE" type="xs:integer"/>
<!-- LCM_STATE values, this sub-state is relevant only when STATE is
ACTIVE (4)
see http://docs.opennebula.org/stable/user/references/vm_states.html
-->
<xs:element name="LCM_STATE" type="xs:integer"/>
<xs:element name="PREV_STATE" type="xs:integer"/>
<xs:element name="PREV_LCM_STATE" type="xs:integer"/>

View File

@ -566,7 +566,7 @@ VM_MAD = [
ARGUMENTS = "-t 15 -r 0 kvm",
DEFAULT = "vmm_exec/vmm_exec_kvm.conf",
TYPE = "kvm",
KEEP_SNAPSHOTS = "no",
KEEP_SNAPSHOTS = "yes",
IMPORTED_VMS_ACTIONS = "terminate, terminate-hard, hold, release, suspend,
resume, delete, reboot, reboot-hard, resched, unresched, disk-attach,
disk-detach, nic-attach, nic-detach, snapshot-create, snapshot-delete"
@ -611,13 +611,14 @@ VM_MAD = [
# -w Timeout in seconds to execute external commands (default unlimited)
#-------------------------------------------------------------------------------
VM_MAD = [
NAME = "vcenter",
SUNSTONE_NAME = "VMWare vCenter",
EXECUTABLE = "one_vmm_sh",
ARGUMENTS = "-p -t 15 -r 0 vcenter -s sh",
DEFAULT = "vmm_exec/vmm_exec_vcenter.conf",
TYPE = "xml",
KEEP_SNAPSHOTS = "yes",
NAME = "vcenter",
SUNSTONE_NAME = "VMWare vCenter",
EXECUTABLE = "one_vmm_sh",
ARGUMENTS = "-p -t 15 -r 0 vcenter -s sh",
DEFAULT = "vmm_exec/vmm_exec_vcenter.conf",
TYPE = "xml",
KEEP_SNAPSHOTS = "yes",
DS_LIVE_MIGRATION = "yes",
IMPORTED_VMS_ACTIONS = "terminate, terminate-hard, hold, release, suspend,
resume, delete, reboot, reboot-hard, resched, unresched, poweroff,
poweroff-hard, disk-attach, disk-detach, nic-attach, nic-detach,
@ -1307,7 +1308,7 @@ TM_MAD_CONF = [
TM_MAD_CONF = [
NAME = "ceph", LN_TARGET = "NONE", CLONE_TARGET = "SELF", SHARED = "YES",
DS_MIGRATE = "NO", DRIVER = "raw", ALLOW_ORPHANS="mixed",
TM_MAD_SYSTEM = "ssh", LN_TARGET_SSH = "SYSTEM", CLONE_TARGET_SSH = "SYSTEM",
TM_MAD_SYSTEM = "ssh,shared", LN_TARGET_SSH = "SYSTEM", CLONE_TARGET_SSH = "SYSTEM",
DISK_TYPE_SSH = "FILE", TM_MAD_SYSTEM = "shared", LN_TARGET_SHARED = "NONE",
CLONE_TARGET_SHARED = "SELF", DISK_TYPE_SHARED = "RBD"
]

View File

@ -1,23 +1,23 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.10)
activesupport (4.2.11)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
addressable (2.5.2)
addressable (2.6.0)
public_suffix (>= 2.0.2, < 4.0)
amazon-ec2 (0.9.17)
xml-simple (>= 1.0.12)
aws-sdk (2.11.39)
aws-sdk-resources (= 2.11.39)
aws-sdk-core (2.11.39)
aws-sdk (2.11.212)
aws-sdk-resources (= 2.11.212)
aws-sdk-core (2.11.212)
aws-sigv4 (~> 1.0)
jmespath (~> 1.0)
aws-sdk-resources (2.11.39)
aws-sdk-core (= 2.11.39)
aws-sigv4 (1.0.2)
aws-sdk-resources (2.11.212)
aws-sdk-core (= 2.11.212)
aws-sigv4 (1.0.3)
azure (0.7.10)
addressable (~> 2.3)
azure-core (~> 0.1)
@ -27,46 +27,46 @@ GEM
nokogiri (~> 1.6)
systemu (~> 2.6)
thor (~> 0.19)
azure-core (0.1.14)
azure-core (0.1.15)
faraday (~> 0.9)
faraday_middleware (~> 0.10)
nokogiri (~> 1.6)
builder (3.2.3)
concurrent-ruby (1.1.3)
concurrent-ruby (1.1.4)
configparser (0.1.7)
curb (0.9.4)
daemons (1.2.6)
eventmachine (1.2.5)
faraday (0.15.0)
curb (0.9.8)
daemons (1.3.1)
eventmachine (1.2.7)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
faraday_middleware (0.12.2)
faraday_middleware (0.13.0)
faraday (>= 0.7.4, < 1.0)
hashie (3.5.7)
hashie (3.6.0)
highline (1.7.10)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
inflection (1.0.0)
jmespath (1.4.0)
memcache-client (1.8.5)
mime-types (3.1)
mime-types (3.2.2)
mime-types-data (~> 3.2015)
mime-types-data (3.2016.0521)
mime-types-data (3.2018.0812)
mini_portile2 (2.1.0)
minitest (5.11.3)
multipart-post (2.0.0)
mysql2 (0.5.1)
mysql2 (0.5.2)
net-ldap (0.16.1)
nokogiri (1.6.8.1)
mini_portile2 (~> 2.1.0)
ox (2.9.2)
ox (2.10.0)
parse-cron (0.1.4)
polyglot (0.3.5)
public_suffix (2.0.5)
rack (1.6.10)
rack (1.6.11)
rack-protection (1.5.5)
rack
scrub_rb (1.0.1)
sequel (5.7.1)
sequel (5.17.0)
sinatra (1.4.8)
rack (~> 1.5)
rack-protection (~> 1.4)
@ -77,12 +77,12 @@ GEM
daemons (~> 1.0, >= 1.0.9)
eventmachine (~> 1.0, >= 1.0.4)
rack (>= 1, < 3)
thor (0.20.0)
thor (0.20.3)
thread_safe (0.3.6)
tilt (2.0.8)
tilt (2.0.9)
treetop (1.6.10)
polyglot (~> 0.3)
trollop (2.1.2)
trollop (2.9.9)
tzinfo (1.2.5)
thread_safe (~> 0.1)
uuidtools (2.1.5)
@ -112,7 +112,7 @@ DEPENDENCIES
memcache-client
mysql2
net-ldap
nokogiri
nokogiri (< 1.7)
ox
parse-cron
public_suffix (< 3.0.0)
@ -128,4 +128,4 @@ DEPENDENCIES
zendesk_api
BUNDLED WITH
1.17.1
1.11.2

View File

@ -1,23 +1,23 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.10)
activesupport (4.2.11)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
addressable (2.5.2)
addressable (2.6.0)
public_suffix (>= 2.0.2, < 4.0)
amazon-ec2 (0.9.17)
xml-simple (>= 1.0.12)
aws-sdk (2.11.39)
aws-sdk-resources (= 2.11.39)
aws-sdk-core (2.11.39)
aws-sdk (2.11.212)
aws-sdk-resources (= 2.11.212)
aws-sdk-core (2.11.212)
aws-sigv4 (~> 1.0)
jmespath (~> 1.0)
aws-sdk-resources (2.11.39)
aws-sdk-core (= 2.11.39)
aws-sigv4 (1.0.2)
aws-sdk-resources (2.11.212)
aws-sdk-core (= 2.11.212)
aws-sigv4 (1.0.3)
azure (0.7.10)
addressable (~> 2.3)
azure-core (~> 0.1)
@ -27,61 +27,63 @@ GEM
nokogiri (~> 1.6)
systemu (~> 2.6)
thor (~> 0.19)
azure-core (0.1.14)
azure-core (0.1.15)
faraday (~> 0.9)
faraday_middleware (~> 0.10)
nokogiri (~> 1.6)
builder (3.2.3)
concurrent-ruby (1.1.3)
concurrent-ruby (1.1.4)
configparser (0.1.7)
curb (0.9.4)
daemons (1.2.6)
eventmachine (1.2.5)
faraday (0.15.0)
curb (0.9.8)
daemons (1.3.1)
eventmachine (1.2.7)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
faraday_middleware (0.12.2)
faraday_middleware (0.13.0)
faraday (>= 0.7.4, < 1.0)
hashie (3.5.7)
hashie (3.6.0)
highline (1.7.10)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
inflection (1.0.0)
jmespath (1.4.0)
memcache-client (1.8.5)
mime-types (3.1)
mime-types (3.2.2)
mime-types-data (~> 3.2015)
mime-types-data (3.2016.0521)
mini_portile2 (2.3.0)
mime-types-data (3.2018.0812)
mini_portile2 (2.4.0)
minitest (5.11.3)
multipart-post (2.0.0)
mysql2 (0.5.1)
mustermann (1.0.3)
mysql2 (0.5.2)
net-ldap (0.16.1)
nokogiri (1.8.2)
mini_portile2 (~> 2.3.0)
ox (2.9.2)
nokogiri (1.10.1)
mini_portile2 (~> 2.4.0)
ox (2.10.0)
parse-cron (0.1.4)
polyglot (0.3.5)
public_suffix (3.0.2)
rack (1.6.10)
rack-protection (1.5.5)
public_suffix (3.0.3)
rack (2.0.6)
rack-protection (2.0.5)
rack
sequel (5.7.1)
sinatra (1.4.8)
rack (~> 1.5)
rack-protection (~> 1.4)
tilt (>= 1.3, < 3)
sequel (5.17.0)
sinatra (2.0.5)
mustermann (~> 1.0)
rack (~> 2.0)
rack-protection (= 2.0.5)
tilt (~> 2.0)
sqlite3 (1.3.13)
systemu (2.6.5)
thin (1.7.2)
daemons (~> 1.0, >= 1.0.9)
eventmachine (~> 1.0, >= 1.0.4)
rack (>= 1, < 3)
thor (0.20.0)
thor (0.20.3)
thread_safe (0.3.6)
tilt (2.0.8)
tilt (2.0.9)
treetop (1.6.10)
polyglot (~> 0.3)
trollop (2.1.2)
trollop (2.9.9)
tzinfo (1.2.5)
thread_safe (~> 0.1)
uuidtools (2.1.5)
@ -114,7 +116,8 @@ DEPENDENCIES
nokogiri
ox
parse-cron
rack (< 2.0.0)
public_suffix
rack
sequel
sinatra
sqlite3
@ -125,4 +128,4 @@ DEPENDENCIES
zendesk_api
BUNDLED WITH
1.17.1
1.11.2

View File

@ -1,17 +1,34 @@
source 'https://rubygems.org'
if RUBY_VERSION < '1.9.0'
gem 'nokogiri', '< 1.6.0'
gem 'net-ldap', '< 0.9'
gem 'zendesk_api', '< 1.5'
if RUBY_VERSION < '2.1.0'
gem 'nokogiri', '< 1.7'
elsif RUBY_VERSION < '2.3.0'
gem 'nokogiri', '< 1.10'
else
gem 'nokogiri'
gem 'net-ldap'
gem 'zendesk_api'
end
if RUBY_VERSION >= '2.0.0' and RUBY_VERSION < '2.1.0'
if RUBY_VERSION < '2.0.0'
gem 'net-ldap', '< 0.13' # auth
gem 'mysql2', '< 0.5.0' # onedb
gem 'mime-types', '< 3.0' # hybrid (azure)
else
gem 'net-ldap' # auth
gem 'mysql2' # onedb
end
if RUBY_VERSION < '2.0.0'
gem 'public_suffix', '< 1.5.0'
elsif RUBY_VERSION < '2.1.0'
gem 'public_suffix', '< 3.0.0'
else
gem 'public_suffix'
end
if RUBY_VERSION < '2.2.0'
gem 'rack', '< 2.0.0' # sunstone, cloud, oneflow
else
gem 'rack' # sunstone, cloud, oneflow
end
if RUBY_VERSION >= '2.4.0'
@ -27,8 +44,8 @@ gem 'treetop', '>= 1.6.3' # oneflow
gem 'sequel' # quota, oneb
gem 'sinatra' # sunstone, cloud, oneflow
gem 'thin' # sunstone, cloud
gem 'rack', '< 2.0.0' # sunstone, cloud, oneflow
gem 'memcache-client' # sunstone
gem 'zendesk_api' # sunstone
gem 'amazon-ec2' # cloud
gem 'uuidtools' # cloud
gem 'curb' # cloud
@ -39,7 +56,6 @@ gem 'trollop' # vmware
gem 'parse-cron' # oneflow
gem 'aws-sdk', '~> 2.5' # ec2_hybrid
gem 'ox' # oca
gem 'mysql2' # onedb
gem 'highline', '~> 1.7' # oneprovision
gem 'faraday', '~> 0.15' # packethost
gem 'faraday_middleware', '~> 0.12' # packethost

View File

@ -1,7 +1,7 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.10)
activesupport (4.2.11)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
@ -9,14 +9,14 @@ GEM
addressable (2.4.0)
amazon-ec2 (0.9.17)
xml-simple (>= 1.0.12)
aws-sdk (2.11.39)
aws-sdk-resources (= 2.11.39)
aws-sdk-core (2.11.39)
aws-sdk (2.11.212)
aws-sdk-resources (= 2.11.212)
aws-sdk-core (2.11.212)
aws-sigv4 (~> 1.0)
jmespath (~> 1.0)
aws-sdk-resources (2.11.39)
aws-sdk-core (= 2.11.39)
aws-sigv4 (1.0.2)
aws-sdk-resources (2.11.212)
aws-sdk-core (= 2.11.212)
aws-sigv4 (1.0.3)
azure (0.7.10)
addressable (~> 2.3)
azure-core (~> 0.1)
@ -26,21 +26,21 @@ GEM
nokogiri (~> 1.6)
systemu (~> 2.6)
thor (~> 0.19)
azure-core (0.1.14)
azure-core (0.1.15)
faraday (~> 0.9)
faraday_middleware (~> 0.10)
nokogiri (~> 1.6)
builder (3.2.3)
concurrent-ruby (1.1.3)
concurrent-ruby (1.1.4)
configparser (0.1.7)
curb (0.9.4)
daemons (1.2.6)
eventmachine (1.2.5)
faraday (0.15.0)
curb (0.9.8)
daemons (1.3.1)
eventmachine (1.2.7)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
faraday_middleware (0.12.2)
faraday_middleware (0.13.0)
faraday (>= 0.7.4, < 1.0)
hashie (3.5.7)
hashie (3.6.0)
highline (1.7.10)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
@ -55,14 +55,15 @@ GEM
net-ldap (0.12.1)
nokogiri (1.6.8.1)
mini_portile2 (~> 2.1.0)
ox (2.9.2)
ox (2.10.0)
parse-cron (0.1.4)
polyglot (0.3.5)
rack (1.6.10)
public_suffix (1.4.6)
rack (1.6.11)
rack-protection (1.5.5)
rack
scrub_rb (1.0.1)
sequel (5.7.1)
sequel (5.17.0)
sinatra (1.4.8)
rack (~> 1.5)
rack-protection (~> 1.4)
@ -73,12 +74,12 @@ GEM
daemons (~> 1.0, >= 1.0.9)
eventmachine (~> 1.0, >= 1.0.4)
rack (>= 1, < 3)
thor (0.20.0)
thor (0.20.3)
thread_safe (0.3.6)
tilt (2.0.8)
tilt (2.0.9)
treetop (1.6.10)
polyglot (~> 0.3)
trollop (2.1.2)
trollop (2.9.9)
tzinfo (1.2.5)
thread_safe (~> 0.1)
uuidtools (2.1.5)
@ -106,11 +107,13 @@ DEPENDENCIES
highline (~> 1.7)
i18n (~> 0.9)
memcache-client
mysql2
net-ldap
nokogiri
mime-types (< 3.0)
mysql2 (< 0.5.0)
net-ldap (< 0.13)
nokogiri (< 1.7)
ox
parse-cron
public_suffix (< 1.5.0)
rack (< 2.0.0)
scrub_rb
sequel
@ -123,4 +126,4 @@ DEPENDENCIES
zendesk_api
BUNDLED WITH
1.17.1
1.11.2

View File

@ -1,23 +1,23 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.10)
activesupport (4.2.11)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
addressable (2.5.2)
addressable (2.6.0)
public_suffix (>= 2.0.2, < 4.0)
amazon-ec2 (0.9.17)
xml-simple (>= 1.0.12)
aws-sdk (2.11.39)
aws-sdk-resources (= 2.11.39)
aws-sdk-core (2.11.39)
aws-sdk (2.11.212)
aws-sdk-resources (= 2.11.212)
aws-sdk-core (2.11.212)
aws-sigv4 (~> 1.0)
jmespath (~> 1.0)
aws-sdk-resources (2.11.39)
aws-sdk-core (= 2.11.39)
aws-sigv4 (1.0.2)
aws-sdk-resources (2.11.212)
aws-sdk-core (= 2.11.212)
aws-sigv4 (1.0.3)
azure (0.7.10)
addressable (~> 2.3)
azure-core (~> 0.1)
@ -27,61 +27,63 @@ GEM
nokogiri (~> 1.6)
systemu (~> 2.6)
thor (~> 0.19)
azure-core (0.1.14)
azure-core (0.1.15)
faraday (~> 0.9)
faraday_middleware (~> 0.10)
nokogiri (~> 1.6)
builder (3.2.3)
concurrent-ruby (1.1.3)
concurrent-ruby (1.1.4)
configparser (0.1.7)
curb (0.9.4)
daemons (1.2.6)
eventmachine (1.2.5)
faraday (0.15.0)
curb (0.9.8)
daemons (1.3.1)
eventmachine (1.2.7)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
faraday_middleware (0.12.2)
faraday_middleware (0.13.0)
faraday (>= 0.7.4, < 1.0)
hashie (3.5.7)
hashie (3.6.0)
highline (1.7.10)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
inflection (1.0.0)
jmespath (1.4.0)
memcache-client (1.8.5)
mime-types (3.1)
mime-types (3.2.2)
mime-types-data (~> 3.2015)
mime-types-data (3.2016.0521)
mini_portile2 (2.3.0)
mime-types-data (3.2018.0812)
mini_portile2 (2.4.0)
minitest (5.11.3)
multipart-post (2.0.0)
mysql2 (0.5.1)
mustermann (1.0.3)
mysql2 (0.5.2)
net-ldap (0.16.1)
nokogiri (1.8.2)
mini_portile2 (~> 2.3.0)
ox (2.9.2)
nokogiri (1.10.1)
mini_portile2 (~> 2.4.0)
ox (2.10.0)
parse-cron (0.1.4)
polyglot (0.3.5)
public_suffix (3.0.2)
rack (1.6.10)
rack-protection (1.5.5)
public_suffix (3.0.3)
rack (2.0.6)
rack-protection (2.0.5)
rack
sequel (5.7.1)
sinatra (1.4.8)
rack (~> 1.5)
rack-protection (~> 1.4)
tilt (>= 1.3, < 3)
sequel (5.17.0)
sinatra (2.0.5)
mustermann (~> 1.0)
rack (~> 2.0)
rack-protection (= 2.0.5)
tilt (~> 2.0)
sqlite3 (1.3.13)
systemu (2.6.5)
thin (1.7.2)
daemons (~> 1.0, >= 1.0.9)
eventmachine (~> 1.0, >= 1.0.4)
rack (>= 1, < 3)
thor (0.20.0)
thor (0.20.3)
thread_safe (0.3.6)
tilt (2.0.8)
tilt (2.0.9)
treetop (1.6.10)
polyglot (~> 0.3)
trollop (2.1.2)
trollop (2.9.9)
tzinfo (1.2.5)
thread_safe (~> 0.1)
uuidtools (2.1.5)
@ -114,7 +116,8 @@ DEPENDENCIES
nokogiri
ox
parse-cron
rack (< 2.0.0)
public_suffix
rack
sequel
sinatra
sqlite3
@ -125,4 +128,4 @@ DEPENDENCIES
zendesk_api
BUNDLED WITH
1.17.1
1.11.2

View File

@ -1,23 +1,23 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.10)
activesupport (4.2.11)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
addressable (2.5.2)
addressable (2.6.0)
public_suffix (>= 2.0.2, < 4.0)
amazon-ec2 (0.9.17)
xml-simple (>= 1.0.12)
aws-sdk (2.11.39)
aws-sdk-resources (= 2.11.39)
aws-sdk-core (2.11.39)
aws-sdk (2.11.212)
aws-sdk-resources (= 2.11.212)
aws-sdk-core (2.11.212)
aws-sigv4 (~> 1.0)
jmespath (~> 1.0)
aws-sdk-resources (2.11.39)
aws-sdk-core (= 2.11.39)
aws-sigv4 (1.0.2)
aws-sdk-resources (2.11.212)
aws-sdk-core (= 2.11.212)
aws-sigv4 (1.0.3)
azure (0.7.10)
addressable (~> 2.3)
azure-core (~> 0.1)
@ -27,61 +27,63 @@ GEM
nokogiri (~> 1.6)
systemu (~> 2.6)
thor (~> 0.19)
azure-core (0.1.14)
azure-core (0.1.15)
faraday (~> 0.9)
faraday_middleware (~> 0.10)
nokogiri (~> 1.6)
builder (3.2.3)
concurrent-ruby (1.1.3)
concurrent-ruby (1.1.4)
configparser (0.1.7)
curb (0.9.4)
daemons (1.2.6)
eventmachine (1.2.5)
faraday (0.15.0)
curb (0.9.8)
daemons (1.3.1)
eventmachine (1.2.7)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
faraday_middleware (0.12.2)
faraday_middleware (0.13.0)
faraday (>= 0.7.4, < 1.0)
hashie (3.5.7)
hashie (3.6.0)
highline (1.7.10)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
inflection (1.0.0)
jmespath (1.4.0)
memcache-client (1.8.5)
mime-types (3.1)
mime-types (3.2.2)
mime-types-data (~> 3.2015)
mime-types-data (3.2016.0521)
mini_portile2 (2.3.0)
mime-types-data (3.2018.0812)
mini_portile2 (2.4.0)
minitest (5.11.3)
multipart-post (2.0.0)
mysql2 (0.5.1)
mustermann (1.0.3)
mysql2 (0.5.2)
net-ldap (0.16.1)
nokogiri (1.8.2)
mini_portile2 (~> 2.3.0)
ox (2.9.2)
nokogiri (1.10.1)
mini_portile2 (~> 2.4.0)
ox (2.10.0)
parse-cron (0.1.4)
polyglot (0.3.5)
public_suffix (3.0.2)
rack (1.6.10)
rack-protection (1.5.5)
public_suffix (3.0.3)
rack (2.0.6)
rack-protection (2.0.5)
rack
sequel (5.7.1)
sinatra (1.4.8)
rack (~> 1.5)
rack-protection (~> 1.4)
tilt (>= 1.3, < 3)
sequel (5.17.0)
sinatra (2.0.5)
mustermann (~> 1.0)
rack (~> 2.0)
rack-protection (= 2.0.5)
tilt (~> 2.0)
sqlite3 (1.3.13)
systemu (2.6.5)
thin (1.7.2)
daemons (~> 1.0, >= 1.0.9)
eventmachine (~> 1.0, >= 1.0.4)
rack (>= 1, < 3)
thor (0.20.0)
thor (0.20.3)
thread_safe (0.3.6)
tilt (2.0.8)
tilt (2.0.9)
treetop (1.6.10)
polyglot (~> 0.3)
trollop (2.1.2)
trollop (2.9.9)
tzinfo (1.2.5)
thread_safe (~> 0.1)
uuidtools (2.1.5)
@ -115,7 +117,8 @@ DEPENDENCIES
nokogiri
ox
parse-cron
rack (< 2.0.0)
public_suffix
rack
sequel
sinatra
sqlite3
@ -127,4 +130,4 @@ DEPENDENCIES
zendesk_api
BUNDLED WITH
1.17.1
1.11.2

View File

@ -1,22 +1,22 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.10)
activesupport (4.2.11)
i18n (~> 0.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
addressable (2.5.2)
addressable (2.6.0)
public_suffix (>= 2.0.2, < 4.0)
amazon-ec2 (0.9.17)
xml-simple (>= 1.0.12)
aws-sdk (2.11.166)
aws-sdk-resources (= 2.11.166)
aws-sdk-core (2.11.166)
aws-sdk (2.11.212)
aws-sdk-resources (= 2.11.212)
aws-sdk-core (2.11.212)
aws-sigv4 (~> 1.0)
jmespath (~> 1.0)
aws-sdk-resources (2.11.166)
aws-sdk-core (= 2.11.166)
aws-sdk-resources (2.11.212)
aws-sdk-core (= 2.11.212)
aws-sigv4 (1.0.3)
azure (0.7.10)
addressable (~> 2.3)
@ -27,19 +27,19 @@ GEM
nokogiri (~> 1.6)
systemu (~> 2.6)
thor (~> 0.19)
azure-core (0.1.14)
azure-core (0.1.15)
faraday (~> 0.9)
faraday_middleware (~> 0.10)
nokogiri (~> 1.6)
builder (3.2.3)
concurrent-ruby (1.1.3)
concurrent-ruby (1.1.4)
configparser (0.1.7)
curb (0.9.6)
daemons (1.2.6)
curb (0.9.8)
daemons (1.3.1)
eventmachine (1.2.7)
faraday (0.15.3)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
faraday_middleware (0.12.2)
faraday_middleware (0.13.0)
faraday (>= 0.7.4, < 1.0)
hashie (3.6.0)
highline (1.7.10)
@ -51,34 +51,36 @@ GEM
mime-types (3.2.2)
mime-types-data (~> 3.2015)
mime-types-data (3.2018.0812)
mini_portile2 (2.3.0)
mini_portile2 (2.4.0)
minitest (5.11.3)
multipart-post (2.0.0)
mustermann (1.0.3)
mysql2 (0.5.2)
net-ldap (0.16.1)
nokogiri (1.8.5)
mini_portile2 (~> 2.3.0)
nokogiri (1.10.1)
mini_portile2 (~> 2.4.0)
ox (2.10.0)
parse-cron (0.1.4)
polyglot (0.3.5)
public_suffix (3.0.3)
rack (1.6.11)
rack-protection (1.5.5)
rack (2.0.6)
rack-protection (2.0.5)
rack
sequel (5.14.0)
sinatra (1.4.8)
rack (~> 1.5)
rack-protection (~> 1.4)
tilt (>= 1.3, < 3)
sequel (5.17.0)
sinatra (2.0.5)
mustermann (~> 1.0)
rack (~> 2.0)
rack-protection (= 2.0.5)
tilt (~> 2.0)
sqlite3 (1.3.13)
systemu (2.6.5)
thin (1.7.2)
daemons (~> 1.0, >= 1.0.9)
eventmachine (~> 1.0, >= 1.0.4)
rack (>= 1, < 3)
thor (0.20.0)
thor (0.20.3)
thread_safe (0.3.6)
tilt (2.0.8)
tilt (2.0.9)
treetop (1.6.10)
polyglot (~> 0.3)
trollop (2.9.9)
@ -115,7 +117,8 @@ DEPENDENCIES
nokogiri
ox
parse-cron
rack (< 2.0.0)
public_suffix
rack
sequel
sinatra
sqlite3
@ -127,4 +130,4 @@ DEPENDENCIES
zendesk_api
BUNDLED WITH
1.17.1
1.11.2

View File

@ -1,11 +1,613 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
#
# Configuration file for rubocop linter
# Applies to every ruby file on the OpenNebula repository
#
AllCops:
Include:
- 'src/cli'
Exclude:
- src/sunstone/public/node_modules/**/*
- src/tm_mad
- share/onegate/onegate
- share/scons/get_xmlrpc_config
- share/rubygems/generate
- share/hooks/raft/follower_cleanup
- share/esx-fw-vnc/Vagrantfile
- share/vendor/ruby/gems/rbvmomi/Rakefile
- share/vendor/ruby/gems/packethost/Rakefile
- share/vendor/ruby/gems/packethost/Gemfile
- share/vendor/ruby/gems/packethost/packethost.gemspec
- share/install_gems/Gemfile
- share/install_gems/install_gems
- src/authm_mad/remotes/dummy/authenticate
- src/authm_mad/remotes/plain/authenticate
- src/authm_mad/remotes/ldap/authenticate
- src/authm_mad/remotes/server_x509/authenticate
- src/authm_mad/remotes/server_cipher/authenticate
- src/authm_mad/remotes/x509/authenticate
- src/authm_mad/remotes/ssh/authenticate
- src/sunstone/bin/novnc-server
- src/sunstone/config.ru
- src/pm_mad/remotes/dummy/cancel
- src/pm_mad/remotes/dummy/shutdown
- src/pm_mad/remotes/dummy/reboot
- src/pm_mad/remotes/dummy/deploy
- src/pm_mad/remotes/dummy/reset
- src/pm_mad/remotes/dummy/poll
- src/pm_mad/remotes/packet/cancel
- src/pm_mad/remotes/packet/shutdown
- src/pm_mad/remotes/packet/reboot
- src/pm_mad/remotes/packet/deploy
- src/pm_mad/remotes/packet/reset
- src/pm_mad/remotes/packet/poll
- src/pm_mad/remotes/ec2/cancel
- src/pm_mad/remotes/ec2/shutdown
- src/pm_mad/remotes/ec2/reboot
- src/pm_mad/remotes/ec2/deploy
- src/pm_mad/remotes/ec2/reset
- src/pm_mad/remotes/ec2/poll
- src/onegate/config.ru
- src/datastore_mad/remotes/vcenter/monitor
- src/datastore_mad/remotes/vcenter/mkfs
- src/datastore_mad/remotes/vcenter/stat
- src/datastore_mad/remotes/vcenter/clone
- src/datastore_mad/remotes/vcenter/cp
- src/datastore_mad/remotes/vcenter/export
- src/datastore_mad/remotes/vcenter/rm
- src/im_mad/remotes/lxd-probes.d/pci.rb
- src/im_mad/remotes/kvm-probes.d/pci.rb
- src/im_mad/remotes/kvm.d/collectd-client.rb
- src/im_mad/remotes/lxd.d/collectd-client.rb
- src/im_mad/remotes/vcenter.d/poll
- src/im_mad/remotes/packet.d/poll
- src/im_mad/remotes/ec2.d/poll
- src/im_mad/remotes/one.d/poll
- src/im_mad/remotes/az.d/poll
- src/vnm_mad/remotes/ovswitch/post
- src/vnm_mad/remotes/ovswitch/clean
- src/vnm_mad/remotes/ovswitch/pre
- src/vnm_mad/remotes/802.1Q/post
- src/vnm_mad/remotes/802.1Q/update_sg
- src/vnm_mad/remotes/802.1Q/clean
- src/vnm_mad/remotes/802.1Q/pre
- src/vnm_mad/remotes/ebtables/post
- src/vnm_mad/remotes/ebtables/update_sg
- src/vnm_mad/remotes/ebtables/clean
- src/vnm_mad/remotes/ebtables/pre
- src/vnm_mad/remotes/bridge/clean
- src/vnm_mad/remotes/bridge/pre
- src/vnm_mad/remotes/vxlan/post
- src/vnm_mad/remotes/vxlan/update_sg
- src/vnm_mad/remotes/vxlan/clean
- src/vnm_mad/remotes/vxlan/pre
- src/vnm_mad/remotes/fw/post
- src/vnm_mad/remotes/fw/update_sg
- src/vnm_mad/remotes/fw/clean
- src/vnm_mad/remotes/fw/pre
- src/vnm_mad/remotes/ovswitch_vxlan/post
- src/vnm_mad/remotes/ovswitch_vxlan/clean
- src/vnm_mad/remotes/ovswitch_vxlan/pre
- src/vmm_mad/remotes/vcenter/attach_nic
- src/vmm_mad/remotes/vcenter/cancel
- src/vmm_mad/remotes/vcenter/snapshot_revert
- src/vmm_mad/remotes/vcenter/detach_nic
- src/vmm_mad/remotes/vcenter/snapshot_delete
- src/vmm_mad/remotes/vcenter/detach_disk
- src/vmm_mad/remotes/vcenter/shutdown
- src/vmm_mad/remotes/vcenter/attach_disk
- src/vmm_mad/remotes/vcenter/reboot
- src/vmm_mad/remotes/vcenter/deploy
- src/vmm_mad/remotes/vcenter/reset
- src/vmm_mad/remotes/vcenter/reconfigure
- src/vmm_mad/remotes/vcenter/save
- src/vmm_mad/remotes/vcenter/restore
- src/vmm_mad/remotes/vcenter/snapshot_create
- src/vmm_mad/remotes/vcenter/poll
- src/vmm_mad/remotes/lxd/attach_nic
- src/vmm_mad/remotes/lxd/detach_nic
- src/vmm_mad/remotes/lxd/detach_disk
- src/vmm_mad/remotes/lxd/shutdown
- src/vmm_mad/remotes/lxd/attach_disk
- src/vmm_mad/remotes/lxd/reboot
- src/vmm_mad/remotes/lxd/deploy
- src/vmm_mad/remotes/lxd/prereconfigure
- src/vmm_mad/remotes/lxd/reconfigure
- src/vmm_mad/remotes/lxd/poll
- src/vmm_mad/remotes/one/cancel
- src/vmm_mad/remotes/one/shutdown
- src/vmm_mad/remotes/one/reboot
- src/vmm_mad/remotes/one/deploy
- src/vmm_mad/remotes/one/reset
- src/vmm_mad/remotes/one/save
- src/vmm_mad/remotes/one/restore
- src/vmm_mad/remotes/one/poll
- src/vmm_mad/remotes/kvm/poll
- src/vmm_mad/remotes/az/cancel
- src/vmm_mad/remotes/az/shutdown
- src/vmm_mad/remotes/az/reboot
- src/vmm_mad/remotes/az/deploy
- src/vmm_mad/remotes/az/save
- src/vmm_mad/remotes/az/restore
- src/vmm_mad/remotes/az/poll
- src/vmm_mad/remotes/packet/cancel
- src/vmm_mad/remotes/packet/shutdown
- src/vmm_mad/remotes/packet/reboot
- src/vmm_mad/remotes/packet/deploy
- src/vmm_mad/remotes/packet/reset
- src/vmm_mad/remotes/packet/poll
- src/vmm_mad/remotes/ec2/cancel
- src/vmm_mad/remotes/ec2/shutdown
- src/vmm_mad/remotes/ec2/reboot
- src/vmm_mad/remotes/ec2/deploy
- src/vmm_mad/remotes/ec2/save
- src/vmm_mad/remotes/ec2/restore
- src/vmm_mad/remotes/ec2/poll
- src/cloud/ec2/bin/econe-detach-volume
- src/cloud/ec2/bin/econe-start-instances
- src/cloud/ec2/bin/econe-associate-address
- src/cloud/ec2/bin/econe-create-volume
- src/cloud/ec2/bin/econe-delete-volume
- src/cloud/ec2/bin/econe-attach-volume
- src/cloud/ec2/bin/econe-stop-instances
- src/cloud/ec2/bin/econe-delete-keypair
- src/cloud/ec2/bin/econe-register
- src/cloud/ec2/bin/econe-release-address
- src/cloud/ec2/bin/econe-describe-images
- src/cloud/ec2/bin/econe-terminate-instances
- src/cloud/ec2/bin/econe-describe-keypairs
- src/cloud/ec2/bin/econe-describe-instances
- src/cloud/ec2/bin/econe-reboot-instances
- src/cloud/ec2/bin/econe-allocate-address
- src/cloud/ec2/bin/econe-upload
- src/cloud/ec2/bin/econe-describe-addresses
- src/cloud/ec2/bin/econe-run-instances
- src/cloud/ec2/bin/econe-disassociate-address
- src/cloud/ec2/bin/econe-create-keypair
- src/cloud/ec2/bin/econe-describe-volumes
- src/onedb/onedb
- src/market_mad/remotes/s3/monitor
- src/market_mad/remotes/s3/delete
- src/market_mad/remotes/s3/import
- src/market_mad/remotes/linuxcontainers/monitor
- src/market_mad/remotes/one/monitor
- src/tm_mad/vcenter/monitor
- src/tm_mad/vcenter/delete
- src/tm_mad/vcenter/mvds
- src/tm_mad/vcenter/mkimage
- src/tm_mad/vcenter/cpds
- src/tm_mad/vcenter/clone
- src/tm_mad/vcenter/mv
- src/tm_mad/vcenter/resize
- src/flow/config.ru
- src/flow/Gemfile
- src/cli/oneprovision
- share/scons/po2json.rb
- share/sudoers/sudo_commands.rb
- share/hooks/vcenter/delete_vcenter_net.rb
- share/hooks/vcenter/create_vcenter_net.rb
- share/hooks/ft/host_error.rb
- share/instance_types/ec2-instance-types.rb
- share/instance_types/az-instance-types.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Datastore.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrDataTypeInfo.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/HostSystem.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ManagedObject.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrManagedTypeInfo.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ServiceInstance.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PropertyCollector.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrAllTypeInfo.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ObjectUpdate.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ManagedEntity.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Folder.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ResourcePool.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/OvfManager.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Datacenter.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ObjectContent.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/VirtualMachine.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ComputeResource.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PerfCounterInfo.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PerformanceManager.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Task.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/sms.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/fault.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/trivial_soap.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/sms/SmsStorageManager.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/deserialization.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/connection.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/basic_types.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/perfdump.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/admission_control.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/deploy.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/leases.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/trollop.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/version.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/pbm.rb
- share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/type_loader.rb
- share/vendor/ruby/gems/packethost/spec/spec_helper.rb
- share/vendor/ruby/gems/packethost/spec/support/webmock.rb
- share/vendor/ruby/gems/packethost/spec/support/fake_packet.rb
- share/vendor/ruby/gems/packethost/spec/support/shared/entity.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet_spec.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet/client_spec.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet/project_spec.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet/configuration_spec.rb
- share/vendor/ruby/gems/packethost/spec/lib/packet/device_spec.rb
- share/vendor/ruby/gems/packethost/lib/packet.rb
- share/vendor/ruby/gems/packethost/lib/packet/project.rb
- share/vendor/ruby/gems/packethost/lib/packet/errors.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/base.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/serialization.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/associations.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/finders.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/timestamps.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity/persistence.rb
- share/vendor/ruby/gems/packethost/lib/packet/entity.rb
- share/vendor/ruby/gems/packethost/lib/packet/facility.rb
- share/vendor/ruby/gems/packethost/lib/packet/operating_system.rb
- share/vendor/ruby/gems/packethost/lib/packet/device.rb
- share/vendor/ruby/gems/packethost/lib/packet/ip_range.rb
- share/vendor/ruby/gems/packethost/lib/packet/client.rb
- share/vendor/ruby/gems/packethost/lib/packet/version.rb
- share/vendor/ruby/gems/packethost/lib/packet/global_id.rb
- share/vendor/ruby/gems/packethost/lib/packet/configuration.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/devices.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/projects.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/plans.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/facilities.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/ip_ranges.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/ssh_keys.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/users.rb
- share/vendor/ruby/gems/packethost/lib/packet/client/operating_systems.rb
- share/vendor/ruby/gems/packethost/lib/packet/ssh_key.rb
- share/vendor/ruby/gems/packethost/lib/packet/plan.rb
- share/vendor/ruby/gems/packethost/lib/packet/user.rb
- share/vendor/ruby/gems/packethost/lib/packethost.rb
- share/router/vmcontext.rb
- src/authm_mad/one_auth_mad.rb
- src/authm_mad/remotes/ldap/test/ldap_auth_spec.rb
- src/authm_mad/remotes/ldap/ldap_auth.rb
- src/authm_mad/remotes/server_x509/server_x509_auth.rb
- src/authm_mad/remotes/server_cipher/server_cipher_auth.rb
- src/authm_mad/remotes/x509/x509_auth.rb
- src/authm_mad/remotes/ssh/ssh_auth.rb
- src/sunstone/sunstone-server.rb
- src/sunstone/test/spec/spec_helper.rb
- src/sunstone/test/spec/vnet_spec.rb
- src/sunstone/test/spec/image_spec.rb
- src/sunstone/test/spec/vm_spec.rb
- src/sunstone/test/spec/host_spec.rb
- src/sunstone/test/spec/user_spec.rb
- src/sunstone/OpenNebulaAddons.rb
- src/sunstone/OpenNebulaVNC.rb
- src/sunstone/models/OpenNebulaJSON.rb
- src/sunstone/models/SunstoneViews.rb
- src/sunstone/models/SunstoneServer.rb
- src/sunstone/models/OpenNebulaJSON/SecurityGroupJSON.rb
- src/sunstone/models/OpenNebulaJSON/HostJSON.rb
- src/sunstone/models/OpenNebulaJSON/PoolJSON.rb
- src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb
- src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb
- src/sunstone/models/OpenNebulaJSON/UserJSON.rb
- src/sunstone/models/OpenNebulaJSON/AclJSON.rb
- src/sunstone/models/OpenNebulaJSON/JSONUtils.rb
- src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb
- src/sunstone/models/OpenNebulaJSON/MarketPlaceAppJSON.rb
- src/sunstone/models/OpenNebulaJSON/ImageJSON.rb
- src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb
- src/sunstone/models/OpenNebulaJSON/MarketPlaceJSON.rb
- src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb
- src/sunstone/models/OpenNebulaJSON/VirtualRouterJSON.rb
- src/sunstone/models/OpenNebulaJSON/VirtualNetworkTemplateJSON.rb
- src/sunstone/models/OpenNebulaJSON/GroupJSON.rb
- src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb
- src/sunstone/models/OpenNebulaJSON/VMGroupJSON.rb
- src/sunstone/models/OpenNebulaJSON/VdcJSON.rb
- src/sunstone/public/config.rb
- src/sunstone/routes/oneflow.rb
- src/sunstone/routes/vcenter.rb
- src/sunstone/routes/support.rb
- src/onegate/onegate-server.rb
- src/datastore_mad/remotes/vcenter_downloader.rb
- src/datastore_mad/remotes/vcenter_uploader.rb
- src/datastore_mad/remotes/xpath.rb
- src/datastore_mad/remotes/url.rb
- src/datastore_mad/one_datastore.rb
- src/im_mad/dummy/one_im_dummy.rb
- src/im_mad/im_exec/one_im_exec.rb
- src/im_mad/remotes/lxd-probes.d/lxd.rb
- src/im_mad/remotes/kvm-probes.d/kvm.rb
- src/im_mad/remotes/kvm-probes.d/machines-models.rb
- src/im_mad/remotes/common.d/collectd-client.rb
- src/im_mad/remotes/node-probes.d/pci.rb
- src/hm_mad/one_hm.rb
- src/vnm_mad/one_vnm.rb
- src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb
- src/vnm_mad/remotes/802.1Q/vlan_tag_driver.rb
- src/vnm_mad/remotes/ebtables/Ebtables.rb
- src/vnm_mad/remotes/vxlan/vxlan.rb
- src/vnm_mad/remotes/vxlan/vxlan_driver.rb
- src/vnm_mad/remotes/ovswitch_vxlan/OpenvSwitchVXLAN.rb
- src/vnm_mad/remotes/lib/no_vlan.rb
- src/vnm_mad/remotes/lib/nic.rb
- src/vnm_mad/remotes/lib/vnmmad.rb
- src/vnm_mad/remotes/lib/security_groups.rb
- src/vnm_mad/remotes/lib/security_groups_iptables.rb
- src/vnm_mad/remotes/lib/vm.rb
- src/vnm_mad/remotes/lib/vnm_driver.rb
- src/vnm_mad/remotes/lib/sg_driver.rb
- src/vnm_mad/remotes/lib/command.rb
- src/vnm_mad/remotes/lib/vlan.rb
- src/vnm_mad/remotes/lib/address.rb
- src/cli/command_parser.rb
- src/cli/cli_helper.rb
- src/cli/one_helper.rb
- src/cli/one_helper/onevmgroup_helper.rb
- src/cli/one_helper/onemarket_helper.rb
- src/cli/one_helper/onesecgroup_helper.rb
- src/cli/one_helper/onezone_helper.rb
- src/cli/one_helper/onetemplate_helper.rb
- src/cli/one_helper/onevm_helper.rb
- src/cli/one_helper/oneacct_helper.rb
- src/cli/one_helper/onequota_helper.rb
- src/cli/one_helper/oneuser_helper.rb
- src/cli/one_helper/oneimage_helper.rb
- src/cli/one_helper/onemarketapp_helper.rb
- src/cli/one_helper/onegroup_helper.rb
- src/cli/one_helper/onevnet_helper.rb
- src/cli/one_helper/oneacl_helper.rb
- src/cli/one_helper/onevcenter_helper.rb
- src/cli/one_helper/onecluster_helper.rb
- src/cli/one_helper/onevntemplate_helper.rb
- src/cli/one_helper/onevrouter_helper.rb
- src/cli/one_helper/oneprovision_helpers/host_helper.rb
- src/cli/one_helper/oneprovision_helpers/provision_helper.rb
- src/cli/one_helper/oneprovision_helpers/cluster_helper.rb
- src/cli/one_helper/oneprovision_helpers/common_helper.rb
- src/cli/one_helper/oneprovision_helpers/datastore_helper.rb
- src/cli/one_helper/oneprovision_helpers/vnet_helper.rb
- src/cli/one_helper/oneprovision_helpers/ansible_helper.rb
- src/cli/one_helper/onevdc_helper.rb
- src/cli/one_helper/onedatastore_helper.rb
- src/oca/ruby/test/VirtualMachine_spec.rb
- src/oca/ruby/test/VirtualMachinePool_spec.rb
- src/oca/ruby/test/XMLUtils_spec.rb
- src/oca/ruby/test/UserPool_spec.rb
- src/oca/ruby/test/Host_spec.rb
- src/oca/ruby/test/User_spec.rb
- src/oca/ruby/test/helpers/MockClient.rb
- src/oca/ruby/test/VirtualNetwork_spec.rb
- src/oca/ruby/test/HostPool_spec.rb
- src/oca/ruby/test/VirtualNetworkPool_spec.rb
- src/oca/ruby/opennebula.rb
- src/oca/ruby/opennebula/image.rb
- src/oca/ruby/opennebula/datastore.rb
- src/oca/ruby/opennebula/group_pool.rb
- src/oca/ruby/opennebula/template_pool.rb
- src/oca/ruby/opennebula/marketplaceapp_pool.rb
- src/oca/ruby/opennebula/acl_pool.rb
- src/oca/ruby/opennebula/virtual_machine_pool.rb
- src/oca/ruby/opennebula/pool.rb
- src/oca/ruby/opennebula/host_pool.rb
- src/oca/ruby/opennebula/security_group.rb
- src/oca/ruby/opennebula/cluster_pool.rb
- src/oca/ruby/opennebula/document.rb
- src/oca/ruby/opennebula/zone.rb
- src/oca/ruby/opennebula/virtual_router_pool.rb
- src/oca/ruby/opennebula/user_pool.rb
- src/oca/ruby/opennebula/xml_utils.rb
- src/oca/ruby/opennebula/virtual_router.rb
- src/oca/ruby/opennebula/document_json.rb
- src/oca/ruby/opennebula/marketplace.rb
- src/oca/ruby/opennebula/virtual_machine.rb
- src/oca/ruby/opennebula/xml_element.rb
- src/oca/ruby/opennebula/template.rb
- src/oca/ruby/opennebula/group.rb
- src/oca/ruby/opennebula/virtual_network.rb
- src/oca/ruby/opennebula/security_group_pool.rb
- src/oca/ruby/opennebula/pool_element.rb
- src/oca/ruby/opennebula/document_pool.rb
- src/oca/ruby/opennebula/vm_group_pool.rb
- src/oca/ruby/opennebula/vntemplate_pool.rb
- src/oca/ruby/opennebula/vdc_pool.rb
- src/oca/ruby/opennebula/datastore_pool.rb
- src/oca/ruby/opennebula/cluster.rb
- src/oca/ruby/opennebula/utils.rb
- src/oca/ruby/opennebula/acl.rb
- src/oca/ruby/opennebula/vntemplate.rb
- src/oca/ruby/opennebula/oneflow_client.rb
- src/oca/ruby/opennebula/host.rb
- src/oca/ruby/opennebula/vm_group.rb
- src/oca/ruby/opennebula/xml_pool.rb
- src/oca/ruby/opennebula/client.rb
- src/oca/ruby/opennebula/document_pool_json.rb
- src/oca/ruby/opennebula/zone_pool.rb
- src/oca/ruby/opennebula/error.rb
- src/oca/ruby/opennebula/image_pool.rb
- src/oca/ruby/opennebula/virtual_network_pool.rb
- src/oca/ruby/opennebula/system.rb
- src/oca/ruby/opennebula/marketplaceapp.rb
- src/oca/ruby/opennebula/marketplace_pool.rb
- src/oca/ruby/opennebula/vdc.rb
- src/oca/ruby/opennebula/user.rb
- src/oca/ruby/deprecated/OpenNebula.rb
- src/vmm_mad/dummy/one_vmm_dummy.rb
- src/vmm_mad/remotes/vcenter/vcenter_driver.rb
- src/vmm_mad/remotes/one/opennebula_driver.rb
- src/vmm_mad/remotes/lib/vcenter_driver/datastore.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vcenter_importer.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vi_helper.rb
- src/vmm_mad/remotes/lib/vcenter_driver/network.rb
- src/vmm_mad/remotes/lib/vcenter_driver/virtual_machine.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vm_template.rb
- src/vmm_mad/remotes/lib/vcenter_driver/file_helper.rb
- src/vmm_mad/remotes/lib/vcenter_driver/vi_client.rb
- src/vmm_mad/remotes/lib/vcenter_driver/host.rb
- src/vmm_mad/remotes/lib/vcenter_driver/datacenter.rb
- src/vmm_mad/remotes/lib/vcenter_driver/memoize.rb
- src/vmm_mad/remotes/lib/lxd/mapper/qcow2.rb
- src/vmm_mad/remotes/lib/lxd/mapper/mapper.rb
- src/vmm_mad/remotes/lib/lxd/mapper/rbd.rb
- src/vmm_mad/remotes/lib/lxd/mapper/raw.rb
- src/vmm_mad/remotes/lib/lxd/container.rb
- src/vmm_mad/remotes/lib/lxd/client.rb
- src/vmm_mad/remotes/lib/lxd/opennebula_vm.rb
- src/vmm_mad/remotes/lib/lxd/command.rb
- src/vmm_mad/remotes/lib/poll_common.rb
- src/vmm_mad/remotes/az/az_driver.rb
- src/vmm_mad/remotes/packet/packet_driver.rb
- src/vmm_mad/remotes/ec2/ec2_driver.rb
- src/vmm_mad/exec/one_vmm_exec.rb
- src/mad/ruby/ssh_stream.rb
- src/mad/ruby/test/MonkeyPatcher.rb
- src/mad/ruby/test/OpenNebulaDriver_spec.rb
- src/mad/ruby/VirtualMachineDriver.rb
- src/mad/ruby/ActionManager.rb
- src/mad/ruby/DriverExecHelper.rb
- src/mad/ruby/CommandManager.rb
- src/mad/ruby/test_mad.rb
- src/mad/ruby/OpenNebulaDriver.rb
- src/mad/ruby/scripts_common.rb
- src/ipamm_mad/one_ipam.rb
- src/cloud/common/CloudClient.rb
- src/cloud/common/CloudServer.rb
- src/cloud/common/CloudAuth.rb
- src/cloud/common/CloudAuth/RemoteCloudAuth.rb
- src/cloud/common/CloudAuth/X509CloudAuth.rb
- src/cloud/common/CloudAuth/EC2CloudAuth.rb
- src/cloud/common/CloudAuth/SunstoneCloudAuth.rb
- src/cloud/common/CloudAuth/OneGateCloudAuth.rb
- src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb
- src/cloud/ec2/lib/ebs.rb
- src/cloud/ec2/lib/elastic_ip.rb
- src/cloud/ec2/lib/ImageEC2.rb
- src/cloud/ec2/lib/net_ssh_replacement.rb
- src/cloud/ec2/lib/keypair.rb
- src/cloud/ec2/lib/EC2QueryServer.rb
- src/cloud/ec2/lib/econe-server.rb
- src/cloud/ec2/lib/EC2QueryClient.rb
- src/cloud/ec2/lib/tags.rb
- src/cloud/ec2/lib/instance.rb
- src/cloud/ec2/lib/econe_application.rb
- src/onedb/database_schema.rb
- src/onedb/fsck/image.rb
- src/onedb/fsck/datastore.rb
- src/onedb/fsck/quotas.rb
- src/onedb/fsck/history.rb
- src/onedb/fsck/vrouter.rb
- src/onedb/fsck/pool_control.rb
- src/onedb/fsck/marketplace.rb
- src/onedb/fsck/network.rb
- src/onedb/fsck/template.rb
- src/onedb/fsck/group.rb
- src/onedb/fsck/cluster.rb
- src/onedb/fsck/cluster_vnc_bitmap.rb
- src/onedb/fsck/host.rb
- src/onedb/fsck/vm.rb
- src/onedb/fsck/marketplaceapp.rb
- src/onedb/fsck/user.rb
- src/onedb/fsck.rb
- src/onedb/patches/ip4_ip6_static.rb
- src/onedb/patches/history_times.rb
- src/onedb/patches/marketapps_clean.rb
- src/onedb/patches/4.14_monitoring.rb
- src/onedb/patches/next_snapshot.rb
- src/onedb/vcenter_one54_pre.rb
- src/onedb/vcenter_one54.rb
- src/onedb/onedb.rb
- src/onedb/shared/4.2.0_to_4.3.80.rb
- src/onedb/shared/5.3.80_to_5.4.0.rb
- src/onedb/shared/3.0.0_to_3.1.0.rb
- src/onedb/shared/3.2.0_to_3.2.1.rb
- src/onedb/shared/3.6.0_to_3.7.80.rb
- src/onedb/shared/3.5.80_to_3.6.0.rb
- src/onedb/shared/5.6.0_to_5.7.80.rb
- src/onedb/shared/4.4.1_to_4.5.80.rb
- src/onedb/shared/3.1.0_to_3.1.80.rb
- src/onedb/shared/3.8.0_to_3.8.1.rb
- src/onedb/shared/2.0_to_2.9.80.rb
- src/onedb/shared/3.8.5_to_3.9.80.rb
- src/onedb/shared/4.5.80_to_4.6.0.rb
- src/onedb/shared/3.8.2_to_3.8.3.rb
- src/onedb/shared/4.0.1_to_4.1.80.rb
- src/onedb/shared/4.11.80_to_4.90.0.rb
- src/onedb/shared/3.9.80_to_3.9.90.rb
- src/onedb/shared/4.90.0_to_5.2.0.rb
- src/onedb/shared/4.6.0_to_4.11.80.rb
- src/onedb/shared/3.3.0_to_3.3.80.rb
- src/onedb/shared/3.4.1_to_3.5.80.rb
- src/onedb/shared/5.4.1_to_5.5.80.rb
- src/onedb/shared/4.3.85_to_4.3.90.rb
- src/onedb/shared/4.4.0_to_4.4.1.rb
- src/onedb/shared/3.8.1_to_3.8.2.rb
- src/onedb/shared/2.9.90_to_3.0.0.rb
- src/onedb/shared/3.8.3_to_3.8.4.rb
- src/onedb/shared/2.9.80_to_2.9.85.rb
- src/onedb/shared/5.2.0_to_5.3.80.rb
- src/onedb/shared/3.4.0_to_3.4.1.rb
- src/onedb/shared/5.4.0_to_5.4.1.rb
- src/onedb/shared/4.3.80_to_4.3.85.rb
- src/onedb/shared/3.2.1_to_3.3.0.rb
- src/onedb/shared/2.9.85_to_2.9.90.rb
- src/onedb/shared/3.8.4_to_3.8.5.rb
- src/onedb/shared/3.7.80_to_3.8.0.rb
- src/onedb/shared/3.1.80_to_3.2.0.rb
- src/onedb/shared/5.5.80_to_5.6.0.rb
- src/onedb/shared/3.3.80_to_3.4.0.rb
- src/onedb/shared/4.3.90_to_4.4.0.rb
- src/onedb/shared/3.9.90_to_4.0.0.rb
- src/onedb/shared/4.0.0_to_4.0.1.rb
- src/onedb/shared/4.1.80_to_4.2.0.rb
- src/onedb/local/5.3.80_to_5.4.0.rb
- src/onedb/local/4.13.80_to_4.13.85.rb
- src/onedb/local/5.6.0_to_5.7.80.rb
- src/onedb/local/5.4.1_to_5.5.80.rb
- src/onedb/local/4.13.85_to_4.90.0.rb
- src/onedb/local/5.4.0_to_5.4.1.rb
- src/onedb/local/4.11.80_to_4.13.80.rb
- src/onedb/local/4.10.3_to_4.11.80.rb
- src/onedb/local/4.90.0_to_5.3.80.rb
- src/onedb/local/4.5.80_to_4.7.80.rb
- src/onedb/local/4.9.80_to_4.10.3.rb
- src/onedb/local/5.5.80_to_5.6.0.rb
- src/onedb/local/4.7.80_to_4.9.80.rb
- src/onedb/onedb_live.rb
- src/onedb/onedb_backend.rb
- src/onedb/sqlite2mysql.rb
- src/market_mad/remotes/s3/S3.rb
- src/market_mad/one_market.rb
- src/tm_mad/one_tm.rb
- src/flow/lib/models/service_template_pool.rb
- src/flow/lib/models/service_pool.rb
- src/flow/lib/models/service_template.rb
- src/flow/lib/models/role.rb
- src/flow/lib/models/service.rb
- src/flow/lib/strategy.rb
- src/flow/lib/grammar.rb
- src/flow/lib/LifeCycleManager.rb
- src/flow/lib/log.rb
- src/flow/lib/models.rb
- src/flow/lib/validator.rb
- src/flow/lib/strategy/straight.rb
- src/flow/oneflow-server.rb
########
# LAYOUT
@ -169,6 +771,12 @@ Metrics/BlockLength:
Metrics/LineLength:
Max: 80
Metrics/ModuleLength:
Enabled: False
Metrics/ClassLength:
Enabled: False
# Parameter list config:
Metrics/ParameterLists:
Max: 5

View File

@ -13,7 +13,7 @@ require 'pp'
module CloudClient
# OpenNebula version
VERSION = '5.7.85'
VERSION = '5.7.90'
# #########################################################################
# Default location for the authentication file

View File

@ -19,7 +19,7 @@
require 'fileutils'
require 'tmpdir'
VERSION = "5.7.85"
VERSION = "5.7.90"
def version
v = VERSION

View File

@ -1,18 +0,0 @@
AllCops:
Include:
- '**/Rakefile'
- lib/**/*
Documentation:
Enabled: false
LineLength:
Enabled: false
Style/PredicateName:
NamePrefix:
- is_
- have_
Style/MethodMissing:
Enabled: false

5
share/websockify/run Executable file
View File

@ -0,0 +1,5 @@
#!/usr/bin/env python
import websockify
websockify.websocketproxy.websockify_init()

View File

@ -1 +0,0 @@
websocketproxy.py

View File

@ -0,0 +1,2 @@
from websockify.websocket import *
from websockify.websocketproxy import *

View File

@ -0,0 +1,83 @@
class BasePlugin(object):
def __init__(self, src=None):
self.source = src
def authenticate(self, headers, target_host, target_port):
pass
class AuthenticationError(Exception):
def __init__(self, log_msg=None, response_code=403, response_headers={}, response_msg=None):
self.code = response_code
self.headers = response_headers
self.msg = response_msg
if log_msg is None:
log_msg = response_msg
super(AuthenticationError, self).__init__('%s %s' % (self.code, log_msg))
class InvalidOriginError(AuthenticationError):
def __init__(self, expected, actual):
self.expected_origin = expected
self.actual_origin = actual
super(InvalidOriginError, self).__init__(
response_msg='Invalid Origin',
log_msg="Invalid Origin Header: Expected one of "
"%s, got '%s'" % (expected, actual))
class BasicHTTPAuth(object):
"""Verifies Basic Auth headers. Specify src as username:password"""
def __init__(self, src=None):
self.src = src
def authenticate(self, headers, target_host, target_port):
import base64
auth_header = headers.get('Authorization')
if auth_header:
if not auth_header.startswith('Basic '):
raise AuthenticationError(response_code=403)
try:
user_pass_raw = base64.b64decode(auth_header[6:])
except TypeError:
raise AuthenticationError(response_code=403)
try:
# http://stackoverflow.com/questions/7242316/what-encoding-should-i-use-for-http-basic-authentication
user_pass_as_text = user_pass_raw.decode('ISO-8859-1')
except UnicodeDecodeError:
raise AuthenticationError(response_code=403)
user_pass = user_pass_as_text.split(':', 1)
if len(user_pass) != 2:
raise AuthenticationError(response_code=403)
if not self.validate_creds(*user_pass):
raise AuthenticationError(response_code=403)
else:
raise AuthenticationError(response_code=401,
response_headers={'WWW-Authenticate': 'Basic realm="Websockify"'})
def validate_creds(self, username, password):
if '%s:%s' % (username, password) == self.src:
return True
else:
return False
class ExpectOrigin(object):
def __init__(self, src=None):
if src is None:
self.source = []
else:
self.source = src.split()
def authenticate(self, headers, target_host, target_port):
origin = headers.get('Origin', None)
if origin is None or origin not in self.source:
raise InvalidOriginError(expected=self.source, actual=origin)

View File

@ -0,0 +1,83 @@
import os
class BasePlugin(object):
def __init__(self, src):
self.source = src
def lookup(self, token):
return None
class ReadOnlyTokenFile(BasePlugin):
# source is a token file with lines like
# token: host:port
# or a directory of such files
def __init__(self, *args, **kwargs):
super(ReadOnlyTokenFile, self).__init__(*args, **kwargs)
self._targets = None
def _load_targets(self):
if os.path.isdir(self.source):
cfg_files = [os.path.join(self.source, f) for
f in os.listdir(self.source)]
else:
cfg_files = [self.source]
self._targets = {}
for f in cfg_files:
for line in [l.strip() for l in open(f).readlines()]:
if line and not line.startswith('#'):
tok, target = line.split(': ')
self._targets[tok] = target.strip().rsplit(':', 1)
def lookup(self, token):
if self._targets is None:
self._load_targets()
if token in self._targets:
return self._targets[token]
else:
return None
# the above one is probably more efficient, but this one is
# more backwards compatible (although in most cases
# ReadOnlyTokenFile should suffice)
class TokenFile(ReadOnlyTokenFile):
# source is a token file with lines like
# token: host:port
# or a directory of such files
def lookup(self, token):
self._load_targets()
return super(TokenFile, self).lookup(token)
class BaseTokenAPI(BasePlugin):
# source is a url with a '%s' in it where the token
# should go
# we import things on demand so that other plugins
# in this file can be used w/o unecessary dependencies
def process_result(self, resp):
return resp.text.split(':')
def lookup(self, token):
import requests
resp = requests.get(self.source % token)
if resp.ok:
return self.process_result(resp)
else:
return None
class JSONTokenApi(BaseTokenAPI):
# source is a url with a '%s' in it where the token
# should go
def process_result(self, resp):
resp_json = resp.json()
return (resp_json['host'], resp_json['port'])

View File

@ -104,6 +104,8 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
self.handler_id = getattr(server, "handler_id", False)
self.file_only = getattr(server, "file_only", False)
self.traffic = getattr(server, "traffic", False)
self.auto_pong = getattr(server, "auto_pong", False)
self.strict_mode = getattr(server, "strict_mode", True)
self.logger = getattr(server, "logger", None)
if self.logger is None:
@ -111,6 +113,9 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
SimpleHTTPRequestHandler.__init__(self, req, addr, server)
def log_message(self, format, *args):
self.logger.info("%s - - [%s] %s" % (self.address_string(), self.log_date_time_string(), format % args))
@staticmethod
def unmask(buf, hlen, plen):
pstart = hlen + 4
@ -118,20 +123,24 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
if numpy:
b = c = s2b('')
if plen >= 4:
mask = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
offset=hlen, count=1)
data = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
offset=pstart, count=int(plen / 4))
dtype=numpy.dtype('<u4')
if sys.byteorder == 'big':
dtype = dtype.newbyteorder('>')
mask = numpy.frombuffer(buf, dtype, offset=hlen, count=1)
data = numpy.frombuffer(buf, dtype, offset=pstart,
count=int(plen / 4))
#b = numpy.bitwise_xor(data, mask).data
b = numpy.bitwise_xor(data, mask).tostring()
if plen % 4:
#self.msg("Partial unmask")
mask = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
offset=hlen, count=(plen % 4))
data = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
offset=pend - (plen % 4),
dtype=numpy.dtype('B')
if sys.byteorder == 'big':
dtype = dtype.newbyteorder('>')
mask = numpy.frombuffer(buf, dtype, offset=hlen,
count=(plen % 4))
data = numpy.frombuffer(buf, dtype,
offset=pend - (plen % 4), count=(plen % 4))
c = numpy.bitwise_xor(data, mask).tostring()
return b + c
else:
@ -172,7 +181,7 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
return header + buf, len(header), 0
@staticmethod
def decode_hybi(buf, base64=False, logger=None):
def decode_hybi(buf, base64=False, logger=None, strict=True):
""" Decode HyBi style WebSocket packets.
Returns:
{'fin' : 0_or_1,
@ -238,6 +247,10 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
f['length'])
else:
logger.debug("Unmasked frame: %s" % repr(buf))
if strict:
raise WebSocketRequestHandler.CClose(1002, "The client sent an unmasked frame.")
f['payload'] = buf[(f['hlen'] + f['masked'] * 4):full_len]
if base64 and f['opcode'] in [1, 2]:
@ -346,7 +359,8 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
while buf:
frame = self.decode_hybi(buf, base64=self.base64,
logger=self.logger)
logger=self.logger,
strict=self.strict_mode)
#self.msg("Received buf: %s, frame: %s", repr(buf), frame)
if frame['payload'] == None:
@ -360,6 +374,15 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
closed = {'code': frame['close_code'],
'reason': frame['close_reason']}
break
elif self.auto_pong and frame['opcode'] == 0x9: # ping
self.print_traffic("} ping %s\n" %
repr(frame['payload']))
self.send_pong(frame['payload'])
return [], False
elif frame['opcode'] == 0xA: # pong
self.print_traffic("} pong %s\n" %
repr(frame['payload']))
return [], False
self.print_traffic("}")
@ -388,10 +411,20 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
def send_close(self, code=1000, reason=''):
""" Send a WebSocket orderly close frame. """
msg = pack(">H%ds" % len(reason), code, reason)
msg = pack(">H%ds" % len(reason), code, s2b(reason))
buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False)
self.request.send(buf)
def send_pong(self, data=''):
""" Send a WebSocket pong frame. """
buf, h, t = self.encode_hybi(s2b(data), opcode=0x0A, base64=False)
self.request.send(buf)
def send_ping(self, data=''):
""" Send a WebSocket ping frame. """
buf, h, t = self.encode_hybi(s2b(data), opcode=0x09, base64=False)
self.request.send(buf)
def do_websocket_handshake(self):
h = self.headers
@ -444,9 +477,13 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
"""Upgrade a connection to Websocket, if requested. If this succeeds,
new_websocket_client() will be called. Otherwise, False is returned.
"""
if (self.headers.get('upgrade') and
self.headers.get('upgrade').lower() == 'websocket'):
# ensure connection is authorized, and determine the target
self.validate_connection()
if not self.do_websocket_handshake():
return False
@ -519,6 +556,10 @@ class WebSocketRequestHandler(SimpleHTTPRequestHandler):
""" Do something with a WebSockets client connection. """
raise Exception("WebSocketRequestHandler.new_websocket_client() must be overloaded")
def validate_connection(self):
""" Ensure that the connection is a valid connection, and set the target. """
pass
def do_HEAD(self):
if self.only_upgrade:
self.send_error(405, "Method Not Allowed")
@ -567,7 +608,7 @@ class WebSocketServer(object):
file_only=False,
run_once=False, timeout=0, idle_timeout=0, traffic=False,
tcp_keepalive=True, tcp_keepcnt=None, tcp_keepidle=None,
tcp_keepintvl=None):
tcp_keepintvl=None, auto_pong=False, strict_mode=True):
# settings
self.RequestHandlerClass = RequestHandlerClass
@ -581,6 +622,8 @@ class WebSocketServer(object):
self.timeout = timeout
self.idle_timeout = idle_timeout
self.traffic = traffic
self.file_only = file_only
self.strict_mode = strict_mode
self.launch_time = time.time()
self.ws_connection = False
@ -592,6 +635,7 @@ class WebSocketServer(object):
self.tcp_keepidle = tcp_keepidle
self.tcp_keepintvl = tcp_keepintvl
self.auto_pong = auto_pong
# Make paths settings absolute
self.cert = os.path.abspath(cert)
self.key = self.web = self.record = ''
@ -618,7 +662,10 @@ class WebSocketServer(object):
self.listen_host, self.listen_port)
self.msg(" - Flash security policy server")
if self.web:
self.msg(" - Web server. Web root: %s", self.web)
if self.file_only:
self.msg(" - Web server (no directory listings). Web root: %s", self.web)
else:
self.msg(" - Web server. Web root: %s", self.web)
if ssl:
if os.path.exists(self.cert):
self.msg(" - SSL/TLS support")
@ -701,6 +748,10 @@ class WebSocketServer(object):
@staticmethod
def daemonize(keepfd=None, chdir='/'):
if keepfd is None:
keepfd = []
os.umask(0)
if chdir:
os.chdir(chdir)
@ -723,7 +774,7 @@ class WebSocketServer(object):
if maxfd == resource.RLIM_INFINITY: maxfd = 256
for fd in reversed(range(maxfd)):
try:
if fd != keepfd:
if fd not in keepfd:
os.close(fd)
except OSError:
_, exc, _ = sys.exc_info()
@ -753,7 +804,7 @@ class WebSocketServer(object):
"""
ready = select.select([sock], [], [], 3)[0]
if not ready:
raise self.EClose("ignoring socket not ready")
# Peek, but do not read the data so that we have a opportunity
@ -761,7 +812,7 @@ class WebSocketServer(object):
handshake = sock.recv(1024, socket.MSG_PEEK)
#self.msg("Handshake [%s]" % handshake)
if handshake == "":
if not handshake:
raise self.EClose("ignoring empty handshake")
elif handshake.startswith(s2b("<policy-file-request/>")):
@ -844,11 +895,14 @@ class WebSocketServer(object):
raise self.Terminate()
def multiprocessing_SIGCHLD(self, sig, stack):
self.vmsg('Reaping zombies, active child count is %s', len(multiprocessing.active_children()))
# TODO: figure out a way to actually log this information without
# calling `log` in the signal handlers
multiprocessing.active_children()
def fallback_SIGCHLD(self, sig, stack):
# Reap zombies when using os.fork() (python 2.4)
self.vmsg("Got SIGCHLD, reaping zombies")
# TODO: figure out a way to actually log this information without
# calling `log` in the signal handlers
try:
result = os.waitpid(-1, os.WNOHANG)
while result[0]:
@ -858,16 +912,18 @@ class WebSocketServer(object):
pass
def do_SIGINT(self, sig, stack):
self.msg("Got SIGINT, exiting")
# TODO: figure out a way to actually log this information without
# calling `log` in the signal handlers
self.terminate()
def do_SIGTERM(self, sig, stack):
self.msg("Got SIGTERM, exiting")
# TODO: figure out a way to actually log this information without
# calling `log` in the signal handlers
self.terminate()
def top_new_client(self, startsock, address):
""" Do something with a WebSockets client connection. """
# handler process
# handler process
client = None
try:
try:
@ -890,6 +946,18 @@ class WebSocketServer(object):
# Original socket closed by caller
client.close()
def get_log_fd(self):
"""
Get file descriptors for the loggers.
They should not be closed when the process is forked.
"""
descriptors = []
for handler in self.logger.parent.handlers:
if isinstance(handler, logging.FileHandler):
descriptors.append(handler.stream.fileno())
return descriptors
def start_server(self):
"""
Daemonize if requested. Listen for for connections. Run
@ -905,7 +973,9 @@ class WebSocketServer(object):
tcp_keepintvl=self.tcp_keepintvl)
if self.daemon:
self.daemonize(keepfd=lsock.fileno(), chdir=self.web)
keepfd = self.get_log_fd()
keepfd.append(lsock.fileno())
self.daemonize(keepfd=keepfd, chdir=self.web)
self.started() # Some things need to happen after daemonizing
@ -1009,8 +1079,17 @@ class WebSocketServer(object):
except (self.Terminate, SystemExit, KeyboardInterrupt):
self.msg("In exit")
# terminate all child processes
if multiprocessing and not self.run_once:
children = multiprocessing.active_children()
for child in children:
self.msg("Terminating child %s" % child.pid)
child.terminate()
break
except Exception:
exc = sys.exc_info()[1]
self.msg("handler exception: %s", str(exc))
self.vmsg("exception", exc_info=True)

View File

@ -11,13 +11,14 @@ as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import signal, socket, optparse, time, os, sys, subprocess, logging
import signal, socket, optparse, time, os, sys, subprocess, logging, errno
try: from socketserver import ForkingMixIn
except: from SocketServer import ForkingMixIn
try: from http.server import HTTPServer
except: from BaseHTTPServer import HTTPServer
from select import select
import websocket
import select
from websockify import websocket
from websockify import auth_plugins as auth
try:
from urllib.parse import parse_qs, urlparse
except:
@ -38,14 +39,33 @@ Traffic Legend:
<. - Client send partial
"""
def send_auth_error(self, ex):
self.send_response(ex.code, ex.msg)
self.send_header('Content-Type', 'text/html')
for name, val in ex.headers.items():
self.send_header(name, val)
self.end_headers()
def validate_connection(self):
if self.server.token_plugin:
(self.server.target_host, self.server.target_port) = self.get_target(self.server.token_plugin, self.path)
if self.server.auth_plugin:
try:
self.server.auth_plugin.authenticate(
headers=self.headers, target_host=self.server.target_host,
target_port=self.server.target_port)
except auth.AuthenticationError:
ex = sys.exc_info()[1]
self.send_auth_error(ex)
raise
def new_websocket_client(self):
"""
Called after a new WebSocket connection has been established.
"""
# Checks if we receive a token, and look
# for a valid target for it then
if self.server.target_cfg:
(self.server.target_host, self.server.target_port) = self.get_target(self.server.target_cfg, self.path)
# Checking for a token is done in validate_connection()
# Connect to the target
if self.server.wrap_cmd:
@ -73,15 +93,15 @@ Traffic Legend:
if tsock:
tsock.shutdown(socket.SHUT_RDWR)
tsock.close()
if self.verbose:
if self.verbose:
self.log_message("%s:%s: Closed target",
self.server.target_host, self.server.target_port)
raise
def get_target(self, target_cfg, path):
def get_target(self, target_plugin, path):
"""
Parses the path, extracts a token, and looks for a valid
target for that token in the configuration file(s). Sets
Parses the path, extracts a token, and looks up a target
for that token using the token plugin. Sets
target_host and target_port if successful
"""
# The files in targets contain the lines
@ -90,32 +110,17 @@ Traffic Legend:
# Extract the token parameter from url
args = parse_qs(urlparse(path)[4]) # 4 is the query from url
if not args.has_key('token') or not len(args['token']):
raise self.EClose("Token not present")
if not 'token' in args or not len(args['token']):
raise self.server.EClose("Token not present")
token = args['token'][0].rstrip('\n')
# target_cfg can be a single config file or directory of
# config files
if os.path.isdir(target_cfg):
cfg_files = [os.path.join(target_cfg, f)
for f in os.listdir(target_cfg)]
result_pair = target_plugin.lookup(token)
if result_pair is not None:
return result_pair
else:
cfg_files = [target_cfg]
targets = {}
for f in cfg_files:
for line in [l.strip() for l in file(f).readlines()]:
if line and not line.startswith('#'):
ttoken, target = line.split(': ')
targets[ttoken] = target.strip()
self.vmsg("Target config: %s" % repr(targets))
if targets.has_key(token):
return targets[token].split(':')
else:
raise self.EClose("Token '%s' not found" % token)
raise self.server.EClose("Token '%s' not found" % token)
def do_proxy(self, target):
"""
@ -126,12 +131,37 @@ Traffic Legend:
tqueue = []
rlist = [self.request, target]
if self.server.heartbeat:
now = time.time()
self.heartbeat = now + self.server.heartbeat
else:
self.heartbeat = None
while True:
wlist = []
if self.heartbeat is not None:
now = time.time()
if now > self.heartbeat:
self.heartbeat = now + self.server.heartbeat
self.send_ping()
if tqueue: wlist.append(target)
if cqueue or c_pend: wlist.append(self.request)
ins, outs, excepts = select(rlist, wlist, [], 1)
try:
ins, outs, excepts = select.select(rlist, wlist, [], 1)
except (select.error, OSError):
exc = sys.exc_info()[1]
if hasattr(exc, 'errno'):
err = exc.errno
else:
err = exc[0]
if err != errno.EINTR:
raise
else:
continue
if excepts: raise Exception("Socket exception")
if self.request in outs:
@ -147,7 +177,7 @@ Traffic Legend:
if closed:
# TODO: What about blocking on client socket?
if self.verbose:
if self.verbose:
self.log_message("%s:%s: Client closed connection",
self.server.target_host, self.server.target_port)
raise self.CClose(closed['code'], closed['reason'])
@ -195,7 +225,11 @@ class WebSocketProxy(websocket.WebSocketServer):
self.wrap_mode = kwargs.pop('wrap_mode', None)
self.unix_target = kwargs.pop('unix_target', None)
self.ssl_target = kwargs.pop('ssl_target', None)
self.target_cfg = kwargs.pop('target_cfg', None)
self.heartbeat = kwargs.pop('heartbeat', None)
self.token_plugin = kwargs.pop('token_plugin', None)
self.auth_plugin = kwargs.pop('auth_plugin', None)
# Last 3 timestamps command was run
self.wrap_times = [0, 0, 0]
@ -251,9 +285,9 @@ class WebSocketProxy(websocket.WebSocketServer):
else:
dst_string = "%s:%s" % (self.target_host, self.target_port)
if self.target_cfg:
msg = " - proxying from %s:%s to targets in %s" % (
self.listen_host, self.listen_port, self.target_cfg)
if self.token_plugin:
msg = " - proxying from %s:%s to targets generated by %s" % (
self.listen_host, self.listen_port, type(self.token_plugin).__name__)
else:
msg = " - proxying from %s:%s to %s" % (
self.listen_host, self.listen_port, dst_string)
@ -352,20 +386,69 @@ def websockify_init():
parser.add_option("--prefer-ipv6", "-6",
action="store_true", dest="source_is_ipv6",
help="prefer IPv6 when resolving source_addr")
parser.add_option("--libserver", action="store_true",
help="use Python library SocketServer engine")
parser.add_option("--target-config", metavar="FILE",
dest="target_cfg",
help="Configuration file containing valid targets "
"in the form 'token: host:port' or, alternatively, a "
"directory containing configuration files of this form")
parser.add_option("--libserver", action="store_true",
help="use Python library SocketServer engine")
"directory containing configuration files of this form "
"(DEPRECATED: use `--token-plugin TokenFile --token-source "
" path/to/token/file` instead)")
parser.add_option("--token-plugin", default=None, metavar="PLUGIN",
help="use the given Python class to process tokens "
"into host:port pairs")
parser.add_option("--token-source", default=None, metavar="ARG",
help="an argument to be passed to the token plugin"
"on instantiation")
parser.add_option("--auth-plugin", default=None, metavar="PLUGIN",
help="use the given Python class to determine if "
"a connection is allowed")
parser.add_option("--auth-source", default=None, metavar="ARG",
help="an argument to be passed to the auth plugin"
"on instantiation")
parser.add_option("--auto-pong", action="store_true",
help="Automatically respond to ping frames with a pong")
parser.add_option("--heartbeat", type=int, default=0,
help="send a ping to the client every HEARTBEAT seconds")
parser.add_option("--log-file", metavar="FILE",
dest="log_file",
help="File where logs will be saved")
(opts, args) = parser.parse_args()
if opts.log_file:
opts.log_file = os.path.abspath(opts.log_file)
handler = logging.FileHandler(opts.log_file)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter("%(message)s"))
logging.getLogger(WebSocketProxy.log_prefix).addHandler(handler)
del opts.log_file
if opts.verbose:
logging.getLogger(WebSocketProxy.log_prefix).setLevel(logging.DEBUG)
if opts.token_source and not opts.token_plugin:
parser.error("You must use --token-plugin to use --token-source")
if opts.auth_source and not opts.auth_plugin:
parser.error("You must use --auth-plugin to use --auth-source")
# Transform to absolute path as daemon may chdir
if opts.target_cfg:
opts.target_cfg = os.path.abspath(opts.target_cfg)
if opts.target_cfg:
opts.token_plugin = 'TokenFile'
opts.token_source = opts.target_cfg
del opts.target_cfg
# Sanity checks
if len(args) < 2 and not (opts.target_cfg or opts.unix_target):
if len(args) < 2 and not (opts.token_plugin or opts.unix_target):
parser.error("Too few arguments")
if sys.argv.count('--'):
opts.wrap_cmd = args[1:]
@ -390,7 +473,7 @@ def websockify_init():
try: opts.listen_port = int(opts.listen_port)
except: parser.error("Error parsing listen port")
if opts.wrap_cmd or opts.unix_target or opts.target_cfg:
if opts.wrap_cmd or opts.unix_target or opts.token_plugin:
opts.target_host = None
opts.target_port = None
else:
@ -402,9 +485,32 @@ def websockify_init():
try: opts.target_port = int(opts.target_port)
except: parser.error("Error parsing target port")
# Transform to absolute path as daemon may chdir
if opts.target_cfg:
opts.target_cfg = os.path.abspath(opts.target_cfg)
if opts.token_plugin is not None:
if '.' not in opts.token_plugin:
opts.token_plugin = (
'websockify.token_plugins.%s' % opts.token_plugin)
token_plugin_module, token_plugin_cls = opts.token_plugin.rsplit('.', 1)
__import__(token_plugin_module)
token_plugin_cls = getattr(sys.modules[token_plugin_module], token_plugin_cls)
opts.token_plugin = token_plugin_cls(opts.token_source)
del opts.token_source
if opts.auth_plugin is not None:
if '.' not in opts.auth_plugin:
opts.auth_plugin = 'websockify.auth_plugins.%s' % opts.auth_plugin
auth_plugin_module, auth_plugin_cls = opts.auth_plugin.rsplit('.', 1)
__import__(auth_plugin_module)
auth_plugin_cls = getattr(sys.modules[auth_plugin_module], auth_plugin_cls)
opts.auth_plugin = auth_plugin_cls(opts.auth_source)
del opts.auth_source
# Create and start the WebSockets proxy
libserver = opts.libserver
@ -433,9 +539,13 @@ class LibProxyServer(ForkingMixIn, HTTPServer):
self.wrap_mode = kwargs.pop('wrap_mode', None)
self.unix_target = kwargs.pop('unix_target', None)
self.ssl_target = kwargs.pop('ssl_target', None)
self.target_cfg = kwargs.pop('target_cfg', None)
self.token_plugin = kwargs.pop('token_plugin', None)
self.auth_plugin = kwargs.pop('auth_plugin', None)
self.heartbeat = kwargs.pop('heartbeat', None)
self.token_plugin = None
self.auth_plugin = None
self.daemon = False
self.target_cfg = None
# Server configuration
listen_host = kwargs.pop('listen_host', '')
@ -456,8 +566,8 @@ class LibProxyServer(ForkingMixIn, HTTPServer):
if web:
os.chdir(web)
HTTPServer.__init__(self, (listen_host, listen_port),
HTTPServer.__init__(self, (listen_host, listen_port),
RequestHandlerClass)

View File

@ -108,12 +108,15 @@ class OpenNebula::ServerCipherAuth
end
# auth method for auth_mad
def authenticate(srv_user,srv_pass, signed_text)
def authenticate(srv_user, srv_pass, signed_text)
begin
# truncate token to 32-bytes for Ruby >= 2.4
@key = srv_pass[0..31]
s_user, t_user, expires = decrypt(signed_text).split(':')
token_array = decrypt(signed_text).split(':')
s_user = token_array[0]
expires = token_array[-1]
return "User name missmatch" if s_user != srv_user

View File

@ -87,7 +87,10 @@ class OpenNebula::ServerX509Auth < OpenNebula::X509Auth
# auth method for auth_mad
def authenticate(server_user, server_pass, signed_text)
begin
s_user, t_user, expires = decrypt(signed_text).split(':')
token_array = decrypt(signed_text).split(':')
s_user = token_array[0]
expires = token_array[-1]
return "Server password missmatch" if server_pass != password

View File

@ -303,7 +303,7 @@ module CLIHelper
begin
if options[:csv]
puts CSV.generate_line(@default_columns)
puts CSV.generate_line(@default_columns) if !options[:noheader]
res_data.each {|l| puts CSV.generate_line(l) }
else
res_data.each{|l|

View File

@ -516,11 +516,13 @@ EOT
# List pool functions
#-----------------------------------------------------------------------
def start_pager
pager = ENV['ONE_PAGER'] || 'less'
pager = ENV['ONE_PAGER'] || 'more'
# Start pager, defaults to less
p_r, p_w = IO.pipe
Signal.trap('PIPE', 'SIG_IGN')
lpid = fork do
$stdin.reopen(p_r)
@ -531,7 +533,7 @@ EOT
exec([pager, pager])
end
# Send listing to pager pipe
$stdout.close
$stdout = p_w.dup
@ -547,6 +549,9 @@ EOT
begin
Process.wait(lpid)
rescue Interrupt
Process.kill("TERM", lpid)
Process.wait(lpid)
rescue Errno::ECHILD
end
end
@ -558,12 +563,11 @@ EOT
elements = 0
page = ""
pool.each {|e|
elements += 1
pool.each {|e|
elements += 1
page << e.to_xml(true) << "\n"
}
else
pname = pool.pool_name
ename = pool.element_name
@ -585,8 +589,8 @@ EOT
# output
#-----------------------------------------------------------------------
def list_pool_table(table, pool, options, filter_flag)
if $stdout.isatty and (!options.key?:no_pager)
size = $stdout.winsize[0] - 1
if $stdout.isatty and (!options.key?:no_pager)
size = $stdout.winsize[0] - 1
# ----------- First page, check if pager is needed -------------
rc = pool.get_page(size, 0)
@ -662,8 +666,8 @@ EOT
# List pool in XML format, pagination is used in interactive output
#-----------------------------------------------------------------------
def list_pool_xml(pool, options, filter_flag)
if $stdout.isatty
size = $stdout.winsize[0] - 1
if $stdout.isatty
size = $stdout.winsize[0] - 1
# ----------- First page, check if pager is needed -------------
rc = pool.get_page(size, 0)

View File

@ -18,8 +18,10 @@ require 'one_helper'
require 'one_helper/onevm_helper'
require 'rubygems'
# implements onehost command
class OneHostHelper < OpenNebulaHelper::OneHelper
TEMPLATE_XPATH = '//HOST/TEMPLATE'
TEMPLATE_XPATH = '//HOST/TEMPLATE'
HYBRID = {
:ec2 => {
:help => <<-EOT.unindent,
@ -41,7 +43,7 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
# See your ec2_driver.conf for more information
#
#-----------------------------------------------------------------------
EOT
EOT
},
:az => {
:help => <<-EOT.unindent,
@ -67,110 +69,112 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
# AZ_ENDPOINT = <endpoint address>
#
#-----------------------------------------------------------------------
EOT
EOT
}
}
VERSION_XPATH = "#{TEMPLATE_XPATH}/VERSION"
VERSION_XPATH = "#{TEMPLATE_XPATH}/VERSION"
def self.rname
"HOST"
'HOST'
end
def self.conf_file
"onehost.yaml"
'onehost.yaml'
end
def self.state_to_str(id)
id = id.to_i
state_str = Host::HOST_STATES[id]
return Host::SHORT_HOST_STATES[state_str]
Host::SHORT_HOST_STATES[state_str]
end
def format_pool(options)
config_file = self.class.table_conf
table = CLIHelper::ShowTable.new(config_file, self) do
column :ID, "ONE identifier for Host", :size=>4 do |d|
d["ID"]
column :ID, 'ONE identifier for Host', :size => 4 do |d|
d['ID']
end
column :NAME, "Name of the Host", :left, :size=>15 do |d|
d["NAME"]
column :NAME, 'Name of the Host', :left, :size => 15 do |d|
d['NAME']
end
column :CLUSTER, "Name of the Cluster", :left, :size=>9 do |d|
OpenNebulaHelper.cluster_str(d["CLUSTER"])
column :CLUSTER, 'Name of the Cluster', :left, :size => 9 do |d|
OpenNebulaHelper.cluster_str(d['CLUSTER'])
end
column :TVM, "Total Virtual Machines allocated to the Host", :size=>3 do |d|
d["HOST_SHARE"]["RUNNING_VMS"] || 0
column :TVM, 'Total Virtual Machines allocated to the Host',
:size => 3 do |d|
d['HOST_SHARE']['RUNNING_VMS'] || 0
end
column :ZVM, "Number of Virtual Machine zombies", :size=>3 do |d|
d["TEMPLATE"]["TOTAL_ZOMBIES"] || 0
column :ZVM, 'Number of Virtual Machine zombies', :size => 3 do |d|
d['TEMPLATE']['TOTAL_ZOMBIES'] || 0
end
column :TCPU, "Total CPU percentage", :size=>4 do |d|
d["HOST_SHARE"]["MAX_CPU"] || 0
column :TCPU, 'Total CPU percentage', :size => 4 do |d|
d['HOST_SHARE']['MAX_CPU'] || 0
end
column :FCPU, "Free CPU percentage", :size=>4 do |d|
d["HOST_SHARE"]["MAX_CPU"].to_i-
d["HOST_SHARE"]["USED_CPU"].to_i rescue "-"
column :FCPU, 'Free CPU percentage', :size => 4 do |d|
d['HOST_SHARE']['MAX_CPU'].to_i -
d['HOST_SHARE']['USED_CPU'].to_i rescue '-'
end
column :ACPU, "Available cpu percentage (not reserved by VMs)",
:size=>4 do |d|
max_cpu=d["HOST_SHARE"]["MAX_CPU"].to_i
max_cpu=100 if max_cpu==0
max_cpu-d["HOST_SHARE"]["CPU_USAGE"].to_i
column :ACPU, 'Available cpu percentage (not reserved by VMs)',
:size => 4 do |d|
max_cpu = d['HOST_SHARE']['MAX_CPU'].to_i
max_cpu = 100 if max_cpu.zero?
max_cpu - d['HOST_SHARE']['CPU_USAGE'].to_i
end
column :TMEM, "Total Memory", :size=>7 do |d|
column :TMEM, 'Total Memory', :size => 7 do |d|
OpenNebulaHelper.unit_to_str(
d["HOST_SHARE"]["MAX_MEM"].to_i,
options) rescue "-"
d['HOST_SHARE']['MAX_MEM'].to_i,
options
) rescue '-'
end
column :FMEM, "Free Memory", :size=>7 do |d|
column :FMEM, 'Free Memory', :size => 7 do |d|
OpenNebulaHelper.unit_to_str(
d["HOST_SHARE"]["FREE_MEM"].to_i,
options) rescue "-"
d['HOST_SHARE']['FREE_MEM'].to_i,
options
) rescue '-'
end
column :AMEM, "Available Memory (not reserved by VMs)",
:size=>7 do |d|
acpu=d["HOST_SHARE"]["MAX_MEM"].to_i-
d["HOST_SHARE"]["MEM_USAGE"].to_i
OpenNebulaHelper.unit_to_str(acpu,options)
column :AMEM, 'Available Memory (not reserved by VMs)',
:size => 7 do |d|
acpu = d['HOST_SHARE']['MAX_MEM'].to_i -
d['HOST_SHARE']['MEM_USAGE'].to_i
OpenNebulaHelper.unit_to_str(acpu, options)
end
column :REAL_CPU, "Real CPU", :size=>18 do |d|
max_cpu = d["HOST_SHARE"]["MAX_CPU"].to_i
column :REAL_CPU, 'Real CPU', :size => 18 do |d|
max_cpu = d['HOST_SHARE']['MAX_CPU'].to_i
if max_cpu != 0
used_cpu = d["HOST_SHARE"]["USED_CPU"].to_i
ratio = (used_cpu*100) / max_cpu
used_cpu = d['HOST_SHARE']['USED_CPU'].to_i
ratio = (used_cpu * 100) / max_cpu
"#{used_cpu} / #{max_cpu} (#{ratio}%)"
else
'-'
end
end
column :ALLOCATED_CPU, "Allocated CPU)", :size=>18 do |d|
max_cpu = d["HOST_SHARE"]["MAX_CPU"].to_i
cpu_usage = d["HOST_SHARE"]["CPU_USAGE"].to_i
column :ALLOCATED_CPU, 'Allocated CPU)', :size => 18 do |d|
max_cpu = d['HOST_SHARE']['MAX_CPU'].to_i
cpu_usage = d['HOST_SHARE']['CPU_USAGE'].to_i
if max_cpu == 0 && cpu_usage == 0
if max_cpu.zero? && cpu_usage.zero?
'-'
else
cpu_usage = d["HOST_SHARE"]["CPU_USAGE"].to_i
cpu_usage = d['HOST_SHARE']['CPU_USAGE'].to_i
if max_cpu != 0
ratio = (cpu_usage*100) / max_cpu
ratio = (cpu_usage * 100) / max_cpu
"#{cpu_usage} / #{max_cpu} (#{ratio}%)"
else
"#{cpu_usage} / -"
@ -178,43 +182,46 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
end
end
column :REAL_MEM, "Real MEM", :size=>18 do |d|
max_mem = d["HOST_SHARE"]["MAX_MEM"].to_i
column :REAL_MEM, 'Real MEM', :size => 18 do |d|
max_mem = d['HOST_SHARE']['MAX_MEM'].to_i
if max_mem != 0
used_mem = d["HOST_SHARE"]["USED_MEM"].to_i
ratio = (used_mem*100) / max_mem
"#{OpenNebulaHelper.unit_to_str(used_mem,options)} / #{OpenNebulaHelper.unit_to_str(max_mem,options)} (#{ratio}%)"
used_mem = d['HOST_SHARE']['USED_MEM'].to_i
ratio = (used_mem * 100) / max_mem
"#{OpenNebulaHelper.unit_to_str(used_mem, options)} / "\
"#{OpenNebulaHelper.unit_to_str(max_mem, options)} "\
"(#{ratio}%)"
else
'-'
end
end
column :ALLOCATED_MEM, "Allocated MEM", :size=>18 do |d|
max_mem = d["HOST_SHARE"]["MAX_MEM"].to_i
mem_usage = d["HOST_SHARE"]["MEM_USAGE"].to_i
column :ALLOCATED_MEM, 'Allocated MEM', :size => 18 do |d|
max_mem = d['HOST_SHARE']['MAX_MEM'].to_i
mem_usage = d['HOST_SHARE']['MEM_USAGE'].to_i
if max_mem == 0 && mem_usage == 0
if max_mem.zero? && mem_usage.zero?
'-'
elsif max_mem != 0
ratio = (mem_usage * 100) / max_mem
"#{OpenNebulaHelper.unit_to_str(mem_usage, options)} / "\
"#{OpenNebulaHelper.unit_to_str(max_mem, options)} "\
"(#{ratio}%)"
else
if max_mem != 0
ratio = (mem_usage*100) / max_mem
"#{OpenNebulaHelper.unit_to_str(mem_usage,options)} / #{OpenNebulaHelper.unit_to_str(max_mem,options)} (#{ratio}%)"
else
"#{OpenNebulaHelper.unit_to_str(mem_usage,options)} / -"
end
"#{OpenNebulaHelper.unit_to_str(mem_usage, options)} / -"
end
end
column :PROVIDER, "Host provider", :left, :size=>6 do |d|
column :PROVIDER, "Host provider", :left, :size => 6 do |d|
d['TEMPLATE']['PM_MAD'].nil? ? '-' : d['TEMPLATE']['PM_MAD']
end
column :STAT, "Host status", :left, :size=>6 do |d|
column :STAT, "Host status", :left, :size => 6 do |d|
OneHostHelper.state_to_str(d["STATE"])
end
default :ID, :NAME, :CLUSTER, :TVM, :ALLOCATED_CPU, :ALLOCATED_MEM, :STAT
default :ID, :NAME, :CLUSTER, :TVM,
:ALLOCATED_CPU, :ALLOCATED_MEM, :STAT
end
table
@ -222,34 +229,37 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
def set_hybrid(type, path)
k = type.to_sym
if HYBRID.key?(k)
str = path.nil? ? OpenNebulaHelper.editor_input(HYBRID[k][:help]): File.read(path)
end
return unless HYBRID.key?(k)
return OpenNebulaHelper.editor_input(HYBRID[k][:help]) if path.nil?
File.read(path)
end
NUM_THREADS = 15
def sync(host_ids, options)
if `id -u`.to_i == 0 || `id -G`.split.collect{|e| e.to_i}.include?(0)
if Process.uid.zero? || Process.gid.zero?
STDERR.puts("Cannot run 'onehost sync' as root")
exit -1
exit(-1)
end
begin
current_version = File.read(REMOTES_LOCATION+'/VERSION').strip
rescue
current_version = File.read(REMOTES_LOCATION + '/VERSION').strip
rescue StandardError
STDERR.puts("Could not read #{REMOTES_LOCATION}/VERSION")
exit(-1)
end
if current_version.empty?
STDERR.puts "Remotes version can not be empty"
STDERR.puts 'Remotes version can not be empty'
exit(-1)
end
begin
current_version = Gem::Version.new(current_version)
rescue
STDERR.puts "VERSION file is malformed, use semantic versioning."
rescue StandardError
STDERR.puts 'VERSION file is malformed, use semantic versioning.'
end
cluster_id = options[:cluster]
@ -263,13 +273,13 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
# Verify the existence of REMOTES_LOCATION
if !File.directory? REMOTES_LOCATION
error_msg = "'#{REMOTES_LOCATION}' does not exist. " <<
"This command must be run in the frontend."
return -1,error_msg
error_msg = "'#{REMOTES_LOCATION}' does not exist. " \
'This command must be run in the frontend.'
return -1, error_msg
end
# Touch the update file
FileUtils.touch(File.join(REMOTES_LOCATION,'.update'))
FileUtils.touch(File.join(REMOTES_LOCATION, '.update'))
# Get the Host pool
filter_flag ||= OpenNebula::Pool::INFO_ALL
@ -280,18 +290,16 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
return -1, rc.message if OpenNebula.is_error?(rc)
# Assign hosts to threads
i = 0
queue = Array.new
queue = []
pool.each do |host|
if host_ids
next if !host_ids.include?(host['ID'].to_i)
next unless host_ids.include?(host['ID'].to_i)
elsif cluster_id
next if host['CLUSTER_ID'].to_i != cluster_id
end
vm_mad = host['VM_MAD'].downcase
remote_remotes = host['TEMPLATE/REMOTE_REMOTES']
state = host['STATE']
# Skip this host from remote syncing if it's a PUBLIC_CLOUD host
@ -301,20 +309,22 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
next if Host::HOST_STATES[state.to_i] == 'OFFLINE'
# Skip this host if it is a vCenter cluster
next if vm_mad == "vcenter"
next if vm_mad == 'vcenter'
host_version=host['TEMPLATE/VERSION']
host_version = host['TEMPLATE/VERSION']
begin
host_version = Gem::Version.new(host_version)
rescue
rescue StandardError
nil
end
if !options[:force]
begin
next if host_version && host_version >= current_version
rescue
STDERR.puts "Error comparing versions for host #{host['NAME']}."
rescue StandardError
STDERR.puts 'Error comparing versions '\
" for host #{host['NAME']}."
end
end
@ -324,45 +334,45 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
end
# Run the jobs in threads
host_errors = Array.new
host_errors = []
queue_lock = Mutex.new
error_lock = Mutex.new
total = queue.length
if total==0
puts "No hosts are going to be updated."
if total.zero?
puts 'No hosts are going to be updated.'
exit(0)
end
ts = (1..NUM_THREADS).map do |t|
ts = (1..NUM_THREADS).map do |_t|
Thread.new do
while true do
loop do
host = nil
size = 0
queue_lock.synchronize do
host=queue.shift
size=queue.length
host = queue.shift
size = queue.length
end
break if !host
break unless host
print_update_info(total-size, total, host['NAME'])
print_update_info(total - size, total, host['NAME'])
if options[:rsync]
sync_cmd = "rsync -Laz --delete #{REMOTES_LOCATION}" <<
" #{host['NAME']}:#{remote_dir}"
sync_cmd = "rsync -Laz --delete #{REMOTES_LOCATION}" \
" #{host['NAME']}:#{remote_dir}"
else
sync_cmd = "scp -rp #{REMOTES_LOCATION}/. " <<
"#{host['NAME']}:#{remote_dir} 2> /dev/null"
sync_cmd = "scp -rp #{REMOTES_LOCATION}/. " \
"#{host['NAME']}:#{remote_dir} 2> /dev/null"
end
`#{sync_cmd} 2>/dev/null`
if !$?.success?
error_lock.synchronize {
if !$CHILD_STATUS.success?
error_lock.synchronize do
host_errors << host['NAME']
}
end
else
update_version(host, current_version)
end
@ -371,16 +381,108 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
end
# Wait for threads to finish
ts.each{|t| t.join}
ts.each {|t| t.join }
puts
if host_errors.empty?
puts "All hosts updated successfully."
puts 'All hosts updated successfully.'
0
else
STDERR.puts "Failed to update the following hosts:"
host_errors.each{|h| STDERR.puts "* #{h}"}
STDERR.puts 'Failed to update the following hosts:'
host_errors.each {|h| STDERR.puts "* #{h}" }
-1
end
end
# Forces an immediate monitoring update on a set of hosts.
#
# For every eligible host it sends, over SSH, a SIGHUP to the
# collectd client process (whose PID is read from
# /tmp/one-collectd-client.pid on the remote host), which triggers a
# fresh probe run. Work is distributed over NUM_THREADS threads.
#
# @param host_ids [Array<Integer>, nil] explicit host IDs to update;
#   when nil, hosts are selected by cluster (if given) or all hosts
# @param options  [Hash] CLI options; only :cluster is used here
#
# @return [Integer] 0 on success, -1 if any host failed
#   (may also return [-1, message] if the pool cannot be read,
#   and exits the process early on precondition failures)
def forceupdate(host_ids, options)
    # Refuse to run as root: the SSH session must use the oneadmin
    # account/keys, not root's.
    if Process.uid.zero? || Process.gid.zero?
        STDERR.puts("Cannot run 'onehost forceupdate' as root")
        exit(-1)
    end

    cluster_id = options[:cluster]

    # Get the Host pool
    filter_flag ||= OpenNebula::Pool::INFO_ALL

    pool = factory_pool(filter_flag)

    rc = pool.info
    return -1, rc.message if OpenNebula.is_error?(rc)

    # Build the work queue, applying the same eligibility rules as
    # `onehost sync`
    queue = []

    pool.each do |host|
        if host_ids
            next unless host_ids.include?(host['ID'].to_i)
        elsif cluster_id
            next if host['CLUSTER_ID'].to_i != cluster_id
        end

        vm_mad = host['VM_MAD'].downcase
        state = host['STATE']

        # Skip this host from remote syncing if it's a PUBLIC_CLOUD host
        next if host['TEMPLATE/PUBLIC_CLOUD'] == 'YES'

        # Skip this host from remote syncing if it's OFFLINE
        next if Host::HOST_STATES[state.to_i] == 'OFFLINE'

        # Skip this host if it is a vCenter cluster
        next if vm_mad == 'vcenter'

        queue << host
    end

    # Run the jobs in threads
    host_errors = []
    queue_lock = Mutex.new
    error_lock = Mutex.new
    total = queue.length

    if total.zero?
        puts 'No hosts are going to be forced.'
        exit(0)
    end

    ts = (1..NUM_THREADS).map do |_t|
        Thread.new do
            loop do
                host = nil

                # Pop the next host under the queue lock
                queue_lock.synchronize do
                    host = queue.shift
                end

                break unless host

                cmd = 'cat /tmp/one-collectd-client.pid | xargs kill -HUP'
                system("ssh #{host['NAME']} \"#{cmd}\" 2>/dev/null")

                # $CHILD_STATUS reflects the exit status of the ssh
                # command above (requires `require 'English'`, provided
                # by the CLI environment)
                if !$CHILD_STATUS.success?
                    error_lock.synchronize do
                        host_errors << host['NAME']
                    end
                else
                    puts "#{host['NAME']} monitoring forced"
                end
            end
        end
    end

    # Wait for threads to finish
    ts.each {|t| t.join }

    if host_errors.empty?
        puts 'All hosts updated successfully.'
        0
    else
        STDERR.puts 'Failed to update the following hosts:'
        host_errors.each {|h| STDERR.puts "* #{h}" }
        -1
    end
end
@ -388,22 +490,22 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
private
def print_update_info(current, total, host)
bar_length=40
bar_length = 40
percentage=current.to_f/total.to_f
done=(percentage*bar_length).floor
percentage = current.to_f / total.to_f
done = (percentage * bar_length).floor
bar="["
bar+="="*done
bar+="-"*(bar_length-done)
bar+="]"
bar = '['
bar += '=' * done
bar += '-' * (bar_length - done)
bar += ']'
info="#{current}/#{total}"
info = "#{current}/#{total}"
str="#{bar} #{info} "
name=host[0..(79-str.length)]
str=str+name
str=str+" "*(80-str.length)
str = "#{bar} #{info} "
name = host[0..(79 - str.length)]
str += name
str += ' ' * (80 - str.length)
print "#{str}\r"
STDOUT.flush
@ -416,54 +518,65 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
host.add_element(TEMPLATE_XPATH, 'VERSION' => version)
template=host.template_str
template = host.template_str
host.update(template)
end
def factory(id=nil)
def factory(id = nil)
if id
OpenNebula::Host.new_with_id(id, @client)
else
xml=OpenNebula::Host.build_xml
xml = OpenNebula::Host.build_xml
OpenNebula::Host.new(xml, @client)
end
end
def factory_pool(user_flag=-2)
#TBD OpenNebula::HostPool.new(@client, user_flag)
def factory_pool(_user_flag = -2)
# TBD OpenNebula::HostPool.new(@client, user_flag)
OpenNebula::HostPool.new(@client)
end
def format_resource(host, options = {})
str = "%-22s: %-20s"
str_h1 = "%-80s"
def format_resource(host, _options = {})
str = '%-22s: %-20s'
str_h1 = '%-80s'
CLIHelper.print_header(
str_h1 % "HOST #{host.id.to_s} INFORMATION", true)
str_h1 % "HOST #{host.id} INFORMATION", true
)
puts str % ["ID", host.id.to_s]
puts str % ["NAME", host.name]
puts str % ["CLUSTER", OpenNebulaHelper.cluster_str(host['CLUSTER'])]
puts str % ["STATE", host.state_str]
puts str % ["IM_MAD", host['IM_MAD']]
puts str % ["VM_MAD", host['VM_MAD']]
puts str % ["LAST MONITORING TIME", OpenNebulaHelper.time_to_str(host['LAST_MON_TIME'])]
puts format(str, 'ID', host.id.to_s)
puts format(str, 'NAME', host.name)
puts format(str, 'CLUSTER',
OpenNebulaHelper.cluster_str(host['CLUSTER']))
puts format(str, 'STATE', host.state_str)
puts format(str, 'IM_MAD', host['IM_MAD'])
puts format(str, 'VM_MAD', host['VM_MAD'])
puts format(str, 'LAST MONITORING TIME',
OpenNebulaHelper.time_to_str(host['LAST_MON_TIME']))
puts
CLIHelper.print_header(str_h1 % "HOST SHARES", false)
puts str % ["RUNNING VMS", host['HOST_SHARE/RUNNING_VMS']]
CLIHelper.print_header(str_h1 % 'HOST SHARES', false)
puts format(str, 'RUNNING VMS', host['HOST_SHARE/RUNNING_VMS'])
CLIHelper.print_header(str_h1 % "MEMORY", false)
puts str % [" TOTAL", OpenNebulaHelper.unit_to_str(host['HOST_SHARE/TOTAL_MEM'].to_i, {})]
puts str % [" TOTAL +/- RESERVED", OpenNebulaHelper.unit_to_str(host['HOST_SHARE/MAX_MEM'].to_i, {})]
puts str % [" USED (REAL)", OpenNebulaHelper.unit_to_str(host['HOST_SHARE/USED_MEM'].to_i, {})]
puts str % [" USED (ALLOCATED)", OpenNebulaHelper.unit_to_str(host['HOST_SHARE/MEM_USAGE'].to_i, {})]
CLIHelper.print_header(str_h1 % 'MEMORY', false)
puts format(str, ' TOTAL',
OpenNebulaHelper.unit_to_str(host['HOST_SHARE/TOTAL_MEM']
.to_i, {}))
puts format(str, ' TOTAL +/- RESERVED',
OpenNebulaHelper.unit_to_str(host['HOST_SHARE/MAX_MEM']
.to_i, {}))
puts format(str, ' USED (REAL)',
OpenNebulaHelper.unit_to_str(host['HOST_SHARE/USED_MEM']
.to_i, {}))
puts format(str, ' USED (ALLOCATED)',
OpenNebulaHelper.unit_to_str(host['HOST_SHARE/MEM_USAGE']
.to_i, {}))
CLIHelper.print_header(str_h1 % "CPU", false)
puts str % [" TOTAL", host['HOST_SHARE/TOTAL_CPU']]
puts str % [" TOTAL +/- RESERVED", host['HOST_SHARE/MAX_CPU']]
puts str % [" USED (REAL)", host['HOST_SHARE/USED_CPU']]
puts str % [" USED (ALLOCATED)", host['HOST_SHARE/CPU_USAGE']]
CLIHelper.print_header(str_h1 % 'CPU', false)
puts format(str, ' TOTAL', host['HOST_SHARE/TOTAL_CPU'])
puts format(str, ' TOTAL +/- RESERVED', host['HOST_SHARE/MAX_CPU'])
puts format(str, ' USED (REAL)', host['HOST_SHARE/USED_CPU'])
puts format(str, ' USED (ALLOCATED)', host['HOST_SHARE/CPU_USAGE'])
puts
datastores = host.to_hash['HOST']['HOST_SHARE']['DATASTORES']['DS']
@ -475,26 +588,34 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
end
datastores.each do |datastore|
CLIHelper.print_header(str_h1 % "LOCAL SYSTEM DATASTORE ##{datastore['ID']} CAPACITY", false)
puts str % ["TOTAL:", OpenNebulaHelper.unit_to_str(datastore['TOTAL_MB'].to_i, {},'M')]
puts str % ["USED: ", OpenNebulaHelper.unit_to_str(datastore['USED_MB'].to_i, {},'M')]
puts str % ["FREE:", OpenNebulaHelper.unit_to_str(datastore['FREE_MB'].to_i, {},'M')]
CLIHelper.print_header(str_h1 %
'LOCAL SYSTEM DATASTORE '\
"##{datastore['ID']} CAPACITY", false)
puts format(str, 'TOTAL:',
OpenNebulaHelper.unit_to_str(datastore['TOTAL_MB']
.to_i, {}, 'M'))
puts format(str, 'USED: ',
OpenNebulaHelper.unit_to_str(datastore['USED_MB']
.to_i, {}, 'M'))
puts format(str, 'FREE:',
OpenNebulaHelper.unit_to_str(datastore['FREE_MB']
.to_i, {}, 'M'))
puts
end
CLIHelper.print_header(str_h1 % "MONITORING INFORMATION", false)
CLIHelper.print_header(str_h1 % 'MONITORING INFORMATION', false)
wilds = host.wilds
begin
pcis = [host.to_hash['HOST']['HOST_SHARE']['PCI_DEVICES']['PCI']]
pcis = pcis.flatten.compact
rescue
rescue StandardError
pcis = nil
end
host.delete_element("TEMPLATE/VM")
host.delete_element("TEMPLATE_WILDS")
host.delete_element('TEMPLATE/VM')
host.delete_element('TEMPLATE_WILDS')
puts host.template_str
@ -503,81 +624,85 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
end
puts
CLIHelper.print_header("WILD VIRTUAL MACHINES", false)
CLIHelper.print_header('WILD VIRTUAL MACHINES', false)
puts
format = "%-30.30s %36s %4s %10s"
CLIHelper.print_header(format % ["NAME", "IMPORT_ID", "CPU", "MEMORY"],
format = '%-30.30s %36s %4s %10s'
CLIHelper.print_header(format(format, 'NAME',
'IMPORT_ID', 'CPU', 'MEMORY'),
true)
wilds.each do |wild|
if wild['IMPORT_TEMPLATE']
wild_tmplt = Base64::decode64(wild['IMPORT_TEMPLATE']).split("\n")
name = wild['VM_NAME']
import = wild_tmplt.select { |line|
line[/^IMPORT_VM_ID/]
}[0].split("=")[1].gsub("\"", " ").strip
memory = wild_tmplt.select { |line|
line[/^MEMORY/]
}[0].split("=")[1].gsub("\"", " ").strip
cpu = wild_tmplt.select { |line|
line[/^CPU/]
}[0].split("=")[1].gsub("\"", " ").strip
else
name = wild['DEPLOY_ID']
import = memory = cpu = "-"
end
if wild['IMPORT_TEMPLATE']
wild_tmplt = Base64.decode64(wild['IMPORT_TEMPLATE'])
.split("\n")
name = wild['VM_NAME']
import = wild_tmplt.select do |line|
line[/^IMPORT_VM_ID/]
end[0].split('=')[1].tr('"', ' ').strip
memory = wild_tmplt.select do |line|
line[/^MEMORY/]
end[0].split('=')[1].tr('"', ' ').strip
cpu = wild_tmplt.select do |line|
line[/^CPU/]
end[0].split('=')[1].tr('"', ' ').strip
else
name = wild['DEPLOY_ID']
import = memory = cpu = '-'
end
puts format % [name, import, cpu, memory]
puts format(format, name, import, cpu, memory)
end
puts
CLIHelper.print_header("VIRTUAL MACHINES", false)
CLIHelper.print_header('VIRTUAL MACHINES', false)
puts
onevm_helper=OneVMHelper.new
onevm_helper.client=@client
onevm_helper.list_pool({:filter=>["HOST=#{host.name}"], :no_pager => true}, false)
onevm_helper = OneVMHelper.new
onevm_helper.client = @client
onevm_helper.list_pool({ :filter => ["HOST=#{host.name}"],
:no_pager => true },
false)
end
def print_pcis(pcis)
puts
CLIHelper.print_header("PCI DEVICES", false)
CLIHelper.print_header('PCI DEVICES', false)
puts
table=CLIHelper::ShowTable.new(nil, self) do
column :VM, "Used by VM", :size => 5, :left => false do |d|
if d["VMID"] == "-1"
""
table = CLIHelper::ShowTable.new(nil, self) do
column :VM, 'Used by VM', :size => 5, :left => false do |d|
if d['VMID'] == '-1'
''
else
d["VMID"]
d['VMID']
end
end
column :ADDR, "PCI Address", :size => 7, :left => true do |d|
d["SHORT_ADDRESS"]
column :ADDR, 'PCI Address', :size => 7, :left => true do |d|
d['SHORT_ADDRESS']
end
column :TYPE, "Type", :size => 14, :left => true do |d|
d["TYPE"]
column :TYPE, 'Type', :size => 14, :left => true do |d|
d['TYPE']
end
column :CLASS, "Class", :size => 12, :left => true do |d|
d["CLASS_NAME"]
column :CLASS, 'Class', :size => 12, :left => true do |d|
d['CLASS_NAME']
end
column :NAME, "Name", :size => 50, :left => true do |d|
d["DEVICE_NAME"]
column :NAME, 'Name', :size => 50, :left => true do |d|
d['DEVICE_NAME']
end
column :VENDOR, "Vendor", :size => 8, :left => true do |d|
d["VENDOR_NAME"]
column :VENDOR, 'Vendor', :size => 8, :left => true do |d|
d['VENDOR_NAME']
end
default :VM, :ADDR, :TYPE, :NAME
end
table.show(pcis)
end
end

View File

@ -198,6 +198,13 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
:format => String
}
SEARCH = {
:name => "search",
:large => "--search search",
:description=> "query in KEY=VALUE format",
:format => String
}
def self.rname
"VM"
end

View File

@ -285,4 +285,17 @@ CommandParser::CmdParser.new(ARGV) do
o.import_wild(args[1])
end
end
forceupdate_desc = <<-EOT.unindent
Forces host monitoring update
Examples:
onehost forceupdate host01
onehost forceupdate host01,host02,host03
onehost forceupdate -c myCluster
EOT
command :forceupdate, forceupdate_desc, [:range, :hostid_list, nil],
:options => [OneClusterHelper::CLUSTER] do
helper.forceupdate(args[0], options)
end
end

View File

@ -557,8 +557,10 @@ CommandParser::CmdParser.new(ARGV) do
EOT
command :migrate, migrate_desc, [:range, :vmid_list], :hostid,
[:datastoreid, nil], :options => [ENFORCE, OneVMHelper::LIVE,
OneVMHelper::POFF, OneVMHelper::POFFHARD] do
[:datastoreid, nil], :options => [ENFORCE,
OneVMHelper::LIVE,
OneVMHelper::POFF,
OneVMHelper::POFFHARD] do
host_id = args[1]
verbose = "migrating to #{host_id}"
@ -795,7 +797,8 @@ CommandParser::CmdParser.new(ARGV) do
if ip
if !nic_alias && !nic_name
template = "NIC = [ NETWORK_ID = #{network_id}, IP = #{ip} ]"
template = "NIC = [ NETWORK_ID = #{network_id}, \
IP = #{ip} ]"
elsif !nic_alias && nic_name
template = "NIC = [ NETWORK_ID = #{network_id},
IP = #{ip},
@ -1044,8 +1047,28 @@ CommandParser::CmdParser.new(ARGV) do
command :list, list_desc, [:filterflag, nil],
:options => CLIHelper::OPTIONS + OpenNebulaHelper::OPTIONS +
[OpenNebulaHelper::DESCRIBE] do
helper.list_pool(options, false, args[0])
[OpenNebulaHelper::DESCRIBE] + [OneVMHelper::SEARCH] do
if !options[:search]
helper.list_pool(options, false, args[0])
else
table = helper.format_pool(options)
pool = OpenNebula::VirtualMachinePool.new(OneVMHelper.get_client)
rc = pool.info_search(:query => options[:search])
if !rc.nil?
puts rc.message
exit(-1)
end
if options[:xml]
puts pool.to_xml
else
table.show(pool.to_hash, options)
end
0
end
end
show_desc = <<-EOT.unindent
@ -1128,11 +1151,12 @@ CommandParser::CmdParser.new(ARGV) do
This command accepts a template file or opens an editor, the full list of
configuration attributes are:
OS = ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT"]
FEATURES = ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"]
INPUT = ["TYPE", "BUS"]
GRAPHICS = ["TYPE", "LISTEN", "PASSWD", "KEYMAP" ]
RAW = ["DATA", "DATA_VMX", "TYPE"]
OS = ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT"]
FEATURES = ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"]
INPUT = ["TYPE", "BUS"]
GRAPHICS = ["TYPE", "LISTEN", "PASSWD", "KEYMAP" ]
RAW = ["DATA", "DATA_VMX", "TYPE"]
CPU_MODEL = ["MODEL"]
CONTEXT (any value, **variable substitution will be made**)
EOT
@ -1153,7 +1177,8 @@ CommandParser::CmdParser.new(ARGV) do
template = vm.template_like_str('TEMPLATE', true,
'OS | FEATURES | INPUT | '\
'GRAPHICS | RAW | CONTEXT')
'GRAPHICS | RAW | CONTEXT | '\
'CPU_MODEL')
template = OpenNebulaHelper.editor_input(template)
end

View File

@ -30,7 +30,6 @@ $LOAD_PATH << RUBY_LIB_LOCATION + '/cli'
require 'command_parser'
require 'one_helper/onevntemplate_helper'
require 'one_helper/onevnet_helper'
require 'pry'
CommandParser::CmdParser.new(ARGV) do
usage '`onevntemplate` <command> [<args>] [<options>]'

View File

@ -31,6 +31,7 @@ module OpenNebulaCloudAuth
#
def do_auth(env, params={})
auth = Rack::Auth::Basic::Request.new(env)
if auth.provided? && auth.basic?
username, password = auth.credentials
authenticated = false
@ -63,11 +64,14 @@ module OpenNebulaCloudAuth
end
username = parser.escape(username)
password = parser.escape(password)
client = OpenNebula::Client.new("#{username}:#{password}", @conf[:one_xmlrpc])
epassword = parser.escape(password)
client = OpenNebula::Client.new("#{username}:#{epassword}", @conf[:one_xmlrpc])
user = OpenNebula::User.new_with_id(OpenNebula::User::SELF, client)
rc = user.info
end
if OpenNebula.is_error?(rc)
if logger
logger.error{ "User #{username} could not be authenticated"}
@ -77,7 +81,14 @@ module OpenNebulaCloudAuth
return nil
end
return user.name
# Check if the user authenticated with a scoped token. In this case
# encode the EGID in the username as "user:egid"
egid = user["//LOGIN_TOKEN [ TOKEN = \"#{password}\" ]/EGID"]
auth_name = user.name
auth_name = "#{auth_name}:#{egid}" if egid
return auth_name
end
return nil

View File

@ -50,7 +50,7 @@ end
module CloudClient
# OpenNebula version
VERSION = '5.7.85'
VERSION = '5.7.90'
# #########################################################################
# Default location for the authentication file

View File

@ -65,25 +65,9 @@ string * VectorAttribute::marshall(const char * _sep) const
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
string * VectorAttribute::to_xml() const
{
ostringstream oss;
to_xml(oss);
string * xml = new string;
*xml = oss.str();
return xml;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void VectorAttribute::to_xml(ostringstream &oss) const
{
map<string,string>::const_iterator it;
map<string,string>::const_iterator it;
oss << "<" << name() << ">";
@ -102,6 +86,60 @@ void VectorAttribute::to_xml(ostringstream &oss) const
oss << "</"<< name() << ">";
}
/**
 * Renders the vector attribute as a JSON object: each (key, value)
 * pair becomes a "key": "value" member. Values are escaped with
 * one_util::escape_json; entries with an empty key are skipped.
 * An empty attribute renders as "{}".
 *
 * @param s output stream the JSON text is appended to
 */
void VectorAttribute::to_json(std::ostringstream& s) const
{
    if ( attribute_value.empty() )
    {
        s << "{}";
        return;
    }

    map<string,string>::const_iterator it;

    bool is_first = true;

    s << "{";

    // Iterate from begin(): the previous version pre-incremented the
    // iterator in the for-init clause, silently dropping the first
    // map entry from the output.
    for (it = attribute_value.begin(); it != attribute_value.end(); ++it)
    {
        // Keys must be non-empty to form a valid JSON member name
        if ( it->first.empty() )
        {
            continue;
        }

        if ( !is_first )
        {
            s << ",";
        }
        else
        {
            is_first = false;
        }

        s << "\"" << it->first << "\": ";

        one_util::escape_json(it->second, s);
    }

    s << "}";
}
void VectorAttribute::to_token(std::ostringstream& s) const
{
map<string,string>::const_iterator it;
for (it=attribute_value.begin(); it!=attribute_value.end(); it++)
{
if (it->first.empty() || it->second.empty())
{
continue;
}
one_util::escape_token(it->first, s);
s << "=";
one_util::escape_token(it->second, s);
s << std::endl;
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -380,6 +380,57 @@ std::string one_util::gsub(const std::string& st, const std::string& sfind,
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
namespace one_util
{

/**
 * Writes str to s as a JSON string literal: the text is wrapped in
 * double quotes, and the characters ", \, /, and the control
 * characters \b \f \n \r \t are backslash-escaped. All other
 * characters are copied verbatim.
 *
 * @param str text to escape
 * @param s   output stream the quoted literal is appended to
 */
void escape_json(const std::string& str, std::ostringstream& s)
{
    s << "\"";

    for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
    {
        char c = *it;

        if (c == '\\')
        {
            s << "\\\\";
        }
        else if (c == '"')
        {
            s << "\\\"";
        }
        else if (c == '/')
        {
            s << "\\/";
        }
        else if (c == '\b')
        {
            s << "\\b";
        }
        else if (c == '\f')
        {
            s << "\\f";
        }
        else if (c == '\n')
        {
            s << "\\n";
        }
        else if (c == '\r')
        {
            s << "\\r";
        }
        else if (c == '\t')
        {
            s << "\\t";
        }
        else
        {
            s << c;
        }
    }

    s << "\"";
}

} // namespace one_util
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
namespace one_util
{

/**
 * Writes str to s with the characters '-', '_', '.' and ':'
 * replaced by '_', so the result is safe to use as a token name.
 * Every other character is copied unchanged.
 *
 * @param str text to escape
 * @param s   output stream the escaped text is appended to
 */
void escape_token(const std::string& str, std::ostringstream& s)
{
    // Characters that would break the token syntax
    static const std::string separators = "-_.:";

    for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
    {
        if (separators.find(*it) != std::string::npos)
        {
            s << '_';
        }
        else
        {
            s << *it;
        }
    }
}

} // namespace one_util
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
namespace one_util
{
template<>

View File

@ -451,22 +451,24 @@ function check_restricted {
}
#-------------------------------------------------------------------------------
# Filter out hosts which are not ON
# Filter out hosts which are OFF, ERROR or DISABLED
# @param $1 - space separated list of hosts
# @return - space separated list of hosts which are in ON state
# @return - space separated list of hosts which are not in OFF, ERROR or
# DISABLED sate
#-------------------------------------------------------------------------------
function get_only_on_hosts {
INPUT_ARRAY=($1)
function remove_off_hosts {
ALL_HOSTS_ARRAY=($1)
OFF_HOSTS_STR=$(onehost list --no-pager --csv \
--filter="STAT=off,STAT=err,STAT=dsbl" --list=NAME,STAT 2>/dev/null)
ONEHOST_LIST_ON_CMD='onehost list --no-pager --csv --filter="STAT=on" --list=NAME,STAT'
ON_HOSTS_STR=$(eval "$ONEHOST_LIST_ON_CMD 2>/dev/null")
if [[ $? = 0 ]]; then
ON_HOSTS_ARRAY=($( echo "$ON_HOSTS_STR" | $AWK -F, '{ if (NR>1) print $1 }'))
for A in "${INPUT_ARRAY[@]}"; do
for B in "${ON_HOSTS_ARRAY[@]}"; do
[[ $A == $B ]] && { echo $A; break; }
if [ $? -eq 0 ]; then
OFF_HOSTS_ARRAY=($( echo "$OFF_HOSTS_STR" | awk -F, '{ if (NR>1) print $1 }'))
for HOST in "${ALL_HOSTS_ARRAY[@]}"; do
OFF=false
for OFF_HOST in "${OFF_HOSTS_ARRAY[@]}"; do
[ $HOST = $OFF_HOST ] && { OFF=true; break; }
done
$OFF || echo -ne "$HOST "
done
else
# onehost cmd failed, can't filter anything, better return unchanged
@ -483,8 +485,14 @@ function get_only_on_hosts {
# @return host to be used as bridge
#-------------------------------------------------------------------------------
function get_destination_host {
BRIDGE_LIST=$(get_only_on_hosts "$BRIDGE_LIST")
HOSTS_ARRAY=($BRIDGE_LIST)
REDUCED_LIST=$(remove_off_hosts "$BRIDGE_LIST")
if [ -z "$REDUCED_LIST" -a -n "$BRIDGE_LIST" ]; then
error_message "All hosts from 'BRIDGE_LIST' are offline, error or disabled"
exit -1
fi
HOSTS_ARRAY=($REDUCED_LIST)
N_HOSTS=${#HOSTS_ARRAY[@]}
if [ -n "$1" ]; then

View File

@ -73,7 +73,9 @@ begin
rescue Exception => e
message = "Error cloning img #{src_path} to #{target_ds_name}"\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
OpenNebula.log_error(message)
STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information]
exit -1
ensure
vi_client.close_connection if vi_client

View File

@ -75,7 +75,9 @@ begin
rescue Exception => e
message = "Error creating virtual disk #{img_name}."\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
OpenNebula.log_error(message)
STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information]
exit -1
ensure
vi_client.close_connection if vi_client

View File

@ -61,7 +61,9 @@ begin
rescue Exception => e
message = "Error monitoring datastore #{id}."\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
OpenNebula.log_error(message)
STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information]
exit -1
ensure
vi_client.close_connection if vi_client

View File

@ -88,7 +88,9 @@ rescue Exception => e
if !e.message.start_with?('FileNotFound')
message = "Error deleting virtual disk #{img_src}."\
" Reason: \"#{e.message}\"\n#{e.backtrace}"
STDERR.puts error_message(message)
OpenNebula.log_error(message)
STDERR.puts "#{message} #{e.backtrace}" if VCenterDriver::CONFIG[:debug_information]
exit -1
end
ensure

View File

@ -314,6 +314,8 @@ void DispatchManager::resubmit_action(int vid)
vm->set_state(VirtualMachine::PENDING);
vm->set_deploy_id(""); //reset the deploy-id
vmpool->update(vm);
vm->unlock();

View File

@ -521,8 +521,10 @@ func (d *Driver) GetIP() (string, error) {
return "", err
}
if ip, ok := vm.XPath("/VM/TEMPLATE/NIC/IP"); ok {
d.IPAddress = ip
if len(vm.Template.NIC) > 0 {
if vm.Template.NIC[0].IP != "" {
d.IPAddress = vm.Template.NIC[0].IP
}
}
if d.IPAddress == "" {

View File

@ -1 +1 @@
5.7.85
5.7.90

View File

@ -88,13 +88,12 @@ class CollectdClient
# Collect the Data
ts = Time.now
data = run_probes
# Send signal to itself to run probes and send the data
Process.kill('HUP', $$)
run_probes_time = (Time.now - ts).to_i
# Send the Data
send data
# Sleep during the Cycle
sleep_time = @monitor_push_period - run_probes_time
sleep_time = 0 if sleep_time < 0
@ -130,4 +129,16 @@ sleep rand monitor_push_period
# Start push monitorization
client = CollectdClient.new(hypervisor, number, host, port, probes_args,
monitor_push_period)
Signal.trap('HUP') do
# ignore another HUP until we handle this one
this_handler = Signal.trap('HUP', 'IGNORE')
data = client.run_probes
client.send data
# set the handler back
Signal.trap('HUP', this_handler)
end
client.monitor

View File

@ -30,14 +30,9 @@ begin
machines = []
models = []
Open3.popen3("virsh -r -c qemu:///system capabilities") {|i, o, e, t|
if t.value.exitstatus != 0
exit -1
end
capabilities = o.read
}
cmd = 'virsh -r -c qemu:///system capabilities'
capabilities, e, s = Open3.capture3(cmd)
exit(-1) unless s.success?
cap_xml = REXML::Document.new(capabilities)
cap_xml = cap_xml.root
@ -94,12 +89,9 @@ begin
end
}
cpu_models = ""
Open3.popen3("virsh -r -c qemu:///system cpu-models #{a}") {|i, o, e, t|
break if t.value.exitstatus != 0
cpu_models = o.read
}
cmd = "virsh -r -c qemu:///system cpu-models #{a}"
cpu_models, e, s = Open3.capture3(cmd)
break unless s.success?
cpu_models.each_line { |l|
l.chomp!

View File

@ -0,0 +1,19 @@
#!/bin/bash
cmd='lxc profile list'
profiles=$($cmd | grep -v -- -+- | grep -v NAME | grep -v default | awk '{print $2}')
if [ "$?" -ne "0" ]; then
profiles=$(sudo $cmd | grep -v -- -+- | grep -v NAME | grep -v default | awk '{print $2}')
fi
tmpfile=$(mktemp /tmp/lxd_probe.XXXXXX)
echo "$profiles" > "$tmpfile"
out=$(tr '\n' ' ' < "$tmpfile")
out=${out::-1}
echo -e LXD_PROFILES=\""$out"\"
rm "$tmpfile"

View File

@ -20,7 +20,7 @@
#include "VirtualMachineManager.h"
#include "ImageManager.h"
void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm, int vid)
void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm)
{
int cpu, mem, disk;
vector<VectorAttribute *> pci;
@ -64,7 +64,63 @@ void LifeCycleManager::start_prolog_migrate(VirtualMachine* vm, int vid)
//----------------------------------------------------
tm->trigger(TMAction::PROLOG_MIGR,vid);
tm->trigger(TMAction::PROLOG_MIGR,vm->get_oid());
}
void LifeCycleManager::revert_migrate_after_failure(VirtualMachine* vm)
{
int cpu, mem, disk;
vector<VectorAttribute *> pci;
time_t the_time = time(0);
//----------------------------------------------------
// RUNNING STATE FROM SAVE_MIGRATE
//----------------------------------------------------
vm->set_state(VirtualMachine::RUNNING);
vm->set_etime(the_time);
vm->set_vm_info();
vmpool->update_history(vm);
vm->get_requirements(cpu, mem, disk, pci);
if ( vm->get_hid() != vm->get_previous_hid() )
{
hpool->del_capacity(vm->get_hid(), vm->get_oid(), cpu, mem, disk, pci);
}
vm->set_previous_etime(the_time);
vm->set_previous_vm_info();
vm->set_previous_running_etime(the_time);
vmpool->update_previous_history(vm);
// --- Add new record by copying the previous one
vm->cp_previous_history();
vm->set_stime(the_time);
vm->set_running_stime(the_time);
vm->set_last_poll(0);
vmpool->update_history(vm);
vmpool->update(vm);
vm->log("LCM", Log::INFO, "Fail to save VM state while migrating."
" Assuming that the VM is still RUNNING (will poll VM).");
//----------------------------------------------------
vmm->trigger(VMMAction::POLL,vm->get_oid());
}
/* -------------------------------------------------------------------------- */
@ -84,7 +140,7 @@ void LifeCycleManager::save_success_action(int vid)
if ( vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE )
{
start_prolog_migrate(vm, vid);
start_prolog_migrate(vm);
}
else if (vm->get_lcm_state() == VirtualMachine::SAVE_SUSPEND)
{
@ -168,58 +224,7 @@ void LifeCycleManager::save_failure_action(int vid)
if ( vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE )
{
int cpu, mem, disk;
vector<VectorAttribute *> pci;
time_t the_time = time(0);
//----------------------------------------------------
// RUNNING STATE FROM SAVE_MIGRATE
//----------------------------------------------------
vm->set_state(VirtualMachine::RUNNING);
vm->set_etime(the_time);
vm->set_vm_info();
vmpool->update_history(vm);
vm->get_requirements(cpu, mem, disk, pci);
if ( vm->get_hid() != vm->get_previous_hid() )
{
hpool->del_capacity(vm->get_hid(), vm->get_oid(), cpu, mem, disk, pci);
}
vm->set_previous_etime(the_time);
vm->set_previous_vm_info();
vm->set_previous_running_etime(the_time);
vmpool->update_previous_history(vm);
// --- Add new record by copying the previous one
vm->cp_previous_history();
vm->set_stime(the_time);
vm->set_running_stime(the_time);
vm->set_last_poll(0);
vmpool->update_history(vm);
vmpool->update(vm);
vm->log("LCM", Log::INFO, "Fail to save VM state while migrating."
" Assuming that the VM is still RUNNING (will poll VM).");
//----------------------------------------------------
vmm->trigger(VMMAction::POLL,vid);
revert_migrate_after_failure(vm);
}
else if ( vm->get_lcm_state() == VirtualMachine::SAVE_SUSPEND ||
vm->get_lcm_state() == VirtualMachine::SAVE_STOP )
@ -561,7 +566,7 @@ void LifeCycleManager::shutdown_success_action(int vid)
}
else if (vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE)
{
start_prolog_migrate(vm, vid);
start_prolog_migrate(vm);
}
else
{
@ -608,6 +613,10 @@ void LifeCycleManager::shutdown_failure_action(int vid)
vmm->trigger(VMMAction::POLL,vid);
}
else if (vm->get_lcm_state() == VirtualMachine::SAVE_MIGRATE)
{
revert_migrate_after_failure(vm);
}
else
{
vm->log("LCM",Log::ERROR,"shutdown_failure_action, VM in a wrong state");

View File

@ -75,11 +75,11 @@ module OpenNebula
# Executes a command, if it fails returns error message and exits
# If a second parameter is present it is used as the error message when
# the command fails
def self.exec_and_log(command, message=nil)
def self.exec_and_log(command, message=nil, allowed_return_code=0)
output=`#{command} 2>&1 1>/dev/null`
code=$?.exitstatus
if code!=0
if code!=0 && code!=allowed_return_code
log_error "Command \"#{command}\" failed."
log_error output
if !message

View File

@ -460,12 +460,23 @@ function ssh_make_path
{
SSH_EXEC_ERR=`$SSH $1 bash -s 2>&1 1>/dev/null <<EOF
set -e -o pipefail
if [ ! -d $2 ]; then
mkdir -p $2
if [ -n "$3" ]; then
echo "$3" > "\$(dirname $2)/.monitor"
fi
if [ ! -d $2 ]; then
mkdir -p $2
fi
# create or update .monitor content
if [ -n "$3" ]; then
MONITOR_FN="\$(dirname $2)/.monitor"
MONITOR=''
if [ -f "\\${MONITOR_FN}" ]; then
MONITOR="\\$(cat "\\${MONITOR_FN}" 2>/dev/null)"
fi
if [ "x\\${MONITOR}" != "x$3" ]; then
echo "$3" > "\\${MONITOR_FN}"
fi
fi
EOF`
SSH_EXEC_RC=$?
@ -994,3 +1005,9 @@ function get_nic_information {
OUTBOUND_PEAK_KB="${XPATH_ELEMENTS[j++]}"
ORDER="${XPATH_ELEMENTS[j++]}"
}
function hup_collectd
{
SEND_HUP='kill -HUP `cat /tmp/one-collectd-client.pid` || true'
ssh_exec_and_log_no_error $1 "$SEND_HUP"
}

View File

@ -33,12 +33,18 @@ MarketPlacePool::MarketPlacePool(SqlDB * db, bool is_federation_slave)
//lastOID is set in PoolSQL::init_cb
if (get_lastOID() == -1)
{
// Build the default default security group
// Build the template for the OpenNebula Systems MarketPlace
string default_market =
"NAME=\"OpenNebula Public\"\n"
"MARKET_MAD=one\n"
"DESCRIPTION=\"OpenNebula Systems MarketPlace\"";
string lxc_market =
"NAME=\"Linux Containers\"\n"
"MARKET_MAD=linuxcontainers\n"
"DESCRIPTION=\"MarketPlace for the public image server fo LXC &"
" LXD hosted at linuxcontainers.org\"";
Nebula& nd = Nebula::instance();
UserPool * upool = nd.get_upool();
User * oneadmin = upool->get_ro(0);
@ -46,9 +52,12 @@ MarketPlacePool::MarketPlacePool(SqlDB * db, bool is_federation_slave)
string error;
MarketPlaceTemplate * default_tmpl = new MarketPlaceTemplate;
MarketPlaceTemplate * lxc_tmpl = new MarketPlaceTemplate;
char * error_parse;
default_tmpl->parse(default_market, &error_parse);
lxc_tmpl->parse(lxc_market, &error_parse);
MarketPlace * marketplace = new MarketPlace(
oneadmin->get_uid(),
@ -58,19 +67,33 @@ MarketPlacePool::MarketPlacePool(SqlDB * db, bool is_federation_slave)
oneadmin->get_umask(),
default_tmpl);
MarketPlace * lxc_marketplace = new MarketPlace(
oneadmin->get_uid(),
oneadmin->get_gid(),
oneadmin->get_uname(),
oneadmin->get_gname(),
oneadmin->get_umask(),
lxc_tmpl);
oneadmin->unlock();
marketplace->set_permissions(1,1,1, 1,0,0, 1,0,0, error);
lxc_marketplace->set_permissions(1,1,1, 1,0,0, 1,0,0, error);
marketplace->zone_id = Nebula::instance().get_zone_id();
lxc_marketplace->zone_id = Nebula::instance().get_zone_id();
marketplace->parse_template(error);
lxc_marketplace->parse_template(error);
if (PoolSQL::allocate(marketplace, error) < 0)
int rc = PoolSQL::allocate(marketplace, error);
rc += PoolSQL::allocate(lxc_marketplace, error);
if (rc < 0)
{
ostringstream oss;
oss << "Error trying to create default "
<< "OpenNebula Systems MarketPlace: " << error;
oss << "Error trying to create default marketplaces: " << error;
NebulaLog::log("MKP", Log::ERROR, oss);
throw runtime_error(oss.str());

View File

@ -31,7 +31,7 @@ class LinuxContainersMarket
#---------------------------------------------------------------------------
DEFAULTS = {
:url => 'https://images.linuxcontainers.org',
:sizemb => 5120,
:sizemb => 2560,
:fs => 'ext4',
:format => 'raw',
:agent => 'OpenNebula'

View File

@ -252,8 +252,6 @@ void Nebula::start(bool bootstrap_only)
// -----------------------------------------------------------
try
{
bool db_is_sqlite = true;
string server;
int port;
string user;
@ -265,12 +263,7 @@ void Nebula::start(bool bootstrap_only)
if ( _db != 0 )
{
string value = _db->vector_value("BACKEND");
if (value == "mysql")
{
db_is_sqlite = false;
}
db_backend_type = _db->vector_value("BACKEND");
if (_db->vector_value("SERVER", server) == -1)
{
@ -303,7 +296,7 @@ void Nebula::start(bool bootstrap_only)
}
}
if ( db_is_sqlite )
if ( db_backend_type == "sqlite" )
{
db_backend = new SqliteDB(var_location + "one.db");
}

View File

@ -1,8 +1,15 @@
package goca
import "encoding/xml"
// ACLPool represents an OpenNebula ACL list pool
type ACLPool struct {
XMLResource
ID uint `xml:"ID"`
User int `xml:"USER"`
Resource int `xml:"RESOURCE"`
Rights int `xml:"RIGHTS"`
Zone int `xml:"ZONE"`
String string `xml:"STRING"`
}
// NewACLPool returns an acl pool. A connection to OpenNebula is
@ -13,9 +20,13 @@ func NewACLPool() (*ACLPool, error) {
return nil, err
}
aclpool := &ACLPool{XMLResource{body: response.Body()}}
aclPool := &ACLPool{}
err = xml.Unmarshal([]byte(response.Body()), aclPool)
if err != nil {
return nil, err
}
return aclpool, err
return aclPool, nil
}
// CreateACLRule adds a new ACL rule.

View File

@ -1,15 +1,30 @@
package goca
// Cluster represents an OpenNebula Cluster
type Cluster struct {
XMLResource
ID uint
Name string
}
import (
"encoding/xml"
"errors"
)
// ClusterPool represents an OpenNebula ClusterPool
type ClusterPool struct {
XMLResource
Clusters []Cluster `xml:"CLUSTER"`
}
// Cluster represents an OpenNebula Cluster
type Cluster struct {
ID uint `xml:"ID"`
Name string `xml:"NAME"`
HostsID []int `xml:"HOSTS>ID"`
DatastoresID []int `xml:"DATASTORES>ID"`
VnetsID []int `xml:"VNETS>ID"`
Template clusterTemplate `xml:"TEMPLATE"`
}
type clusterTemplate struct {
// Example of reservation: https://github.com/OpenNebula/addon-storpool/blob/ba9dd3462b369440cf618c4396c266f02e50f36f/misc/reserved.sh
ReservedMem string `xml:"RESERVED_MEM"`
ReservedCpu string `xml:"RESERVED_CPU"`
Dynamic unmatchedTagsSlice `xml:",any"`
}
// NewClusterPool returns a cluster pool. A connection to OpenNebula is
@ -20,9 +35,13 @@ func NewClusterPool() (*ClusterPool, error) {
return nil, err
}
clusterpool := &ClusterPool{XMLResource{body: response.Body()}}
clusterPool := &ClusterPool{}
err = xml.Unmarshal([]byte(response.Body()), clusterPool)
if err != nil {
return nil, err
}
return clusterpool, err
return clusterPool, nil
}
@ -35,14 +54,26 @@ func NewCluster(id uint) *Cluster {
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the cluster.
func NewClusterFromName(name string) (*Cluster, error) {
var id uint
clusterPool, err := NewClusterPool()
if err != nil {
return nil, err
}
id, err := clusterPool.GetIDFromName(name, "/CLUSTER_POOL/CLUSTER")
if err != nil {
return nil, err
match := false
for i := 0; i < len(clusterPool.Clusters); i++ {
if clusterPool.Clusters[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = clusterPool.Clusters[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewCluster(id), nil
@ -129,6 +160,5 @@ func (cluster *Cluster) Info() error {
if err != nil {
return err
}
cluster.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), cluster)
}

View File

@ -1,15 +1,66 @@
package goca
// Datastore represents an OpenNebula Datastore
type Datastore struct {
XMLResource
ID uint
Name string
}
import (
"encoding/xml"
"errors"
"fmt"
)
// DatastorePool represents an OpenNebula DatastorePool
type DatastorePool struct {
XMLResource
Datastores []Datastore `xml:"DATASTORE"`
}
// Datastore represents an OpenNebula Datastore
type Datastore struct {
ID uint `xml:"ID"`
UID int `xml:"UID"`
GID int `xml:"GID"`
UName string `xml:"UNAME"`
GName string `xml:"GNAME"`
Name string `xml:"NAME"`
Permissions *Permissions `xml:"PERMISSIONS"`
DSMad string `xml:"DS_MAD"`
TMMad string `xml:"TM_MAD"`
BasePath string `xml:"BASE_PATH"`
Type string `xml:"TYPE"`
DiskType string `xml:"DISK_TYPE"`
StateRaw int `xml:"STATE"`
ClustersID []int `xml:"CLUSTERS>ID"`
TotalMB int `xml:"TOTAL_MB"`
FreeMB int `xml:"FREE_MB"`
UsedMB int `xml:"USED_MB"`
ImagesID []int `xml:"IMAGES>ID"`
Template datastoreTemplate `xml:"TEMPLATE"`
}
type datastoreTemplate struct {
Dynamic unmatchedTagsSlice `xml:",any"`
}
// DatastoreState is the state of an OpenNebula datastore
type DatastoreState int
const (
// DatastoreReady datastore is ready
DatastoreReady = iota
// DatastoreDisable datastore is disabled
DatastoreDisable
)
func (st DatastoreState) isValid() bool {
if st >= DatastoreReady && st <= DatastoreDisable {
return true
}
return false
}
func (st DatastoreState) String() string {
return [...]string{
"READY",
"DISABLE",
}[st]
}
// NewDatastorePool returns a datastore pool. A connection to OpenNebula is
@ -20,9 +71,13 @@ func NewDatastorePool() (*DatastorePool, error) {
return nil, err
}
datastorepool := &DatastorePool{XMLResource{body: response.Body()}}
datastorePool := &DatastorePool{}
err = xml.Unmarshal([]byte(response.Body()), datastorePool)
if err != nil {
return nil, err
}
return datastorepool, err
return datastorePool, nil
}
// NewDatastore finds a datastore object by ID. No connection to OpenNebula.
@ -34,14 +89,26 @@ func NewDatastore(id uint) *Datastore {
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the datastore.
func NewDatastoreFromName(name string) (*Datastore, error) {
var id uint
datastorePool, err := NewDatastorePool()
if err != nil {
return nil, err
}
id, err := datastorePool.GetIDFromName(name, "/DATASTORE_POOL/DATASTORE")
if err != nil {
return nil, err
match := false
for i := 0; i < len(datastorePool.Datastores); i++ {
if datastorePool.Datastores[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = datastorePool.Datastores[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewDatastore(id), nil
@ -116,6 +183,23 @@ func (datastore *Datastore) Info() error {
if err != nil {
return err
}
datastore.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), datastore)
}
// State looks up the state of the image and returns the DatastoreState
func (datastore *Datastore) State() (DatastoreState, error) {
state := DatastoreState(datastore.StateRaw)
if !state.isValid() {
return -1, fmt.Errorf("Datastore State: this state value is not currently handled: %d\n", datastore.StateRaw)
}
return state, nil
}
// StateString returns the state in string format
func (datastore *Datastore) StateString() (string, error) {
state := DatastoreState(datastore.StateRaw)
if !state.isValid() {
return "", fmt.Errorf("Datastore StateString: this state value is not currently handled: %d\n", datastore.StateRaw)
}
return state.String(), nil
}

View File

@ -1,17 +1,31 @@
package goca
import "errors"
// Document represents an OpenNebula Document
type Document struct {
XMLResource
ID uint
Name string
}
import (
"encoding/xml"
"errors"
)
// DocumentPool represents an OpenNebula DocumentPool
type DocumentPool struct {
XMLResource
Documents []Document `xml:"DOCUMENT"`
}
// Document represents an OpenNebula Document
type Document struct {
ID uint `xml:"ID"`
UID int `xml:"UID"`
GID int `xml:"GID"`
UName string `xml:"UNAME"`
GName string `xml:"GNAME"`
Name string `xml:"NAME"`
Type string `xml:"TYPE"`
Permissions *Permissions `xml:"PERMISSIONS"`
LockInfos *Lock `xml:"LOCK"`
Template documentTemplate `xml:"TEMPLATE"`
}
type documentTemplate struct {
Dynamic unmatchedTagsSlice `xml:",any"`
}
// NewDocumentPool returns a document pool. A connection to OpenNebula is
@ -41,9 +55,13 @@ func NewDocumentPool(documentType int, args ...int) (*DocumentPool, error) {
return nil, err
}
documentpool := &DocumentPool{XMLResource{body: response.Body()}}
documentPool := &DocumentPool{}
err = xml.Unmarshal([]byte(response.Body()), documentPool)
if err != nil {
return nil, err
}
return documentpool, err
return documentPool, nil
}
// NewDocument finds a document object by ID. No connection to OpenNebula.
@ -55,14 +73,26 @@ func NewDocument(id uint) *Document {
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the document.
func NewDocumentFromName(name string, documentType int) (*Document, error) {
var id uint
documentPool, err := NewDocumentPool(documentType)
if err != nil {
return nil, err
}
id, err := documentPool.GetIDFromName(name, "/DOCUMENT_POOL/DOCUMENT")
if err != nil {
return nil, err
match := false
for i := 0; i < len(documentPool.Documents); i++ {
if documentPool.Documents[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = documentPool.Documents[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewDocument(id), nil

View File

@ -38,15 +38,6 @@ type response struct {
bodyBool bool
}
// Resource implements an OpenNebula Resource methods. *XMLResource implements
// all these methods
type Resource interface {
Body() string
XPath(string) (string, bool)
XPathIter(string) *XMLIter
GetIDFromName(string, string) (uint, error)
}
// Initializes the client variable, used as a singleton
func init() {
SetClient(NewConfig("", "", ""))
@ -123,6 +114,10 @@ func SystemConfig() (string, error) {
// Call is an XML-RPC wrapper. It returns a pointer to response and an error.
func (c *oneClient) Call(method string, args ...interface{}) (*response, error) {
return c.endpointCall(c.url, method, args...)
}
func (c *oneClient) endpointCall(url string, method string, args ...interface{}) (*response, error) {
var (
ok bool
@ -144,7 +139,7 @@ func (c *oneClient) Call(method string, args ...interface{}) (*response, error)
&ClientError{Code: ClientReqBuild, msg: "xmlrpc request encoding", err: err}
}
req, err := http.NewRequest("POST", c.url, bytes.NewBuffer(buf))
req, err := http.NewRequest("POST", url, bytes.NewBuffer(buf))
if err != nil {
return nil,
&ClientError{Code: ClientReqBuild, msg: "http request build", err: err}

View File

@ -1,15 +1,34 @@
package goca
// Group represents an OpenNebula Group
type Group struct {
XMLResource
ID uint
Name string
}
import (
"encoding/xml"
"errors"
)
// GroupPool represents an OpenNebula GroupPool
type GroupPool struct {
XMLResource
Groups []groupBase `xml:"GROUP"`
Quotas []quotas `xml:"QUOTAS"`
DefaultUserQuotas quotasList `xml:"DEFAULT_USER_QUOTAS"`
}
// Group represents an OpenNebula Group
type Group struct {
groupBase
quotasList
DefaultUserQuotas quotasList `xml:"DEFAULT_USER_QUOTAS"`
}
type groupBase struct {
ID uint `xml:"ID"`
Name string `xml:"NAME"`
Users []int `xml:"USERS>ID"`
Admins []int `xml:"ADMINS>ID"`
Template groupTemplate `xml:"TEMPLATE"`
}
type groupTemplate struct {
Dynamic unmatchedTagsSlice `xml:",any"`
}
// NewGroupPool returns a group pool. A connection to OpenNebula is
@ -20,28 +39,44 @@ func NewGroupPool() (*GroupPool, error) {
return nil, err
}
grouppool := &GroupPool{XMLResource{body: response.Body()}}
groupPool := &GroupPool{}
err = xml.Unmarshal([]byte(response.Body()), groupPool)
if err != nil {
return nil, err
}
return grouppool, err
return groupPool, nil
}
// NewGroup finds a group object by ID. No connection to OpenNebula.
func NewGroup(id uint) *Group {
return &Group{ID: id}
return &Group{groupBase: groupBase{ID: id}}
}
// NewGroupFromName finds a group object by name. It connects to
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the group.
func NewGroupFromName(name string) (*Group, error) {
var id uint
groupPool, err := NewGroupPool()
if err != nil {
return nil, err
}
id, err := groupPool.GetIDFromName(name, "/GROUP_POOL/GROUP")
if err != nil {
return nil, err
match := false
for i := 0; i < len(groupPool.Groups); i++ {
if groupPool.Groups[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = groupPool.Groups[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewGroup(id), nil
@ -69,8 +104,7 @@ func (group *Group) Info() error {
if err != nil {
return err
}
group.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), group)
}
// Update replaces the group template contents.

View File

@ -8,23 +8,6 @@ import (
"time"
)
// Extracts the ID of a resource
func GetID(t *testing.T, r Resource, s string) (uint, error) {
path := fmt.Sprintf("/%s/ID", s)
sIDFromXML, ok := r.XPath(path)
if !ok {
t.Error("Could not find ID")
}
idFromXML, err := strconv.ParseUint(sIDFromXML, 10, strconv.IntSize)
if err != nil {
t.Error(err)
}
return uint(idFromXML), nil
}
// Appends a random string to a name
func GenName(name string) string {
t := strconv.FormatInt(time.Now().UnixNano(), 10)
@ -57,11 +40,6 @@ func GetUserGroup(t *testing.T, user string) (string, error){
t.Error("Cannot retreive caller user Info")
}
// Get Caller Group
ugroup, ok := u.XPath("/USER/GNAME")
if !ok {
t.Errorf("Could not get caller group name")
}
return u.GName, nil
return ugroup, nil
}

View File

@ -1,20 +1,71 @@
package goca
import (
"encoding/xml"
"errors"
"strconv"
"fmt"
)
// Host represents an OpenNebula Host
type Host struct {
XMLResource
ID uint
Name string
}
// HostPool represents an OpenNebula HostPool
type HostPool struct {
XMLResource
Hosts []Host `xml:"HOST"`
}
// Host represents an OpenNebula Host
type Host struct {
ID uint `xml:"ID"`
Name string `xml:"NAME"`
StateRaw int `xml:"STATE"`
IMMAD string `xml:"IM_MAD"`
VMMAD string `xml:"VM_MAD"`
LastMonTime int `xml:"LAST_MON_TIME"`
ClusterID int `xml:"CLUSTER_ID"`
Cluster string `xml:"CLUSTER"`
Share hostShare `xml:"HOST_SHARE"`
VMsID []int `xml:"VMS>ID"`
Template hostTemplate `xml:"TEMPLATE"`
}
type hostShare struct {
DiskUsage int `xml:"DISK_USAGE"`
MemUsage int `xml:"MEM_USAGE"`
CPUUsage int `xml:"CPU_USAGE"`
TotalMem int `xml:"TOTAL_MEM"`
TotalCPU int `xml:"TOTAL_CPU"`
MaxDisk int `xml:"MAX_DISK"`
MaxMem int `xml:"MAX_MEM"`
MaxCPU int `xml:"MAX_CPU"`
FreeDisk int `xml:"FREE_DISK"`
FreeMem int `xml:"FREE_MEM"`
FreeCPU int `xml:"FREE_CPU"`
UsedDisk int `xml:"USED_DISK"`
UsedMem int `xml:"USED_MEM"`
UsedCPU int `xml:"USED_CPU"`
RunningVMs int `xml:"RUNNING_VMS"`
Stores hostDataStores `xml:"DATASTORES"`
PCIDevices interface{} `xml:"PCI_DEVICES>PCI"`
}
type hostDataStores struct {
DSs []hostDS `xml:"DS"`
}
type hostDS struct {
ID int `xml:"ID"`
UsedMB int `xml:"USED_MB"`
FreeMB int `xml:"FREE_MB"`
TotalMB int `xml:"TOTAL_MB"`
}
type hostTemplate struct {
// Example of reservation: https://github.com/OpenNebula/addon-storpool/blob/ba9dd3462b369440cf618c4396c266f02e50f36f/misc/reserved.sh
ReservedMem int `xml:"RESERVED_MEM"`
ReservedCpu int `xml:"RESERVED_CPU"`
Dynamic unmatchedTagsSlice `xml:",any"`
}
// HostState is the state of an OpenNebula Host
@ -49,6 +100,13 @@ const (
HostOffline
)
func (st HostState) isValid() bool {
if st >= HostInit && st <= HostOffline {
return true
}
return false
}
func (st HostState) String() string {
return [...]string{
"INIT",
@ -71,9 +129,12 @@ func NewHostPool() (*HostPool, error) {
return nil, err
}
hostpool := &HostPool{XMLResource{body: response.Body()}}
return hostpool, err
hostPool := &HostPool{}
err = xml.Unmarshal([]byte(response.Body()), &hostPool)
if err != nil {
return nil, err
}
return hostPool, nil
}
// NewHost finds a host object by ID. No connection to OpenNebula.
@ -85,14 +146,26 @@ func NewHost(id uint) *Host {
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the host.
func NewHostFromName(name string) (*Host, error) {
var id uint
hostPool, err := NewHostPool()
if err != nil {
return nil, err
}
id, err := hostPool.GetIDFromName(name, "/HOST_POOL/HOST")
if err != nil {
return nil, err
match := false
for i := 0; i < len(hostPool.Hosts); i++ {
if hostPool.Hosts[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = hostPool.Hosts[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewHost(id), nil
@ -146,8 +219,7 @@ func (host *Host) Info() error {
if err != nil {
return err
}
host.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), host)
}
// Monitoring returns the host monitoring records.
@ -156,23 +228,20 @@ func (host *Host) Monitoring() error {
return err
}
// State returns the HostState
// State looks up the state of the image and returns the ImageState
func (host *Host) State() (HostState, error) {
stateString, ok := host.XPath("/HOST/STATE")
if ok != true {
return -1, errors.New("Unable to parse host State")
state := HostState(host.StateRaw)
if !state.isValid() {
return -1, fmt.Errorf("Host State: this state value is not currently handled: %d\n", host.StateRaw)
}
state, _ := strconv.Atoi(stateString)
return HostState(state), nil
return state, nil
}
// StateString returns the HostState as string
// StateString returns the state in string format
func (host *Host) StateString() (string, error) {
state, err := host.State()
if err != nil {
return "", err
state := HostState(host.StateRaw)
if !state.isValid() {
return "", fmt.Errorf("Host StateString: this state value is not currently handled: %d\n", host.StateRaw)
}
return HostState(state).String(), nil
return state.String(), nil
}

View File

@ -1,20 +1,50 @@
package goca
import (
"encoding/xml"
"errors"
"strconv"
"fmt"
)
// Image represents an OpenNebula Image
type Image struct {
XMLResource
ID uint
Name string
}
// ImagePool represents an OpenNebula Image pool
type ImagePool struct {
XMLResource
Images []Image `xml:"IMAGE"`
}
// Image represents an OpenNebula Image
type Image struct {
ID uint `xml:"ID"`
UID int `xml:"UID"`
GID int `xml:"GID"`
UName string `xml:"UNAME"`
GName string `xml:"GNAME"`
Name string `xml:"NAME"`
LockInfos *Lock `xml:"LOCK"`
Permissions *Permissions `xml:"PERMISSIONS"`
Type int `xml:"TYPE"`
DiskType int `xml:"DISK_TYPE"`
PersistentValue int `xml:"PERSISTENT"`
RegTime int `xml:"REGTIME"`
Source string `xml:"SOURCE"`
Path string `xml:"PATH"`
FsType string `xml:"FSTYPE"`
Size int `xml:"SIZE"`
StateRaw int `xml:"STATE"`
RunningVMs int `xml:"RUNNING_VMS"`
CloningOps int `xml:"CLONING_OPS"`
CloningID int `xml:"CLONING_ID"`
TargetSnapshot int `xml:"TARGET_SNAPSHOT"`
DatastoreID int `xml:"DATASTORE_ID"`
Datastore string `xml:"DATASTORE"`
VMsID []int `xml:"VMS>ID"`
ClonesID []int `xml:"CLONES>ID"`
AppClonesID []int `xml:"APP_CLONES>ID"`
Snapshots ImageSnapshot `xml:"SNAPSHOTS"`
Template imageTemplate `xml:"TEMPLATE"`
}
type imageTemplate struct {
Dynamic unmatchedTagsSlice `xml:",any"`
}
// ImageState is the state of the Image
@ -55,6 +85,13 @@ const (
ImageLockUsedPers
)
func (st ImageState) isValid() bool {
if st >= ImageInit && st <= ImageLockUsedPers {
return true
}
return false
}
// String returns the string version of the ImageState
func (s ImageState) String() string {
return [...]string{
@ -84,7 +121,7 @@ func CreateImage(template string, dsid uint) (uint, error) {
}
// NewImagePool returns a new image pool. It accepts the scope of the query. It
// performs an OpenNebula connectio to fetch the information.
// performs an OpenNebula connection to fetch the information.
func NewImagePool(args ...int) (*ImagePool, error) {
var who, start, end int
@ -106,10 +143,13 @@ func NewImagePool(args ...int) (*ImagePool, error) {
return nil, err
}
imagepool := &ImagePool{XMLResource{body: response.Body()}}
return imagepool, err
imagePool := &ImagePool{}
err = xml.Unmarshal([]byte(response.Body()), imagePool)
if err != nil {
return nil, err
}
return imagePool, nil
}
// NewImage finds an image by ID returns a new Image object. At this stage no
@ -122,14 +162,26 @@ func NewImage(id uint) *Image {
// to OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the image.
func NewImageFromName(name string) (*Image, error) {
var id uint
imagePool, err := NewImagePool()
if err != nil {
return nil, err
}
id, err := imagePool.GetIDFromName(name, "/IMAGE_POOL/IMAGE")
if err != nil {
return nil, err
match := false
for i := 0; i < len(imagePool.Images); i++ {
if imagePool.Images[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = imagePool.Images[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewImage(id), nil
@ -141,29 +193,25 @@ func (image *Image) Info() error {
if err != nil {
return err
}
image.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), image)
}
// State looks up the state of the image and returns the ImageState
func (image *Image) State() (ImageState, error) {
stateString, ok := image.XPath("/IMAGE/STATE")
if ok != true {
return -1, errors.New("Unable to parse Image State")
state := ImageState(image.StateRaw)
if !state.isValid() {
return -1, fmt.Errorf("Image State: this state value is not currently handled: %d\n", image.StateRaw)
}
state, _ := strconv.Atoi(stateString)
return ImageState(state), nil
return state, nil
}
// StateString returns the state in string format
func (image *Image) StateString() (string, error) {
state, err := image.State()
if err != nil {
return "", err
state := ImageState(image.StateRaw)
if !state.isValid() {
return "", fmt.Errorf("Image State: this state value is not currently handled: %d\n", image.StateRaw)
}
return ImageState(state).String(), nil
return state.String(), nil
}
// Clone clones an existing image. It returns the clone ID

View File

@ -33,7 +33,7 @@ func ImageExpectState(image *Image, state string) func() bool {
}
// Helper to create a Image
func createImage(t *testing.T) *Image {
func createImage(t *testing.T) (*Image, uint) {
// Datastore ID 1 means default for image
id, err := CreateImage(imageTpl, 1)
if err != nil {
@ -48,26 +48,21 @@ func createImage(t *testing.T) *Image {
t.Error(err)
}
return image
return image, id
}
func TestImage(t *testing.T) {
image := createImage(t)
var err error
idParse, err := GetID(t, image, "IMAGE")
if err != nil {
t.Error(err)
}
image, idOrig := createImage(t)
if idParse != image.ID {
idParse := image.ID
if idParse != idOrig {
t.Errorf("Image ID does not match")
}
// Get image by Name
name, ok := image.XPath("/IMAGE/NAME")
if !ok {
t.Errorf("Could not get name")
}
name := image.Name
image, err = NewImageFromName(name)
if err != nil {
@ -79,9 +74,8 @@ func TestImage(t *testing.T) {
t.Error(err)
}
idParse, err = GetID(t, image, "IMAGE")
if idParse != image.ID {
idParse = image.ID
if idParse != idOrig {
t.Errorf("Image ID does not match")
}
@ -104,16 +98,10 @@ func TestImage(t *testing.T) {
}
// Get Image Owner Name
uname, ok := image.XPath("/IMAGE/UNAME")
if !ok {
t.Errorf("Could not get user name")
}
uname := image.UName
// Get Image owner group Name
gname, ok := image.XPath("/IMAGE/GNAME")
if !ok {
t.Errorf("Could not get group name")
}
gname := image.GName
// Compare with caller username
caller := strings.Split(client.token, ":")[0]
@ -143,16 +131,10 @@ func TestImage(t *testing.T) {
}
// Get Image Owner Name
uname, ok = image.XPath("/IMAGE/UNAME")
if !ok {
t.Errorf("Could not get user name")
}
uname = image.UName
// Get Image Owner Name
gname, ok = image.XPath("/IMAGE/GNAME")
if !ok {
t.Errorf("Could not get user name")
}
// Get Image owner group Name
gname = image.GName
if "serveradmin" != uname {
t.Error("Image owner is not oneadmin")

View File

@ -0,0 +1,8 @@
package goca
// Lock holds the lock information attached to an OpenNebula resource
// (templates, marketplace apps, ...), as found in the LOCK element of
// the resource XML.
type Lock struct {
	Locked int `xml:"LOCKED"`  // lock level
	Owner  int `xml:"OWNER"`   // ID of the user that owns the lock
	Time   int `xml:"TIME"`    // NOTE(review): presumably a Unix timestamp — confirm against the XSD
	ReqID  int `xml:"REQ_ID"`  // ID of the request that acquired the lock
}

View File

@ -1,17 +1,36 @@
package goca
import "errors"
// MarketPlace represents an OpenNebula MarketPlace
type MarketPlace struct {
XMLResource
ID uint
Name string
}
import (
"encoding/xml"
"errors"
)
// MarketPlacePool represents an OpenNebula MarketPlacePool
type MarketPlacePool struct {
XMLResource
MarketPlaces []MarketPlace `xml:"MARKETPLACE"`
}
// MarketPlace represents an OpenNebula MarketPlace. It is populated from
// the MARKETPLACE element returned by the XML-RPC API (see Info()).
type MarketPlace struct {
	ID                 uint   `xml:"ID"`
	UID                int    `xml:"UID"`   // owner user ID
	GID                int    `xml:"GID"`   // owner group ID
	UName              string `xml:"UNAME"` // owner user name
	GName              string `xml:"GNAME"` // owner group name
	Name               string `xml:"NAME"`
	MarketMad          string `xml:"MARKET_MAD"` // market driver (e.g. "http")
	ZoneID             string `xml:"ZONE_ID"`
	TotalMB            int    `xml:"TOTAL_MB"` // total capacity
	FreeMB             int    `xml:"FREE_MB"`  // free capacity
	UsedMB             int    `xml:"USED_MB"`  // used capacity
	MarketPlaceAppsIDs []int  `xml:"MARKETPLACEAPPS>ID"` // IDs of the apps published in this marketplace
	Permissions        *Permissions `xml:"PERMISSIONS"` // nil when the element is absent
	Template           marketPlaceTemplate `xml:"TEMPLATE"`
}
// marketPlaceTemplate represents the TEMPLATE section of a MarketPlace.
// All vendor-specific attributes are collected in Dynamic.
type marketPlaceTemplate struct {
	Dynamic unmatchedTagsSlice `xml:",any"` // every TEMPLATE child element not matched elsewhere
}
// NewMarketPlacePool returns a marketplace pool. A connection to OpenNebula is
@ -41,9 +60,13 @@ func NewMarketPlacePool(args ...int) (*MarketPlacePool, error) {
return nil, err
}
marketpool := &MarketPlacePool{XMLResource{body: response.Body()}}
marketPool := &MarketPlacePool{}
err = xml.Unmarshal([]byte(response.Body()), marketPool)
if err != nil {
return nil, err
}
return marketpool, err
return marketPool, nil
}
// NewMarketPlace finds a marketplace object by ID. No connection to OpenNebula.
@ -55,14 +78,26 @@ func NewMarketPlace(id uint) *MarketPlace {
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the marketplace.
func NewMarketPlaceFromName(name string) (*MarketPlace, error) {
var id uint
marketPool, err := NewMarketPlacePool()
if err != nil {
return nil, err
}
id, err := marketPool.GetIDFromName(name, "/MARKETPLACE_POOL/MARKETPLACE")
if err != nil {
return nil, err
match := false
for i := 0; i < len(marketPool.MarketPlaces); i++ {
if marketPool.MarketPlaces[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = marketPool.MarketPlaces[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewMarketPlace(id), nil
@ -129,6 +164,5 @@ func (market *MarketPlace) Info() error {
if err != nil {
return err
}
market.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), market)
}

View File

@ -4,7 +4,6 @@ import (
"testing"
)
func TestMarketplace(t *testing.T){
var mkt_name string = "marketplace_test_go"
@ -25,7 +24,7 @@ func TestMarketplace(t *testing.T){
market = NewMarketPlace(market_id)
market.Info()
actual, _:= market.XMLResource.XPath("/MARKETPLACE/NAME")
actual := market.Name
if actual != mkt_name {
t.Errorf("Test failed, expected: '%s', got: '%s'", mkt_name, actual)
@ -42,17 +41,20 @@ func TestMarketplace(t *testing.T){
market.Info()
actual_mm, _ := market.XMLResource.XPath("/MARKETPLACE/TEMPLATE/MARKET_MAD")
actual_1, _ := market.XMLResource.XPath("/MARKETPLACE/TEMPLATE/ATT1")
actual_mm := market.MarketMad
actual_1, err := market.Template.Dynamic.GetContentByName("ATT1")
if err != nil {
t.Errorf("Test failed, can't retrieve '%s', error: %s", "ATT1", err.Error())
} else {
if actual_1 != "VAL1" {
t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1)
}
}
if actual_mm != "http" {
t.Errorf("Test failed, expected: '%s', got: '%s'", "http", actual_mm)
}
if actual_1 != "VAL1" {
t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1)
}
//Change permissions for Marketpkace
err = market.Chmod(1,1,1,1,1,1,1,1,1)
@ -62,11 +64,11 @@ func TestMarketplace(t *testing.T){
market.Info()
expected := "111111111"
actual, _ = market.XMLResource.XPath("/MARKETPLACE/PERMISSIONS")
expected_perm := Permissions{ 1, 1, 1, 1, 1, 1, 1, 1, 1 }
actual_perm := *market.Permissions
if actual != expected {
t.Errorf("Test failed, expected: '%s', got: '%s'", expected, actual)
if actual_perm != expected_perm {
t.Errorf("Test failed, expected: '%s', got: '%s'", expected_perm.String(), actual_perm.String())
}
//Change owner of Marketpkace
@ -78,17 +80,17 @@ func TestMarketplace(t *testing.T){
market.Info()
expected_usr := "1"
expected_grp := "1"
actual_usr, _ :=market.XMLResource.XPath("/MARKETPLACE/UID")
actual_grp, _ :=market.XMLResource.XPath("/MARKETPLACE/GID")
expected_usr := 1
expected_grp := 1
actual_usr := market.UID
actual_grp := market.GID
if actual_usr != expected_usr {
t.Errorf("Test failed, expected: '%s', got: '%s'", expected_usr, actual_usr)
t.Errorf("Test failed, expected: '%d', got: '%d'", expected_usr, actual_usr)
}
if actual_grp != expected_grp {
t.Errorf("Test failed, expected: '%s', got: '%s'", expected_grp, actual_grp)
t.Errorf("Test failed, expected: '%d', got: '%d'", expected_grp, actual_grp)
}
rename := mkt_name + "-renamed"
@ -102,7 +104,7 @@ func TestMarketplace(t *testing.T){
market.Info()
actual, _ = market.XMLResource.XPath("/MARKETPLACE/NAME")
actual = market.Name
if actual != rename {
t.Errorf("Test failed, expected: '%s', got: '%s'", rename, actual)

View File

@ -1,17 +1,44 @@
package goca
import "errors"
// MarketPlaceApp represents an OpenNebula MarketPlaceApp
type MarketPlaceApp struct {
XMLResource
ID uint
Name string
}
import (
"encoding/xml"
"errors"
)
// MarketPlaceAppPool represents an OpenNebula MarketPlaceAppPool
type MarketPlaceAppPool struct {
XMLResource
MarketPlaceApps []MarketPlaceApp `xml:"MARKETPLACEAPP"`
}
// MarketPlaceApp represents an OpenNebula MarketPlaceApp. It is populated
// from the MARKETPLACEAPP element returned by the XML-RPC API (see Info()).
type MarketPlaceApp struct {
	ID            uint         `xml:"ID"`
	UID           int          `xml:"UID"`   // owner user ID
	GID           int          `xml:"GID"`   // owner group ID
	UName         string       `xml:"UNAME"` // owner user name
	GName         string       `xml:"GNAME"` // owner group name
	LockInfos     *Lock        `xml:"LOCK"`        // nil when the app is not locked
	Permissions   *Permissions `xml:"PERMISSIONS"` // nil when the element is absent
	RegTime       int          `xml:"REGTIME"` // NOTE(review): presumably a Unix timestamp — confirm
	Name          string       `xml:"NAME"`
	ZoneId        string       `xml:"ZONE_ID"`   // NOTE(review): Go style would be ZoneID; renaming would break callers
	OriginId      string       `xml:"ORIGIN_ID"` // NOTE(review): Go style would be OriginID; renaming would break callers
	Source        string       `xml:"SOURCE"`
	MD5           string       `xml:"MD5"`
	Size          int          `xml:"SIZE"`
	Description   string       `xml:"DESCRIPTION"`
	Version       string       `xml:"VERSION"`
	Format        string       `xml:"FORMAT"`
	AppTemplate64 string       `xml:"APPTEMPLATE64"` // NOTE(review): name suggests base64-encoded template — confirm
	MarketPlaceID int          `xml:"MARKETPLACEID"` // ID of the marketplace that holds this app
	MarketPlace   string       `xml:"MARKETPLACE"`   // name of the marketplace that holds this app
	State         int          `xml:"STATE"`
	Type          int          `xml:"TYPE"`
	Template      marketPlaceAppTemplate `xml:"TEMPLATE"`
}
// marketPlaceAppTemplate represents the TEMPLATE section of a
// MarketPlaceApp. All attributes are collected in Dynamic.
type marketPlaceAppTemplate struct {
	// The struct tag must be the quoted string `xml:",any"`. The previous
	// unquoted form `xml:,any` is a malformed tag (flagged by go vet):
	// encoding/xml silently ignores it, so unmatched elements were never
	// collected. This also matches the sibling template types
	// (marketPlaceTemplate, securityGroupTemplate, templateTemplate).
	Dynamic unmatchedTagsSlice `xml:",any"`
}
// NewMarketPlaceAppPool returns a marketplace app pool. A connection to OpenNebula is
@ -41,9 +68,13 @@ func NewMarketPlaceAppPool(args ...int) (*MarketPlaceAppPool, error) {
return nil, err
}
marketapppool := &MarketPlaceAppPool{XMLResource{body: response.Body()}}
marketappPool := &MarketPlaceAppPool{}
err = xml.Unmarshal([]byte(response.Body()), marketappPool)
if err != nil {
return nil, err
}
return marketapppool, err
return marketappPool, nil
}
// NewMarketPlaceApp finds a marketplace app object by ID. No connection to OpenNebula.
@ -55,14 +86,26 @@ func NewMarketPlaceApp(id uint) *MarketPlaceApp {
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the marketplace app.
func NewMarketPlaceAppFromName(name string) (*MarketPlaceApp, error) {
var id uint
marketAppPool, err := NewMarketPlaceAppPool()
if err != nil {
return nil, err
}
id, err := marketAppPool.GetIDFromName(name, "/MARKETPLACEAPP_POOL/MARKETPLACEAPP")
if err != nil {
return nil, err
match := false
for i := 0; i < len(marketAppPool.MarketPlaceApps); i++ {
if marketAppPool.MarketPlaceApps[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = marketAppPool.MarketPlaceApps[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewMarketPlaceApp(id), nil
@ -137,8 +180,7 @@ func (marketApp *MarketPlaceApp) Info() error {
if err != nil {
return err
}
marketApp.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), marketApp)
}
// Lock locks the marketplace app depending on blocking level.

View File

@ -53,7 +53,7 @@ func TestMarketplaceApp(t *testing.T){
mkt_app = NewMarketPlaceApp(app_id)
mkt_app.Info()
actual, _:= mkt_app.XMLResource.XPath("/MARKETPLACEAPP/NAME")
actual := mkt_app.Name
if actual != mkt_app_name {
t.Errorf("Test failed, expected: '%s', got: '%s'", mkt_app_name, actual)

View File

@ -0,0 +1,23 @@
package goca
//import "fmt"
// Permissions mirrors the PERMISSIONS element of an OpenNebula resource.
// Each field is a use/manage/admin flag for the owner, group and others.
// NOTE(review): String assumes every flag is 0 or 1; any other value
// would index out of range — confirm the API never returns -1 here.
type Permissions struct {
	OwnerU int `xml:"OWNER_U"`
	OwnerM int `xml:"OWNER_M"`
	OwnerA int `xml:"OWNER_A"`
	GroupU int `xml:"GROUP_U"`
	GroupM int `xml:"GROUP_M"`
	GroupA int `xml:"GROUP_A"`
	OtherU int `xml:"OTHER_U"`
	OtherM int `xml:"OTHER_M"`
	OtherA int `xml:"OTHER_A"`
}

// String renders the permissions in the usual 9-character OpenNebula
// form, one "uma" triplet each for owner, group and other, with '-'
// standing in for a cleared bit (e.g. "uma", "um-", "---").
func (p *Permissions) String() string {
	triplet := func(u, m, a int) string {
		names := [8]string{"---", "--a", "-m-", "-ma", "u--", "u-a", "um-", "uma"}
		return names[u<<2|m<<1|a]
	}
	return triplet(p.OwnerU, p.OwnerM, p.OwnerA) +
		triplet(p.GroupU, p.GroupM, p.GroupA) +
		triplet(p.OtherU, p.OtherM, p.OtherA)
}

View File

@ -0,0 +1,50 @@
package goca
// quotas holds the quota information attached to a user or a group,
// identified by the owning entity's ID.
type quotas struct {
	ID uint `xml:"ID"` // ID of the user or group these quotas belong to
	quotasList
}

// quotasList groups the four quota categories tracked by OpenNebula.
// All numeric values are kept as strings, as delivered by the XML-RPC API.
type quotasList struct {
	DatastoreQuotas []datastoreQuota `xml:"DATASTORE_QUOTA>DATASTORE"`
	NetworkQuotas   []networkQuota   `xml:"NETWORK_QUOTA>NETWORK"`
	VMQuotas        []vmQuota        `xml:"VM_QUOTA>VM"`
	ImageQuotas     []imageQuota     `xml:"IMAGE_QUOTA>IMAGE"`
}

// datastoreQuota limits images and storage on a single datastore.
// *_USED fields carry current consumption.
type datastoreQuota struct {
	ID         string `xml:"ID"` // datastore ID
	Images     string `xml:"IMAGES"`
	ImagesUsed string `xml:"IMAGES_USED"`
	Size       string `xml:"SIZE"`
	SizeUsed   string `xml:"SIZE_USED"`
}

// networkQuota limits leases on a single virtual network.
type networkQuota struct {
	ID         string `xml:"ID"` // virtual network ID
	Leases     string `xml:"LEASES"`
	LeasesUsed string `xml:"LEASES_USED"`
}

// vmQuota limits aggregate VM resources (CPU, memory, disk, VM count).
// RUNNING_* variants only count VMs in a running state.
type vmQuota struct {
	CPU                string `xml:"CPU"`
	CPUUsed            string `xml:"CPU_USED"`
	Memory             string `xml:"MEMORY"`
	MemoryUsed         string `xml:"MEMORY_USED"`
	RunningCpu         string `xml:"RUNNING_CPU"` // NOTE(review): Go style would be RunningCPU
	RunningCpuUsed     string `xml:"RUNNING_CPU_USED"`
	RunningMemory      string `xml:"RUNNING_MEMORY"`
	RunningMemoryUsed  string `xml:"RUNNING_MEMORY_USED"`
	RunningVMs         string `xml:"RUNNING_VMS"`
	RunningVMsUsed     string `xml:"RUNNING_VMS_USED"`
	SystemDiskSize     string `xml:"SYSTEM_DISK_SIZE"`
	SystemDiskSizeUsed string `xml:"SYSTEM_DISK_SIZE_USED"`
	VMs                string `xml:"VMS"`
	VMsUsed            string `xml:"VMS_USED"`
}

// imageQuota limits how many running VMs may use a single image.
type imageQuota struct {
	ID       string `xml:"ID"` // image ID
	RVMs     string `xml:"RVMS"`
	RVMsUsed string `xml:"RVMS_USED"`
}

View File

@ -1,17 +1,41 @@
package goca
import "errors"
// SecurityGroup represents an OpenNebula SecurityGroup
type SecurityGroup struct {
XMLResource
ID uint
Name string
}
import (
"encoding/xml"
"errors"
)
// SecurityGroupPool represents an OpenNebula SecurityGroupPool
type SecurityGroupPool struct {
XMLResource
SecurityGroups []SecurityGroup `xml:"SECURITY_GROUP"`
}
// SecurityGroup represents an OpenNebula SecurityGroup. It is populated
// from the SECURITY_GROUP element returned by the XML-RPC API (see Info()).
type SecurityGroup struct {
	ID          uint   `xml:"ID"`
	UID         int    `xml:"UID"`   // owner user ID
	GID         int    `xml:"GID"`   // owner group ID
	UName       string `xml:"UNAME"` // owner user name
	GName       string `xml:"GNAME"` // owner group name
	Name        string `xml:"NAME"`
	Permissions *Permissions `xml:"PERMISSIONS"` // nil when the element is absent
	UpdatedVMs  []int  `xml:"UPDATED_VMS>ID"`  // VMs with the current rules applied
	OutdatedVMs []int  `xml:"OUTDATED_VMS>ID"` // VMs still using older rules
	UpdatingVMs []int  `xml:"UPDATING_VMS>ID"` // VMs currently being updated
	ErrorVMs    []int  `xml:"ERROR_VMS>ID"`    // VMs whose rule update failed
	Template    securityGroupTemplate `xml:"TEMPLATE"`
}
// securityGroupTemplate represents the TEMPLATE section of a
// SecurityGroup. (Previous comment referred to VirtualRouter by mistake.)
type securityGroupTemplate struct {
	Description string              `xml:"DESCRIPTION"`
	Rules       []securityGroupRule `xml:"RULE"`
	Dynamic     unmatchedTagsSlice  `xml:",any"` // every other TEMPLATE child element
}

// securityGroupRule describes a single rule of a security group.
type securityGroupRule struct {
	Protocol string `xml:"PROTOCOL"`  // e.g. TCP, UDP, ICMP
	RuleType string `xml:"RULE_TYPE"` // rule direction/kind as reported by the API
}
// NewSecurityGroupPool returns a security group pool. A connection to OpenNebula is
@ -41,9 +65,13 @@ func NewSecurityGroupPool(args ...int) (*SecurityGroupPool, error) {
return nil, err
}
secgrouppool := &SecurityGroupPool{XMLResource{body: response.Body()}}
secgroupPool := &SecurityGroupPool{}
err = xml.Unmarshal([]byte(response.Body()), secgroupPool)
if err != nil {
return nil, err
}
return secgrouppool, err
return secgroupPool, nil
}
// NewSecurityGroup finds a security group object by ID. No connection to OpenNebula.
@ -55,14 +83,26 @@ func NewSecurityGroup(id uint) *SecurityGroup {
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the security group.
func NewSecurityGroupFromName(name string) (*SecurityGroup, error) {
var id uint
secgroupPool, err := NewSecurityGroupPool()
if err != nil {
return nil, err
}
id, err := secgroupPool.GetIDFromName(name, "/SECURITY_GROUP_POOL/SECURITY_GROUP")
if err != nil {
return nil, err
match := false
for i := 0; i < len(secgroupPool.SecurityGroups); i++ {
if secgroupPool.SecurityGroups[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = secgroupPool.SecurityGroups[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewSecurityGroup(id), nil
@ -146,6 +186,5 @@ func (sg *SecurityGroup) Info() error {
if err != nil {
return err
}
sg.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), sg)
}

View File

@ -22,7 +22,7 @@ func TestSGAllocate(t *testing.T){
sg = NewSecurityGroup(sg_id)
sg.Info()
actual, _:= sg.XMLResource.XPath("/SECURITY_GROUP/NAME")
actual := sg.Name
if actual != sg_name {
t.Errorf("Test failed, expected: '%s', got: '%s'", sg_name, actual)
@ -39,15 +39,22 @@ func TestSGAllocate(t *testing.T){
sg.Info()
actual_1, _ := sg.XMLResource.XPath("/SECURITY_GROUP/TEMPLATE/ATT1")
actual_3, _ := sg.XMLResource.XPath("/SECURITY_GROUP/TEMPLATE/ATT3")
if actual_1 != "VAL1" {
t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1)
actual_1, err := sg.Template.Dynamic.GetContentByName("ATT1")
if err != nil {
t.Errorf("Test failed, can't retrieve '%s', error: %s", "ATT1", err.Error())
} else {
if actual_1 != "VAL1" {
t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL1", actual_1)
}
}
if actual_3 != "VAL3" {
t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL3", actual_3)
actual_3, err := sg.Template.Dynamic.GetContentByName("ATT3")
if err != nil {
t.Errorf("Test failed, can't retrieve '%s', error: %s", "ATT3", err.Error())
} else {
if actual_3 != "VAL3" {
t.Errorf("Test failed, expected: '%s', got: '%s'", "VAL3", actual_3)
}
}
clone_name := sg_name + "-cloned"
@ -62,7 +69,7 @@ func TestSGAllocate(t *testing.T){
clone := NewSecurityGroup(clone_id)
clone.Info()
actual, _ = clone.XMLResource.XPath("/SECURITY_GROUP/NAME")
actual = sg.Name
if actual != clone_name {
t.Errorf("Test failed, expected: '%s', got: '%s'", clone_name, actual)
@ -79,11 +86,11 @@ func TestSGAllocate(t *testing.T){
sg.Info()
expected := "111111111"
actual, _ = sg.XMLResource.XPath("/SECURITY_GROUP/PERMISSIONS")
expected_perm := Permissions{1, 1, 1, 1, 1, 1, 1, 1, 1}
actual_perm := sg.Permissions
if actual != expected {
t.Errorf("Test failed, expected: '%s', got: '%s'", expected, actual)
if actual_perm == nil || *actual_perm != expected_perm {
t.Errorf("Test failed, expected: '%s', got: '%s'", expected_perm.String(), actual_perm.String())
}
//Change owner of SG
@ -95,17 +102,17 @@ func TestSGAllocate(t *testing.T){
sg.Info()
expected_usr := "1"
expected_grp := "1"
actual_usr, _ := sg.XMLResource.XPath("/SECURITY_GROUP/UID")
actual_grp, _ := sg.XMLResource.XPath("/SECURITY_GROUP/GID")
expected_usr := 1
expected_grp := 1
actual_usr := sg.UID
actual_grp := sg.GID
if actual_usr != expected_usr {
t.Errorf("Test failed, expected: '%s', got: '%s'", expected_usr, actual_usr)
t.Errorf("Test failed, expected: '%d', got: '%d'", expected_usr, actual_usr)
}
if actual_grp != expected_grp {
t.Errorf("Test failed, expected: '%s', got: '%s'", expected_grp, actual_grp)
t.Errorf("Test failed, expected: '%d', got: '%d'", expected_grp, actual_grp)
}
//Rename SG
@ -118,7 +125,7 @@ func TestSGAllocate(t *testing.T){
sg.Info()
actual, _ = sg.XMLResource.XPath("/SECURITY_GROUP/NAME")
actual = sg.Name
if actual != rename {
t.Errorf("Test failed, expected: '%s', got: '%s'", rename, actual)

View File

@ -0,0 +1,40 @@
package goca
// A user can take snapshots of a VM, or of the VM's disks.
// The types below model both cases; snapshot is the common part.

// snapshot holds the fields shared by image and VM disk snapshots.
type snapshot struct {
	Children string `xml:"CHILDREN"` // minOccurs=0 in the schema: may be empty
	Active   string `xml:"ACTIVE"`   // minOccurs=0 in the schema: may be empty
	Date     int    `xml:"DATE"`     // NOTE(review): presumably a Unix timestamp — confirm
	ID       int    `xml:"ID"`
	Name     string `xml:"NAME"` // minOccurs=0 in the schema: may be empty
	Parent   int    `xml:"PARENT"` // ID of the parent snapshot
	Size     int    `xml:"SIZE"`
}

// ImageSnapshot holds the snapshot list attached to an Image entity.
type ImageSnapshot struct {
	AllowOrphans string     `xml:"ALLOW_ORPHANS"`
	CurrentBase  int        `xml:"CURRENT_BASE"`
	NextSnapshot int        `xml:"NEXT_SNAPSHOT"` // ID the next snapshot will receive
	Snapshots    []snapshot `xml:"SNAPSHOT"`
}

// VMSnapshot describes a snapshot taken of a whole VM.
type VMSnapshot struct {
	HypervisorID string `xml:"HYPERVISOR_ID"`
	Name         string `xml:"NAME"`
	ID           int    `xml:"SNAPSHOT_ID"`
	Time         string `xml:"TIME"`
}

// vmHistoryRecordSnapshot is an image snapshot set scoped to one VM disk,
// as found in a VM history record.
type vmHistoryRecordSnapshot struct {
	ImageSnapshot
	DiskID int `xml:"DISK_ID"`
}

// vmMonitoringSnapshotSize reports the size of a disk's snapshots in
// VM monitoring data.
type vmMonitoringSnapshotSize struct {
	DiskID int `xml:"DISK_ID"`
	Size   int `xml:"SIZE"`
}

View File

@ -1,19 +1,65 @@
package goca
import (
"encoding/xml"
"errors"
)
// Template represents an OpenNebula Template
type Template struct {
XMLResource
ID uint
Name string
}
// TemplatePool represents an OpenNebula TemplatePool
type TemplatePool struct {
XMLResource
Templates []Template `xml:"VMTEMPLATE"`
}
// Template represents an OpenNebula VM Template. It is populated from
// the VMTEMPLATE element returned by the XML-RPC API (see Info()).
type Template struct {
	ID          uint   `xml:"ID"`
	UID         int    `xml:"UID"`   // owner user ID
	GID         int    `xml:"GID"`   // owner group ID
	UName       string `xml:"UNAME"` // owner user name
	GName       string `xml:"GNAME"` // owner group name
	Name        string `xml:"NAME"`
	LockInfos   *Lock        `xml:"LOCK"`        // nil when the template is not locked
	Permissions *Permissions `xml:"PERMISSIONS"` // nil when the element is absent
	RegTime     int          `xml:"REGTIME"` // NOTE(review): presumably a Unix timestamp — confirm
	Template    templateTemplate `xml:"TEMPLATE"`
}
// templateTemplate represents the TEMPLATE section of an OpenNebula
// Template. Well-known attributes get their own fields; everything else
// lands in Dynamic.
type templateTemplate struct {
	CPU        float64             `xml:"CPU"`
	Memory     int                 `xml:"MEMORY"` // NOTE(review): presumably in MB — confirm against the docs
	Context    *templateContext    `xml:"CONTEXT"`
	Disk       []templateDisk      `xml:"DISK"`
	Graphics   *templateGraphics   `xml:"GRAPHICS"`
	NICDefault *templateNicDefault `xml:"NIC_DEFAULT"`
	OS         *templateOS         `xml:"OS"`
	UserInputs templateUserInputs  `xml:"USER_INPUTS"`
	Dynamic    unmatchedTagsSlice  `xml:",any"` // every TEMPLATE child element not matched above
}

// templateContext holds the free-form CONTEXT attributes of a template.
type templateContext struct {
	Dynamic unmatchedTagsSlice `xml:",any"`
}

// templateDisk holds the free-form attributes of one DISK element.
type templateDisk struct {
	Dynamic unmatchedTagsSlice `xml:",any"`
}

// templateGraphics holds the free-form GRAPHICS attributes of a template.
type templateGraphics struct {
	Dynamic unmatchedTagsSlice `xml:",any"`
}

// templateUserInputs holds the free-form USER_INPUTS attributes.
type templateUserInputs struct {
	Dynamic unmatchedTagsSlice `xml:",any"`
}

// templateNicDefault holds the NIC_DEFAULT section of a template.
type templateNicDefault struct {
	Model string `xml:"MODEL"`
}

// templateOS holds the OS section of a template.
type templateOS struct {
	Arch string `xml:"ARCH"`
	Boot string `xml:"BOOT"`
}
// NewTemplatePool returns a template pool. A connection to OpenNebula is
@ -39,10 +85,13 @@ func NewTemplatePool(args ...int) (*TemplatePool, error) {
return nil, err
}
templatepool := &TemplatePool{XMLResource{body: response.Body()}}
return templatepool, err
templatePool := &TemplatePool{}
err = xml.Unmarshal([]byte(response.Body()), templatePool)
if err != nil {
return nil, err
}
return templatePool, nil
}
// NewTemplate finds a template object by ID. No connection to OpenNebula.
@ -54,14 +103,26 @@ func NewTemplate(id uint) *Template {
// OpenNebula to retrieve the pool, but doesn't perform the Info() call to
// retrieve the attributes of the template.
func NewTemplateFromName(name string) (*Template, error) {
var id uint
templatePool, err := NewTemplatePool()
if err != nil {
return nil, err
}
id, err := templatePool.GetIDFromName(name, "/VMTEMPLATE_POOL/VMTEMPLATE")
if err != nil {
return nil, err
match := false
for i := 0; i < len(templatePool.Templates); i++ {
if templatePool.Templates[i].Name != name {
continue
}
if match {
return nil, errors.New("multiple resources with that name")
}
id = templatePool.Templates[i].ID
match = true
}
if !match {
return nil, errors.New("resource not found")
}
return NewTemplate(id), nil
@ -83,8 +144,7 @@ func (template *Template) Info() error {
if err != nil {
return err
}
template.body = response.Body()
return nil
return xml.Unmarshal([]byte(response.Body()), template)
}
// Update will modify the template. If appendTemplate is 0, it will

View File

@ -5,7 +5,7 @@ import (
)
// Helper to create a template
func createTemplate(t *testing.T) *Template {
func createTemplate(t *testing.T) (*Template, uint) {
templateName := GenName("template")
// Create template
@ -28,28 +28,22 @@ func createTemplate(t *testing.T) *Template {
t.Error(err)
}
return template
return template, id
}
func TestTemplateCreateAndDelete(t *testing.T) {
template := createTemplate(t)
var err error
idParse, err := GetID(t, template, "VMTEMPLATE")
if err != nil {
t.Error(err)
}
template, idOrig := createTemplate(t)
if idParse != template.ID {
idParse := template.ID
if idParse != idOrig {
t.Errorf("Template ID does not match")
}
// Get template by Name
templateName, ok := template.XPath("/VMTEMPLATE/NAME")
if !ok {
t.Errorf("Could not get name")
}
template, err = NewTemplateFromName(templateName)
name := template.Name
template, err = NewTemplateFromName(name)
if err != nil {
t.Fatal(err)
}
@ -59,9 +53,8 @@ func TestTemplateCreateAndDelete(t *testing.T) {
t.Error(err)
}
idParse, err = GetID(t, template, "VMTEMPLATE")
if idParse != template.ID {
idParse = template.ID
if idParse != idOrig {
t.Errorf("Template ID does not match")
}
@ -107,7 +100,7 @@ func TestTemplateInstantiate(t *testing.T) {
}
func TestTemplateUpdate(t *testing.T) {
template := createTemplate(t)
template, _ := createTemplate(t)
tpl := NewTemplateBuilder()
tpl.AddValue("A", "B")
@ -120,8 +113,13 @@ func TestTemplateUpdate(t *testing.T) {
t.Error(err)
}
if val, ok := template.XPath("/VMTEMPLATE/TEMPLATE/A"); !ok || val != "B" {
t.Errorf("Expecting A=B")
val, err := template.Template.Dynamic.GetContentByName("A")
if err != nil {
t.Errorf("Test failed, can't retrieve '%s', error: %s", "A", err.Error())
} else {
if val != "B" {
t.Errorf("Expecting A=B")
}
}
// Delete template

Some files were not shown because too many files have changed in this diff Show More