1
0
mirror of https://github.com/OpenNebula/one.git synced 2024-12-24 21:34:01 +03:00

Merge branch 'master' into feature-203

Conflicts:
	install.sh
This commit is contained in:
Ruben S. Montero 2010-07-12 16:33:25 +02:00
commit 0fac49b28a
35 changed files with 2428 additions and 183 deletions

140
include/ClusterPool.h Normal file
View File

@ -0,0 +1,140 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#ifndef CLUSTER_POOL_H_
#define CLUSTER_POOL_H_
#include <string>
#include <sstream>
#include <map>
#include "SqlDB.h"
using namespace std;
/**
* A cluster helper class. It is not a normal PoolSQL,
* but a series of static methods related to clusters.
*/
class ClusterPool
{
public:
/**
* Name of the default cluster. Every Host belongs to it unless
* explicitly assigned to another cluster.
*/
static const string DEFAULT_CLUSTER_NAME;
private:
// -------------------------------------------------------------------------
// Friends
// -------------------------------------------------------------------------
// HostPool drives this helper: it builds the cache at start-up and
// exposes the cluster operations through its own public interface.
friend class HostPool;
/* ---------------------------------------------------------------------- */
/* Attributes */
/* ---------------------------------------------------------------------- */
/**
* In-memory cache of the known clusters: cluster id -> cluster name.
* Kept in sync with the cluster_pool DB table by allocate/insert/drop.
*/
map<int, string> cluster_names;
/* ---------------------------------------------------------------------- */
/* Methods for cluster management */
/* ---------------------------------------------------------------------- */
/**
* Returns true if the clid is an id for an existing cluster
* @param clid ID of the cluster
*
* @return true if the clid is an id for an existing cluster
*/
bool exists(int clid)
{
return cluster_names.count(clid) > 0;
};
/**
* Allocates a new cluster in the pool
* @param clid output, the id assigned to the new cluster (-1 on failure)
* @param name name of the new cluster; must not be in use already
* @param db pointer to the DB
* @return the id assigned to the cluster or -1 in case of failure
*/
int allocate(int * clid, string name, SqlDB *db);
/**
* Returns the xml representation of the given cluster
* @param clid ID of the cluster
*
* @return the xml representation of the given cluster, or the empty
* string if clid does not exist
*/
string info(int clid);
/**
* Removes the given cluster from the pool and the DB. The default
* cluster (id 0) cannot be removed.
* @param clid ID of the cluster
* @param db pointer to the DB
*
* @return 0 on success
*/
int drop(int clid, SqlDB *db);
/**
* Dumps the cluster pool in XML format.
* @param oss the output stream to dump the pool contents
*
* @return 0 on success
*/
int dump(ostringstream& oss);
/**
* Bootstraps the database table(s) associated to the Cluster
*/
static void bootstrap(SqlDB * db)
{
// oss is seeded with the CREATE TABLE statement; nothing else is
// appended before it is executed.
ostringstream oss(ClusterPool::db_bootstrap);
db->exec(oss);
};
/**
* Function to insert new Cluster in the pool
* @param oid the id assigned to the Cluster
* @param name the Cluster's name
* @param db pointer to the DB
* @return 0 on success, -1 in case of failure
*/
int insert (int oid, string name, SqlDB *db);
/**
* Formats as XML the given id and name, appending the result to oss.
* @param oss the output stream to dump the cluster contents
* @param id cluster id
* @param name cluster name
*/
void dump_cluster(ostringstream& oss, int id, string name);
/* ---------------------------------------------------------------------- */
/* DB manipulation */
/* ---------------------------------------------------------------------- */
static const char * db_names;
static const char * db_bootstrap;
static const char * table;
};
#endif /*CLUSTER_POOL_H_*/

View File

@ -20,6 +20,7 @@
#include "PoolSQL.h"
#include "HostShare.h"
#include "HostTemplate.h"
#include "ClusterPool.h"
using namespace std;
@ -90,14 +91,14 @@ public:
if ( state != DISABLED) //Don't change the state is host is disabled
{
if (success == true)
{
state = MONITORED;
}
else
{
state = ERROR;
}
if (success == true)
{
state = MONITORED;
}
else
{
state = ERROR;
}
}
};
@ -107,7 +108,7 @@ public:
*/
void disable()
{
state = DISABLED;
state = DISABLED;
};
/**
@ -116,17 +117,17 @@ public:
*/
void enable()
{
state = INIT;
state = INIT;
};
/**
* Returns host host_name
* @return host_name Host's hostname
*/
const string& get_hostname() const
const string& get_hostname() const
{
return hostname;
};
return hostname;
};
/** Update host counters and update the whole host on the DB
* @param parse_str string with values to be parsed
@ -188,6 +189,16 @@ public:
return last_monitored;
};
/**
* Sets the cluster for this host
* @param cluster_name name of the cluster this host now belongs to
* @return 0 on success
*/
int set_cluster(const string& cluster_name)
{
cluster = cluster_name;
return 0;
};
// ------------------------------------------------------------------------
// Template
// ------------------------------------------------------------------------
@ -396,6 +407,11 @@ private:
*/
time_t last_monitored;
/**
* Name of the cluster this host belongs to.
*/
string cluster;
// -------------------------------------------------------------------------
// Host Attributes
// -------------------------------------------------------------------------
@ -472,7 +488,8 @@ protected:
VM_MAD = 4,
TM_MAD = 5,
LAST_MON_TIME = 6,
LIMIT = 7
CLUSTER = 7,
LIMIT = 8
};
static const char * db_names;

View File

@ -19,6 +19,7 @@
#include "PoolSQL.h"
#include "Host.h"
#include "ClusterPool.h"
#include <time.h>
#include <sstream>
@ -36,7 +37,7 @@ class HostPool : public PoolSQL
{
public:
HostPool(SqlDB * db):PoolSQL(db,Host::table){};
HostPool(SqlDB * db);
~HostPool(){};
@ -81,10 +82,12 @@ public:
static void bootstrap(SqlDB *_db)
{
Host::bootstrap(_db);
ClusterPool::bootstrap(_db);
};
/**
* Get the 10 least monitored hosts
* Get the least monitored hosts
* @param discovered hosts, map to store the retrieved hosts hids and
* hostnames
* @param host_limit max. number of hosts to monitor at a time
@ -144,7 +147,101 @@ public:
*/
int dump(ostringstream& oss, const string& where);
/* ---------------------------------------------------------------------- */
/* ---------------------------------------------------------------------- */
/* Methods for cluster management */
/* ---------------------------------------------------------------------- */
/* ---------------------------------------------------------------------- */
/**
* Returns true if the clid is an id for an existing cluster
* @param clid ID of the cluster
*
* @return true if the clid is an id for an existing cluster
*/
/* bool exists_cluster(int clid)
{
return cluster_pool.exists(clid);
};*/
/**
* Allocates a new cluster in the pool
* @param clid the id assigned to the cluster
* @return the id assigned to the cluster or -1 in case of failure
*/
int allocate_cluster(int * clid, string name)
{
return cluster_pool.allocate(clid, name, db);
};
/**
* Returns the xml representation of the given cluster
* @param clid ID of the cluster
*
* @return the xml representation of the given cluster
*/
string info_cluster(int clid)
{
return cluster_pool.info(clid);
};
/**
* Removes the given cluster from the pool and the DB
* @param clid ID of the cluster
*
* @return 0 on success
*/
int drop_cluster(int clid);
/**
* Dumps the cluster pool in XML format.
* @param oss the output stream to dump the pool contents
*
* @return 0 on success
*/
int dump_cluster(ostringstream& oss)
{
return cluster_pool.dump(oss);
};
/**
* Assigns the host to the given cluster
* @param host The host to assign
* @param clid ID of the cluster
*
* @return 0 on success
*/
int set_cluster(Host* host, int clid)
{
map<int, string>::iterator it;
it = cluster_pool.cluster_names.find(clid);
if (it == cluster_pool.cluster_names.end())
{
return -1;
}
return host->set_cluster( it->second );
};
/**
* Removes the host from the given cluster setting the default one.
* @param host The host to assign
*
* @return 0 on success
*/
int set_default_cluster(Host* host)
{
return host->set_cluster(ClusterPool::DEFAULT_CLUSTER_NAME);
};
private:
/**
* ClusterPool, clusters defined and persistance functionality
*/
ClusterPool cluster_pool;
/**
* Factory method to produce Host objects
* @return a pointer to the new Host
@ -154,6 +251,15 @@ private:
return new Host;
};
/**
* Callback function to build the cluster pool cache from the DB
* @param nil unused callback argument
* @param num the number of columns read from the DB
* @param values the column values
* @param names the column names
* @return 0 on success
*/
int init_cb(void *nil, int num, char **values, char **names);
/**
* Callback function to get the IDs of the hosts to be monitored
* (Host::discover)
@ -175,4 +281,4 @@ private:
int dump_cb(void * _oss, int num, char **values, char **names);
};
#endif /*HOST_POOL_H_*/
#endif /*HOST_POOL_H_*/

View File

@ -463,7 +463,166 @@ private:
HostPool * hpool;
UserPool * upool;
};
/* ---------------------------------------------------------------------- */
/* Cluster Interface */
/* ---------------------------------------------------------------------- */
class ClusterAllocate: public xmlrpc_c::method
{
public:
ClusterAllocate(
HostPool * _hpool,
UserPool * _upool):
hpool(_hpool),
upool(_upool)
{
_signature="A:ss";
_help="Allocates a cluster in the pool";
};
~ClusterAllocate(){};
void execute(
xmlrpc_c::paramList const& paramList,
xmlrpc_c::value * const retvalP);
private:
HostPool * hpool;
UserPool * upool;
};
/* ---------------------------------------------------------------------- */
class ClusterInfo: public xmlrpc_c::method
{
public:
ClusterInfo(
HostPool * _hpool,
UserPool * _upool):
hpool(_hpool),
upool(_upool)
{
_signature="A:si";
_help="Returns cluster information";
};
~ClusterInfo(){};
void execute(
xmlrpc_c::paramList const& paramList,
xmlrpc_c::value * const retvalP);
private:
HostPool * hpool;
UserPool * upool;
};
/* ---------------------------------------------------------------------- */
class ClusterDelete: public xmlrpc_c::method
{
public:
ClusterDelete(
HostPool * _hpool,
UserPool * _upool):
hpool(_hpool),
upool(_upool)
{
_signature="A:si";
_help="Deletes a cluster from the pool";
};
~ClusterDelete(){};
void execute(
xmlrpc_c::paramList const& paramList,
xmlrpc_c::value * const retvalP);
private:
HostPool * hpool;
UserPool * upool;
};
/* ---------------------------------------------------------------------- */
class ClusterAdd: public xmlrpc_c::method
{
public:
ClusterAdd(
HostPool * _hpool,
UserPool * _upool):
hpool(_hpool),
upool(_upool)
{
_signature="A:sii";
_help="Adds a host to a cluster";
};
~ClusterAdd(){};
void execute(
xmlrpc_c::paramList const& paramList,
xmlrpc_c::value * const retvalP);
private:
HostPool * hpool;
UserPool * upool;
};
/* ---------------------------------------------------------------------- */
class ClusterRemove: public xmlrpc_c::method
{
public:
ClusterRemove(
HostPool * _hpool,
UserPool * _upool):
hpool(_hpool),
upool(_upool)
{
_signature="A:si";
_help="Removes a host from its cluster";
};
~ClusterRemove(){};
void execute(
xmlrpc_c::paramList const& paramList,
xmlrpc_c::value * const retvalP);
private:
HostPool * hpool;
UserPool * upool;
};
/* ---------------------------------------------------------------------- */
class ClusterPoolInfo: public xmlrpc_c::method
{
public:
ClusterPoolInfo(
HostPool * _hpool,
UserPool * _upool):
hpool(_hpool),
upool(_upool)
{
_signature="A:s";
_help="Returns the cluster pool information";
};
~ClusterPoolInfo(){};
void execute(
xmlrpc_c::paramList const& paramList,
xmlrpc_c::value * const retvalP);
private:
HostPool * hpool;
UserPool * upool;
};
/* ---------------------------------------------------------------------- */
/* Virtual Network Interface */
/* ---------------------------------------------------------------------- */

View File

@ -18,7 +18,7 @@
#-------------------------------------------------------------------------------
# Install program for OpenNebula. It will install it relative to
# $ONE_LOCATION if defined with the -d option, otherwise it'll be installed
# $ONE_LOCATION if defined with the -d option, otherwise it'll be installed
# under /. In this case you may specify the oneadmin user/group, so you do
# not need to run the OpenNebula daemon with root privileges
#-------------------------------------------------------------------------------
@ -28,9 +28,9 @@
#-------------------------------------------------------------------------------
usage() {
echo
echo "Usage: install.sh [-u install_user] [-g install_group] [-k keep conf]"
echo "Usage: install.sh [-u install_user] [-g install_group] [-k keep conf]"
echo " [-d ONE_LOCATION] [-c occi|ec2] [-r] [-h]"
echo
echo
echo "-u: user that will run opennebula, defults to user executing install.sh"
echo "-g: group of the user that will run opennebula, defults to user"
echo " executing install.sh"
@ -46,7 +46,7 @@ usage() {
TEMP_OPT=`getopt -o hkrlc:u:g:d: -n 'install.sh' -- "$@"`
if [ $? != 0 ] ; then
if [ $? != 0 ] ; then
usage
exit 1
fi
@ -98,12 +98,12 @@ if [ -z "$ROOT" ] ; then
LOCK_LOCATION="/var/lock/one"
INCLUDE_LOCATION="/usr/include"
SHARE_LOCATION="/usr/share/doc/opennebula"
if [ "$CLIENT" = "no" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \
$INCLUDE_LOCATION $SHARE_LOCATION \
$LOG_LOCATION $RUN_LOCATION $LOCK_LOCATION"
DELETE_DIRS="$LIB_LOCATION $ETC_LOCATION $LOG_LOCATION $VAR_LOCATION \
$RUN_LOCATION $SHARE_DIRS"
@ -127,7 +127,7 @@ else
if [ "$CLIENT" = "no" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \
$INCLUDE_LOCATION $SHARE_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
CHOWN_DIRS="$ROOT"
@ -258,6 +258,7 @@ BIN_FILES="src/nebula/oned \
src/cli/onevnet \
src/cli/oneuser \
src/cli/oneimage \
src/cli/onecluster \
share/scripts/one"
#-------------------------------------------------------------------------------
@ -299,6 +300,8 @@ RUBY_OPENNEBULA_LIB_FILES="src/oca/ruby/OpenNebula/Host.rb \
src/oca/ruby/OpenNebula/VirtualNetworkPool.rb \
src/oca/ruby/OpenNebula/Image.rb \
src/oca/ruby/OpenNebula/ImagePool.rb \
src/oca/ruby/OpenNebula/Cluster.rb \
src/oca/ruby/OpenNebula/ClusterPool.rb \
src/oca/ruby/OpenNebula/XMLUtils.rb"
#-------------------------------------------------------------------------------
# Driver executable files, to be installed under $LIB_LOCATION/mads
@ -325,8 +328,8 @@ MADS_LIB_FILES="src/mad/sh/madcommon.sh \
src/hm_mad/one_hm.rb \
src/hm_mad/one_hm \
src/authm_mad/one_auth_mad.rb \
src/authm_mad/one_auth_mad"
src/authm_mad/one_auth_mad \
src/hm_mad/one_hm"
#-------------------------------------------------------------------------------
# Information Manager Probes, to be installed under $LIB_LOCATION/im_probes
#-------------------------------------------------------------------------------
@ -490,7 +493,7 @@ COMMON_CLOUD_LIB_FILES="src/cloud/common/CloudServer.rb \
COMMON_CLOUD_CLIENT_LIB_FILES="src/cloud/common/CloudClient.rb"
#-------------------------------------------------------------------------------
# EC2 Query for OpenNebula
# EC2 Query for OpenNebula
#-------------------------------------------------------------------------------
ECO_LIB_FILES="src/cloud/ec2/lib/EC2QueryClient.rb \
@ -536,7 +539,7 @@ OCCI_LIB_FILES="src/cloud/occi/lib/OCCIServer.rb \
src/cloud/occi/lib/VirtualNetworkOCCI.rb \
src/cloud/occi/lib/VirtualNetworkPoolOCCI.rb \
src/cloud/occi/lib/ImageOCCI.rb \
src/cloud/occi/lib/ImagePoolOCCI.rb"
src/cloud/occi/lib/ImagePoolOCCI.rb"
OCCI_LIB_CLIENT_FILES="src/cloud/occi/lib/OCCIClient.rb"
@ -563,7 +566,7 @@ OCCI_ETC_TEMPLATE_FILES="src/cloud/occi/etc/templates/small.erb \
# --- Create OpenNebula directories ---
if [ "$UNINSTALL" = "no" ] ; then
if [ "$UNINSTALL" = "no" ] ; then
for d in $MAKE_DIRS; do
mkdir -p $DESTDIR$d
done
@ -595,10 +598,10 @@ fi
for i in ${INSTALL_SET[@]}; do
SRC=$`echo $i | cut -d: -f1`
DST=`echo $i | cut -d: -f2`
eval SRC_FILES=$SRC
for f in $SRC_FILES; do
eval SRC_FILES=$SRC
for f in $SRC_FILES; do
do_file $f $DST
done
done
@ -607,23 +610,23 @@ if [ "$CLIENT" = "no" -a "$INSTALL_ETC" = "yes" ] ; then
for i in ${INSTALL_ETC_FILES[@]}; do
SRC=$`echo $i | cut -d: -f1`
DST=`echo $i | cut -d: -f2`
eval SRC_FILES=$SRC
OLD_LINK=$LINK
LINK="no"
for f in $SRC_FILES; do
for f in $SRC_FILES; do
do_file $f $DST
done
LINK=$OLD_LINK
done
fi
# --- Set ownership or remove OpenNebula directories ---
if [ "$UNINSTALL" = "no" ] ; then
if [ "$UNINSTALL" = "no" ] ; then
for d in $CHOWN_DIRS; do
chown -R $ONEADMIN_USER:$ONEADMIN_GROUP $DESTDIR$d
done

View File

@ -49,7 +49,7 @@ end
def print_header(format_str, str, underline)
scr_bold
scr_underline if underline
print format_str % str
print format_str % str
scr_restore
puts
end
@ -80,7 +80,7 @@ ShowTableExample={
# Class to print tables
class ShowTable
attr_accessor :ext, :columns
# table => definition of the table to print
# ext => external variables (Hash), @ext
def initialize(table, ext=nil)
@ -89,7 +89,7 @@ class ShowTable
@ext=ext if ext.kind_of?(Hash)
@columns=@table[:default]
end
# Returns a formated string for header
def header_str
@columns.collect {|c|
@ -101,7 +101,7 @@ class ShowTable
end
}.compact.join(' ')
end
# Returns an array with header titles
def header_array
@columns.collect {|c|
@ -112,39 +112,39 @@ class ShowTable
end
}.compact
end
def data_str(data, options=nil)
# TODO: Use data_array so it can be ordered and/or filtered
res_data=data_array(data, options)
res_data.collect {|d|
(0..(@columns.length-1)).collect {|c|
dat=d[c]
col=@columns[c]
format_data(col, dat) if @table[col]
}.join(' ')
}.join("\n")
#data.collect {|d|
# @columns.collect {|c|
# format_data(c, @table[c][:proc].call(d, @ext)) if @table[c]
# }.join(' ')
#}.join("\n")
end
def data_array(data, options=nil)
res_data=data.collect {|d|
@columns.collect {|c|
@table[c][:proc].call(d, @ext).to_s if @table[c]
}
}
if options
filter_data!(res_data, options[:filter]) if options[:filter]
sort_data!(res_data, options[:order]) if options[:order]
end
res_data
end
@ -153,14 +153,14 @@ class ShowTable
size=@table[field][:size]
"%#{minus}#{size}.#{size}s" % [ data.to_s ]
end
def get_order_column(column)
desc=column.match(/^-/)
col_name=column.gsub(/^-/, '')
index=@columns.index(col_name.to_sym)
[index, desc]
end
def sort_data!(data, order)
data.sort! {|a,b|
# rows are equal by default
@ -169,9 +169,9 @@ class ShowTable
# compare
pos, dec=get_order_column(o)
break if !pos
r = (b[pos]<=>a[pos])
# if diferent set res (return value) and exit loop
if r!=0
# change sign if the order is decreasing
@ -183,7 +183,7 @@ class ShowTable
res
}
end
def filter_data!(data, filters)
filters.each {|key, value|
pos=@columns.index(key.downcase.to_sym)
@ -198,7 +198,7 @@ class ShowTable
end
}
end
def print_help
text=[]
@table.each {|option, data|
@ -207,7 +207,7 @@ class ShowTable
}
text.join("\n")
end
end
@ -251,9 +251,9 @@ def get_entity_id(name, pool_class)
# TODO: Check for errors
objects=pool.select {|object| object.name==name }
class_name=pool_class.name.split('::').last.gsub(/Pool$/, '')
if objects.length>0
if objects.length>1
puts "There are multiple #{class_name}'s with name #{name}."
@ -265,7 +265,7 @@ def get_entity_id(name, pool_class)
puts "#{class_name} named #{name} not found."
exit -1
end
result
end
@ -289,6 +289,10 @@ def get_image_id(name)
get_entity_id(name, OpenNebula::ImagePool)
end
def get_cluster_id(name)
get_entity_id(name, OpenNebula::ClusterPool)
end
def str_running_time(data)
stime=Time.at(data["stime"].to_i)
if data["etime"]=="0"
@ -317,9 +321,9 @@ def expand_range(param)
last=match[4]
post=match[5]
size=0
result=Array.new
if operator=='-'
range=(start.to_i..last.to_i)
size=last.size
@ -327,7 +331,7 @@ def expand_range(param)
size=(start.to_i+last.to_i-1).to_s.size
range=(start.to_i..(start.to_i+last.to_i-1))
end
if start[0]==?0
range.each do |num|
result<<sprintf("%s%0#{size}d%s", pre, num, post)

214
src/cli/onecluster Executable file
View File

@ -0,0 +1,214 @@
#!/usr/bin/env ruby
# -------------------------------------------------------------------------- #
# Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
ONE_LOCATION=ENV["ONE_LOCATION"]
if !ONE_LOCATION
RUBY_LIB_LOCATION="/usr/lib/one/ruby"
else
RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby"
end
$: << RUBY_LIB_LOCATION
require 'OpenNebula'
require 'client_utilities'
require 'command_parse'
# Column definitions for the `onecluster list` table: each entry gives the
# header name, help text, column width and a proc extracting the value from
# a Cluster object.
ShowTableUP={
    :id => {
        :name => "ID",
        :desc => "ONE identifier for cluster",
        :size => 4,
        :proc => lambda {|d,e|
            d.id
        }
    },
    :name => {
        :name => "NAME",
        :desc => "Name of the cluster",
        :size => 8,
        :proc => lambda {|d,e|
            d.name
        }
    },
    # Columns shown when the user does not select specific ones with -l
    :default => [:id, :name]
}
# Lists the cluster pool in tabular form (used by `onecluster list`).
class UPShow
    def initialize
        @clusterpool=OpenNebula::ClusterPool.new(get_one_client)
        @table=ShowTable.new(ShowTableUP)
    end
    # Prints the bold, underlined column header row.
    def header_up_small
        scr_bold
        scr_underline
        print @table.header_str
        scr_restore
        puts ""
    end
    # Retrieves the pool and prints one row per cluster.
    # options may carry :columns to restrict the columns shown.
    # Returns the Error object if the pool info call failed.
    def list_short(options=nil)
        res=@clusterpool.info
        if options
            @table.columns=options[:columns] if options[:columns]
        end
        if OpenNebula.is_error?(res)
            result=res
        else
            result=res
            header_up_small
            puts @table.data_str(@clusterpool, options)
            result
        end
    end
end
# Command-line parser for onecluster: supplies the command help text and
# the list-column help to the generic CommandParse machinery.
class OneUPParse < CommandParse

    # Help text shown by `onecluster -h` / on unknown commands.
    COMMANDS_HELP=<<-EOT
Commands:

* create (Creates a new cluster)
    onecluster create clustername

* delete (Removes a cluster)
    onecluster delete <id>

* list (Lists all the clusters in the pool)
    onecluster list

* addhost (Add a host to the cluster)
    onecluster addhost <host_id> <cluster_id>

* removehost (Remove a host from the cluster)
    onecluster removehost <host_id> <cluster_id>

EOT

    def text_commands
        COMMANDS_HELP
    end

    def text_command_name
        "onecluster"
    end

    def list_options
        table=ShowTable.new(ShowTableUP)
        table.print_help
    end
end
# --- Main: parse global options, then dispatch on the first argument ---
oneup_opts=OneUPParse.new
oneup_opts.parse(ARGV)
ops=oneup_opts.options
result=[false, "Unknown error"]
command=ARGV.shift
case command
when "create"
    # onecluster create <clustername>
    check_parameters("create", 1)
    cluster=OpenNebula::Cluster.new(OpenNebula::Cluster.build_xml, get_one_client)
    result=cluster.allocate(ARGV[0])
    if is_successful?(result)
        puts "ID: " + cluster.id.to_s if ops[:verbose]
        exit 0
    end
when "delete"
    # onecluster delete <id|name> (ranges/lists are expanded)
    check_parameters("delete", 1)
    args=expand_args(ARGV)
    args.each do |param|
        cluster_id=get_cluster_id(param)
        cluster=OpenNebula::Cluster.new(
            OpenNebula::Cluster.build_xml(cluster_id), get_one_client)
        result=cluster.delete
        if !OpenNebula.is_error?(result)
            puts "Cluster deleted" if ops[:verbose]
            break
        end
    end
when "addhost"
    # onecluster addhost <host_id>... <cluster_id>: last arg is the cluster
    check_parameters("addhost", 2)
    cluster_id=get_cluster_id(ARGV[-1])
    cluster=OpenNebula::Cluster.new(
        OpenNebula::Cluster.build_xml(cluster_id), get_one_client)
    args=expand_args(ARGV[0..-2])
    args.each do |param|
        host_id=get_host_id(param)
        result=cluster.add_host(host_id)
        if is_successful?(result)
            puts "Added HOST to the Cluster" if ops[:verbose]
        else
            break
        end
    end
when "removehost"
    # onecluster removehost <host_id>... <cluster_id>: last arg is the cluster
    check_parameters("removehost", 2)
    cluster_id=get_cluster_id(ARGV[-1])
    cluster=OpenNebula::Cluster.new(
        OpenNebula::Cluster.build_xml(cluster_id), get_one_client)
    args=expand_args(ARGV[0..-2])
    args.each do |param|
        host_id=get_host_id(param)
        result=cluster.remove_host(host_id)
        if is_successful?(result)
            puts "Host removed from the Cluster" if ops[:verbose]
        else
            break
        end
    end
when "list"
    # Plain table by default; -x dumps the pool XML instead
    if !ops[:xml]
        uplist=UPShow.new
        ops[:columns]=ops[:list] if ops[:list]
        result=uplist.list_short(ops)
    else
        clusterpool=OpenNebula::ClusterPool.new(get_one_client)
        clusterpool.info
        puts clusterpool.to_xml(true)
    end
else
    oneup_opts.print_help
    exit -1
end
# Any command that fell through with an Error reports it and exits non-zero
if OpenNebula.is_error?(result)
    puts "Error: " + result.message
    exit -1
end

View File

@ -46,6 +46,13 @@ ShowTableHost={
:left => true,
:proc => lambda {|d,e| d.name }
},
:cluster => {
:name => "CLUSTER",
:desc => "Clustername",
:size => 25,
:left => true,
:proc => lambda {|d,e| d.cluster }
},
:rvm => {
:name => "RVM",
:desc => "Number of virtual machines running",
@ -98,7 +105,7 @@ ShowTableHost={
},
:default => [:id, :name, :rvm, :tcpu, :fcpu, :acpu, :tmem, :fmem, :stat]
:default => [:id, :name, :cluster, :rvm, :tcpu, :fcpu, :acpu, :tmem, :fmem, :stat]
}
class HostShow
@ -257,6 +264,7 @@ when "show"
puts str % ["ID", host[:id]]
puts str % ["NAME", host[:name]]
puts str % ["CLUSTER", host[:cluster]]
puts str % ["STATE", host.state_str]
puts str % ["IM_MAD", host[:im_mad]]
puts str % ["VM_MAD", host[:vm_mad]]

View File

@ -87,24 +87,6 @@ class UPShow
result
end
end
def top(options=nil)
delay=1
delay=options[:delay] if options && options[:delay]
result=nil
begin
while true
scr_cls
scr_move(0,0)
result=list_short(options)
sleep delay
end
rescue Exception
end
result
end
end
class OneUPParse < CommandParse

View File

@ -132,24 +132,6 @@ class VNShow
result
end
end
def top(options=nil)
delay=1
delay=options[:delay] if options && options[:delay]
result=nil
begin
while true
scr_cls
scr_move(0,0)
result=list_short(options)
sleep delay
end
rescue Exception
end
result
end
end
class OneVNParse < CommandParse

173
src/host/ClusterPool.cc Normal file
View File

@ -0,0 +1,173 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "ClusterPool.h"
const char * ClusterPool::table = "cluster_pool";
const char * ClusterPool::db_names = "oid, cluster_name";
const char * ClusterPool::db_bootstrap =
"CREATE TABLE IF NOT EXISTS cluster_pool ("
"oid INTEGER PRIMARY KEY, cluster_name VARCHAR(128), "
"UNIQUE(cluster_name) )";
const string ClusterPool::DEFAULT_CLUSTER_NAME = "default";
/* -------------------------------------------------------------------------- */
/**
 *  Allocates a new cluster in the pool and persists it in the DB.
 *  @param clid output, the id assigned to the new cluster (-1 on failure)
 *  @param name name of the new cluster; must not be in use already
 *  @param db   pointer to the DB
 *  @return the id assigned to the cluster, or -1 in case of failure
 */
int ClusterPool::allocate(int * clid, string name, SqlDB *db)
{
    map<int, string>::iterator it;

    // Return error if the name already exists
    for (it = cluster_names.begin(); it != cluster_names.end(); it++)
    {
        if (it->second == name)
        {
            *clid = -1;
            return *clid;
        }
    }

    // New id = highest existing id + 1. Guard the empty-pool case:
    // rbegin() on an empty map is undefined behavior.
    if (cluster_names.empty())
    {
        *clid = 0;
    }
    else
    {
        *clid = cluster_names.rbegin()->first + 1;
    }

    if (insert(*clid, name, db) != 0)
    {
        // TODO: LOG ERRORS
        *clid = -1;
    }

    return *clid;
}
/* -------------------------------------------------------------------------- */
/**
 *  Returns the XML representation of the given cluster, or the empty
 *  string when clid does not name an existing cluster.
 */
string ClusterPool::info(int clid)
{
    ostringstream xml;

    map<int, string>::iterator found = cluster_names.find(clid);

    if (found == cluster_names.end())
    {
        return xml.str();
    }

    dump_cluster(xml, found->first, found->second);

    return xml.str();
}
/* -------------------------------------------------------------------------- */
int ClusterPool::drop(int clid, SqlDB *db)
{
int rc;
ostringstream oss;
// Return error if cluster is 'default' or if it doesn't exist
if( clid == 0 || cluster_names.count(clid) == 0 )
{
return -1;
}
oss << "DELETE FROM " << table << " WHERE oid=" << clid;
rc = db->exec(oss);
if(rc == 0)
{
cluster_names.erase(clid);
}
return rc;
}
/* -------------------------------------------------------------------------- */
/**
 *  Dumps every cluster in the pool, in XML format, to oss.
 *  @return 0 (always succeeds)
 */
int ClusterPool::dump(ostringstream& oss)
{
    oss << "<CLUSTER_POOL>";

    map<int, string>::iterator cur;

    for (cur = cluster_names.begin(); cur != cluster_names.end(); ++cur)
    {
        dump_cluster(oss, cur->first, cur->second);
    }

    oss << "</CLUSTER_POOL>";

    return 0;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int ClusterPool::insert(int oid, string name, SqlDB *db)
{
ostringstream oss;
int rc;
char * sql_name;
sql_name = db->escape_str(name.c_str());
if ( sql_name == 0 )
{
return -1;
}
oss << "INSERT INTO "<< table <<" ("<< db_names <<") VALUES ("
<< oid << ","
<< "'" << sql_name << "')";
rc = db->exec(oss);
db->free_str(sql_name);
if( rc == 0 )
{
cluster_names.insert( make_pair(oid, name) );
}
return rc;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/**
 *  Appends the XML element for a single cluster (id, name) to oss.
 */
void ClusterPool::dump_cluster(ostringstream& oss, int id, string name)
{
    oss << "<CLUSTER>"
        << "<ID>"   << id   << "</ID>"
        << "<NAME>" << name << "</NAME>"
        << "</CLUSTER>";
}

View File

@ -40,6 +40,7 @@ Host::Host(
vmm_mad_name(_vmm_mad_name),
tm_mad_name(_tm_mad_name),
last_monitored(0),
cluster(ClusterPool::DEFAULT_CLUSTER_NAME),
host_template(id)
{};
@ -53,12 +54,13 @@ Host::~Host(){};
const char * Host::table = "host_pool";
const char * Host::db_names = "(oid,host_name,state,im_mad,vm_mad,"
"tm_mad,last_mon_time)";
"tm_mad,last_mon_time, cluster)";
const char * Host::db_bootstrap = "CREATE TABLE IF NOT EXISTS host_pool ("
"oid INTEGER PRIMARY KEY,host_name VARCHAR(512), state INTEGER,"
"im_mad VARCHAR(128),vm_mad VARCHAR(128),tm_mad VARCHAR(128),"
"last_mon_time INTEGER, UNIQUE(host_name, im_mad, vm_mad, tm_mad) )";
"last_mon_time INTEGER, cluster VARCHAR(128), "
"UNIQUE(host_name, im_mad, vm_mad, tm_mad) )";
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
@ -72,6 +74,7 @@ int Host::select_cb(void * nil, int num, char **values, char ** names)
(!values[VM_MAD]) ||
(!values[TM_MAD]) ||
(!values[LAST_MON_TIME]) ||
(!values[CLUSTER]) ||
(num != LIMIT ))
{
return -1;
@ -87,6 +90,8 @@ int Host::select_cb(void * nil, int num, char **values, char ** names)
last_monitored = static_cast<time_t>(atoi(values[LAST_MON_TIME]));
cluster = values[CLUSTER];
host_template.id = oid;
host_share.hsid = oid;
@ -118,7 +123,6 @@ int Host::select(SqlDB *db)
}
// Get the template
rc = host_template.select(db);
if ( rc != 0 )
@ -127,7 +131,6 @@ int Host::select(SqlDB *db)
}
// Select the host shares from the DB
rc = host_share.select(db);
if ( rc != 0 )
@ -138,7 +141,6 @@ int Host::select(SqlDB *db)
return 0;
}
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
@ -238,6 +240,7 @@ int Host::insert_replace(SqlDB *db, bool replace)
char * sql_im_mad_name;
char * sql_tm_mad_name;
char * sql_vmm_mad_name;
char * sql_cluster;
// Update the Host
@ -269,6 +272,13 @@ int Host::insert_replace(SqlDB *db, bool replace)
goto error_vmm;
}
sql_cluster = db->escape_str(cluster.c_str());
if ( sql_cluster == 0 )
{
goto error_cluster;
}
if(replace)
{
oss << "REPLACE";
@ -287,7 +297,8 @@ int Host::insert_replace(SqlDB *db, bool replace)
<< "'" << sql_im_mad_name << "',"
<< "'" << sql_vmm_mad_name << "',"
<< "'" << sql_tm_mad_name << "',"
<< last_monitored << ")";
<< last_monitored << ","
<< "'" << sql_cluster << "')";
rc = db->exec(oss);
@ -295,9 +306,12 @@ int Host::insert_replace(SqlDB *db, bool replace)
db->free_str(sql_im_mad_name);
db->free_str(sql_tm_mad_name);
db->free_str(sql_vmm_mad_name);
db->free_str(sql_cluster);
return rc;
error_cluster:
db->free_str(sql_vmm_mad_name);
error_vmm:
db->free_str(sql_tm_mad_name);
error_tm:
@ -320,6 +334,7 @@ int Host::dump(ostringstream& oss, int num, char **values, char **names)
(!values[VM_MAD]) ||
(!values[TM_MAD]) ||
(!values[LAST_MON_TIME]) ||
(!values[CLUSTER]) ||
(num != LIMIT + HostShare::LIMIT ))
{
return -1;
@ -333,7 +348,8 @@ int Host::dump(ostringstream& oss, int num, char **values, char **names)
"<IM_MAD>" << values[IM_MAD] <<"</IM_MAD>" <<
"<VM_MAD>" << values[VM_MAD] <<"</VM_MAD>" <<
"<TM_MAD>" << values[TM_MAD] <<"</TM_MAD>" <<
"<LAST_MON_TIME>"<< values[LAST_MON_TIME]<<"</LAST_MON_TIME>";
"<LAST_MON_TIME>"<< values[LAST_MON_TIME]<<"</LAST_MON_TIME>"<<
"<CLUSTER>" << values[CLUSTER] <<"</CLUSTER>";
HostShare::dump(oss,num - LIMIT, values + LIMIT, names + LIMIT);
@ -402,21 +418,20 @@ int Host::update_info(string &parse_str)
ostream& operator<<(ostream& os, Host& host)
{
string host_str;
string host_str;
os << host.to_xml(host_str);
os << host.to_xml(host_str);
return os;
};
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
string& Host::to_xml(string& xml) const
{
string template_xml;
string share_xml;
string share_xml;
ostringstream oss;
oss <<
@ -428,9 +443,10 @@ string& Host::to_xml(string& xml) const
"<VM_MAD>" << vmm_mad_name << "</VM_MAD>" <<
"<TM_MAD>" << tm_mad_name << "</TM_MAD>" <<
"<LAST_MON_TIME>" << last_monitored << "</LAST_MON_TIME>" <<
host_share.to_xml(share_xml) <<
"<CLUSTER>" << cluster << "</CLUSTER>" <<
host_share.to_xml(share_xml) <<
host_template.to_xml(template_xml) <<
"</HOST>";
"</HOST>";
xml = oss.str();
@ -443,22 +459,23 @@ string& Host::to_xml(string& xml) const
string& Host::to_str(string& str) const
{
string template_str;
string share_str;
string share_str;
ostringstream os;
os <<
"ID = " << oid << endl <<
"NAME = " << hostname << endl <<
"STATE = " << state << endl <<
"IM MAD = " << im_mad_name << endl <<
"VMM MAD = " << vmm_mad_name << endl <<
"TM MAD = " << tm_mad_name << endl <<
"LAST_MON = " << last_monitored << endl <<
"ID = " << oid << endl <<
"NAME = " << hostname << endl <<
"STATE = " << state << endl <<
"IM MAD = " << im_mad_name << endl <<
"VMM MAD = " << vmm_mad_name << endl <<
"TM MAD = " << tm_mad_name << endl <<
"LAST_MON = " << last_monitored << endl <<
"CLUSTER = " << cluster << endl <<
"ATTRIBUTES" << endl << host_template.to_str(template_str) << endl <<
"HOST SHARES" << endl << host_share.to_str(share_str) <<endl;
str = os.str();
str = os.str();
return str;
return str;
}

View File

@ -18,7 +18,55 @@
/* Host Pool */
/* ************************************************************************** */
#include <stdexcept>
#include "HostPool.h"
#include "ClusterPool.h"
#include "NebulaLog.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int HostPool::init_cb(void *nil, int num, char **values, char **names)
{
if ( num != 2 || values == 0 || values[0] == 0 )
{
return -1;
}
cluster_pool.cluster_names.insert( make_pair(atoi(values[0]), values[1]) );
return 0;
}
/* -------------------------------------------------------------------------- */
HostPool::HostPool(SqlDB* db):PoolSQL(db,Host::table)
{
ostringstream sql;
set_callback(static_cast<Callbackable::Callback>(&HostPool::init_cb));
sql << "SELECT " << ClusterPool::db_names << " FROM "
<< ClusterPool::table;
db->exec(sql, this);
unset_callback();
if (cluster_pool.cluster_names.empty())
{
int rc = cluster_pool.insert(0, ClusterPool::DEFAULT_CLUSTER_NAME, db);
if(rc != 0)
{
throw runtime_error("Could not create default cluster HostPool");
}
}
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int HostPool::allocate (
int * oid,
@ -129,3 +177,55 @@ int HostPool::dump(ostringstream& oss, const string& where)
return rc;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
int HostPool::drop_cluster(int clid)
{
int rc;
map<int, string>::iterator it;
string cluster_name;
it = cluster_pool.cluster_names.find(clid);
if ( it == cluster_pool.cluster_names.end() )
{
return -1;
}
cluster_name = it->second;
// try to drop the cluster from the pool and DB
rc = cluster_pool.drop(clid, db);
// Move the hosts assigned to the deleted cluster to the default one
if( rc == 0 )
{
Host* host;
vector<int> hids;
vector<int>::iterator hid_it;
string where = "cluster = '" + cluster_name + "'";
search(hids, Host::table, where);
for ( hid_it=hids.begin() ; hid_it < hids.end(); hid_it++ )
{
host = get(*hid_it, true);
if ( host == 0 )
{
continue;
}
set_default_cluster(host);
update(host);
host->unlock();
}
}
return rc;
}

View File

@ -26,6 +26,7 @@ source_files=[
'HostShare.cc',
'HostPool.cc',
'HostTemplate.cc',
'ClusterPool.cc',
]
# Build library

View File

@ -33,7 +33,7 @@ const string xmls[] =
{
"<HOST><ID>0</ID><NAME>Host one</NAME><STATE>0</STATE>"
"<IM_MAD>im_mad</IM_MAD><VM_MAD>vmm_mad</VM_MAD><TM_MAD>tm_mad</TM_MAD>"
"<LAST_MON_TIME>0</LAST_MON_TIME><HOST_SHARE><HID>0</HID>"
"<LAST_MON_TIME>0</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOST_SHARE><HID>0</HID>"
"<DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</MEM_USAGE><CPU_USAGE>0</CPU_USAGE>"
"<MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM><MAX_CPU>0</MAX_CPU>"
"<FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CPU>0</FREE_CPU>"
@ -42,7 +42,7 @@ const string xmls[] =
"<HOST><ID>1</ID><NAME>Second host</NAME><STATE>0</STATE>"
"<IM_MAD>im_mad</IM_MAD><VM_MAD>vmm_mad</VM_MAD><TM_MAD>tm_mad</TM_MAD>"
"<LAST_MON_TIME>0</LAST_MON_TIME><HOST_SHARE><HID>1</HID>"
"<LAST_MON_TIME>0</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOST_SHARE><HID>1</HID>"
"<DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</MEM_USAGE><CPU_USAGE>0</CPU_USAGE>"
"<MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM><MAX_CPU>0</MAX_CPU>"
"<FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CPU>0</FREE_CPU>"
@ -54,34 +54,34 @@ const string xmls[] =
const string xml_dump =
"<HOST_POOL><HOST><ID>0</ID><NAME>a</NAME><STATE>0</STATE><IM_MAD>im_mad</I"
"M_MAD><VM_MAD>vmm_mad</VM_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0"
"</LAST_MON_TIME><HOST_SHARE><HID>0</HID><DISK_USAGE>0</DISK_USAGE><MEM"
"</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOST_SHARE><HID>0</HID><DISK_USAGE>0</DISK_USAGE><MEM"
"_USAGE>0</MEM_USAGE><CPU_USAGE>0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM"
">0</MAX_MEM><MAX_CPU>0</MAX_CPU><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_"
"MEM><FREE_CPU>0</FREE_CPU><USED_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><U"
"SED_CPU>0</USED_CPU><RUNNING_VMS>0</RUNNING_VMS></HOST_SHARE></HOST><HOST>"
"<ID>1</ID><NAME>a name</NAME><STATE>0</STATE><IM_MAD>im_mad</IM_MAD><VM_MA"
"D>vmm_mad</VM_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_M"
"ON_TIME><HOST_SHARE><HID>1</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</ME"
"ON_TIME><CLUSTER>default</CLUSTER><HOST_SHARE><HID>1</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</ME"
"M_USAGE><CPU_USAGE>0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM>"
"<MAX_CPU>0</MAX_CPU><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CP"
"U>0</FREE_CPU><USED_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><USED_CPU>0</U"
"SED_CPU><RUNNING_VMS>0</RUNNING_VMS></HOST_SHARE></HOST><HOST><ID>2</ID><N"
"AME>a_name</NAME><STATE>0</STATE><IM_MAD>im_mad</IM_MAD><VM_MAD>vmm_mad</V"
"M_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><HOS"
"M_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOS"
"T_SHARE><HID>2</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</MEM_USAGE><CPU"
"_USAGE>0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM><MAX_CPU>0</"
"MAX_CPU><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CPU>0</FREE_CP"
"U><USED_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><USED_CPU>0</USED_CPU><RUN"
"NING_VMS>0</RUNNING_VMS></HOST_SHARE></HOST><HOST><ID>3</ID><NAME>another "
"name</NAME><STATE>0</STATE><IM_MAD>im_mad</IM_MAD><VM_MAD>vmm_mad</VM_MAD>"
"<TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><HOST_SHAR"
"<TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOST_SHAR"
"E><HID>3</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</MEM_USAGE><CPU_USAGE"
">0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM><MAX_CPU>0</MAX_CP"
"U><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CPU>0</FREE_CPU><USE"
"D_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><USED_CPU>0</USED_CPU><RUNNING_V"
"MS>0</RUNNING_VMS></HOST_SHARE></HOST><HOST><ID>4</ID><NAME>host</NAME><ST"
"ATE>0</STATE><IM_MAD>im_mad</IM_MAD><VM_MAD>vmm_mad</VM_MAD><TM_MAD>tm_mad"
"</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><HOST_SHARE><HID>4</HID>"
"</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOST_SHARE><HID>4</HID>"
"<DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</MEM_USAGE><CPU_USAGE>0</CPU_USAGE>"
"<MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM><MAX_CPU>0</MAX_CPU><FREE_DISK>0"
"</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CPU>0</FREE_CPU><USED_DISK>0</USED"
@ -91,36 +91,42 @@ const string xml_dump =
const string xml_dump_like_a =
"<HOST_POOL><HOST><ID>0</ID><NAME>a</NAME><STATE>0</STATE><IM_MAD>im_mad</I"
"M_MAD><VM_MAD>vmm_mad</VM_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0"
"</LAST_MON_TIME><HOST_SHARE><HID>0</HID><DISK_USAGE>0</DISK_USAGE><MEM"
"</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOST_SHARE><HID>0</HID><DISK_USAGE>0</DISK_USAGE><MEM"
"_USAGE>0</MEM_USAGE><CPU_USAGE>0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM"
">0</MAX_MEM><MAX_CPU>0</MAX_CPU><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_"
"MEM><FREE_CPU>0</FREE_CPU><USED_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><U"
"SED_CPU>0</USED_CPU><RUNNING_VMS>0</RUNNING_VMS></HOST_SHARE></HOST><HOST>"
"<ID>1</ID><NAME>a name</NAME><STATE>0</STATE><IM_MAD>im_mad</IM_MAD><VM_MA"
"D>vmm_mad</VM_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_M"
"ON_TIME><HOST_SHARE><HID>1</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</ME"
"ON_TIME><CLUSTER>default</CLUSTER><HOST_SHARE><HID>1</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</ME"
"M_USAGE><CPU_USAGE>0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM>"
"<MAX_CPU>0</MAX_CPU><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CP"
"U>0</FREE_CPU><USED_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><USED_CPU>0</U"
"SED_CPU><RUNNING_VMS>0</RUNNING_VMS></HOST_SHARE></HOST><HOST><ID>2</ID><N"
"AME>a_name</NAME><STATE>0</STATE><IM_MAD>im_mad</IM_MAD><VM_MAD>vmm_mad</V"
"M_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><HOS"
"M_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOS"
"T_SHARE><HID>2</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</MEM_USAGE><CPU"
"_USAGE>0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM><MAX_CPU>0</"
"MAX_CPU><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CPU>0</FREE_CP"
"U><USED_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><USED_CPU>0</USED_CPU><RUN"
"NING_VMS>0</RUNNING_VMS></HOST_SHARE></HOST><HOST><ID>3</ID><NAME>another "
"name</NAME><STATE>0</STATE><IM_MAD>im_mad</IM_MAD><VM_MAD>vmm_mad</VM_MAD>"
"<TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><HOST_SHAR"
"<TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><CLUSTER>default</CLUSTER><HOST_SHAR"
"E><HID>3</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</MEM_USAGE><CPU_USAGE"
">0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM><MAX_CPU>0</MAX_CP"
"U><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CPU>0</FREE_CPU><USE"
"D_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><USED_CPU>0</USED_CPU><RUNNING_V"
"MS>0</RUNNING_VMS></HOST_SHARE></HOST></HOST_POOL>";
const string replacement = "0000000000";
const string cluster_default =
"<CLUSTER><ID>0</ID><NAME>default</NAME></CLUSTER>";
const string cluster_xml_dump =
"<CLUSTER_POOL><CLUSTER><ID>0</ID><NAME>default</NAME></CLUSTER><CLUSTER><ID>1</ID><NAME>cluster_a</NAME></CLUSTER><CLUSTER><ID>3</ID><NAME>cluster_c</NAME></CLUSTER><CLUSTER><ID>4</ID><NAME>cluster_d</NAME></CLUSTER></CLUSTER_POOL>";
const string host_0_cluster =
"<HOST><ID>0</ID><NAME>Host one</NAME><STATE>0</STATE><IM_MAD>im_mad</IM_MAD><VM_MAD>vmm_mad</VM_MAD><TM_MAD>tm_mad</TM_MAD><LAST_MON_TIME>0</LAST_MON_TIME><CLUSTER>cluster_a</CLUSTER><HOST_SHARE><HID>0</HID><DISK_USAGE>0</DISK_USAGE><MEM_USAGE>0</MEM_USAGE><CPU_USAGE>0</CPU_USAGE><MAX_DISK>0</MAX_DISK><MAX_MEM>0</MAX_MEM><MAX_CPU>0</MAX_CPU><FREE_DISK>0</FREE_DISK><FREE_MEM>0</FREE_MEM><FREE_CPU>0</FREE_CPU><USED_DISK>0</USED_DISK><USED_MEM>0</USED_MEM><USED_CPU>0</USED_CPU><RUNNING_VMS>0</RUNNING_VMS></HOST_SHARE><TEMPLATE></TEMPLATE></HOST>";
/* ************************************************************************* */
/* ************************************************************************* */
@ -136,6 +142,14 @@ class HostPoolTest : public PoolTest
CPPUNIT_TEST (discover);
CPPUNIT_TEST (duplicates);
CPPUNIT_TEST (cluster_init);
CPPUNIT_TEST (cluster_allocate);
CPPUNIT_TEST (cluster_drop);
CPPUNIT_TEST (cluster_id);
CPPUNIT_TEST (cluster_dump);
CPPUNIT_TEST (set_cluster);
CPPUNIT_TEST (remove_cluster);
CPPUNIT_TEST_SUITE_END ();
protected:
@ -211,6 +225,8 @@ public:
CPPUNIT_ASSERT( host->get_state() == Host::DISABLED );
};
/* ********************************************************************* */
void duplicates()
{
int rc, oid_0, oid_1;
@ -245,6 +261,7 @@ public:
CPPUNIT_ASSERT( host->get_tm_mad() == tm_mad_2 );
}
/* ********************************************************************* */
void dump()
{
@ -266,6 +283,8 @@ public:
CPPUNIT_ASSERT( result == xml_dump );
}
/* ********************************************************************* */
void dump_where()
{
string names[] = {"a", "a name", "a_name", "another name", "host"};
@ -287,6 +306,8 @@ public:
CPPUNIT_ASSERT( result == xml_dump_like_a );
}
/* ********************************************************************* */
void discover()
{
int rc, oid, i;
@ -330,8 +351,198 @@ public:
CPPUNIT_ASSERT(host->isEnabled());
}
}
/* ********************************************************************* */
/* ********************************************************************* */
void cluster_init()
{
HostPool * hp = static_cast<HostPool *>(pool);
CPPUNIT_ASSERT( hp->info_cluster(0) == cluster_default );
}
/* ********************************************************************* */
void cluster_allocate()
{
HostPool * hp = static_cast<HostPool *>(pool);
int clid, rc;
rc = hp->allocate_cluster(&clid, "new_cluster");
CPPUNIT_ASSERT( rc == clid );
CPPUNIT_ASSERT( clid == 1 );
CPPUNIT_ASSERT( hp->info_cluster(clid) ==
"<CLUSTER><ID>1</ID><NAME>new_cluster</NAME></CLUSTER>");
// Try to allocate using the same name
rc = hp->allocate_cluster(&clid, "new_cluster");
CPPUNIT_ASSERT( rc == clid );
CPPUNIT_ASSERT( clid == -1 );
}
/* ********************************************************************* */
void cluster_drop()
{
HostPool * hp = static_cast<HostPool *>(pool);
int clid, rc;
// Drop a non-existing cluster
rc = hp->drop_cluster(20);
CPPUNIT_ASSERT( rc == -1 );
// Allocate a cluster and drop it
rc = hp->allocate_cluster(&clid, "new_cluster");
CPPUNIT_ASSERT( clid == 1);
rc = hp->drop_cluster(clid);
CPPUNIT_ASSERT( rc == 0 );
// Try to drop the default cluster, should fail
rc = hp->drop_cluster(0);
CPPUNIT_ASSERT( rc == -1 );
}
/* ********************************************************************* */
void cluster_id()
{
HostPool * hp = static_cast<HostPool *>(pool);
int clid, rc;
ostringstream oss;
// Allocate some clusters
rc = hp->allocate_cluster(&clid, "cluster_a");
CPPUNIT_ASSERT( rc == 1 );
rc = hp->allocate_cluster(&clid, "cluster_b");
CPPUNIT_ASSERT( rc == 2 );
rc = hp->allocate_cluster(&clid, "cluster_c");
CPPUNIT_ASSERT( rc == 3 );
rc = hp->allocate_cluster(&clid, "cluster_d");
CPPUNIT_ASSERT( rc == 4 );
// Drop id 2
rc = hp->drop_cluster(2);
CPPUNIT_ASSERT( rc == 0 );
// Next one should use id 5, because the biggest id is 4
rc = hp->allocate_cluster(&clid, "cluster_e");
CPPUNIT_ASSERT( rc == 5 );
// Drop id 5
rc = hp->drop_cluster(5);
CPPUNIT_ASSERT( rc == 0 );
// Next one should use id 5, because the biggest id is 4 again
rc = hp->allocate_cluster(&clid, "cluster_f");
CPPUNIT_ASSERT( rc == 5 );
}
/* ********************************************************************* */
void cluster_dump()
{
HostPool * hp = static_cast<HostPool *>(pool);
int clid, rc;
ostringstream oss;
// Allocate some clusters
rc = hp->allocate_cluster(&clid, "cluster_a");
CPPUNIT_ASSERT( rc == 1 );
rc = hp->allocate_cluster(&clid, "cluster_b");
CPPUNIT_ASSERT( rc == 2 );
rc = hp->allocate_cluster(&clid, "cluster_c");
CPPUNIT_ASSERT( rc == 3 );
rc = hp->allocate_cluster(&clid, "cluster_d");
CPPUNIT_ASSERT( rc == 4 );
// Drop one of them
rc = hp->drop_cluster(2);
CPPUNIT_ASSERT( rc == 0 );
// dump the pool
rc = hp->dump_cluster(oss);
CPPUNIT_ASSERT( oss.str() == cluster_xml_dump );
}
/* ********************************************************************* */
void set_cluster()
{
HostPool * hp = static_cast<HostPool *>(pool);
Host* host;
int clid, rc, oid;
string xml_str;
// Allocate a host
oid = allocate(0);
host = hp->get(0, false);
rc = hp->allocate_cluster(&clid, "cluster_a");
CPPUNIT_ASSERT( rc == 1 );
rc = hp->set_cluster(host, clid);
CPPUNIT_ASSERT( rc == 0 );
host->to_xml(xml_str);
CPPUNIT_ASSERT( xml_str == host_0_cluster);
// Try to set a non-existing cluster
rc = hp->set_cluster(host, 20);
CPPUNIT_ASSERT( rc == -1 );
CPPUNIT_ASSERT( xml_str == host_0_cluster);
}
/* ********************************************************************* */
void remove_cluster()
{
HostPool * hp = static_cast<HostPool *>(pool);
Host* host;
int clid, rc, oid;
string xml_str;
// Allocate a host
oid = allocate(0);
host = hp->get(0, false);
rc = hp->allocate_cluster(&clid, "cluster_a");
CPPUNIT_ASSERT( rc == 1 );
// Set host 0 to cluster 1
rc = hp->set_cluster(host, clid);
CPPUNIT_ASSERT( rc == 0 );
// Check
host->to_xml(xml_str);
CPPUNIT_ASSERT( xml_str == host_0_cluster);
// Remove the cluster
rc = hp->set_default_cluster(host);
CPPUNIT_ASSERT( rc == 0 );
// The host should have been moved to the default cluster
host->to_xml(xml_str);
check(0, host);
}
};
/* ************************************************************************* */
/* ************************************************************************* */
/* ************************************************************************* */

View File

@ -19,6 +19,8 @@ require 'OpenNebula/User'
require 'OpenNebula/UserPool'
require 'OpenNebula/Host'
require 'OpenNebula/HostPool'
require 'OpenNebula/Cluster'
require 'OpenNebula/ClusterPool'
module OpenNebula

View File

@ -0,0 +1,75 @@
require 'OpenNebula/Pool'
module OpenNebula
class Cluster < PoolElement
# ---------------------------------------------------------------------
# Constants and Class Methods
# ---------------------------------------------------------------------
CLUSTER_METHODS = {
:info => "cluster.info",
:allocate => "cluster.allocate",
:delete => "cluster.delete",
:addhost => "cluster.add",
:removehost => "cluster.remove",
}
# Creates a Cluster description with just its identifier
# this method should be used to create plain Cluster objects.
# +id+ the id of the user
#
# Example:
# cluster = Cluster.new(User.build_xml(3),rpc_client)
#
def Cluster.build_xml(pe_id=nil)
if pe_id
user_xml = "<CLUSTER><ID>#{pe_id}</ID></CLUSTER>"
else
user_xml = "<CLUSTER></CLUSTER>"
end
XMLUtilsElement.initialize_xml(user_xml, 'CLUSTER')
end
# ---------------------------------------------------------------------
# Class constructor
# ---------------------------------------------------------------------
def initialize(xml, client)
super(xml,client)
@client = client
end
# ---------------------------------------------------------------------
# XML-RPC Methods for the User Object
# ---------------------------------------------------------------------
def info()
super(CLUSTER_METHODS[:info], 'CLUSTER')
end
def allocate(clustername)
super(CLUSTER_METHODS[:allocate], clustername)
end
def delete()
super(CLUSTER_METHODS[:delete])
end
def add_host(host_id)
return Error.new('ID not defined') if !@pe_id
rc = @client.call(CLUSTER_METHODS[:addhost], host_id.to_i, @pe_id)
rc = nil if !OpenNebula.is_error?(rc)
return rc
end
def remove_host(host_id)
return Error.new('ID not defined') if !@pe_id
rc = @client.call(CLUSTER_METHODS[:removehost], host_id.to_i)
rc = nil if !OpenNebula.is_error?(rc)
return rc
end
end
end

View File

@ -0,0 +1,35 @@
require 'OpenNebula/Pool'
module OpenNebula
class ClusterPool < Pool
# ---------------------------------------------------------------------
# Constants and Class attribute accessors
# ---------------------------------------------------------------------
CLUSTER_POOL_METHODS = {
:info => "clusterpool.info"
}
# ---------------------------------------------------------------------
# Class constructor & Pool Methods
# ---------------------------------------------------------------------
# +client+ a Client object that represents a XML-RPC connection
def initialize(client)
super('CLUSTER_POOL','CLUSTER',client)
end
# Factory method to create User objects
def factory(element_xml)
OpenNebula::Cluster.new(element_xml,@client)
end
# ---------------------------------------------------------------------
# XML-RPC Methods for the User Object
# ---------------------------------------------------------------------
def info()
super(CLUSTER_POOL_METHODS[:info])
end
end
end

View File

@ -86,12 +86,16 @@ module OpenNebula
HOST_STATES[state]
end
# Returns the state of the Host (string value)
def short_state_str
SHORT_HOST_STATES[state_str]
end
# Returns the cluster of the Host
def cluster
self['CLUSTER']
end
private
def set_enabled(enabled)

View File

@ -0,0 +1,81 @@
$: << '../'
require 'OpenNebula'
require 'MockClient'
module OpenNebula
describe "Cluster using NOKOGIRI" do
before(:all) do
NOKOGIRI=true
client = MockClient.new()
@cluster_pool = ClusterPool.new(client)
end
it "should update the CLUSTER_POOL info" do
rc = @cluster_pool.info()
rc.nil?.should eql(true)
end
it "should iterate the USER_POOL elements and get info from them" do
rc = @cluster_pool.each{ |cluster|
cluster.class.to_s.should eql("OpenNebula::Cluster")
if cluster.id == 0
cluster.name.should eql('default')
elsif cluster.id == 1
cluster.name.should eql('Red')
elsif cluster.id == 2
cluster.name.should eql('Black')
end
}
end
it "should get a hash representation of the USER_POOL" do
cluster_hash = @cluster_pool.to_hash
cluster_hash['CLUSTER_POOL']['CLUSTER'][0]['ID'].should eql('0')
cluster_hash['CLUSTER_POOL']['CLUSTER'][0]['NAME'].should eql('default')
cluster_hash['CLUSTER_POOL']['CLUSTER'][1]['ID'].should eql('1')
cluster_hash['CLUSTER_POOL']['CLUSTER'][1]['NAME'].should eql('Red')
cluster_hash['CLUSTER_POOL']['CLUSTER'][2]['ID'].should eql('2')
cluster_hash['CLUSTER_POOL']['CLUSTER'][2]['NAME'].should eql('Black')
end
end
describe "Cluster using REXML" do
before(:all) do
NOKOGIRI=false
client = MockClient.new()
@cluster_pool = ClusterPool.new(client)
end
it "should update the CLUSTER_POOL info" do
rc = @cluster_pool.info()
rc.nil?.should eql(true)
end
it "should iterate the CLUSTER_POOL elements and get info from them" do
rc = @cluster_pool.each{ |cluster|
cluster.class.to_s.should eql("OpenNebula::Cluster")
if cluster.id == 0
cluster.name.should eql('default')
elsif cluster.id == 1
cluster.name.should eql('Red')
elsif cluster.id == 2
cluster.name.should eql('Black')
end
}
end
it "should get a hash representation of the CLUSTER_POOL" do
cluster_hash = @cluster_pool.to_hash
cluster_hash['CLUSTER_POOL']['CLUSTER'][0]['ID'].should eql('0')
cluster_hash['CLUSTER_POOL']['CLUSTER'][0]['NAME'].should eql('default')
cluster_hash['CLUSTER_POOL']['CLUSTER'][1]['ID'].should eql('1')
cluster_hash['CLUSTER_POOL']['CLUSTER'][1]['NAME'].should eql('Red')
cluster_hash['CLUSTER_POOL']['CLUSTER'][2]['ID'].should eql('2')
cluster_hash['CLUSTER_POOL']['CLUSTER'][2]['NAME'].should eql('Black')
end
end
end

View File

@ -0,0 +1,201 @@
$: << '../'
require 'OpenNebula'
require 'MockClient'
module OpenNebula
describe "Cluster using NOKOGIRI" do
before(:all) do
NOKOGIRI=true
@xml = Cluster.build_xml(5)
client = MockClient.new()
@cluster = Cluster.new(@xml,client)
end
it "should create a Nokogiri Node" do
@xml.class.to_s.should eql('Nokogiri::XML::NodeSet')
end
it "should allocate the new CLUSTER" do
@cluster.allocate(nil)
@cluster.id.should eql(5)
end
it "should update the CLUSTER info" do
@cluster.info()
@cluster.id.should eql(5)
@cluster.name.should eql('Production')
end
it "should delete the CLUSTER" do
rc = @cluster.delete()
rc.should eql(nil)
end
it "should add a host to the CLUSTER" do
rc = @cluster.add_host(nil)
rc.should eql(nil)
end
it "should remove a host from the CLUSTER" do
rc = @cluster.remove_host(nil)
rc.should eql(nil)
end
it "should access an attribute using []" do
@cluster['ID'].should eql('5')
@cluster['NAME'].should eql('Production')
end
it "should get a hash representation of the CLUSTER" do
cluster_hash = @cluster.to_hash
cluster_hash['CLUSTER']['ID'].should eql('5')
cluster_hash['CLUSTER']['NAME'].should eql('Production')
end
end
describe "Cluster using REXML" do
before(:all) do
NOKOGIRI=false
@xml = Cluster.build_xml(5)
client = MockClient.new()
@cluster = Cluster.new(@xml,client)
end
it "should create a REXML Element" do
@xml.class.to_s.should eql('REXML::Element')
end
it "should allocate the new CLUSTER" do
@cluster.allocate(nil)
@cluster.id.should eql(5)
end
it "should update the CLUSTER info" do
@cluster.info()
@cluster.id.should eql(5)
@cluster.name.should eql('Production')
end
it "should delete the CLUSTER" do
rc = @cluster.delete()
rc.should eql(nil)
end
it "should add a host to the CLUSTER" do
rc = @cluster.add_host(nil)
rc.should eql(nil)
end
it "should remove a host from the CLUSTER" do
rc = @cluster.remove_host(nil)
rc.should eql(nil)
end
it "should access an attribute using []" do
@cluster['ID'].should eql('5')
@cluster['NAME'].should eql('Production')
end
it "should get a hash representation of the CLUSTER" do
cluster_hash = @cluster.to_hash
cluster_hash['CLUSTER']['ID'].should eql('5')
cluster_hash['CLUSTER']['NAME'].should eql('Production')
end
end
describe "Cluster using NOKOGIRI without id" do
before(:all) do
NOKOGIRI=true
@xml = Cluster.build_xml()
client = MockClient.new()
@cluster = Cluster.new(@xml,client)
end
it "should create a Nokogiri Node" do
@xml.class.to_s.should eql('Nokogiri::XML::NodeSet')
end
it "should get Error getting info" do
rc = @cluster.info()
OpenNebula.is_error?(rc).should eql(true)
end
it "should get Error deleting the CLUSTER" do
rc = @cluster.delete()
OpenNebula.is_error?(rc).should eql(true)
end
it "should add a host to the CLUSTER" do
rc = @cluster.add_host(nil)
OpenNebula.is_error?(rc).should eql(true)
end
it "should remove a host from the CLUSTER" do
rc = @cluster.remove_host(nil)
OpenNebula.is_error?(rc).should eql(true)
end
end
describe "User using REXML without id" do
before(:all) do
NOKOGIRI=false
@xml = Cluster.build_xml()
client = MockClient.new()
@cluster = Cluster.new(@xml,client)
end
it "should create a REXML Element" do
@xml.class.to_s.should eql('REXML::Element')
end
it "should get Error getting info" do
rc = @cluster.info()
OpenNebula.is_error?(rc).should eql(true)
end
it "should get Error deleting the CLUSTER" do
rc = @cluster.delete()
OpenNebula.is_error?(rc).should eql(true)
end
it "should add a host to the CLUSTER" do
rc = @cluster.add_host(nil)
OpenNebula.is_error?(rc).should eql(true)
end
it "should remove a host from the CLUSTER" do
rc = @cluster.remove_host(nil)
OpenNebula.is_error?(rc).should eql(true)
end
end
end

View File

@ -1,8 +1,8 @@
class MockClient
class MockClient
def call(action, *args)
xmlrpc_action = "one."+action
case xmlrpc_action
when "one.vn.info"
return File.read("xml_test/vnet.xml")
@ -36,6 +36,16 @@ class MockClient
return File.read("xml_test/user.xml")
when "one.user.delete"
return nil
when "one.cluster.allocate"
return 5
when "one.cluster.info"
return File.read("xml_test/cluster.xml")
when "one.cluster.delete"
return nil
when "one.cluster.addhost"
return nil
when "one.cluster.removehost"
return nil
when "one.vnpool.info"
return File.read("xml_test/vnetpool.xml")
when "one.vmpool.info"
@ -44,6 +54,8 @@ class MockClient
return File.read("xml_test/hostpool.xml")
when "one.userpool.info"
return File.read("xml_test/userpool.xml")
when "one.clusterpool.info"
return File.read("xml_test/clusterpool.xml")
end
end
end

View File

@ -0,0 +1,4 @@
<CLUSTER>
<ID>5</ID>
<NAME>Production</NAME>
</CLUSTER>

View File

@ -0,0 +1,14 @@
<CLUSTER_POOL>
<CLUSTER>
<ID>0</ID>
<NAME>default</NAME>
</CLUSTER>
<CLUSTER>
<ID>1</ID>
<NAME>Red</NAME>
</CLUSTER>
<CLUSTER>
<ID>2</ID>
<NAME>Black</NAME>
</CLUSTER>
</CLUSTER_POOL>

View File

@ -245,7 +245,25 @@ void RequestManager::register_xml_methods()
xmlrpc_c::methodPtr host_enable(new
RequestManager::HostEnable(hpool,upool));
xmlrpc_c::methodPtr cluster_allocate(new
RequestManager::ClusterAllocate(hpool,upool));
xmlrpc_c::methodPtr cluster_info(new
RequestManager::ClusterInfo(hpool,upool));
xmlrpc_c::methodPtr cluster_delete(new
RequestManager::ClusterDelete(hpool,upool));
xmlrpc_c::methodPtr cluster_add(new
RequestManager::ClusterAdd(hpool,upool));
xmlrpc_c::methodPtr cluster_remove(new
RequestManager::ClusterRemove(hpool,upool));
xmlrpc_c::methodPtr clusterpool_info(new
RequestManager::ClusterPoolInfo(hpool,upool));
xmlrpc_c::methodPtr vn_allocate(new
RequestManager::VirtualNetworkAllocate(vnpool,upool));
@ -315,7 +333,17 @@ void RequestManager::register_xml_methods()
RequestManagerRegistry.addMethod("one.host.enable", host_enable);
RequestManagerRegistry.addMethod("one.hostpool.info", hostpool_info);
/* Cluster related methods */
RequestManagerRegistry.addMethod("one.cluster.allocate", cluster_allocate);
RequestManagerRegistry.addMethod("one.cluster.info", cluster_info);
RequestManagerRegistry.addMethod("one.cluster.delete", cluster_delete);
RequestManagerRegistry.addMethod("one.cluster.add", cluster_add);
RequestManagerRegistry.addMethod("one.cluster.remove", cluster_remove);
RequestManagerRegistry.addMethod("one.clusterpool.info", clusterpool_info);
/* Network related methods*/
RequestManagerRegistry.addMethod("one.vn.allocate", vn_allocate);

View File

@ -0,0 +1,119 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "RequestManager.h"
#include "NebulaLog.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void RequestManager::ClusterAdd::execute(
    xmlrpc_c::paramList const& paramList,
    xmlrpc_c::value * const retval)
{
    // XML-RPC handler for one.cluster.add: assigns host <hid> to cluster
    // <clid>. Only oneadmin (uid 0) is authorized. On success the reply
    // array is [true]; on failure it is [false, <error message>].

    string session;

    int hid;     // id of the host to move
    int clid;    // id of the destination cluster
    int rc;

    Host * host;

    ostringstream oss;

    /* -- RPC specific vars -- */
    vector<xmlrpc_c::value> arrayData;

    NebulaLog::log("ReM",Log::DEBUG,"ClusterAdd method invoked");

    // Get the parameters
    session = xmlrpc_c::value_string(paramList.getString(0));
    hid     = xmlrpc_c::value_int   (paramList.getInt(1));
    clid    = xmlrpc_c::value_int   (paramList.getInt(2));

    // Only oneadmin can add hosts to clusters (authenticate() returns 0
    // only for the oneadmin account)
    rc = ClusterAdd::upool->authenticate(session);

    if ( rc != 0 )
    {
        goto error_authenticate;
    }

    // Check if host exists; get(...,true) returns the host locked
    host = ClusterAdd::hpool->get(hid,true);

    if ( host == 0 )
    {
        goto error_host_get;
    }

    // Set cluster
    rc = ClusterAdd::hpool->set_cluster(host, clid);

    if ( rc != 0 )
    {
        goto error_cluster_add;
    }

    // Update the DB and release the host lock
    ClusterAdd::hpool->update(host);

    host->unlock();

    // All nice, return success to the client
    arrayData.push_back(xmlrpc_c::value_boolean(true)); // SUCCESS

    // Scoped block: the result object has a non-trivial constructor, and
    // the gotos above may not jump over its initialization otherwise
    {
        xmlrpc_c::value_array arrayresult(arrayData);

        *retval = arrayresult;
    }

    return;

error_authenticate:
    oss << "User not authorized to add hosts to clusters";
    goto error_common;

error_host_get:
    oss << "The host " << hid << " does not exist";
    goto error_common;

error_cluster_add:
    host->unlock();
    oss << "Can not add host " << hid << " to cluster " << clid <<
           ", returned error code [" << rc << "]";
    goto error_common;

error_common:
    arrayData.push_back(xmlrpc_c::value_boolean(false));  // FAILURE
    arrayData.push_back(xmlrpc_c::value_string(oss.str()));

    NebulaLog::log("ReM",Log::ERROR,oss);

    xmlrpc_c::value_array arrayresult_error(arrayData);

    *retval = arrayresult_error;

    return;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -0,0 +1,97 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "RequestManager.h"
#include "NebulaLog.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void RequestManager::ClusterAllocate::execute(
    xmlrpc_c::paramList const& paramList,
    xmlrpc_c::value * const retval)
{
    // XML-RPC handler for one.cluster.allocate: creates a new cluster with
    // the given name and returns its numeric id. Only oneadmin is
    // authorized. Reply: [true, <id>] on success, [false, <msg>] on error.

    string session;
    string clustername;

    int cluster_id;
    int rc;

    ostringstream oss;

    /* -- RPC specific vars -- */
    vector<xmlrpc_c::value> reply;
    xmlrpc_c::value_array * reply_array;

    NebulaLog::log("ReM",Log::DEBUG,"ClusterAllocate method invoked");

    // Unpack the RPC arguments
    session     = xmlrpc_c::value_string(paramList.getString(0));
    clustername = xmlrpc_c::value_string(paramList.getString(1));

    // Only oneadmin can add new clusters
    rc = ClusterAllocate::upool->authenticate(session);

    if ( rc != 0 )
    {
        goto error_authenticate;
    }

    // Perform the allocation in the hostpool
    rc = ClusterAllocate::hpool->allocate_cluster(&cluster_id, clustername);

    if ( rc == -1 )
    {
        goto error_cluster_allocate;
    }

    // Success: hand the new id back to the client
    reply.push_back(xmlrpc_c::value_boolean(true)); // SUCCESS
    reply.push_back(xmlrpc_c::value_int(cluster_id));

    reply_array = new xmlrpc_c::value_array(reply);

    // Copy the result into the retval memory space, then drop the original
    *retval = *reply_array;

    delete reply_array;

    return;

error_authenticate:
    oss << "User not authorized to add new clusters";
    goto error_common;

error_cluster_allocate:
    oss << "Can not allocate cluster " << clustername <<
           " in the ClusterPool, returned error code [" << rc << "]";
    goto error_common;

error_common:
    reply.push_back(xmlrpc_c::value_boolean(false));  // FAILURE
    reply.push_back(xmlrpc_c::value_string(oss.str()));

    NebulaLog::log("ReM",Log::ERROR,oss);

    xmlrpc_c::value_array reply_array_error(reply);

    *retval = reply_array_error;

    return;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -0,0 +1,93 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "RequestManager.h"
#include "NebulaLog.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void RequestManager::ClusterDelete::execute(
    xmlrpc_c::paramList const& paramList,
    xmlrpc_c::value * const retval)
{
    // XML-RPC handler for one.cluster.delete: drops cluster <clid> from the
    // ClusterPool. Only oneadmin is authorized. Reply: [true] on success,
    // [false, <error message>] on failure.

    string session;

    int clid;    // <clid> of the cluster to delete from the HostPool
    int rc;

    ostringstream oss;

    /* -- RPC specific vars -- */
    vector<xmlrpc_c::value> arrayData;

    NebulaLog::log("ReM",Log::DEBUG,"ClusterDelete method invoked");

    // Get the parameters
    session = xmlrpc_c::value_string(paramList.getString(0));
    clid    = xmlrpc_c::value_int   (paramList.getInt(1));

    // Only oneadmin can delete clusters
    rc = ClusterDelete::upool->authenticate(session);

    if ( rc != 0 )
    {
        goto error_authenticate;
    }

    rc = ClusterDelete::hpool->drop_cluster(clid);

    if ( rc != 0 )
    {
        goto error_cluster_delete;
    }

    // Return success (rc is guaranteed to be 0 at this point)
    arrayData.push_back(xmlrpc_c::value_boolean(true)); // SUCCESS

    // Scoped block: the result object has a non-trivial constructor, and
    // the gotos above may not jump over its initialization otherwise
    {
        xmlrpc_c::value_array arrayresult(arrayData);

        *retval = arrayresult;
    }

    return;

error_authenticate:
    oss << "User not authorized to delete clusters";
    goto error_common;

error_cluster_delete:
    oss << "Can not delete cluster with CLID " << clid <<
           " from the ClusterPool, returned error code [" << rc << "]";
    goto error_common;

error_common:
    // Log under the "ReM" module tag, consistent with the other
    // RequestManager handlers (was inconsistently tagged "Rem")
    NebulaLog::log("ReM",Log::ERROR,oss);

    arrayData.push_back(xmlrpc_c::value_boolean(false));  // FAILURE
    arrayData.push_back(xmlrpc_c::value_string(oss.str()));

    xmlrpc_c::value_array arrayresult_error(arrayData);

    *retval = arrayresult_error;

    return;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -0,0 +1,96 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "RequestManager.h"
#include "NebulaLog.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void RequestManager::ClusterInfo::execute(
    xmlrpc_c::paramList const& paramList,
    xmlrpc_c::value * const retval)
{
    // XML-RPC handler for one.cluster.info: returns the description of the
    // cluster identified by the second argument. Any authenticated user may
    // call it. Reply: [true, <info>] on success, [false, <msg>] on error.

    string session;
    string cluster_xml;

    int cluster_id;
    int rc;

    ostringstream oss;

    /* -- RPC specific vars -- */
    vector<xmlrpc_c::value> reply;
    xmlrpc_c::value_array * reply_array;

    NebulaLog::log("ReM",Log::DEBUG,"ClusterInfo method invoked");

    // Unpack the RPC arguments
    session    = xmlrpc_c::value_string(paramList.getString(0));
    cluster_id = xmlrpc_c::value_int   (paramList.getInt(1));

    // Any valid user is allowed; -1 from authenticate() means bad
    // credentials
    rc = ClusterInfo::upool->authenticate(session);

    if ( rc == -1 )
    {
        goto error_authenticate;
    }

    cluster_xml = ClusterInfo::hpool->info_cluster(cluster_id);

    // An empty string means the cluster does not exist
    if ( cluster_xml.empty() )
    {
        goto error_cluster;
    }

    // Success: hand the cluster info back to the client
    reply.push_back(xmlrpc_c::value_boolean(true)); // SUCCESS
    reply.push_back(xmlrpc_c::value_string(cluster_xml));

    reply_array = new xmlrpc_c::value_array(reply);

    // Copy the result into the retval memory space, then drop the original
    *retval = *reply_array;

    delete reply_array;

    return;

error_authenticate:
    oss << "User not authenticated, ClusterInfo call aborted.";
    goto error_common;

error_cluster:
    oss << "Error getting cluster with CLID = " << cluster_id;
    goto error_common;

error_common:
    reply.push_back(xmlrpc_c::value_boolean(false));  // FAILURE
    reply.push_back(xmlrpc_c::value_string(oss.str()));

    NebulaLog::log("ReM",Log::ERROR,oss);

    xmlrpc_c::value_array reply_array_error(reply);

    *retval = reply_array_error;

    return;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -0,0 +1,94 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "RequestManager.h"
#include "NebulaLog.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void RequestManager::ClusterPoolInfo::execute(
    xmlrpc_c::paramList const& paramList,
    xmlrpc_c::value * const retval)
{
    // XML-RPC handler for one.clusterpool.info: returns the dump of the
    // whole cluster pool. Any authenticated user may call it.
    // Reply: [true, <pool dump>] on success, [false, <msg>] on error.

    string session;

    ostringstream oss;   // holds the pool dump, or the error message
    int           rc;

    /* -- RPC specific vars -- */
    vector<xmlrpc_c::value> arrayData;

    NebulaLog::log("ReM",Log::DEBUG,"ClusterPoolInfo method invoked");

    // Get the parameters
    session = xmlrpc_c::value_string(paramList.getString(0));

    // Check if it is a valid user (-1 means bad credentials)
    rc = ClusterPoolInfo::upool->authenticate(session);

    if ( rc == -1 )
    {
        goto error_authenticate;
    }

    // Dump the cluster pool into oss
    rc = ClusterPoolInfo::hpool->dump_cluster(oss);

    if ( rc != 0 )
    {
        goto error_dump;
    }

    //All nice, return the info to the client
    arrayData.push_back(xmlrpc_c::value_boolean(true)); // SUCCESS
    arrayData.push_back(xmlrpc_c::value_string(oss.str()));

    // Scoped block: the result object has a non-trivial constructor, and
    // the gotos above may not jump over its initialization otherwise
    {
        xmlrpc_c::value_array arrayresult(arrayData);

        *retval = arrayresult;
    }

    return;

error_authenticate:
    oss << "User not authenticated, RequestManagerClusterPoolInfo aborted.";
    goto error_common;

error_dump:
    // Discard any partial dump written by dump_cluster() so the client
    // receives a clean error message instead of garbage + message
    oss.str("");
    oss << "Error getting Cluster pool";
    goto error_common;

error_common:
    arrayData.push_back(xmlrpc_c::value_boolean(false));  // FAILURE
    arrayData.push_back(xmlrpc_c::value_string(oss.str()));

    NebulaLog::log("ReM",Log::ERROR,oss);

    xmlrpc_c::value_array arrayresult_error(arrayData);

    *retval = arrayresult_error;

    return;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -0,0 +1,118 @@
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2010, OpenNebula Project Leads (OpenNebula.org) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
#include "RequestManager.h"
#include "NebulaLog.h"
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
void RequestManager::ClusterRemove::execute(
    xmlrpc_c::paramList const& paramList,
    xmlrpc_c::value * const retval)
{
    // XML-RPC handler for one.cluster.remove: moves host <hid> back to the
    // default cluster. Only oneadmin is authorized. Reply: [true] on
    // success, [false, <error message>] on failure.

    string session;

    int hid;     // id of the host to detach from its cluster
    int rc;

    Host * host;

    ostringstream oss;

    /* -- RPC specific vars -- */
    vector<xmlrpc_c::value> arrayData;

    NebulaLog::log("ReM",Log::DEBUG,"ClusterRemove method invoked");

    // Get the parameters
    session = xmlrpc_c::value_string(paramList.getString(0));
    hid     = xmlrpc_c::value_int   (paramList.getInt(1));

    // Only oneadmin can remove hosts from clusters (authenticate() returns
    // 0 only for the oneadmin account)
    rc = ClusterRemove::upool->authenticate(session);

    if ( rc != 0 )
    {
        goto error_authenticate;
    }

    // Check if host exists; get(...,true) returns the host locked
    host = ClusterRemove::hpool->get(hid,true);

    if ( host == 0 )
    {
        goto error_host_get;
    }

    // Remove host from cluster
    rc = ClusterRemove::hpool->set_default_cluster(host);

    if ( rc != 0 )
    {
        goto error_cluster_remove;
    }

    // Update the DB and release the host lock
    ClusterRemove::hpool->update(host);

    host->unlock();

    // All nice, return success to the client
    arrayData.push_back(xmlrpc_c::value_boolean(true)); // SUCCESS

    // Scoped block: the result object has a non-trivial constructor, and
    // the gotos above may not jump over its initialization otherwise
    {
        xmlrpc_c::value_array arrayresult(arrayData);

        *retval = arrayresult;
    }

    return;

error_authenticate:
    oss << "User not authorized to remove hosts from clusters";
    goto error_common;

error_host_get:
    oss << "The host " << hid << " does not exist";
    goto error_common;

error_cluster_remove:
    host->unlock();
    oss << "Can not remove host " << hid << " from its cluster, "
        << "returned error code [" << rc << "]";
    goto error_common;

error_common:
    arrayData.push_back(xmlrpc_c::value_boolean(false));  // FAILURE
    arrayData.push_back(xmlrpc_c::value_string(oss.str()));

    NebulaLog::log("ReM",Log::ERROR,oss);

    xmlrpc_c::value_array arrayresult_error(arrayData);

    *retval = arrayresult_error;

    return;
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

View File

@ -42,6 +42,12 @@ source_files=[
'RequestManagerImagePublish.cc',
'RequestManagerImageEnable.cc',
'RequestManagerImagePoolInfo.cc',
'RequestManagerClusterAdd.cc',
'RequestManagerClusterAllocate.cc',
'RequestManagerClusterDelete.cc',
'RequestManagerClusterInfo.cc',
'RequestManagerClusterPoolInfo.cc',
'RequestManagerClusterRemove.cc',
'RequestManagerVirtualNetworkAllocate.cc',
'RequestManagerVirtualNetworkInfo.cc',
'RequestManagerVirtualNetworkPoolInfo.cc',

View File

@ -1867,22 +1867,29 @@ extern "C" void expr_bool_error(
void get_xml_attribute(ObjectXML * oxml, const char* attr, int& val)
{
//TODO: pass xpath base
ostringstream xpath_t;
ostringstream xpath_s;
vector<string> results;
xpath_t << "/HOST/TEMPLATE/" << attr;
xpath_s << "/HOST/HOST_SHARE/" << attr;
val = 0;
//TODO: pass xpath base
vector<string> results;
ostringstream xpath_t;
xpath_t << "/HOST/TEMPLATE/" << attr;
results = (*oxml)[xpath_t.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_s;
xpath_s << "/HOST/HOST_SHARE/" << attr;
results = (*oxml)[xpath_s.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_h;
xpath_h << "/HOST/" << attr;
results = (*oxml)[xpath_h.str().c_str()];
}
}
if (results.size() != 0)
@ -1894,22 +1901,29 @@ void get_xml_attribute(ObjectXML * oxml, const char* attr, int& val)
void get_xml_attribute(ObjectXML * oxml, const char* attr, float& val)
{
val = 0.0;
//TODO: pass xpath base
ostringstream xpath_t;
ostringstream xpath_s;
vector<string> results;
xpath_t << "/HOST/TEMPLATE/" << attr;
xpath_s << "/HOST/HOST_SHARE/" << attr;
val = 0.0;
results = (*oxml)[xpath_t.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_s;
xpath_s << "/HOST/HOST_SHARE/" << attr;
results = (*oxml)[xpath_s.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_h;
xpath_h << "/HOST/" << attr;
results = (*oxml)[xpath_h.str().c_str()];
}
}
if (results.size() != 0)
@ -1921,22 +1935,29 @@ void get_xml_attribute(ObjectXML * oxml, const char* attr, float& val)
void get_xml_attribute(ObjectXML * oxml, const char* attr, string& val)
{
val = "";
//TODO: pass xpath base
ostringstream xpath_t;
ostringstream xpath_s;
vector<string> results;
xpath_t << "/HOST/TEMPLATE/" << attr;
xpath_s << "/HOST/HOST_SHARE/" << attr;
val = "";
results = (*oxml)[xpath_t.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_s;
xpath_s << "/HOST/HOST_SHARE/" << attr;
results = (*oxml)[xpath_s.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_h;
xpath_h << "/HOST/" << attr;
results = (*oxml)[xpath_h.str().c_str()];
}
}
if (results.size() != 0)

View File

@ -188,22 +188,29 @@ extern "C" void expr_bool_error(
void get_xml_attribute(ObjectXML * oxml, const char* attr, int& val)
{
//TODO: pass xpath base
ostringstream xpath_t;
ostringstream xpath_s;
vector<string> results;
xpath_t << "/HOST/TEMPLATE/" << attr;
xpath_s << "/HOST/HOST_SHARE/" << attr;
val = 0;
//TODO: pass xpath base
vector<string> results;
ostringstream xpath_t;
xpath_t << "/HOST/TEMPLATE/" << attr;
results = (*oxml)[xpath_t.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_s;
xpath_s << "/HOST/HOST_SHARE/" << attr;
results = (*oxml)[xpath_s.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_h;
xpath_h << "/HOST/" << attr;
results = (*oxml)[xpath_h.str().c_str()];
}
}
if (results.size() != 0)
@ -215,22 +222,29 @@ void get_xml_attribute(ObjectXML * oxml, const char* attr, int& val)
void get_xml_attribute(ObjectXML * oxml, const char* attr, float& val)
{
val = 0.0;
//TODO: pass xpath base
ostringstream xpath_t;
ostringstream xpath_s;
vector<string> results;
xpath_t << "/HOST/TEMPLATE/" << attr;
xpath_s << "/HOST/HOST_SHARE/" << attr;
val = 0.0;
results = (*oxml)[xpath_t.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_s;
xpath_s << "/HOST/HOST_SHARE/" << attr;
results = (*oxml)[xpath_s.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_h;
xpath_h << "/HOST/" << attr;
results = (*oxml)[xpath_h.str().c_str()];
}
}
if (results.size() != 0)
@ -242,22 +256,29 @@ void get_xml_attribute(ObjectXML * oxml, const char* attr, float& val)
void get_xml_attribute(ObjectXML * oxml, const char* attr, string& val)
{
val = "";
//TODO: pass xpath base
ostringstream xpath_t;
ostringstream xpath_s;
vector<string> results;
xpath_t << "/HOST/TEMPLATE/" << attr;
xpath_s << "/HOST/HOST_SHARE/" << attr;
val = "";
results = (*oxml)[xpath_t.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_s;
xpath_s << "/HOST/HOST_SHARE/" << attr;
results = (*oxml)[xpath_s.str().c_str()];
if (results.size() == 0)
{
ostringstream xpath_h;
xpath_h << "/HOST/" << attr;
results = (*oxml)[xpath_h.str().c_str()];
}
}
if (results.size() != 0)

View File

@ -177,6 +177,11 @@ public:
"ARCH = \"*64*\"",
"RUNNING_VMS < 100",
"CLUSTER = \"cluster A\"",
"CLUSTER = \"default\"",
"CLUSTER = clusterA",
"CLUSTER = \"Cluster A\"",
/*
// Boolean operators
"HOSTNAME = \"ursa*\" & NETRX = \"13335836573\"",
@ -191,6 +196,7 @@ public:
bool results[] = { true, false, true, false,
true, true, false,
true, true, true,
true, false, false, false
/*
true, true, false,
//*/
@ -385,6 +391,7 @@ const string ObjectXMLTest::host =
"<VM_MAD>vmm_kvm</VM_MAD>"
"<TM_MAD>tm_nfs</TM_MAD>"
"<LAST_MON_TIME>1273799044</LAST_MON_TIME>"
"<CLUSTER>cluster A</CLUSTER>"
"<HOST_SHARE>"
" <HID>1</HID>"
" <DISK_USAGE>0</DISK_USAGE>"