diff --git a/SConstruct b/SConstruct index da427b948c..cf962f4d85 100644 --- a/SConstruct +++ b/SConstruct @@ -57,6 +57,8 @@ main_env.Append(LIBPATH=[ cwd+'/src/log', cwd+'/src/sql', cwd+'/src/host', + cwd+'/src/cluster', + cwd+'/src/datastore', cwd+'/src/group', cwd+'/src/mad', cwd+'/src/nebula', @@ -187,6 +189,8 @@ build_scripts=[ 'src/common/SConstruct', 'src/template/SConstruct', 'src/host/SConstruct', + 'src/cluster/SConstruct', + 'src/datastore/SConstruct', 'src/group/SConstruct', 'src/mad/SConstruct', 'src/mad/utils/SConstruct', @@ -236,6 +240,8 @@ if testing=='yes': 'src/authm/test/SConstruct', 'src/common/test/SConstruct', 'src/host/test/SConstruct', + 'src/cluster/test/SConstruct', + 'src/datastore/test/SConstruct', 'src/group/test/SConstruct', 'src/image/test/SConstruct', 'src/lcm/test/SConstruct', diff --git a/include/Attribute.h b/include/Attribute.h index 9d08a8e9ff..d302419987 100644 --- a/include/Attribute.h +++ b/include/Attribute.h @@ -243,6 +243,16 @@ public: */ string vector_value(const char *name) const; + /** + * Returns the integer value + * + * @param name Name of the attribute + * @param value Integer value + * + * @return 0 on success, -1 otherwise + */ + int vector_value(const char *name, int & value) const; + /** * Marshall the attribute in a single string. The string MUST be freed * by the calling function. The string is in the form: diff --git a/include/Cluster.h b/include/Cluster.h new file mode 100644 index 0000000000..fe91731a13 --- /dev/null +++ b/include/Cluster.h @@ -0,0 +1,252 @@ +/* ------------------------------------------------------------------------ */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. 
You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------*/ + +#ifndef CLUSTER_H_ +#define CLUSTER_H_ + +#include "PoolSQL.h" +#include "ObjectCollection.h" + +using namespace std; + +/** + * The Cluster class. + */ +class Cluster : public PoolObjectSQL +{ +public: + + // ************************************************************************* + // Object Collections (Public) + // ************************************************************************* + + /** + * Adds this host ID to the set. + * @param id to be added to the cluster + * @param error_msg Error message, if any + * @return 0 on success + */ + int add_host(int id, string& error_msg) + { + int rc = hosts.add_collection_id(id); + + if ( rc < 0 ) + { + error_msg = "Host ID is already in the cluster set."; + } + + return rc; + } + + /** + * Deletes this host ID from the set. + * @param id to be deleted from the cluster + * @param error_msg Error message, if any + * @return 0 on success + */ + int del_host(int id, string& error_msg) + { + int rc = hosts.del_collection_id(id); + + if ( rc < 0 ) + { + error_msg = "Host ID is not part of the cluster set."; + } + + return rc; + } + + /** + * Adds this datastore ID to the set. 
+ * @param id to be added to the cluster + * @param error_msg Error message, if any + * @return 0 on success + */ + int add_datastore(int id, string& error_msg) + { + int rc = datastores.add_collection_id(id); + + if ( rc < 0 ) + { + error_msg = "Datastore ID is already in the cluster set."; + } + + return rc; + } + + /** + * Deletes this datastore ID from the set. + * @param id to be deleted from the cluster + * @param error_msg Error message, if any + * @return 0 on success + */ + int del_datastore(int id, string& error_msg) + { + int rc = datastores.del_collection_id(id); + + if ( rc < 0 ) + { + error_msg = "Datastore ID is not part of the cluster set."; + } + + return rc; + } + + /** + * Adds this vnet ID to the set. + * @param id to be added to the cluster + * @param error_msg Error message, if any + * @return 0 on success + */ + int add_vnet(int id, string& error_msg) + { + int rc = vnets.add_collection_id(id); + + if ( rc < 0 ) + { + error_msg = "Network ID is already in the cluster set."; + } + + return rc; + } + + /** + * Deletes this vnet ID from the set. 
+ * @param id to be deleted from the cluster + * @param error_msg Error message, if any + * @return 0 on success + */ + int del_vnet(int id, string& error_msg) + { + int rc = vnets.del_collection_id(id); + + if ( rc < 0 ) + { + error_msg = "Network ID is not part of the cluster set."; + } + + return rc; + } + + // ************************************************************************* + // DataBase implementation (Public) + // ************************************************************************* + + /** + * Function to print the Cluster object into a string in XML format + * @param xml the resulting XML string + * @return a reference to the generated string + */ + string& to_xml(string& xml) const; + + /** + * Rebuilds the object from an xml formatted string + * @param xml_str The xml-formatted string + * + * @return 0 on success, -1 otherwise + */ + int from_xml(const string &xml_str); + +private: + + // ------------------------------------------------------------------------- + // Friends + // ------------------------------------------------------------------------- + + friend class ClusterPool; + + // ************************************************************************* + // Constructor + // ************************************************************************* + + Cluster(int id, const string& name): + PoolObjectSQL(id,CLUSTER,name,-1,-1,"","",table), + hosts("HOSTS"), + datastores("DATASTORES"), + vnets("VNETS"){}; + + virtual ~Cluster(){}; + + // ************************************************************************* + // Object Collections (Private) + // ************************************************************************* + + ObjectCollection hosts; + ObjectCollection datastores; + ObjectCollection vnets; + + // ************************************************************************* + // DataBase implementation (Private) + // ************************************************************************* + + static const char * db_names; + + 
static const char * db_bootstrap; + + static const char * table; + + /** + * Execute an INSERT or REPLACE Sql query. + * @param db The SQL DB + * @param replace Execute an INSERT or a REPLACE + * @param error_str Returns the error reason, if any + * @return 0 one success + */ + int insert_replace(SqlDB *db, bool replace, string& error_str); + + /** + * Bootstraps the database table(s) associated to the Cluster + * @return 0 on success + */ + static int bootstrap(SqlDB * db) + { + ostringstream oss(Cluster::db_bootstrap); + + return db->exec(oss); + }; + + /** + * Writes the Cluster in the database. + * @param db pointer to the db + * @return 0 on success + */ + int insert(SqlDB *db, string& error_str) + { + return insert_replace(db, false, error_str); + } + + /** + * Writes/updates the Cluster's data fields in the database. + * @param db pointer to the db + * @return 0 on success + */ + int update(SqlDB *db) + { + string error_str; + return insert_replace(db, true, error_str); + } + + /** + * Checks if all the collections are empty, and therefore this cluster + * can be dropped. + * + * @param error_msg Error message, if any. + * @return 0 if cluster can be dropped, -1 otherwise + */ + int check_drop(string& error_msg); +}; + +#endif /*CLUSTER_H_*/ diff --git a/include/ClusterPool.h b/include/ClusterPool.h new file mode 100644 index 0000000000..365dac64e3 --- /dev/null +++ b/include/ClusterPool.h @@ -0,0 +1,158 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. 
You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------- */ + +#ifndef CLUSTER_POOL_H_ +#define CLUSTER_POOL_H_ + +#include "Cluster.h" +#include "SqlDB.h" + +using namespace std; + + +class ClusterPool : public PoolSQL +{ +public: + ClusterPool(SqlDB * db); + + ~ClusterPool(){}; + + /* ---------------------------------------------------------------------- */ + /* Constants for DB management */ + /* ---------------------------------------------------------------------- */ + + /** + * Name for the "none" cluster + */ + static const string NONE_CLUSTER_NAME; + + /** + * Identifier for the "none" cluster + */ + static const int NONE_CLUSTER_ID; + + /* ---------------------------------------------------------------------- */ + /* Methods for DB management */ + /* ---------------------------------------------------------------------- */ + + /** + * Allocates a new cluster, writting it in the pool database. No memory is + * allocated for the object. 
+ * @param name Cluster name + * @param oid the id assigned to the Cluster + * @param error_str Returns the error reason, if any + * + * @return the oid assigned to the object, -1 in case of failure + */ + int allocate(string name, + int * oid, + string& error_str); + + /** + * Function to get a cluster from the pool, if the object is not in memory + * it is loaded from the DB + * @param oid cluster unique id + * @param lock locks the cluster mutex + * @return a pointer to the cluster, 0 if the cluster could not be loaded + */ + Cluster * get(int oid, bool lock) + { + return static_cast(PoolSQL::get(oid,lock)); + }; + + /** + * Gets an object from the pool (if needed the object is loaded from the + * database). + * @param name of the object + * @param lock locks the object if true + * + * @return a pointer to the object, 0 in case of failure + */ + Cluster * get(const string& name, bool lock) + { + // The owner is set to -1, because it is not used in the key() method + return static_cast(PoolSQL::get(name,-1,lock)); + }; + + /** + * Generate an index key for the object + * @param name of the object + * @param uid owner of the object, only used if needed + * + * @return the key, a string + */ + string key(const string& name, int uid) + { + // Name is enough key because Clusters can't repeat names. + return name; + }; + + /** Update a particular Cluster + * @param user pointer to Cluster + * @return 0 on success + */ + int update(Cluster * cluster) + { + return cluster->update(db); + }; + + /** + * Drops the Cluster from the data base. The object mutex SHOULD be + * locked. 
+ * @param objsql a pointer to a Cluster object + * @param error_msg Error reason, if any + * @return 0 on success, + * -1 DB error, + * -2 object is a system cluster (ID < 100) + * -3 Cluster's User IDs set is not empty + */ + int drop(PoolObjectSQL * objsql, string& error_msg); + + /** + * Bootstraps the database table(s) associated to the Cluster pool + * @return 0 on success + */ + static int bootstrap(SqlDB * _db) + { + return Cluster::bootstrap(_db); + }; + + /** + * Dumps the Cluster pool in XML format. A filter can be also added to the + * query + * @param oss the output stream to dump the pool contents + * @param where filter for the objects, defaults to all + * + * @return 0 on success + */ + int dump(ostringstream& oss, const string& where) + { + return PoolSQL::dump(oss, "CLUSTER_POOL", Cluster::table, where); + }; + +private: + + /** + * Factory method to produce objects + * @return a pointer to the new object + */ + PoolObjectSQL * create() + { + return new Cluster(-1,""); + }; +}; + +#endif /*CLUSTER_POOL_H_*/ diff --git a/include/Clusterable.h b/include/Clusterable.h new file mode 100644 index 0000000000..94bd133fed --- /dev/null +++ b/include/Clusterable.h @@ -0,0 +1,78 @@ +/* ------------------------------------------------------------------------ */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* -------------------------------------------------------------------------*/ + +#ifndef CLUSTERABLE_H_ +#define CLUSTERABLE_H_ + +using namespace std; + +class Clusterable +{ +public: + + /** + * Changes the cluster this object belongs to + * + * @param _cluster_id Id of the new cluster + * @param _cluster Name of the new cluster + */ + void set_cluster(int _cluster_id, const string& _cluster) + { + cluster_id = _cluster_id; + cluster = _cluster; + }; + + /** + * Returns the cluster ID + * + * @return The cluster ID + */ + int get_cluster_id() const + { + return cluster_id; + }; + + /** + * Returns the cluster name + * + * @return The cluster name + */ + const string& get_cluster_name() const + { + return cluster; + }; + + +protected: + + Clusterable(int _cluster_id, const string& _cluster): + cluster_id(_cluster_id), + cluster(_cluster){}; + + ~Clusterable(){}; + + /** + * ID of the cluster this object belongs to. + */ + int cluster_id; + + /** + * Name of the cluster this object belongs to. + */ + string cluster; +}; + +#endif /*CLUSTERABLE_H_*/ diff --git a/include/Datastore.h b/include/Datastore.h new file mode 100644 index 0000000000..43c5cd3e9e --- /dev/null +++ b/include/Datastore.h @@ -0,0 +1,200 @@ +/* ------------------------------------------------------------------------ */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* -------------------------------------------------------------------------*/ + +#ifndef DATASTORE_H_ +#define DATASTORE_H_ + +#include "PoolSQL.h" +#include "ObjectCollection.h" +#include "DatastoreTemplate.h" +#include "Clusterable.h" + +/** + * The Datastore class. + */ +class Datastore : public PoolObjectSQL, ObjectCollection, public Clusterable +{ +public: + + /** + * Function to print the Datastore object into a string in XML format + * @param xml the resulting XML string + * @return a reference to the generated string + */ + string& to_xml(string& xml) const; + + /** + * Rebuilds the object from an xml formatted string + * @param xml_str The xml-formatted string + * + * @return 0 on success, -1 otherwise + */ + int from_xml(const string &xml_str); + + /** + * Adds this image's ID to the set. + * @param id of the image to be added to the Datastore + * @return 0 on success + */ + int add_image(int id) + { + return add_collection_id(id); + }; + + /** + * Deletes this image's ID from the set. + * @param id of the image to be deleted from the Datastore + * @return 0 on success + */ + int del_image(int id) + { + return del_collection_id(id); + }; + + /** + * Retrieves TM mad name + * @return string tm mad name + */ + const string& get_tm_mad() const + { + return tm_mad; + }; + + /** + * Retrieves the base path + * @return base path string + */ + const string& get_base_path() const + { + return base_path; + }; + + /** + * Modifies the given VM disk attribute adding the relevant datastore + * attributes + * + * @param disk + * @return 0 on success + */ + int disk_attribute(VectorAttribute * disk); + + /** + * Replace template for this object. 
Object should be updated + * after calling this method + * @param tmpl string representation of the template + */ + int replace_template(const string& tmpl_str, string& error); + +private: + + // ------------------------------------------------------------------------- + // Friends + // ------------------------------------------------------------------------- + + friend class DatastorePool; + + // ************************************************************************* + // Datastore Private Attributes + // ************************************************************************* + + /** + * Name of the datastore driver used to register new images + */ + string ds_mad; + + /** + * Name of the TM driver used to transfer file to and from the hosts + */ + string tm_mad; + + /** + * Base path for the storage + */ + string base_path; + + // ************************************************************************* + // Constructor + // ************************************************************************* + + Datastore( + int uid, + int gid, + const string& uname, + const string& gname, + DatastoreTemplate* ds_template, + int cluster_id, + const string& cluster_name); + + virtual ~Datastore(){}; + + // ************************************************************************* + // DataBase implementation (Private) + // ************************************************************************* + + static const char * db_names; + + static const char * db_bootstrap; + + static const char * table; + + /** + * Execute an INSERT or REPLACE Sql query. 
+ * @param db The SQL DB + * @param replace Execute an INSERT or a REPLACE + * @param error_str Returns the error reason, if any + * @return 0 one success + */ + int insert_replace(SqlDB *db, bool replace, string& error_str); + + /** + * Bootstraps the database table(s) associated to the Datastore + * @return 0 on success + */ + static int bootstrap(SqlDB * db) + { + ostringstream oss(Datastore::db_bootstrap); + + return db->exec(oss); + }; + + /** + * Writes the Datastore in the database. + * @param db pointer to the db + * @return 0 on success + */ + int insert(SqlDB *db, string& error_str); + + /** + * Writes/updates the Datastore's data fields in the database. + * @param db pointer to the db + * @return 0 on success + */ + int update(SqlDB *db) + { + string error_str; + return insert_replace(db, true, error_str); + } + + /** + * Factory method for virtual network templates + */ + Template * get_new_template() const + { + return new DatastoreTemplate; + } +}; + +#endif /*DATASTORE_H_*/ diff --git a/include/DatastorePool.h b/include/DatastorePool.h new file mode 100644 index 0000000000..294af9512d --- /dev/null +++ b/include/DatastorePool.h @@ -0,0 +1,179 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* -------------------------------------------------------------------------- */ + +#ifndef DATASTORE_POOL_H_ +#define DATASTORE_POOL_H_ + +#include "Datastore.h" +#include "SqlDB.h" + +using namespace std; + + +class DatastorePool : public PoolSQL +{ +public: + DatastorePool(SqlDB * db); + + ~DatastorePool(){}; + + /* ---------------------------------------------------------------------- */ + /* Constants for DB management */ + /* ---------------------------------------------------------------------- */ + + /** + * Name for the system datastore + */ + static const string SYSTEM_DS_NAME; + + /** + * Identifier for the system datastore + */ + static const int SYSTEM_DS_ID; + + /** + * Name for the default datastore + */ + static const string DEFAULT_DS_NAME; + + /** + * Identifier for the default datastore + */ + static const int DEFAULT_DS_ID; + + /* ---------------------------------------------------------------------- */ + /* Methods for DB management */ + /* ---------------------------------------------------------------------- */ + + /** + * Allocates a new Datastore, writing it in the pool database. No memory is + * allocated for the object. 
+ * @param uid the user id of the Datastore owner + * @param gid the id of the group this object is assigned to + * @param uname name of the user + * @param gname name of the group + * @param ds_template Datastore definition template + * @param oid the id assigned to the Datastore + * @param cluster_id the id of the cluster this Datastore will belong to + * @param cluster_name the name of the cluster this Datastore will belong to + * @param error_str Returns the error reason, if any + * + * @return the oid assigned to the object, -1 in case of failure + */ + int allocate( + int uid, + int gid, + const string& uname, + const string& gname, + DatastoreTemplate * ds_template, + int * oid, + int cluster_id, + const string& cluster_name, + string& error_str); + + /** + * Function to get a Datastore from the pool, if the object is not in memory + * it is loaded from the DB + * @param oid Datastore unique id + * @param lock locks the Datastore mutex + * @return a pointer to the Datastore, 0 if the Datastore could not be loaded + */ + Datastore * get(int oid, bool lock) + { + return static_cast(PoolSQL::get(oid,lock)); + }; + + /** + * Gets an object from the pool (if needed the object is loaded from the + * database). + * @param name of the object + * @param lock locks the object if true + * + * @return a pointer to the object, 0 in case of failure + */ + Datastore * get(const string& name, bool lock) + { + // The owner is set to -1, because it is not used in the key() method + return static_cast(PoolSQL::get(name,-1,lock)); + }; + + /** + * Generate an index key for the object + * @param name of the object + * @param uid owner of the object, only used if needed + * + * @return the key, a string + */ + string key(const string& name, int uid) + { + // Name is enough key because Datastores can't repeat names. 
+ return name; + }; + + /** Update a particular Datastore + * @param user pointer to Datastore + * @return 0 on success + */ + int update(Datastore * datastore) + { + return datastore->update(db); + }; + + /** + * Drops the Datastore data in the data base. The object mutex SHOULD be + * locked. + * @param objsql a pointer to the Datastore object + * @param error_msg Error reason, if any + * @return 0 on success, -1 DB error + * -3 Datastore's Image IDs set is not empty + */ + int drop(PoolObjectSQL * objsql, string& error_msg); + + /** + * Bootstraps the database table(s) associated to the Datastore pool + * @return 0 on success + */ + static int bootstrap(SqlDB * _db) + { + return Datastore::bootstrap(_db); + }; + + /** + * Dumps the Datastore pool in XML format. A filter can be also added to the + * query + * @param oss the output stream to dump the pool contents + * @param where filter for the objects, defaults to all + * + * @return 0 on success + */ + int dump(ostringstream& oss, const string& where) + { + return PoolSQL::dump(oss, "DATASTORE_POOL", Datastore::table, where); + }; + +private: + + /** + * Factory method to produce objects + * @return a pointer to the new object + */ + PoolObjectSQL * create() + { + return new Datastore(-1,-1,"","", 0, -1, ""); + }; +}; + +#endif /*DATASTORE_POOL_H_*/ diff --git a/include/DatastoreTemplate.h b/include/DatastoreTemplate.h new file mode 100644 index 0000000000..d8856dbe2f --- /dev/null +++ b/include/DatastoreTemplate.h @@ -0,0 +1,39 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. 
You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------- */ + +#ifndef DATASTORE_TEMPLATE_H_ +#define DATASTORE_TEMPLATE_H_ + +#include "Template.h" + +using namespace std; + +/** + * Datastore Template class + */ +class DatastoreTemplate : public Template +{ +public: + DatastoreTemplate(): + Template(false,'=',"TEMPLATE"){}; + + ~DatastoreTemplate(){}; +}; + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +#endif /*DATASTORE_TEMPLATE_H_*/ diff --git a/include/GroupPool.h b/include/GroupPool.h index 335a52c245..7de343cd1f 100644 --- a/include/GroupPool.h +++ b/include/GroupPool.h @@ -31,7 +31,7 @@ public: ~GroupPool(){}; /* ---------------------------------------------------------------------- */ - /* Constants r DB management */ + /* Constants for DB management */ /* ---------------------------------------------------------------------- */ /** diff --git a/include/History.h b/include/History.h index 4541e58ab7..e124ec7a2e 100644 --- a/include/History.h +++ b/include/History.h @@ -44,10 +44,8 @@ public: int seq, int hid, const string& hostname, - const string& vm_dir, const string& vmm, - const string& vnm, - const string& tm); + const string& vnm); ~History(){}; @@ -87,13 +85,10 @@ private: int seq; string hostname; - string vm_dir; - int hid; string vmm_mad_name; string vnm_mad_name; - string tm_mad_name; time_t stime; time_t etime; @@ -109,13 +104,15 @@ private: MigrationReason 
reason; - //Non-persistent history fields - string vm_lhome; + // ------------------------------------------------------------------------- + // Non-persistent history fields + // ------------------------------------------------------------------------- + // Local paths string transfer_file; string deployment_file; string context_file; - string vm_rhome; + // Remote paths string checkpoint_file; string rdeployment_file; diff --git a/include/Host.h b/include/Host.h index 8df66d9ae8..0c60e7f783 100644 --- a/include/Host.h +++ b/include/Host.h @@ -20,13 +20,14 @@ #include "PoolSQL.h" #include "HostTemplate.h" #include "HostShare.h" +#include "Clusterable.h" using namespace std; /** * The Host class. */ -class Host : public PoolObjectSQL +class Host : public PoolObjectSQL, public Clusterable { public: @@ -139,15 +140,6 @@ public: return vnm_mad_name; }; - /** - * Retrives TM mad name - * @return string tm mad name - */ - const string& get_tm_mad() const - { - return tm_mad_name; - }; - /** * Retrives IM mad name * @return string im mad name @@ -289,7 +281,7 @@ public: /** * Factory method for host templates */ - Template * get_new_template() + Template * get_new_template() const { return new HostTemplate; } @@ -325,11 +317,6 @@ private: */ string vnm_mad_name; - /** - * Name of the TM driver used to transfer file to and from this host - */ - string tm_mad_name; - /** * If Host State= MONITORED last time it got fully monitored or 1 Jan 1970 * Host State = MONITORING last time it got a signal to be monitored @@ -348,12 +335,13 @@ private: // Constructor // ************************************************************************* - Host(int id=-1, - const string& hostname="", - const string& im_mad_name="", - const string& vmm_mad_name="", - const string& vnm_mad_name="", - const string& tm_mad_name=""); + Host(int id, + const string& hostname, + const string& im_mad_name, + const string& vmm_mad_name, + const string& vnm_mad_name, + int cluster_id, + const string& 
cluster_name); virtual ~Host(); diff --git a/include/HostPool.h b/include/HostPool.h index 7bdc0bfc23..0de6e8f8e4 100644 --- a/include/HostPool.h +++ b/include/HostPool.h @@ -53,7 +53,8 @@ public: const string& im_mad_name, const string& vmm_mad_name, const string& vnm_mad_name, - const string& tm_mad_name, + int cluster_id, + const string& cluster_name, string& error_str); /** @@ -203,7 +204,7 @@ private: */ PoolObjectSQL * create() { - return new Host; + return new Host(-1,"","","","",-1,""); }; /** diff --git a/include/Image.h b/include/Image.h index 61c7055286..60ba8cdb82 100644 --- a/include/Image.h +++ b/include/Image.h @@ -39,6 +39,22 @@ public: DATABLOCK = 2 /** < User persistent data device */ }; + /** + * Return the string representation of an ImageType + * @param ob the type + * @return the string + */ + static string type_to_str(ImageType ob) + { + switch (ob) + { + case OS: return "OS" ; break; + case CDROM: return "CDROM" ; break; + case DATABLOCK: return "DATABLOCK" ; break; + default: return ""; + } + }; + /** * Image State */ @@ -199,6 +215,17 @@ public: return (group_u == 1 || other_u == 1); } + /** + * Check if the image is used for saving_as a current one + * @return true if the image will be used to save an existing image. + */ + bool isSaving() + { + ImageTemplate * it = static_cast(obj_template); + + return it->is_saving(); + } + /** * Set permissions for the Image. Extends the PoolSQLObject method * by checking the persistent state of the image. 
@@ -291,11 +318,27 @@ public: /** * Factory method for image templates */ - Template * get_new_template() + Template * get_new_template() const { return new ImageTemplate; } + /** + * Returns the Datastore ID + */ + int get_ds_id() const + { + return ds_id; + }; + + /** + * Returns the Datastore ID + */ + const string& get_ds_name() const + { + return ds_name; + }; + private: // ------------------------------------------------------------------------- @@ -353,6 +396,16 @@ private: */ int running_vms; + /** + * Datastore ID + */ + int ds_id; + + /** + * Datastore name + */ + string ds_name; + // ************************************************************************* // DataBase implementation (Private) // ************************************************************************* diff --git a/include/ImageManager.h b/include/ImageManager.h index 8858b27fa0..2d3d8789ca 100644 --- a/include/ImageManager.h +++ b/include/ImageManager.h @@ -82,60 +82,26 @@ public: /** * Try to acquire an image from the repository for a VM. * @param image_id id of image + * @param error string describing the error * @return pointer to the image or 0 if could not be acquired */ - Image * acquire_image(int image_id); + Image * acquire_image(int image_id, string& error); /** * Try to acquire an image from the repository for a VM. 
* @param name of the image * @param id of owner + * @param error string describing the error * @return pointer to the image or 0 if could not be acquired */ - Image * acquire_image(const string& name, int uid); + Image * acquire_image(const string& name, int uid, string& error); /** * Releases an image and triggers any needed operations in the repo * @param iid image id of the image to be released - * @param disk_path base path for disk location - * @param disk number for this image in the VM - * @param saveid id of image to save the current image + * @param failed the associated VM releasing the images is FAILED */ - void release_image(const string& iid, - const string& disk_path, - int disk_num, - const string& saveid) - { - int image_id; - istringstream iss; - - iss.str(iid); - iss >> image_id; - - release_image(image_id, disk_path, disk_num, saveid); - }; - - /** - * Releases an image and triggers any needed operations in the repo - * @param iid image id of the image to be released - * @param disk_path base path for disk location - * @param disk number for this image in the VM - * @param saveid id of image to save the current image - */ - void release_image(int iid, - const string& disk_path, - int disk_num, - const string& saveid); - - /** - * Moves a VM disk to the Image Repository - * @param disk_path base path for disk location - * @param disk number for this image in the VM - * @param saveid id of image to save the current image - */ - void disk_to_image(const string& disk_path, - int disk_num, - const string& save_id); + void release_image(int iid, bool failed); /** * Enables the image @@ -146,17 +112,18 @@ public: /** * Adds a new image to the repository copying or creating it as needed - * @param iid id of image + * @param img pointer to the image + * @param ds_data data of the associated datastore in XML format * @return 0 on success */ - int register_image(int iid); + int register_image(int iid, const string& ds_data); /** * Deletes an image from the 
repository and the DB * @param iid id of image * @return 0 on success */ - int delete_image(int iid); + int delete_image(int iid, const string& ds_data); private: /** @@ -202,16 +169,14 @@ private: * @param action the name of the action * @param arg arguments for the action function */ - void do_action( - const string & action, - void * arg); + void do_action(const string& action, void * arg); /** * Acquires an image updating its state. * @param image pointer to image, it should be locked * @return 0 on success */ - int acquire_image(Image *img); + int acquire_image(Image *img, string& error); /** * Moves a file to an image in the repository @@ -219,6 +184,15 @@ private: * @param source path of the disk file */ void move_image(Image *img, const string& source); + + /** + * Formats an XML message for the MAD + * + * @param img_data Image XML representation + * @param ds_data Datastore XML representation + * @return the XML message + */ + string * format_message(const string& img_data, const string& ds_data); }; #endif /*IMAGE_MANAGER_H*/ diff --git a/include/ImageManagerDriver.h b/include/ImageManagerDriver.h index 081c7bd31a..3319ebf717 100644 --- a/include/ImageManagerDriver.h +++ b/include/ImageManagerDriver.h @@ -70,7 +70,12 @@ private: */ //Template driver_conf; - void cp(int oid, const string& source) const; + /** + * Sends a copy request to the MAD + * @param oid the image id. + * @param drv_msg xml data for the mad operation. + */ + void cp(int oid, const string& drv_msg) const; /** * Sends a move request to the MAD: "MV IMAGE_ID SRC_PATH DST_PATH" @@ -84,18 +89,15 @@ private: /** * Sends a make filesystem request to the MAD: "MKFS IMAGE_ID PATH SIZE_MB" * @param oid the image id. - * @param fs type - * @param size_mb of the image to be created + * @param drv_msg xml data for the mad operation. 
*/ - void mkfs(int oid, - const string& fs, - int size_mb) const; + void mkfs(int oid, const string& drv_msg) const; /** * Sends a delete request to the MAD: "DELETE IMAGE_ID PATH" * @param oid the image id. - * @param destination is the path to the image to be removed + * @param drv_msg xml data for the mad operation. */ - void rm(int oid, const string& destination) const; + void rm(int oid, const string& drv_msg) const; }; /* -------------------------------------------------------------------------- */ diff --git a/include/ImagePool.h b/include/ImagePool.h index 2530fcb20c..92e13af73b 100644 --- a/include/ImagePool.h +++ b/include/ImagePool.h @@ -52,6 +52,9 @@ public: * @param uname name of the user * @param gname name of the group * @param img_template template associated with the image + * @param ds_id the id of the datastore + * @param ds_name the name of the datastore + * @param ds_data the datastore data * @param oid the id assigned to the Image * @param error_str Returns the error reason, if any * @return the oid assigned to the object, @@ -64,6 +67,9 @@ public: const string& uname, const string& gname, ImageTemplate * img_template, + int ds_id, + const string& ds_name, + const string& ds_data, int * oid, string& error_str); @@ -133,6 +139,7 @@ public: * @param img_type will be set to the used image's type * @param uid of VM owner (to look for the image id within its images) * @param image_id on success returns the acquired image id + * @param error_str string describing the error * @return 0 on success, * -1 error, * -2 not using the pool, @@ -142,7 +149,8 @@ public: int * index, Image::ImageType * img_type, int uid, - int& image_id); + int& image_id, + string& error_str); /** * Generates an Authorization token for the DISK attribute * @param disk the disk to be authorized diff --git a/include/ImageTemplate.h b/include/ImageTemplate.h index 21dd0eec64..36c2727bb6 100644 --- a/include/ImageTemplate.h +++ b/include/ImageTemplate.h @@ -41,11 +41,36 @@ public: 
return Template::check(rs_attr, restricted_attributes); }; + bool is_saving() + { + string saving; + + get(saving_attribute, saving); + + return (saving.empty() == false); + } + + void set_saving() + { + SingleAttribute * attr= new SingleAttribute(saving_attribute, "YES"); + + erase(saving_attribute); + + set(attr); + } + + void unset_saving() + { + erase(saving_attribute); + } + private: friend class ImagePool; static vector restricted_attributes; + static string saving_attribute; + /** * Stores the attributes as restricted, these attributes will be used in * ImageTemplate::check diff --git a/include/Nebula.h b/include/Nebula.h index 04e2c52743..deee3fb633 100644 --- a/include/Nebula.h +++ b/include/Nebula.h @@ -27,6 +27,8 @@ #include "UserPool.h" #include "VMTemplatePool.h" #include "GroupPool.h" +#include "DatastorePool.h" +#include "ClusterPool.h" #include "VirtualMachineManager.h" #include "LifeCycleManager.h" @@ -91,6 +93,16 @@ public: return tpool; }; + DatastorePool * get_dspool() + { + return dspool; + }; + + ClusterPool * get_clpool() + { + return clpool; + }; + // -------------------------------------------------------------- // Manager Accessors // -------------------------------------------------------------- @@ -187,9 +199,8 @@ public: }; /** - * Returns the path where the OpenNebula DB and the VM local directories - * are stored. When ONE_LOCATION is defined this path points to - * $ONE_LOCATION/var, otherwise it is /var/lib/one. + * Returns the default var location. When ONE_LOCATION is defined this path + * points to $ONE_LOCATION/var, otherwise it is /var/lib/one. * @return the log location. */ const string& get_var_location() @@ -197,6 +208,40 @@ public: return var_location; }; + /** + * Returns the default var location. When ONE_LOCATION is defined this path + * points to $ONE_LOCATION/var, otherwise it is /var/lib/one. + * @return the log location. 
+ */ + const string& get_ds_location() + { + return ds_location; + }; + + /** + * Returns the Transfer Manager for the system datastore + * @return the tm name. + */ + string get_system_ds_tm_mad() + { + Datastore * ds; + string tm_mad = ""; + + ds = dspool->get(DatastorePool::SYSTEM_DS_ID, true); + + if ( ds == 0 ) + { + NebulaLog::log("DaS", Log::ERROR, "Can not get system datastore"); + return tm_mad; + } + + tm_mad = ds->get_tm_mad(); + + ds->unlock(); + + return tm_mad; + }; + /** * Returns the path of the log file for a VM, depending where OpenNebula is * installed, @@ -254,8 +299,8 @@ private: // ----------------------------------------------------------------------- Nebula():nebula_configuration(0),db(0),vmpool(0),hpool(0),vnpool(0), - upool(0),ipool(0),gpool(0),tpool(0),lcm(0),vmm(0),im(0),tm(0), - dm(0),rm(0),hm(0),authm(0),aclm(0),imagem(0) + upool(0),ipool(0),gpool(0),tpool(0),dspool(0),clpool(0), + lcm(0),vmm(0),im(0),tm(0),dm(0),rm(0),hm(0),authm(0),aclm(0),imagem(0) { const char * nl = getenv("ONE_LOCATION"); @@ -268,6 +313,7 @@ private: log_location = "/var/log/one/"; var_location = "/var/lib/one/"; remotes_location = "/var/lib/one/remotes/"; + ds_location = "/var/lib/one/datastores/"; } else { @@ -283,6 +329,7 @@ private: log_location = nebula_location + "var/"; var_location = nebula_location + "var/"; remotes_location = nebula_location + "var/remotes/"; + ds_location = nebula_location + "var/datastores/"; } }; @@ -323,6 +370,16 @@ private: delete tpool; } + if ( dspool != 0) + { + delete dspool; + } + + if ( clpool != 0) + { + delete clpool; + } + if ( vmm != 0) { delete vmm; @@ -400,6 +457,7 @@ private: string var_location; string hook_location; string remotes_location; + string ds_location; string hostname; @@ -421,6 +479,8 @@ private: ImagePool * ipool; GroupPool * gpool; VMTemplatePool * tpool; + DatastorePool * dspool; + ClusterPool * clpool; // --------------------------------------------------------------- // Nebula Managers diff --git 
a/include/ObjectCollection.h b/include/ObjectCollection.h index 058c646b6b..4d11b25f32 100644 --- a/include/ObjectCollection.h +++ b/include/ObjectCollection.h @@ -28,7 +28,7 @@ using namespace std; */ class ObjectCollection { -protected: +public: ObjectCollection(const string& _collection_name) :collection_name(_collection_name){}; diff --git a/include/PoolObjectSQL.h b/include/PoolObjectSQL.h index 7829f0c4e6..bd4e72b449 100644 --- a/include/PoolObjectSQL.h +++ b/include/PoolObjectSQL.h @@ -49,29 +49,33 @@ public: */ enum ObjectType { - VM = 0x0000001000000000LL, - HOST = 0x0000002000000000LL, - NET = 0x0000004000000000LL, - IMAGE = 0x0000008000000000LL, - USER = 0x0000010000000000LL, - TEMPLATE = 0x0000020000000000LL, - GROUP = 0x0000040000000000LL, - ACL = 0x0000080000000000LL + VM = 0x0000001000000000LL, + HOST = 0x0000002000000000LL, + NET = 0x0000004000000000LL, + IMAGE = 0x0000008000000000LL, + USER = 0x0000010000000000LL, + TEMPLATE = 0x0000020000000000LL, + GROUP = 0x0000040000000000LL, + ACL = 0x0000080000000000LL, + DATASTORE = 0x0000100000000000LL, + CLUSTER = 0x0000200000000000LL }; static string type_to_str(ObjectType ob) { switch (ob) { - case VM: return "VM" ; break; - case HOST: return "HOST" ; break; - case NET: return "NET" ; break; - case IMAGE: return "IMAGE" ; break; - case USER: return "USER" ; break; - case TEMPLATE: return "TEMPLATE" ; break; - case GROUP: return "GROUP" ; break; - case ACL: return "ACL" ; break; - default: return ""; + case VM: return "VM" ; break; + case HOST: return "HOST" ; break; + case NET: return "NET" ; break; + case IMAGE: return "IMAGE" ; break; + case USER: return "USER" ; break; + case TEMPLATE: return "TEMPLATE" ; break; + case GROUP: return "GROUP" ; break; + case ACL: return "ACL" ; break; + case DATASTORE: return "DATASTORE" ; break; + case CLUSTER: return "CLUSTER" ; break; + default: return ""; } }; @@ -359,7 +363,7 @@ public: * by classes that uses templates * @return a new template */ - virtual Template 
* get_new_template() + virtual Template * get_new_template() const { return 0; } @@ -369,7 +373,7 @@ public: * after calling this method * @param tmpl string representation of the template */ - int replace_template(const string& tmpl_str, string& error); + virtual int replace_template(const string& tmpl_str, string& error); /** diff --git a/include/Request.h b/include/Request.h index 1078fd8898..9c479d97b6 100644 --- a/include/Request.h +++ b/include/Request.h @@ -219,6 +219,26 @@ protected: * @return string for logging */ string allocate_error (PoolObjectSQL::ObjectType obj, const string& error); + + /** + * Locks the requested object, gets information, and unlocks it + * + * @param pool object pool + * @param id of the object + * @param type of the object + * @param att the specific request attributes + * + * @param perms returns the object's permissions + * @param name returns the object's name + * + * @return 0 on success, -1 otherwise + */ + int get_info (PoolSQL * pool, + int id, + PoolObjectSQL::ObjectType type, + RequestAttributes& att, + PoolObjectAuth& perms, + string& name); }; /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerAllocate.h b/include/RequestManagerAllocate.h index 446562b5d3..e13bdbc42c 100644 --- a/include/RequestManagerAllocate.h +++ b/include/RequestManagerAllocate.h @@ -23,6 +23,7 @@ #include "VirtualNetworkTemplate.h" #include "ImageTemplate.h" #include "VirtualMachineTemplate.h" +#include "DatastoreTemplate.h" using namespace std; @@ -40,17 +41,21 @@ protected: :Request(method_name,xml_args,help), do_template(dt) { auth_op = AuthRequest::CREATE; + + Nebula& nd = Nebula::instance(); + clpool = nd.get_clpool(); }; ~RequestManagerAllocate(){}; /* -------------------------------------------------------------------- */ - void request_execute(xmlrpc_c::paramList const& _paramList, + virtual void request_execute(xmlrpc_c::paramList const& _paramList, RequestAttributes& att); - 
virtual bool allocate_authorization(Template * obj_template, - RequestAttributes& att); + virtual bool allocate_authorization(Template * obj_template, + RequestAttributes& att, + PoolObjectAuth * cluster_perms); /* -------------------------------------------------------------------- */ @@ -60,7 +65,35 @@ protected: Template * tmpl, int& id, string& error_str, - RequestAttributes& att) = 0; + RequestAttributes& att) + { + return -1; + }; + + virtual int pool_allocate(xmlrpc_c::paramList const& _paramList, + Template * tmpl, + int& id, + string& error_str, + RequestAttributes& att, + int cluster_id, + const string& cluster_name) + { + return pool_allocate(_paramList, tmpl, id, error_str, att); + }; + + virtual int get_cluster_id(xmlrpc_c::paramList const& paramList) + { + return ClusterPool::NONE_CLUSTER_ID; + }; + + virtual int add_to_cluster(Cluster* cluster, int id, string& error_msg) + { + return -1; + }; + +protected: + ClusterPool * clpool; + private: bool do_template; @@ -85,6 +118,7 @@ public: }; ~VirtualMachineAllocate(){}; + /* --------------------------------------------------------------------- */ Template * get_object_template() @@ -98,8 +132,9 @@ public: string& error_str, RequestAttributes& att); - bool allocate_authorization(Template * obj_template, - RequestAttributes& att); + bool allocate_authorization(Template * obj_template, + RequestAttributes& att, + PoolObjectAuth * cluster_perms); }; /* ------------------------------------------------------------------------- */ @@ -111,7 +146,7 @@ public: VirtualNetworkAllocate(): RequestManagerAllocate("VirtualNetworkAllocate", "Allocates a new virtual network", - "A:ss", + "A:ssi", true) { Nebula& nd = Nebula::instance(); @@ -128,11 +163,23 @@ public: return new VirtualNetworkTemplate; }; - int pool_allocate(xmlrpc_c::paramList const& _paramList, + int pool_allocate(xmlrpc_c::paramList const& _paramList, Template * tmpl, - int& id, + int& id, string& error_str, - RequestAttributes& att); + 
RequestAttributes& att, + int cluster_id, + const string& cluster_name); + + int get_cluster_id(xmlrpc_c::paramList const& paramList) + { + return xmlrpc_c::value_int(paramList.getInt(2)); + }; + + int add_to_cluster(Cluster* cluster, int id, string& error_msg) + { + return cluster->add_vnet(id, error_msg); + }; }; /* ------------------------------------------------------------------------- */ @@ -144,9 +191,9 @@ public: ImageAllocate(): RequestManagerAllocate("ImageAllocate", "Allocates a new image", - "A:ss", + "A:ssi", true) - { + { Nebula& nd = Nebula::instance(); pool = nd.get_ipool(); auth_object = PoolObjectSQL::IMAGE; @@ -156,16 +203,12 @@ public: /* --------------------------------------------------------------------- */ - Template * get_object_template() - { - return new ImageTemplate; - }; + void request_execute(xmlrpc_c::paramList const& _paramList, + RequestAttributes& att); - int pool_allocate(xmlrpc_c::paramList const& _paramList, - Template * tmpl, - int& id, - string& error_str, - RequestAttributes& att); + bool allocate_authorization(Template * obj_template, + RequestAttributes& att, + PoolObjectAuth * cluster_perms); }; /* ------------------------------------------------------------------------- */ @@ -210,7 +253,7 @@ public: HostAllocate(): RequestManagerAllocate("HostAllocate", "Allocates a new host", - "A:ssssss", + "A:sssssi", false) { Nebula& nd = Nebula::instance(); @@ -220,11 +263,25 @@ public: ~HostAllocate(){}; - int pool_allocate(xmlrpc_c::paramList const& _paramList, + /* --------------------------------------------------------------------- */ + + int pool_allocate(xmlrpc_c::paramList const& _paramList, Template * tmpl, - int& id, + int& id, string& error_str, - RequestAttributes& att); + RequestAttributes& att, + int cluster_id, + const string& cluster_name); + + int get_cluster_id(xmlrpc_c::paramList const& paramList) + { + return xmlrpc_c::value_int(paramList.getInt(5)); + }; + + int add_to_cluster(Cluster* cluster, int id, string& 
error_msg) + { + return cluster->add_host(id, error_msg); + }; }; /* ------------------------------------------------------------------------- */ @@ -279,6 +336,77 @@ public: RequestAttributes& att); }; +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class DatastoreAllocate: public RequestManagerAllocate +{ +public: + DatastoreAllocate(): + RequestManagerAllocate("DatastoreAllocate", + "Allocates a new Datastore", + "A:ssi", + true) + { + Nebula& nd = Nebula::instance(); + pool = nd.get_dspool(); + auth_object = PoolObjectSQL::DATASTORE; + }; + + ~DatastoreAllocate(){}; + + /* -------------------------------------------------------------------- */ + + Template * get_object_template() + { + return new DatastoreTemplate; + }; + + int pool_allocate(xmlrpc_c::paramList const& _paramList, + Template * tmpl, + int& id, + string& error_str, + RequestAttributes& att, + int cluster_id, + const string& cluster_name); + + int get_cluster_id(xmlrpc_c::paramList const& paramList) + { + return xmlrpc_c::value_int(paramList.getInt(2)); + }; + + int add_to_cluster(Cluster* cluster, int id, string& error_msg) + { + return cluster->add_datastore(id, error_msg); + }; +}; + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class ClusterAllocate: public RequestManagerAllocate +{ +public: + ClusterAllocate(): + RequestManagerAllocate("ClusterAllocate", + "Allocates a new cluster", + "A:ss", + false) + { + Nebula& nd = Nebula::instance(); + pool = nd.get_clpool(); + auth_object = PoolObjectSQL::CLUSTER; + }; + + ~ClusterAllocate(){}; + + int pool_allocate(xmlrpc_c::paramList const& _paramList, + Template * tmpl, + int& id, + string& error_str, + RequestAttributes& att); +}; + /* -------------------------------------------------------------------------- */ /* 
-------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerChmod.h b/include/RequestManagerChmod.h index 60282af845..4341c1fc4f 100644 --- a/include/RequestManagerChmod.h +++ b/include/RequestManagerChmod.h @@ -81,7 +81,6 @@ public: /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ - class VirtualNetworkChmod: public RequestManagerChmod { public: @@ -117,6 +116,25 @@ public: }; +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class DatastoreChmod: public RequestManagerChmod +{ +public: + DatastoreChmod(): + RequestManagerChmod("DatastoreChmod", + "Changes permission bits of a datastore") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_dspool(); + auth_object = PoolObjectSQL::DATASTORE; + }; + + ~DatastoreChmod(){}; + +}; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerChown.h b/include/RequestManagerChown.h index 78569079e0..9ccedc49a0 100644 --- a/include/RequestManagerChown.h +++ b/include/RequestManagerChown.h @@ -52,13 +52,6 @@ protected: virtual void request_execute(xmlrpc_c::paramList const& _paramList, RequestAttributes& att); - - int get_info (PoolSQL * pool, - int id, - PoolObjectSQL::ObjectType type, - RequestAttributes& att, - PoolObjectAuth& perms, - string& name); }; /* ------------------------------------------------------------------------- */ @@ -160,6 +153,25 @@ public: RequestAttributes& att); }; +/* 
------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class DatastoreChown: public RequestManagerChown +{ +public: + DatastoreChown(): + RequestManagerChown("Datastore", + "Changes ownership of a datastore") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_dspool(); + auth_object = PoolObjectSQL::DATASTORE; + }; + + ~DatastoreChown(){}; + +}; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerCluster.h b/include/RequestManagerCluster.h new file mode 100644 index 0000000000..22ab693207 --- /dev/null +++ b/include/RequestManagerCluster.h @@ -0,0 +1,329 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* -------------------------------------------------------------------------- */ + +#ifndef REQUEST_MANAGER_CLUSTER_H +#define REQUEST_MANAGER_CLUSTER_H + +#include "Request.h" +#include "Nebula.h" + +using namespace std; + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class RequestManagerCluster: public Request +{ +protected: + RequestManagerCluster(const string& method_name, + const string& help, + const string& params) + :Request(method_name,params,help) + { + Nebula& nd = Nebula::instance(); + clpool = nd.get_clpool(); + hpool = nd.get_hpool(); + dspool = nd.get_dspool(); + vnpool = nd.get_vnpool(); + + auth_object = PoolObjectSQL::CLUSTER; + auth_op = AuthRequest::ADMIN; + }; + + ~RequestManagerCluster(){}; + + /* --------------------------------------------------------------------- */ + + ClusterPool * clpool; + HostPool * hpool; + DatastorePool * dspool; + VirtualNetworkPool * vnpool; + + /* --------------------------------------------------------------------- */ + + virtual void request_execute(xmlrpc_c::paramList const& paramList, + RequestAttributes& att) = 0; + + void add_generic( + int cluster_id, + int object_id, + RequestAttributes& att, + PoolSQL * pool, + PoolObjectSQL::ObjectType type); + + virtual int add_object(Cluster* cluster, int id, string& error_msg) = 0; + + virtual int del_object(Cluster* cluster, int id, string& error_msg) = 0; + + virtual void get(int oid, bool lock, PoolObjectSQL ** object, Clusterable ** cluster_obj) = 0; +}; + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class RequestManagerClusterHost : public RequestManagerCluster +{ +public: + RequestManagerClusterHost( + const string& method_name, + const string& 
help,
+        const string& params):
+            RequestManagerCluster(method_name, help, params){};
+
+    ~RequestManagerClusterHost(){};
+
+    virtual int add_object(Cluster* cluster, int id, string& error_msg)
+    {
+        return cluster->add_host(id, error_msg);
+    };
+
+    virtual int del_object(Cluster* cluster, int id, string& error_msg)
+    {
+        return cluster->del_host(id, error_msg);
+    };
+
+    virtual void get(int oid, bool lock, PoolObjectSQL ** object, Clusterable ** cluster_obj)
+    {
+        Host * host = hpool->get(oid, lock);
+
+        *object      = static_cast<PoolObjectSQL *>(host);
+        *cluster_obj = static_cast<Clusterable *>(host);
+    };
+};
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+
+class ClusterAddHost : public RequestManagerClusterHost
+{
+public:
+    ClusterAddHost():
+        RequestManagerClusterHost("ClusterAddHost",
+                "Adds a host to the cluster",
+                "A:sii"){};
+
+    ~ClusterAddHost(){};
+
+    void request_execute(xmlrpc_c::paramList const& paramList,
+                         RequestAttributes& att)
+    {
+        int cluster_id = xmlrpc_c::value_int(paramList.getInt(1));
+        int object_id  = xmlrpc_c::value_int(paramList.getInt(2));
+
+        return add_generic(cluster_id, object_id, att,
+                hpool, PoolObjectSQL::HOST);
+    }
+};
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+
+class ClusterDelHost : public RequestManagerClusterHost
+{
+public:
+    ClusterDelHost():
+        RequestManagerClusterHost("ClusterDelHost",
+                "Deletes a host from its cluster",
+                "A:sii"){};
+
+    ~ClusterDelHost(){};
+
+    void request_execute(xmlrpc_c::paramList const& paramList,
+                         RequestAttributes& att)
+    {
+        // First param is ignored, as objects can be assigned to only
+        // one cluster
+        int cluster_id = ClusterPool::NONE_CLUSTER_ID;
+        int object_id  = xmlrpc_c::value_int(paramList.getInt(2));
+
+        return add_generic(cluster_id, object_id, att,
+                hpool, PoolObjectSQL::HOST);
+    }
+};
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+
+class RequestManagerClusterDatastore : public RequestManagerCluster
+{
+public:
+    RequestManagerClusterDatastore(
+        const string& method_name,
+        const string& help,
+        const string& params):
+            RequestManagerCluster(method_name, help, params){};
+
+    ~RequestManagerClusterDatastore(){};
+
+    virtual int add_object(Cluster* cluster, int id, string& error_msg)
+    {
+        return cluster->add_datastore(id, error_msg);
+    };
+
+    virtual int del_object(Cluster* cluster, int id, string& error_msg)
+    {
+        return cluster->del_datastore(id, error_msg);
+    };
+
+    virtual void get(int oid, bool lock, PoolObjectSQL ** object, Clusterable ** cluster_obj)
+    {
+        Datastore * ds = dspool->get(oid, lock);
+
+        *object      = static_cast<PoolObjectSQL *>(ds);
+        *cluster_obj = static_cast<Clusterable *>(ds);
+    };
+};
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+
+class ClusterAddDatastore : public RequestManagerClusterDatastore
+{
+public:
+    ClusterAddDatastore():
+        RequestManagerClusterDatastore("ClusterAddDatastore",
+                "Adds a datastore to the cluster",
+                "A:sii"){};
+
+    ~ClusterAddDatastore(){};
+
+    void request_execute(xmlrpc_c::paramList const& paramList,
+                         RequestAttributes& att)
+    {
+        int cluster_id = xmlrpc_c::value_int(paramList.getInt(1));
+        int object_id  = xmlrpc_c::value_int(paramList.getInt(2));
+
+        return add_generic(cluster_id, object_id, att,
+                dspool, PoolObjectSQL::DATASTORE);
+    }
+};
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+
+class ClusterDelDatastore : public RequestManagerClusterDatastore
+{
+public:
+    ClusterDelDatastore():
+        RequestManagerClusterDatastore("ClusterDelDatastore",
+                "Deletes a datastore from its cluster",
+                "A:sii"){};
+
+    ~ClusterDelDatastore(){};
+
+    void request_execute(xmlrpc_c::paramList const& paramList,
+                         RequestAttributes& att)
+    {
+        // First param is ignored, as objects can be assigned to only
+        // one cluster
+        int cluster_id = ClusterPool::NONE_CLUSTER_ID;
+        int object_id  = xmlrpc_c::value_int(paramList.getInt(2));
+
+        return add_generic(cluster_id, object_id, att,
+                dspool, PoolObjectSQL::DATASTORE);
+    }
+};
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+
+class RequestManagerClusterVNet : public RequestManagerCluster
+{
+public:
+
+    RequestManagerClusterVNet(
+        const string& method_name,
+        const string& help,
+        const string& params):
+            RequestManagerCluster(method_name, help, params){};
+
+    ~RequestManagerClusterVNet(){};
+
+    virtual int add_object(Cluster* cluster, int id, string& error_msg)
+    {
+        return cluster->add_vnet(id, error_msg);
+    };
+
+    virtual int del_object(Cluster* cluster, int id, string& error_msg)
+    {
+        return cluster->del_vnet(id, error_msg);
+    };
+
+    virtual void get(int oid, bool lock, PoolObjectSQL ** object, Clusterable ** cluster_obj)
+    {
+        VirtualNetwork * vnet = vnpool->get(oid, lock);
+
+        *object      = static_cast<PoolObjectSQL *>(vnet);
+        *cluster_obj = static_cast<Clusterable *>(vnet);
+    };
+};
+
+/* ------------------------------------------------------------------------- */
+/* ------------------------------------------------------------------------- */
+
+class ClusterAddVNet : public RequestManagerClusterVNet
+{
+public:
+    ClusterAddVNet():
+        RequestManagerClusterVNet("ClusterAddVNet",
+                "Adds a virtual network to the cluster",
+                "A:sii"){};
+
+    ~ClusterAddVNet(){};
+
+    void request_execute(xmlrpc_c::paramList const& paramList,
+                         RequestAttributes& att)
+    {
+        int cluster_id = xmlrpc_c::value_int(paramList.getInt(1));
+        int object_id  = xmlrpc_c::value_int(paramList.getInt(2));
+
+        return
add_generic(cluster_id, object_id, att, + vnpool, PoolObjectSQL::NET); + } +}; + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class ClusterDelVNet : public RequestManagerClusterVNet +{ +public: + ClusterDelVNet(): + RequestManagerClusterVNet("ClusterDelVNet", + "Deletes a virtual network from its cluster", + "A:sii"){}; + + ~ClusterDelVNet(){}; + + void request_execute(xmlrpc_c::paramList const& paramList, + RequestAttributes& att) + { + // First param is ignored, as objects can be assigned to only + // one cluster + int cluster_id = ClusterPool::NONE_CLUSTER_ID; + int object_id = xmlrpc_c::value_int(paramList.getInt(2)); + + return add_generic(cluster_id, object_id, att, + vnpool, PoolObjectSQL::NET); + } +}; + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +#endif diff --git a/include/RequestManagerDelete.h b/include/RequestManagerDelete.h index 9162c26a30..0eaf8c6359 100644 --- a/include/RequestManagerDelete.h +++ b/include/RequestManagerDelete.h @@ -35,6 +35,9 @@ protected: :Request(method_name,"A:si",help) { auth_op = AuthRequest::MANAGE; + + Nebula& nd = Nebula::instance(); + clpool = nd.get_clpool(); }; ~RequestManagerDelete(){}; @@ -49,15 +52,20 @@ protected: /* -------------------------------------------------------------------- */ - virtual int drop(int oid, PoolObjectSQL * object, string& error_msg) + virtual int drop(int oid, PoolObjectSQL * object, string& error_msg); + + virtual int get_cluster_id(PoolObjectSQL * object) { - int rc = pool->drop(object, error_msg); - - object->unlock(); - - return rc; + return ClusterPool::NONE_CLUSTER_ID; }; + virtual int del_from_cluster(Cluster* cluster, int id, string& error_msg) + { + return -1; + 
}; + +private: + ClusterPool * clpool; }; @@ -95,6 +103,18 @@ public: }; ~VirtualNetworkDelete(){}; + + /* -------------------------------------------------------------------- */ + + int get_cluster_id(PoolObjectSQL * object) + { + return static_cast(object)->get_cluster_id(); + }; + + int del_from_cluster(Cluster* cluster, int id, string& error_msg) + { + return cluster->del_vnet(id, error_msg); + }; }; /* ------------------------------------------------------------------------- */ @@ -135,12 +155,23 @@ public: }; ~HostDelete(){}; + + /* -------------------------------------------------------------------- */ + + int get_cluster_id(PoolObjectSQL * object) + { + return static_cast(object)->get_cluster_id(); + }; + + int del_from_cluster(Cluster* cluster, int id, string& error_msg) + { + return cluster->del_host(id, error_msg); + }; }; /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ - class GroupDelete: public RequestManagerDelete { public: @@ -178,6 +209,54 @@ public: int drop(int oid, PoolObjectSQL * object, string& error_msg); }; +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class DatastoreDelete: public RequestManagerDelete +{ +public: + DatastoreDelete(): + RequestManagerDelete("DatastoreDelete", "Deletes a datastore") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_dspool(); + auth_object = PoolObjectSQL::DATASTORE; + auth_op = AuthRequest::ADMIN; + }; + + ~DatastoreDelete(){}; + + /* -------------------------------------------------------------------- */ + + int get_cluster_id(PoolObjectSQL * object) + { + return static_cast(object)->get_cluster_id(); + }; + + int del_from_cluster(Cluster* cluster, int id, string& error_msg) + { + return cluster->del_datastore(id, error_msg); + }; +}; + +/* 
------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class ClusterDelete: public RequestManagerDelete +{ +public: + ClusterDelete(): + RequestManagerDelete("ClusterDelete", "Deletes a cluster") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_clpool(); + auth_object = PoolObjectSQL::CLUSTER; + auth_op = AuthRequest::ADMIN; + }; + + ~ClusterDelete(){}; +}; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerInfo.h b/include/RequestManagerInfo.h index 6dbc8e8622..3d5498f997 100644 --- a/include/RequestManagerInfo.h +++ b/include/RequestManagerInfo.h @@ -162,7 +162,6 @@ public: /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ - class GroupInfo: public RequestManagerInfo { public: @@ -196,6 +195,42 @@ public: ~UserInfo(){}; }; +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class DatastoreInfo: public RequestManagerInfo +{ +public: + DatastoreInfo(): + RequestManagerInfo("DatastoreInfo", + "Returns datastore information") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_dspool(); + auth_object = PoolObjectSQL::DATASTORE; + }; + + ~DatastoreInfo(){}; +}; + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class ClusterInfo: public RequestManagerInfo +{ +public: + ClusterInfo(): + RequestManagerInfo("ClusterInfo", + "Returns cluster information") + { + Nebula& nd = Nebula::instance(); + pool = 
nd.get_clpool(); + auth_object = PoolObjectSQL::CLUSTER; + }; + + ~ClusterInfo(){}; +}; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerPoolInfoFilter.h b/include/RequestManagerPoolInfoFilter.h index 38932edad5..2270f7c471 100644 --- a/include/RequestManagerPoolInfoFilter.h +++ b/include/RequestManagerPoolInfoFilter.h @@ -224,6 +224,54 @@ public: xmlrpc_c::paramList const& paramList, RequestAttributes& att); }; +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class DatastorePoolInfo: public RequestManagerPoolInfoFilter +{ +public: + DatastorePoolInfo(): + RequestManagerPoolInfoFilter("DatastorePoolInfo", + "Returns the datastore pool", + "A:s") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_dspool(); + auth_object = PoolObjectSQL::DATASTORE; + }; + + ~DatastorePoolInfo(){}; + + /* -------------------------------------------------------------------- */ + + void request_execute( + xmlrpc_c::paramList const& paramList, RequestAttributes& att); +}; + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class ClusterPoolInfo: public RequestManagerPoolInfoFilter +{ +public: + ClusterPoolInfo(): + RequestManagerPoolInfoFilter("ClusterPoolInfo", + "Returns the cluster pool", + "A:s") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_clpool(); + auth_object = PoolObjectSQL::CLUSTER; + }; + + ~ClusterPoolInfo(){}; + + /* -------------------------------------------------------------------- */ + + void request_execute( + xmlrpc_c::paramList const& paramList, RequestAttributes& att); +}; + /* 
-------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerUpdateTemplate.h b/include/RequestManagerUpdateTemplate.h index 67b6068d0a..85b633ba85 100644 --- a/include/RequestManagerUpdateTemplate.h +++ b/include/RequestManagerUpdateTemplate.h @@ -134,6 +134,24 @@ public: ~UserUpdateTemplate(){}; }; +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +class DatastoreUpdateTemplate : public RequestManagerUpdateTemplate +{ +public: + DatastoreUpdateTemplate(): + RequestManagerUpdateTemplate("DatastoreUpdateTemplate", + "Updates a datastore template") + { + Nebula& nd = Nebula::instance(); + pool = nd.get_dspool(); + auth_object = PoolObjectSQL::DATASTORE; + }; + + ~DatastoreUpdateTemplate(){}; +}; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/include/RequestManagerVirtualMachine.h b/include/RequestManagerVirtualMachine.h index 8776f6e03f..6b5472807e 100644 --- a/include/RequestManagerVirtualMachine.h +++ b/include/RequestManagerVirtualMachine.h @@ -48,18 +48,20 @@ protected: virtual void request_execute(xmlrpc_c::paramList const& _paramList, RequestAttributes& att) = 0; - bool vm_authorization(int id, ImageTemplate *tmpl, - RequestAttributes& att, PoolObjectAuth* host_perms); + bool vm_authorization(int id, + ImageTemplate * tmpl, + RequestAttributes& att, + PoolObjectAuth * host_perms, + PoolObjectAuth * ds_perm); int get_host_information(int hid, string& name, string& vmm, string& vnm, - string& tm, RequestAttributes& att, 
PoolObjectAuth& host_perms); + RequestAttributes& att, PoolObjectAuth& host_perms); int add_history(VirtualMachine * vm, int hid, const string& hostname, const string& vmm_mad, const string& vnm_mad, - const string& tm_mad, RequestAttributes& att); VirtualMachine * get_vm(int id, RequestAttributes& att); diff --git a/include/TransferManager.h b/include/TransferManager.h index 54daf0f824..6977ad76fb 100644 --- a/include/TransferManager.h +++ b/include/TransferManager.h @@ -115,6 +115,11 @@ private: */ ActionManager am; + /** + * Generic name for the TransferManager driver + */ + static const char * transfer_driver_name; + /** * Returns a pointer to a Transfer Manager driver. * @param name of an attribute of the driver (e.g. its type) @@ -145,6 +150,18 @@ private: (MadManager::get(0,_name,name)); }; + /** + * Returns a pointer to a Transfer Manager driver. The driver is + * searched by its name. + * @return the TM driver for the Transfer Manager + */ + const TransferManagerDriver * get() + { + string _name("NAME"); + return static_cast + (MadManager::get(0,_name,transfer_driver_name)); + }; + /** * Function to execute the Manager action loop method within a new pthread * (requires C linkage) diff --git a/include/User.h b/include/User.h index befac1f21b..770b5943bc 100644 --- a/include/User.h +++ b/include/User.h @@ -162,7 +162,7 @@ public: /** * Factory method for image templates */ - Template * get_new_template() + Template * get_new_template() const { return new UserTemplate; } diff --git a/include/UserPool.h b/include/UserPool.h index 3926c71613..afe2e54e36 100644 --- a/include/UserPool.h +++ b/include/UserPool.h @@ -176,6 +176,16 @@ public: */ static const char * SERVER_NAME; + /** + * Name of the oneadmin user + */ + static string oneadmin_name; + + /** + * Identifier for the oneadmin user + */ + static const int ONEADMIN_ID; + private: //-------------------------------------------------------------------------- // Configuration Attributes for Users diff --git 
a/include/VMTemplate.h b/include/VMTemplate.h index 610f7e43a5..b48f7ddb75 100644 --- a/include/VMTemplate.h +++ b/include/VMTemplate.h @@ -44,7 +44,7 @@ public: /** * Factory method for virtual machine templates */ - Template * get_new_template() + Template * get_new_template() const { return new VirtualMachineTemplate; } diff --git a/include/VirtualMachine.h b/include/VirtualMachine.h index 8df9a9bf9a..9254ec2b4c 100644 --- a/include/VirtualMachine.h +++ b/include/VirtualMachine.h @@ -206,6 +206,24 @@ public: etime = et; }; + // ------------------------------------------------------------------------ + // Access to VM locations + // ------------------------------------------------------------------------ + /** + * Returns the remote VM directory. The VM remote dir is in the form: + * $DATASTORE_LOCATION/$SYSTEM_DS_ID/$VM_ID. The remote system_dir stores + * disks for a running VM in the target host. + * @return the remote system directory for the VM + */ + string get_remote_system_dir() const; + + /** + * Returns the local VM directory. The VM local dir is in the form: + * $SYSTEM_DS_BASE_PATH/$VM_ID. Temporary stores VM disks. + * @return the system directory for the VM + */ + string get_system_dir() const; + // ------------------------------------------------------------------------ // History // ------------------------------------------------------------------------ @@ -215,10 +233,8 @@ public: void add_history( int hid, const string& hostname, - const string& vm_dir, const string& vmm_mad, - const string& vnm_mad, - const string& tm_mad); + const string& vnm_mad); /** * Duplicates the last history record. Only the host related fields are @@ -294,26 +310,6 @@ public: return previous_history->vnm_mad_name; }; - /** - * Returns the TM driver name for the current host. The hasHistory() - * function MUST be called before this one. 
- * @return the TM mad name - */ - const string & get_tm_mad() const - { - return history->tm_mad_name; - }; - - /** - * Returns the TM driver name for the previous host. The - * hasPreviousHistory() function MUST be called before this one. - * @return the TM mad name - */ - const string & get_previous_tm_mad() const - { - return previous_history->tm_mad_name; - }; - /** * Returns the transfer filename. The transfer file is in the form: * $ONE_LOCATION/var/$VM_ID/transfer.$SEQ @@ -376,30 +372,6 @@ public: return history->checkpoint_file; }; - /** - * Returns the remote VM directory. The VM remote dir is in the form: - * $VM_DIR/$VM_ID/ - * or, in case that OpenNebula is installed in root - * /var/lib/one/$VM_ID/ - * The hasHistory() function MUST be called before this one. - * @return the remote directory - */ - const string & get_remote_dir() const - { - return history->vm_rhome; - }; - - /** - * Returns the local VM directory. The VM local dir is in the form: - * $ONE_LOCATION/var/$VM_ID/ - * The hasHistory() function MUST be called before this one. - * @return the remote directory - */ - const string & get_local_dir() const - { - return history->vm_lhome; - }; - /** * Returns the hostname for the current host. The hasHistory() * function MUST be called before this one. @@ -570,7 +542,7 @@ public: /** * Factory method for virtual machine templates */ - Template * get_new_template() + Template * get_new_template() const { return new VirtualMachineTemplate; } @@ -681,15 +653,27 @@ public: int generate_context(string &files); // ------------------------------------------------------------------------ - // Image repository related functions + // Datastore related functions // ------------------------------------------------------------------------ /** * Set the SAVE_AS attribute for the "disk_id"th disk. * @param disk_id Index of the disk to save - * @param img_id ID of the image this disk will be saved to. 
+ * @param source to save the disk (SAVE_AS_SOURCE) + * @param img_id ID of the image this disk will be saved to (SAVE_AS). * @return 0 if success */ - int save_disk(int disk_id, int img_id, string& error_str); + int save_disk(const string& disk_id, + const string& source, + int img_id); + + /** + * Get the original image id of the disk. It also checks that the disk can + * be saved_as. + * @param disk_id Index of the disk to save + * @param error_str describes the error + * @return -1 if failure + */ + int get_image_from_disk(int disk_id, string& error_str); // ------------------------------------------------------------------------ // Authorization related functions @@ -781,14 +765,13 @@ private: */ History * previous_history; - /** * Complete set of history records for the VM */ vector history_records; // ------------------------------------------------------------------------- - // Logging + // Logging & Dirs // ------------------------------------------------------------------------- /** @@ -797,7 +780,7 @@ private: * or, in case that OpenNebula is installed in root * /var/log/one/$VM_ID.log */ - FileLog * _log; + FileLog * _log; // ************************************************************************* // DataBase implementation (Private) @@ -894,6 +877,14 @@ private: */ int parse_requirements(string& error_str); + /** + * Adds automatic placement requirements: Datastore and Cluster + * + * @param error_str Returns the error reason, if any + * @return 0 on success + */ + int automatic_requirements(string& error_str); + /** * Parse the "GRAPHICS" attribute and generates a default PORT if not * defined diff --git a/include/VirtualNetwork.h b/include/VirtualNetwork.h index 6fa7df2024..069a7449a6 100644 --- a/include/VirtualNetwork.h +++ b/include/VirtualNetwork.h @@ -21,6 +21,7 @@ #include "PoolSQL.h" #include "Leases.h" #include "VirtualNetworkTemplate.h" +#include "Clusterable.h" #include #include @@ -39,7 +40,7 @@ using namespace std; * leases. 
One lease is formed by one IP and one MAC address. * MAC address are derived from IP addresses. */ -class VirtualNetwork : public PoolObjectSQL +class VirtualNetwork : public PoolObjectSQL, public Clusterable { public: @@ -61,7 +62,7 @@ public: /** * Factory method for virtual network templates */ - Template * get_new_template() + Template * get_new_template() const { return new VirtualNetworkTemplate; } @@ -287,6 +288,8 @@ private: int gid, const string& _uname, const string& _gname, + int _cluster_id, + const string& _cluster_name, VirtualNetworkTemplate * _vn_template = 0); ~VirtualNetwork(); diff --git a/include/VirtualNetworkPool.h b/include/VirtualNetworkPool.h index d1256533e3..9c0446c018 100644 --- a/include/VirtualNetworkPool.h +++ b/include/VirtualNetworkPool.h @@ -45,6 +45,8 @@ public: * @param gid the id of the group this object is assigned to * @param vn_template a VirtualNetworkTemplate describing the VNET * @param oid the id assigned to the VM (output) + * @param cluster_id the id of the cluster this VNET will belong to + * @param cluster_name the name of the cluster this VNET will belong to * @param error_str Returns the error reason, if any * @return oid on success, -1 error */ @@ -55,6 +57,8 @@ public: const string& gname, VirtualNetworkTemplate * vn_template, int * oid, + int cluster_id, + const string& cluster_name, string& error_str); /** @@ -92,11 +96,12 @@ public: * metadata * @param nic the nic attribute to be generated * @param vid of the VM requesting the lease + * @param error_str string describing the error * @return 0 on success, * -1 error, * -2 not using the pool */ - int nic_attribute(VectorAttribute * nic, int uid, int vid); + int nic_attribute(VectorAttribute * nic, int uid, int vid, string& error_str); /** * Generates an Authorization token for a NIC attribute @@ -162,7 +167,7 @@ private: */ PoolObjectSQL * create() { - return new VirtualNetwork(-1,-1,"","",0); + return new VirtualNetwork(-1,-1,"","",-1,"",0); }; /** @@ -171,12 
+176,13 @@ private: */ VirtualNetwork * get_nic_by_name(VectorAttribute * nic, const string& name, - int _uid); + int _uidi, + string& error); /** * Function to get a VirtualNetwork by its id, as provided by a VM template */ - VirtualNetwork * get_nic_by_id(const string& id_s); + VirtualNetwork * get_nic_by_id(const string& id_s, string& error); }; #endif /*VIRTUAL_NETWORK_POOL_H_*/ diff --git a/include/test/NebulaTest.h b/include/test/NebulaTest.h index 875459932d..bd73d4f201 100644 --- a/include/test/NebulaTest.h +++ b/include/test/NebulaTest.h @@ -26,6 +26,8 @@ #include "HostPool.h" #include "UserPool.h" #include "VMTemplatePool.h" +#include "DatastorePool.h" +#include "ClusterPool.h" #include "VirtualMachineManager.h" #include "LifeCycleManager.h" @@ -45,7 +47,8 @@ protected: NebulaTest():mysql(false), need_host_pool(false), need_vm_pool(false), need_vnet_pool(false), need_image_pool(false), need_user_pool(false), need_template_pool(false), - need_group_pool(false), + need_group_pool(false), need_datastore_pool(false), + need_cluster_pool(false), need_vmm(false), need_im(false), need_tm(false), need_lcm(false), need_dm(false), @@ -67,6 +70,8 @@ public: bool need_user_pool; bool need_template_pool; bool need_group_pool; + bool need_datastore_pool; + bool need_cluster_pool; bool need_vmm; bool need_im; @@ -107,6 +112,10 @@ public: virtual GroupPool* create_gpool(SqlDB* db); + virtual DatastorePool* create_dspool(SqlDB* db); + + virtual ClusterPool* create_clpool(SqlDB* db); + // ------------------------------------------------------------------------ // Managers // ------------------------------------------------------------------------ diff --git a/install.sh b/install.sh index 74b1d6bdc8..69a3fc669e 100755 --- a/install.sh +++ b/install.sh @@ -99,7 +99,8 @@ if [ -z "$ROOT" ] ; then VAR_LOCATION="/var/lib/one" SUNSTONE_LOCATION="$LIB_LOCATION/sunstone" OZONES_LOCATION="$LIB_LOCATION/ozones" - IMAGES_LOCATION="$VAR_LOCATION/images" + 
SYSTEM_DS_LOCATION="$VAR_LOCATION/datastores/0" + DEFAULT_DS_LOCATION="$VAR_LOCATION/datastores/1" RUN_LOCATION="/var/run/one" LOCK_LOCATION="/var/lock/one" INCLUDE_LOCATION="/usr/include" @@ -130,7 +131,7 @@ if [ -z "$ROOT" ] ; then MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \ $INCLUDE_LOCATION $SHARE_LOCATION \ $LOG_LOCATION $RUN_LOCATION $LOCK_LOCATION \ - $IMAGES_LOCATION $MAN_LOCATION" + $SYSTEM_DS_LOCATION $DEFAULT_DS_LOCATION $MAN_LOCATION" DELETE_DIRS="$LIB_LOCATION $ETC_LOCATION $LOG_LOCATION $VAR_LOCATION \ $RUN_LOCATION $SHARE_DIRS" @@ -145,7 +146,8 @@ else VAR_LOCATION="$ROOT/var" SUNSTONE_LOCATION="$LIB_LOCATION/sunstone" OZONES_LOCATION="$LIB_LOCATION/ozones" - IMAGES_LOCATION="$VAR_LOCATION/images" + SYSTEM_DS_LOCATION="$VAR_LOCATION/datastores/0" + DEFAULT_DS_LOCATION="$VAR_LOCATION/datastores/1" INCLUDE_LOCATION="$ROOT/include" SHARE_LOCATION="$ROOT/share" MAN_LOCATION="$ROOT/share/man/man1" @@ -166,8 +168,8 @@ else DELETE_DIRS="$MAKE_DIRS" else MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \ - $INCLUDE_LOCATION $SHARE_LOCATION $IMAGES_LOCATION \ - $MAN_LOCATION $OZONES_LOCATION" + $INCLUDE_LOCATION $SHARE_LOCATION $SYSTEM_DS_LOCATION \ + $DEFAULT_DS_LOCATION $MAN_LOCATION $OZONES_LOCATION" DELETE_DIRS="$MAKE_DIRS" @@ -180,15 +182,11 @@ fi SHARE_DIRS="$SHARE_LOCATION/examples \ $SHARE_LOCATION/examples/tm" -ETC_DIRS="$ETC_LOCATION/image \ +ETC_DIRS="$ETC_LOCATION/datastore \ $ETC_LOCATION/im_ec2 \ $ETC_LOCATION/vmm_ec2 \ $ETC_LOCATION/vmm_exec \ - $ETC_LOCATION/tm_shared \ - $ETC_LOCATION/tm_ssh \ - $ETC_LOCATION/tm_dummy \ - $ETC_LOCATION/tm_vmware \ - $ETC_LOCATION/tm_lvm \ + $ETC_LOCATION/tm \ $ETC_LOCATION/hm \ $ETC_LOCATION/auth \ $ETC_LOCATION/auth/certificates \ @@ -205,12 +203,6 @@ LIB_DIRS="$LIB_LOCATION/ruby \ $LIB_LOCATION/ruby/cloud/occi \ $LIB_LOCATION/ruby/cloud/CloudAuth \ $LIB_LOCATION/ruby/onedb \ - $LIB_LOCATION/tm_commands \ - $LIB_LOCATION/tm_commands/shared \ - 
$LIB_LOCATION/tm_commands/ssh \ - $LIB_LOCATION/tm_commands/dummy \ - $LIB_LOCATION/tm_commands/lvm \ - $LIB_LOCATION/tm_commands/vmware \ $LIB_LOCATION/mads \ $LIB_LOCATION/sh \ $LIB_LOCATION/ruby/cli \ @@ -223,7 +215,10 @@ VAR_DIRS="$VAR_LOCATION/remotes \ $VAR_LOCATION/remotes/im/xen.d \ $VAR_LOCATION/remotes/im/vmware.d \ $VAR_LOCATION/remotes/im/ganglia.d \ + $VAR_LOCATION/remotes/vmm \ $VAR_LOCATION/remotes/vmm/kvm \ + $VAR_LOCATION/remotes/vmm/xen \ + $VAR_LOCATION/remotes/vmm/vmware \ $VAR_LOCATION/remotes/vnm \ $VAR_LOCATION/remotes/vnm/802.1Q \ $VAR_LOCATION/remotes/vnm/dummy \ @@ -231,12 +226,20 @@ VAR_DIRS="$VAR_LOCATION/remotes \ $VAR_LOCATION/remotes/vnm/fw \ $VAR_LOCATION/remotes/vnm/ovswitch \ $VAR_LOCATION/remotes/vnm/vmware \ - $VAR_LOCATION/remotes/vmm/xen \ - $VAR_LOCATION/remotes/vmm/vmware \ + $VAR_LOCATION/remotes/tm/ \ + $VAR_LOCATION/remotes/tm/dummy \ + $VAR_LOCATION/remotes/tm/lvm \ + $VAR_LOCATION/remotes/tm/shared \ + $VAR_LOCATION/remotes/tm/ssh \ + $VAR_LOCATION/remotes/tm/vmware \ + $VAR_LOCATION/remotes/tm/iscsi \ $VAR_LOCATION/remotes/hooks \ $VAR_LOCATION/remotes/hooks/ft \ - $VAR_LOCATION/remotes/image \ - $VAR_LOCATION/remotes/image/fs \ + $VAR_LOCATION/remotes/datastore \ + $VAR_LOCATION/remotes/datastore/dummy \ + $VAR_LOCATION/remotes/datastore/fs \ + $VAR_LOCATION/remotes/datastore/vmware \ + $VAR_LOCATION/remotes/datastore/iscsi \ $VAR_LOCATION/remotes/auth \ $VAR_LOCATION/remotes/auth/plain \ $VAR_LOCATION/remotes/auth/ssh \ @@ -381,12 +384,18 @@ INSTALL_FILES=( VMM_EXEC_KVM_SCRIPTS:$VAR_LOCATION/remotes/vmm/kvm VMM_EXEC_XEN_SCRIPTS:$VAR_LOCATION/remotes/vmm/xen VMM_EXEC_VMWARE_SCRIPTS:$VAR_LOCATION/remotes/vmm/vmware - SHARED_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/shared - SSH_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/ssh - VMWARE_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/vmware - DUMMY_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/dummy - LVM_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/lvm - 
IMAGE_DRIVER_FS_SCRIPTS:$VAR_LOCATION/remotes/image/fs + TM_FILES:$VAR_LOCATION/remotes/tm + TM_SHARED_FILES:$VAR_LOCATION/remotes/tm/shared + TM_SSH_FILES:$VAR_LOCATION/remotes/tm/ssh + TM_VMWARE_FILES:$VAR_LOCATION/remotes/tm/vmware + TM_ISCSI_FILES:$VAR_LOCATION/remotes/tm/iscsi + TM_DUMMY_FILES:$VAR_LOCATION/remotes/tm/dummy + TM_LVM_FILES:$VAR_LOCATION/remotes/tm/lvm + DATASTORE_DRIVER_COMMON_SCRIPTS:$VAR_LOCATION/remotes/datastore/ + DATASTORE_DRIVER_DUMMY_SCRIPTS:$VAR_LOCATION/remotes/datastore/dummy + DATASTORE_DRIVER_FS_SCRIPTS:$VAR_LOCATION/remotes/datastore/fs + DATASTORE_DRIVER_VMWARE_SCRIPTS:$VAR_LOCATION/remotes/datastore/vmware + DATASTORE_DRIVER_ISCSI_SCRIPTS:$VAR_LOCATION/remotes/datastore/iscsi NETWORK_FILES:$VAR_LOCATION/remotes/vnm NETWORK_8021Q_FILES:$VAR_LOCATION/remotes/vnm/802.1Q NETWORK_DUMMY_FILES:$VAR_LOCATION/remotes/vnm/dummy @@ -526,13 +535,8 @@ INSTALL_ETC_FILES=( VMWARE_ETC_FILES:$ETC_LOCATION VMM_EC2_ETC_FILES:$ETC_LOCATION/vmm_ec2 VMM_EXEC_ETC_FILES:$ETC_LOCATION/vmm_exec - IMAGE_DRIVER_FS_ETC_FILES:$ETC_LOCATION/image/ IM_EC2_ETC_FILES:$ETC_LOCATION/im_ec2 - TM_SHARED_ETC_FILES:$ETC_LOCATION/tm_shared - TM_SSH_ETC_FILES:$ETC_LOCATION/tm_ssh - TM_DUMMY_ETC_FILES:$ETC_LOCATION/tm_dummy - TM_LVM_ETC_FILES:$ETC_LOCATION/tm_lvm - TM_VMWARE_ETC_FILES:$ETC_LOCATION/tm_vmware + TM_LVM_ETC_FILES:$ETC_LOCATION/tm/ HM_ETC_FILES:$ETC_LOCATION/hm AUTH_ETC_FILES:$ETC_LOCATION/auth ECO_ETC_FILES:$ETC_LOCATION @@ -558,6 +562,8 @@ BIN_FILES="src/nebula/oned \ src/cli/onegroup \ src/cli/onetemplate \ src/cli/oneacl \ + src/cli/onedatastore \ + src/cli/onecluster \ src/onedb/onedb \ src/authm_mad/remotes/quota/onequota \ src/mad/utils/tty_expect \ @@ -585,7 +591,6 @@ RUBY_LIB_FILES="src/mad/ruby/ActionManager.rb \ src/vnm_mad/one_vnm.rb \ src/mad/ruby/Ganglia.rb \ src/oca/ruby/OpenNebula.rb \ - src/tm_mad/TMScript.rb \ src/authm_mad/remotes/ssh/ssh_auth.rb \ src/authm_mad/remotes/quota/quota.rb \ src/authm_mad/remotes/server_x509/server_x509_auth.rb 
\ @@ -606,7 +611,6 @@ MAD_RUBY_LIB_FILES="src/mad/ruby/scripts_common.rb" #------------------------------------------------------------------------------- MADS_LIB_FILES="src/mad/sh/madcommon.sh \ - src/tm_mad/tm_common.sh \ src/vmm_mad/exec/one_vmm_exec.rb \ src/vmm_mad/exec/one_vmm_exec \ src/vmm_mad/exec/one_vmm_sh \ @@ -629,8 +633,8 @@ MADS_LIB_FILES="src/mad/sh/madcommon.sh \ src/hm_mad/one_hm \ src/authm_mad/one_auth_mad.rb \ src/authm_mad/one_auth_mad \ - src/image_mad/one_image.rb \ - src/image_mad/one_image" + src/datastore_mad/one_datastore.rb \ + src/datastore_mad/one_datastore" #------------------------------------------------------------------------------- # VMM SH Driver KVM scripts, to be installed under $REMOTES_LOCATION/vmm/kvm @@ -754,60 +758,87 @@ NETWORK_VMWARE_FILES="src/vnm_mad/remotes/vmware/clean \ src/vnm_mad/remotes/vmware/pre \ src/vnm_mad/remotes/vmware/VMware.rb" - #------------------------------------------------------------------------------- # Transfer Manager commands, to be installed under $LIB_LOCATION/tm_commands -# - SHARED TM, $LIB_LOCATION/tm_commands/shared -# - SSH TM, $LIB_LOCATION/tm_commands/ssh -# - dummy TM, $LIB_LOCATION/tm_commands/dummy -# - LVM TM, $LIB_LOCATION/tm_commands/lvm +# - SHARED TM, $VAR_LOCATION/tm/shared +# - SSH TM, $VAR_LOCATION/tm/ssh +# - dummy TM, $VAR_LOCATION/tm/dummy +# - LVM TM, $VAR_LOCATION/tm/lvm #------------------------------------------------------------------------------- -SHARED_TM_COMMANDS_LIB_FILES="src/tm_mad/shared/tm_clone.sh \ - src/tm_mad/shared/tm_delete.sh \ - src/tm_mad/shared/tm_ln.sh \ - src/tm_mad/shared/tm_mkswap.sh \ - src/tm_mad/shared/tm_mkimage.sh \ - src/tm_mad/shared/tm_mv.sh \ - src/tm_mad/shared/tm_context.sh" +TM_FILES="src/tm_mad/tm_common.sh" -SSH_TM_COMMANDS_LIB_FILES="src/tm_mad/ssh/tm_clone.sh \ - src/tm_mad/ssh/tm_delete.sh \ - src/tm_mad/ssh/tm_ln.sh \ - src/tm_mad/ssh/tm_mkswap.sh \ - src/tm_mad/ssh/tm_mkimage.sh \ - src/tm_mad/ssh/tm_mv.sh \ - 
src/tm_mad/ssh/tm_context.sh" +TM_SHARED_FILES="src/tm_mad/shared/clone \ + src/tm_mad/shared/delete \ + src/tm_mad/shared/ln \ + src/tm_mad/shared/mkswap \ + src/tm_mad/shared/mkimage \ + src/tm_mad/shared/mv \ + src/tm_mad/shared/context \ + src/tm_mad/shared/mvds" -DUMMY_TM_COMMANDS_LIB_FILES="src/tm_mad/dummy/tm_dummy.sh" +TM_SSH_FILES="src/tm_mad/ssh/clone \ + src/tm_mad/ssh/delete \ + src/tm_mad/ssh/ln \ + src/tm_mad/ssh/mkswap \ + src/tm_mad/ssh/mkimage \ + src/tm_mad/ssh/mv \ + src/tm_mad/ssh/context \ + src/tm_mad/ssh/mvds" -LVM_TM_COMMANDS_LIB_FILES="src/tm_mad/lvm/tm_clone.sh \ - src/tm_mad/lvm/tm_delete.sh \ - src/tm_mad/lvm/tm_ln.sh \ - src/tm_mad/lvm/tm_mkswap.sh \ - src/tm_mad/lvm/tm_mkimage.sh \ - src/tm_mad/lvm/tm_mv.sh \ - src/tm_mad/lvm/tm_context.sh" +TM_DUMMY_FILES="src/tm_mad/dummy/clone \ + src/tm_mad/dummy/delete \ + src/tm_mad/dummy/ln \ + src/tm_mad/dummy/mkswap \ + src/tm_mad/dummy/mkimage \ + src/tm_mad/dummy/mv \ + src/tm_mad/dummy/context \ + src/tm_mad/dummy/mvds" -VMWARE_TM_COMMANDS_LIB_FILES="src/tm_mad/vmware/tm_clone.sh \ - src/tm_mad/vmware/tm_ln.sh \ - src/tm_mad/vmware/tm_mv.sh \ - src/tm_mad/vmware/functions.sh \ - src/tm_mad/vmware/tm_context.sh" +TM_LVM_FILES="src/tm_mad/lvm/clone \ + src/tm_mad/lvm/delete \ + src/tm_mad/lvm/ln \ + src/tm_mad/lvm/mkswap \ + src/tm_mad/lvm/mkimage \ + src/tm_mad/lvm/mv \ + src/tm_mad/lvm/context" +TM_VMWARE_FILES="src/tm_mad/vmware/clone \ + src/tm_mad/vmware/ln \ + src/tm_mad/vmware/mv \ + src/tm_mad/vmware/functions.sh \ + src/tm_mad/vmware/context" + +TM_ISCSI_FILES="src/tm_mad/iscsi/clone \ + src/tm_mad/iscsi/ln \ + src/tm_mad/iscsi/mv \ + src/tm_mad/iscsi/mvds \ + src/tm_mad/iscsi/delete" #------------------------------------------------------------------------------- -# Image Repository drivers, to be installed under $REMOTES_LOCATION/image -# - FS based Image Repository, $REMOTES_LOCATION/image/fs +# Datastore drivers, to be installed under $REMOTES_LOCATION/datastore +# - FS based 
Image Repository, $REMOTES_LOCATION/datastore/fs +# - VMware based Image Repository, $REMOTES_LOCATION/datastore/vmware #------------------------------------------------------------------------------- -IMAGE_DRIVER_FS_ETC_FILES="src/image_mad/remotes/fs/fs.conf" +DATASTORE_DRIVER_COMMON_SCRIPTS="src/datastore_mad/remotes/xpath.rb \ + src/datastore_mad/remotes/libfs.sh" -IMAGE_DRIVER_FS_SCRIPTS="src/image_mad/remotes/fs/cp \ - src/image_mad/remotes/fs/mkfs \ - src/image_mad/remotes/fs/mv \ - src/image_mad/remotes/fs/fsrc \ - src/image_mad/remotes/fs/rm" +DATASTORE_DRIVER_DUMMY_SCRIPTS="src/datastore_mad/remotes/dummy/cp \ + src/datastore_mad/remotes/dummy/mkfs \ + src/datastore_mad/remotes/dummy/rm" +DATASTORE_DRIVER_FS_SCRIPTS="src/datastore_mad/remotes/fs/cp \ + src/datastore_mad/remotes/fs/mkfs \ + src/datastore_mad/remotes/fs/rm" + +DATASTORE_DRIVER_VMWARE_SCRIPTS="src/datastore_mad/remotes/vmware/cp \ + src/datastore_mad/remotes/vmware/mkfs \ + src/datastore_mad/remotes/vmware/rm" + +DATASTORE_DRIVER_ISCSI_SCRIPTS="src/datastore_mad/remotes/iscsi/cp \ + src/datastore_mad/remotes/iscsi/mkfs \ + src/datastore_mad/remotes/iscsi/rm \ + src/datastore_mad/remotes/iscsi/iscsi.conf" #------------------------------------------------------------------------------- # Migration scripts for onedb command, to be installed under $LIB_LOCATION @@ -821,6 +852,7 @@ ONEDB_MIGRATOR_FILES="src/onedb/2.0_to_2.9.80.rb \ src/onedb/3.1.80_to_3.2.0.rb \ src/onedb/3.2.0_to_3.2.1.rb \ src/onedb/3.2.1_to_3.3.0.rb \ + src/onedb/3.3.0_to_3.3.80.rb \ src/onedb/onedb.rb \ src/onedb/onedb_backend.rb" @@ -865,19 +897,7 @@ IM_EC2_ETC_FILES="src/im_mad/ec2/im_ec2rc \ # - lvm, $ETC_LOCATION/tm_lvm #------------------------------------------------------------------------------- -TM_SHARED_ETC_FILES="src/tm_mad/shared/tm_shared.conf \ - src/tm_mad/shared/tm_sharedrc" - -TM_SSH_ETC_FILES="src/tm_mad/ssh/tm_ssh.conf \ - src/tm_mad/ssh/tm_sshrc" - -TM_DUMMY_ETC_FILES="src/tm_mad/dummy/tm_dummy.conf \ - 
src/tm_mad/dummy/tm_dummyrc" - -TM_LVM_ETC_FILES="src/tm_mad/lvm/tm_lvm.conf \ - src/tm_mad/lvm/tm_lvmrc" - -TM_VMWARE_ETC_FILES="src/tm_mad/vmware/tm_vmware.conf" +TM_LVM_ETC_FILES="src/tm_mad/lvm/lvm.conf" #------------------------------------------------------------------------------- # Hook Manager driver config. files, to be installed under $ETC_LOCATION/hm @@ -948,6 +968,10 @@ RUBY_OPENNEBULA_LIB_FILES="src/oca/ruby/OpenNebula/Host.rb \ src/oca/ruby/OpenNebula/GroupPool.rb \ src/oca/ruby/OpenNebula/Acl.rb \ src/oca/ruby/OpenNebula/AclPool.rb \ + src/oca/ruby/OpenNebula/Datastore.rb \ + src/oca/ruby/OpenNebula/DatastorePool.rb \ + src/oca/ruby/OpenNebula/Cluster.rb \ + src/oca/ruby/OpenNebula/ClusterPool.rb \ src/oca/ruby/OpenNebula/XMLUtils.rb" #------------------------------------------------------------------------------- @@ -1053,7 +1077,9 @@ ONE_CLI_LIB_FILES="src/cli/one_helper/onegroup_helper.rb \ src/cli/one_helper/oneuser_helper.rb \ src/cli/one_helper/onevm_helper.rb \ src/cli/one_helper/onevnet_helper.rb \ - src/cli/one_helper/oneacl_helper.rb" + src/cli/one_helper/oneacl_helper.rb \ + src/cli/one_helper/onedatastore_helper.rb \ + src/cli/one_helper/onecluster_helper.rb" CLI_BIN_FILES="src/cli/onevm \ src/cli/onehost \ @@ -1062,7 +1088,9 @@ CLI_BIN_FILES="src/cli/onevm \ src/cli/oneimage \ src/cli/onetemplate \ src/cli/onegroup \ - src/cli/oneacl" + src/cli/oneacl \ + src/cli/onedatastore \ + src/cli/onecluster" CLI_CONF_FILES="src/cli/etc/onegroup.yaml \ src/cli/etc/onehost.yaml \ @@ -1071,7 +1099,9 @@ CLI_CONF_FILES="src/cli/etc/onegroup.yaml \ src/cli/etc/oneuser.yaml \ src/cli/etc/onevm.yaml \ src/cli/etc/onevnet.yaml \ - src/cli/etc/oneacl.yaml" + src/cli/etc/oneacl.yaml \ + src/cli/etc/onedatastore.yaml \ + src/cli/etc/onecluster.yaml" ETC_CLIENT_FILES="src/cli/etc/group.default" @@ -1100,6 +1130,8 @@ SUNSTONE_MODELS_JSON_FILES="src/sunstone/models/OpenNebulaJSON/HostJSON.rb \ src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb \ 
src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb \ src/sunstone/models/OpenNebulaJSON/AclJSON.rb \ + src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb \ + src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb \ src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb" SUNSTONE_TEMPLATE_FILES="src/sunstone/templates/login.html \ @@ -1118,6 +1150,11 @@ SUNSTONE_PUBLIC_JS_PLUGINS_FILES="\ src/sunstone/public/js/plugins/dashboard-tab.js \ src/sunstone/public/js/plugins/dashboard-users-tab.js \ src/sunstone/public/js/plugins/hosts-tab.js \ + src/sunstone/public/js/plugins/clusters-tab.js \ + src/sunstone/public/js/plugins/datastores-tab.js \ + src/sunstone/public/js/plugins/system-tab.js \ + src/sunstone/public/js/plugins/vresources-tab.js \ + src/sunstone/public/js/plugins/infra-tab.js \ src/sunstone/public/js/plugins/groups-tab.js \ src/sunstone/public/js/plugins/images-tab.js \ src/sunstone/public/js/plugins/templates-tab.js \ @@ -1404,6 +1441,8 @@ MAN_FILES="share/man/oneauth.1.gz \ share/man/onetemplate.1.gz \ share/man/onegroup.1.gz \ share/man/onedb.1.gz \ + share/man/onedatastore.1.gz \ + share/man/onecluster.1.gz \ share/man/econe-describe-images.1.gz \ share/man/econe-describe-instances.1.gz \ share/man/econe-register.1.gz \ @@ -1430,7 +1469,7 @@ if [ "$UNINSTALL" = "no" ] ; then done # Remove old migrators - rm $LIB_LOCATION/ruby/onedb/*.rb + rm $LIB_LOCATION/ruby/onedb/*.rb &> /dev/null fi # --- Install/Uninstall files --- @@ -1504,12 +1543,6 @@ if [ "$UNINSTALL" = "no" ] ; then for d in $CHOWN_DIRS; do chown -R $ONEADMIN_USER:$ONEADMIN_GROUP $DESTDIR$d done - - # --- Set correct permissions for Image Repository --- - - if [ -d "$DESTDIR$IMAGES_LOCATION" ]; then - chmod 3770 $DESTDIR$IMAGES_LOCATION - fi else for d in `echo $DELETE_DIRS | awk '{for (i=NF;i>=1;i--) printf $i" "}'`; do rmdir $d diff --git a/share/etc/oned.conf b/share/etc/oned.conf index 88fd8c0b29..ba4cb5cba7 100644 --- a/share/etc/oned.conf +++ b/share/etc/oned.conf @@ -16,11 +16,6 @@ # (use 
0 to disable VM monitoring). # VM_PER_INTERVAL: Number of VMs monitored in each interval. # -# VM_DIR: Remote path to store the VM images, it should be shared between all -# the cluster nodes to perform live migrations. This variable is the default -# for all the hosts in the cluster. VM_DIR IS ONLY FOR THE NODES AND *NOT* THE -# FRONT-END -# # SCRIPTS_REMOTE_DIR: Remote path to store the monitoring and VM management # scripts. # @@ -49,7 +44,6 @@ HOST_MONITORING_INTERVAL = 600 VM_POLLING_INTERVAL = 600 #VM_PER_INTERVAL = 5 -#VM_DIR=/srv/cloud/one/var SCRIPTS_REMOTE_DIR=/var/tmp/one @@ -84,8 +78,13 @@ NETWORK_SIZE = 254 MAC_PREFIX = "02:00" #******************************************************************************* -# Image Repository Configuration +# DataStore Configuration #******************************************************************************* +# DATASTORE_LOCATION: Path for Datastores in the hosts. It IS the same for all +# the hosts in the cluster. DATASTORE_LOCATION IS ONLY FOR THE HOSTS AND *NOT* +# THE FRONT-END. It defaults to /var/lib/one/datastores (or +# $ONE_LOCATION/var/datastores in self-contained mode) +# # DEFAULT_IMAGE_TYPE: This can take values # OS Image file holding an operating system # CDROM Image file holding a CDROM @@ -97,6 +96,9 @@ MAC_PREFIX = "02:00" # xvd XEN Virtual Disk # vd KVM virtual disk #******************************************************************************* + +#DATASTORE_LOCATION = /var/lib/one/datastores + DEFAULT_IMAGE_TYPE = "OS" DEFAULT_DEVICE_PREFIX = "hd" @@ -271,75 +273,33 @@ VM_MAD = [ # executable: path of the transfer driver executable, can be an # absolute path or relative to $ONE_LOCATION/lib/mads (or # /usr/lib/one/mads/ if OpenNebula was installed in /) -# -# arguments : for the driver executable, usually a commands configuration file -# , can be an absolute path or relative to $ONE_LOCATION/etc (or -# /etc/one/ if OpenNebula was installed in /) +# arguments : +# -t: number of threads, i.e. 
number of transfers made at the same time +# -d: list of transfer drivers separated by commas, if not defined all the +# drivers available will be enabled #******************************************************************************* -#------------------------------------------------------------------------------- -# SHARED Transfer Manager Driver Configuration -#------------------------------------------------------------------------------- TM_MAD = [ - name = "tm_shared", executable = "one_tm", - arguments = "tm_shared/tm_shared.conf" ] -#------------------------------------------------------------------------------- - -#------------------------------------------------------------------------------- -# SSH Transfer Manager Driver Configuration -#------------------------------------------------------------------------------- -#TM_MAD = [ -# name = "tm_ssh", -# executable = "one_tm", -# arguments = "tm_ssh/tm_ssh.conf" ] -#------------------------------------------------------------------------------- - -#------------------------------------------------------------------------------- -# Dummy Transfer Manager Driver Configuration -#------------------------------------------------------------------------------- -#TM_MAD = [ -# name = "tm_dummy", -# executable = "one_tm", -# arguments = "tm_dummy/tm_dummy.conf" ] -#------------------------------------------------------------------------------- - -#------------------------------------------------------------------------------- -# LVM Transfer Manager Driver Configuration -#------------------------------------------------------------------------------- -#TM_MAD = [ -# name = "tm_lvm", -# executable = "one_tm", -# arguments = "tm_lvm/tm_lvm.conf" ] -#------------------------------------------------------------------------------- - -#------------------------------------------------------------------------------- -# VMware DataStore Transfer Manager Driver Configuration 
-#------------------------------------------------------------------------------- -#TM_MAD = [ -# name = "tm_vmware", -# executable = "one_tm", -# arguments = "tm_vmware/tm_vmware.conf" ] -#------------------------------------------------------------------------------- + arguments = "-t 15 -d dummy,lvm,shared,ssh,vmware,iscsi" ] #******************************************************************************* -# Image Manager Driver Configuration +# Datastore Driver Configuration #******************************************************************************* -# Drivers to manage the image repository, specialized for the storage backend +# Drivers to manage the datastores, specialized for the storage backend # executable: path of the transfer driver executable, can be an # absolute path or relative to $ONE_LOCATION/lib/mads (or # /usr/lib/one/mads/ if OpenNebula was installed in /) # # arguments : for the driver executable +# -t number of threads, i.e. number of repo operations at the same time +# -d datastore mads separated by commas #******************************************************************************* -#------------------------------------------------------------------------------- -# FS based Image Manager Driver Configuration -# -t number of threads, i.e. 
number of repo operations at the same time -#------------------------------------------------------------------------------- -IMAGE_MAD = [ - executable = "one_image", - arguments = "fs -t 15" ] -#------------------------------------------------------------------------------- + +DATASTORE_MAD = [ + executable = "one_datastore", + arguments = "-t 15 -d fs,vmware,iscsi" +] #******************************************************************************* # Hook Manager Configuration @@ -376,7 +336,6 @@ IMAGE_MAD = [ # allocated # - NO, The hook is executed in the OpenNebula server (default) # -# # Host Hooks (HOST_HOOK) defined by: # name : for the hook, useful to track the hook (OPTIONAL) # on : when the hook should be executed, @@ -395,10 +354,8 @@ IMAGE_MAD = [ # - YES, The hook is executed in the host # - NO, The hook is executed in the OpenNebula server (default) #------------------------------------------------------------------------------- - HM_MAD = [ executable = "one_hm" ] - #------------------------------------------------------------------------------- #******************************************************************************* diff --git a/src/acct/watch_helper.rb b/src/acct/watch_helper.rb index 218b992369..925b0b18c3 100644 --- a/src/acct/watch_helper.rb +++ b/src/acct/watch_helper.rb @@ -151,7 +151,7 @@ module WatchHelper String :name String :im_mad String :vm_mad - String :tm_mad + String :vn_mad end DB.create_table? 
:vm_timestamps do @@ -422,7 +422,7 @@ module WatchHelper h.name = host['NAME'] h.im_mad = host['IM_MAD'] h.vm_mad = host['VM_MAD'] - h.tm_mad = host['TM_MAD'] + h.vn_mad = host['VN_MAD'] } end diff --git a/src/acl/AclRule.cc b/src/acl/AclRule.cc index dc7381f072..e65a1071e2 100644 --- a/src/acl/AclRule.cc +++ b/src/acl/AclRule.cc @@ -166,7 +166,7 @@ bool AclRule::malformed(string& error_str) const oss << "when using the ALL bit, [resource] ID must be 0"; } - if ( (resource & 0xFF000000000LL) == 0 ) + if ( (resource & 0xFFF000000000LL) == 0 ) { if ( error ) { @@ -177,7 +177,7 @@ bool AclRule::malformed(string& error_str) const oss << "[resource] type is missing"; } - if ( (resource & 0xFFFFF00000000000LL) != 0 ) + if ( (resource & 0xFFFF000000000000LL) != 0 ) { if ( error ) { @@ -253,12 +253,14 @@ void AclRule::build_str() PoolObjectSQL::IMAGE, PoolObjectSQL::USER, PoolObjectSQL::TEMPLATE, - PoolObjectSQL::GROUP + PoolObjectSQL::GROUP, + PoolObjectSQL::DATASTORE, + PoolObjectSQL::CLUSTER }; bool prefix = false; - for ( int i = 0; i < 7; i++ ) + for ( int i = 0; i < 9; i++ ) { if ( (resource & objects[i]) != 0 ) { diff --git a/src/authm/AuthManager.cc b/src/authm/AuthManager.cc index 348782c68a..6201ba33e2 100644 --- a/src/authm/AuthManager.cc +++ b/src/authm/AuthManager.cc @@ -429,7 +429,7 @@ void AuthManager::notify_request(int auth_id,bool result,const string& message) void AuthManager::load_mads(int uid) { ostringstream oss; - const VectorAttribute * vattr; + const VectorAttribute * vattr = 0; int rc; string name; AuthManagerDriver * authm_driver = 0; @@ -438,7 +438,10 @@ void AuthManager::load_mads(int uid) NebulaLog::log("AuM",Log::INFO,oss); - vattr = static_cast(mad_conf[0]); + if ( mad_conf.size() > 0 ) + { + vattr = static_cast(mad_conf[0]); + } if ( vattr == 0 ) { diff --git a/src/authm/test/SConstruct b/src/authm/test/SConstruct index 390ef37324..0e5538fcc1 100644 --- a/src/authm/test/SConstruct +++ b/src/authm/test/SConstruct @@ -22,6 +22,7 @@ 
env.Prepend(LIBS=[ 'nebula_im', 'nebula_hm', 'nebula_rm', + 'nebula_datastore', 'nebula_dm', 'nebula_tm', 'nebula_um', @@ -32,6 +33,7 @@ env.Prepend(LIBS=[ 'nebula_template', 'nebula_image', 'nebula_pool', + 'nebula_cluster', 'nebula_host', 'nebula_vnm', 'nebula_vm', diff --git a/src/cli/etc/oneacl.yaml b/src/cli/etc/oneacl.yaml index 4d1e7b66f9..220671a5fd 100644 --- a/src/cli/etc/oneacl.yaml +++ b/src/cli/etc/oneacl.yaml @@ -9,9 +9,9 @@ :size: 8 :right: true -:RES_VHNIUTG: +:RES_VHNIUTGDC: :desc: Which resource the rule applies to - :size: 11 + :size: 13 :RID: :desc: Resource ID @@ -26,6 +26,6 @@ :default: - :ID - :USER -- :RES_VHNIUTG +- :RES_VHNIUTGDC - :RID - :OPE_UMAC diff --git a/src/cli/etc/onecluster.yaml b/src/cli/etc/onecluster.yaml new file mode 100644 index 0000000000..f3d4949e85 --- /dev/null +++ b/src/cli/etc/onecluster.yaml @@ -0,0 +1,31 @@ +--- +:ID: + :desc: ONE identifier for the Cluster + :size: 4 + +:NAME: + :desc: Name of the Cluster + :size: 15 + :left: true + +:HOSTS: + :desc: Number of Hosts + :size: 5 + :left: true + +:NETS: + :desc: Number of Networks + :size: 5 + :left: true + +:DATASTORES: + :desc: Number of Datastores + :size: 10 + :left: true + +:default: +- :ID +- :NAME +- :HOSTS +- :NETS +- :DATASTORES diff --git a/src/cli/etc/onedatastore.yaml b/src/cli/etc/onedatastore.yaml new file mode 100644 index 0000000000..7b9ffe5237 --- /dev/null +++ b/src/cli/etc/onedatastore.yaml @@ -0,0 +1,38 @@ +--- +:ID: + :desc: ONE identifier for the Datastore + :size: 4 + +:NAME: + :desc: Name of the Datastore + :size: 15 + :left: true + +:CLUSTER: + :desc: Name of the Cluster + :size: 8 + :left: true + +:IMAGES: + :desc: Number of Images + :size: 6 + :left: true + +:TYPE: + :desc: Datastore driver + :size: 6 + :left: true + +:TM: + :desc: Transfer driver + :size: 6 + :left: true + +:default: +- :ID +- :NAME +- :CLUSTER +- :IMAGES +- :TYPE +- :TM + diff --git a/src/cli/etc/onehost.yaml b/src/cli/etc/onehost.yaml index 8a8c781f9b..dcb95c9cda 100644 
--- a/src/cli/etc/onehost.yaml +++ b/src/cli/etc/onehost.yaml @@ -5,7 +5,12 @@ :NAME: :desc: Name of the Host - :size: 15 + :size: 12 + :left: true + +:CLUSTER: + :desc: Name of the Cluster + :size: 8 :left: true :RVM: @@ -38,11 +43,12 @@ :STAT: :desc: Host status - :size: 6 + :size: 4 :default: - :ID - :NAME +- :CLUSTER - :RVM - :TCPU - :FCPU diff --git a/src/cli/etc/oneimage.yaml b/src/cli/etc/oneimage.yaml index c8ddff7956..235d7e8ca9 100644 --- a/src/cli/etc/oneimage.yaml +++ b/src/cli/etc/oneimage.yaml @@ -3,11 +3,6 @@ :desc: ONE identifier for the Image :size: 4 -:NAME: - :desc: Name of the Image - :size: 12 - :left: true - :USER: :desc: Username of the Virtual Machine owner :size: 8 @@ -18,6 +13,16 @@ :size: 8 :left: true +:NAME: + :desc: Name of the Image + :size: 12 + :left: true + +:DATASTORE: + :desc: Name of the Datastore + :size: 10 + :left: true + :SIZE: :desc: Size of the Image :size: 7 @@ -47,9 +52,9 @@ - :USER - :GROUP - :NAME +- :DATASTORE - :SIZE - :TYPE -- :REGTIME - :PERSISTENT - :STAT - :RVMS diff --git a/src/cli/etc/onevnet.yaml b/src/cli/etc/onevnet.yaml index 12820657ad..36d07930ca 100644 --- a/src/cli/etc/onevnet.yaml +++ b/src/cli/etc/onevnet.yaml @@ -3,11 +3,6 @@ :desc: ONE identifier for Virtual Network :size: 4 -:NAME: - :desc: Name of the Virtual Network - :size: 15 - :left: true - :USER: :desc: Username of the Virtual Network owner :size: 8 @@ -18,6 +13,16 @@ :size: 8 :left: true +:NAME: + :desc: Name of the Virtual Network + :size: 15 + :left: true + +:CLUSTER: + :desc: Name of the Cluster + :size: 8 + :left: true + :TYPE: :desc: Type of Virtual Network :size: 6 @@ -39,6 +44,7 @@ - :USER - :GROUP - :NAME +- :CLUSTER - :TYPE - :BRIDGE - :LEASES diff --git a/src/cli/one_helper.rb b/src/cli/one_helper.rb index dcb3067ff5..50cc513fc1 100644 --- a/src/cli/one_helper.rb +++ b/src/cli/one_helper.rb @@ -178,7 +178,7 @@ EOT # Formatters for arguments ######################################################################## def to_id(name) - 
return 0, name if name.match(/^[0123456789]+$/) + return 0, name.to_i if name.match(/^[0123456789]+$/) rc = get_pool return rc if rc.first != 0 @@ -202,7 +202,7 @@ EOT result = names.split(',').collect { |name| if name.match(/^[0123456789]+$/) - name + name.to_i else rc = OneHelper.name_to_id(name, pool, poolname) @@ -288,11 +288,11 @@ EOT def pool_to_array(pool) if !pool.instance_of?(Hash) - phash = pool.to_hash + phash = pool.to_hash else phash = pool end - + rname = self.class.rname if phash["#{rname}_POOL"] && @@ -329,9 +329,15 @@ EOT client = OpenNebula::Client.new pool = case poolname - when "HOST" then OpenNebula::HostPool.new(client) - when "GROUP" then OpenNebula::GroupPool.new(client) - when "USER" then OpenNebula::UserPool.new(client) + when "HOST" then OpenNebula::HostPool.new(client) + when "GROUP" then OpenNebula::GroupPool.new(client) + when "USER" then OpenNebula::UserPool.new(client) + when "DATASTORE" then OpenNebula::DatastorePool.new(client) + when "CLUSTER" then OpenNebula::ClusterPool.new(client) + when "VNET" then OpenNebula::VirtualNetworkPool.new(client) + when "IMAGE" then OpenNebula::ImagePool.new(client) + when "VMTEMPLATE" then OpenNebula::TemplatePool.new(client) + when "VM" then OpenNebula::VirtualMachinePool.new(client) end rc = pool.info @@ -393,6 +399,18 @@ EOT end end + # If the cluster name is empty, returns a '-' char. + # + # @param str [String || Hash] Cluster name, or empty Hash (when ) + # @return [String] the same Cluster name, or '-' if it is empty + def OpenNebulaHelper.cluster_str(str) + if str != nil && !str.empty? 
+ str + else + "-" + end + end + def OpenNebulaHelper.update_template(id, resource) require 'tempfile' @@ -402,7 +420,7 @@ EOT rc = resource.info if OpenNebula.is_error?(rc) - puts rc.message + puts rc.message exit -1 end diff --git a/src/cli/one_helper/oneacl_helper.rb b/src/cli/one_helper/oneacl_helper.rb index 553c6656de..fbd4030c1c 100644 --- a/src/cli/one_helper/oneacl_helper.rb +++ b/src/cli/one_helper/oneacl_helper.rb @@ -44,7 +44,7 @@ private def self.resource_mask(str) resource_type=str.split("/")[0] - mask = "-------" + mask = "---------" resource_type.split("+").each{|type| case type @@ -62,6 +62,10 @@ private mask[5] = "T" when "GROUP" mask[6] = "G" + when "DATASTORE" + mask[7] = "D" + when "CLUSTER" + mask[8] = "C" end } mask @@ -101,8 +105,8 @@ private d['STRING'].split(" ")[0] end - column :RES_VHNIUTG, "Resource to which the rule applies", - :size => 11 do |d| + column :RES_VHNIUTGDC, "Resource to which the rule applies", + :size => 13 do |d| OneAclHelper::resource_mask d['STRING'].split(" ")[1] end @@ -115,7 +119,7 @@ private OneAclHelper::right_mask d['STRING'].split(" ")[2] end - default :ID, :USER, :RES_VHNIUTG, :RID, :OPE_UMAC + default :ID, :USER, :RES_VHNIUTGDC, :RID, :OPE_UMAC end table diff --git a/src/cli/one_helper/onecluster_helper.rb b/src/cli/one_helper/onecluster_helper.rb new file mode 100644 index 0000000000..40024d5b53 --- /dev/null +++ b/src/cli/one_helper/onecluster_helper.rb @@ -0,0 +1,119 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +require 'one_helper' + +class OneClusterHelper < OpenNebulaHelper::OneHelper + + CLUSTER = { + :name => "cluster", + :short => "-c id|name", + :large => "--cluster id|name" , + :description => "Selects the cluster", + :format => String, + :proc => lambda { |o, options| + ch = OneClusterHelper.new + rc, cid = ch.to_id(o) + if rc == 0 + options[:cluster] = cid + else + puts cid + puts "option cluster: Parsing error" + exit -1 + end + } + } + + def self.rname + "CLUSTER" + end + + def self.conf_file + "onecluster.yaml" + end + + def format_pool(options) + config_file = self.class.table_conf + + table = CLIHelper::ShowTable.new(config_file, self) do + column :ID, "ONE identifier for the Cluster", :size=>4 do |d| + d["ID"] + end + + column :NAME, "Name of the Cluster", :left, :size=>15 do |d| + d["NAME"] + end + + column :HOSTS, "Number of Hosts", :left, :size=>5 do |d| + d["HOSTS"].size + end + + column :NETS, "Number of Networks", :left, :size=>5 do |d| + d["HOSTS"].size + end + + column :DATASTORES, "Number of Datastores", :left, :size=>10 do |d| + d["DATASTORES"].size + end + + default :ID, :NAME, :HOSTS, :NETS, :DATASTORES + end + + table + end + + private + + def factory(id=nil) + if id + OpenNebula::Cluster.new_with_id(id, @client) + else + xml=OpenNebula::Cluster.build_xml + OpenNebula::Cluster.new(xml, @client) + end + end + + def factory_pool(user_flag=-2) + OpenNebula::ClusterPool.new(@client) + end + + def format_resource(cluster) + str="%-15s: 
%-20s" + str_h1="%-80s" + + CLIHelper.print_header(str_h1 % "CLUSTER #{cluster['ID']} INFORMATION") + puts str % ["ID", cluster.id.to_s] + puts str % ["NAME", cluster.name] + puts + + CLIHelper.print_header("%-15s" % ["HOSTS"]) + cluster.host_ids.each do |id| + puts "%-15s" % [id] + end + + puts + CLIHelper.print_header("%-15s" % ["VNETS"]) + cluster.vnet_ids.each do |id| + puts "%-15s" % [id] + end + + puts + CLIHelper.print_header("%-15s" % ["DATASTORES"]) + cluster.datastore_ids.each do |id| + puts "%-15s" % [id] + end + end +end diff --git a/src/cli/one_helper/onedatastore_helper.rb b/src/cli/one_helper/onedatastore_helper.rb new file mode 100644 index 0000000000..5e4c4eecfb --- /dev/null +++ b/src/cli/one_helper/onedatastore_helper.rb @@ -0,0 +1,139 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +require 'one_helper' + +class OneDatastoreHelper < OpenNebulaHelper::OneHelper + DATASTORE = { + :name => "datastore", + :short => "-d id|name", + :large => "--datastore id|name" , + :description => "Selects the datastore", + :format => String, + :proc => lambda { |o, options| + ch = OneDatastoreHelper.new + rc, dsid = ch.to_id(o) + if rc == 0 + options[:datastore] = dsid + else + puts dsid + puts "option datastore: Parsing error" + exit -1 + end + } + } + + def self.rname + "DATASTORE" + end + + def self.conf_file + "onedatastore.yaml" + end + + def format_pool(options) + config_file = self.class.table_conf + + table = CLIHelper::ShowTable.new(config_file, self) do + column :ID, "ONE identifier for the Datastore", :size=>4 do |d| + d["ID"] + end + + column :NAME, "Name of the Datastore", :left, :size=>12 do |d| + d["NAME"] + end + + column :CLUSTER, "Name of the Cluster", :left, :size=>8 do |d| + OpenNebulaHelper.cluster_str(d["CLUSTER"]) + end + + column :IMAGES, "Number of Images", :left, :size=>6 do |d| + if d["IMAGES"]["ID"].nil? 
+ "0" + else + d["IMAGES"]["ID"].size + end + end + + column :TYPE, "Datastore driver", :left, :size=>6 do |d| + d["DS_MAD"] + end + + column :TM, "Transfer driver", :left, :size=>6 do |d| + d["TM_MAD"] + end + + default :ID, :CLUSTER, :NAME, :IMAGES, :TYPE, :TM_MAD + end + + table + end + + private + + def factory(id=nil) + if id + OpenNebula::Datastore.new_with_id(id, @client) + else + xml=OpenNebula::Datastore.build_xml + OpenNebula::Datastore.new(xml, @client) + end + end + + def factory_pool(user_flag=-2) + #TBD OpenNebula::UserPool.new(@client, user_flag) + OpenNebula::DatastorePool.new(@client) + end + + def format_resource(datastore) + str="%-15s: %-20s" + str_h1="%-80s" + + CLIHelper.print_header(str_h1 % "DATASTORE #{datastore['ID']} INFORMATION") + puts str % ["ID", datastore.id.to_s] + puts str % ["NAME", datastore.name] + puts str % ["USER", datastore['UNAME']] + puts str % ["GROUP", datastore['GNAME']] + puts str % ["CLUSTER", OpenNebulaHelper.cluster_str(datastore['CLUSTER'])] + + puts str % ["DS_MAD", datastore['DS_MAD']] + puts str % ["TM_MAD", datastore['TM_MAD']] + puts str % ["BASE PATH",datastore['BASE_PATH']] + puts + + CLIHelper.print_header(str_h1 % "PERMISSIONS",false) + + ["OWNER", "GROUP", "OTHER"].each { |e| + mask = "---" + mask[0] = "u" if datastore["PERMISSIONS/#{e}_U"] == "1" + mask[1] = "m" if datastore["PERMISSIONS/#{e}_M"] == "1" + mask[2] = "a" if datastore["PERMISSIONS/#{e}_A"] == "1" + + puts str % [e, mask] + } + puts + + CLIHelper.print_header(str_h1 % "DATASTORE TEMPLATE", false) + puts datastore.template_str + + puts + + CLIHelper.print_header("%-15s" % "IMAGES") + datastore.img_ids.each do |id| + puts "%-15s" % [id] + end + end +end diff --git a/src/cli/one_helper/onehost_helper.rb b/src/cli/one_helper/onehost_helper.rb index b50a02c617..1abdb32b4f 100644 --- a/src/cli/one_helper/onehost_helper.rb +++ b/src/cli/one_helper/onehost_helper.rb @@ -39,10 +39,14 @@ class OneHostHelper < OpenNebulaHelper::OneHelper d["ID"] end - 
column :NAME, "Name of the Host", :left, :size=>15 do |d| + column :NAME, "Name of the Host", :left, :size=>12 do |d| d["NAME"] end + column :CLUSTER, "Name of the Cluster", :left, :size=>8 do |d| + OpenNebulaHelper.cluster_str(d["CLUSTER"]) + end + column :RVM, "Number of Virtual Machines running", :size=>6 do |d| d["HOST_SHARE"]["RUNNING_VMS"] end @@ -82,11 +86,11 @@ class OneHostHelper < OpenNebulaHelper::OneHelper OpenNebulaHelper.unit_to_str(acpu,options) end - column :STAT, "Host status", :size=>6 do |d| + column :STAT, "Host status", :size=>4 do |d| OneHostHelper.state_to_str(d["STATE"]) end - default :ID, :NAME, :RVM, :TCPU, :FCPU, :ACPU, :TMEM, :FMEM, + default :ID, :NAME, :CLUSTER, :RVM, :TCPU, :FCPU, :ACPU, :TMEM, :FMEM, :AMEM, :STAT end @@ -118,11 +122,11 @@ class OneHostHelper < OpenNebulaHelper::OneHelper puts str % ["ID", host.id.to_s] puts str % ["NAME", host.name] + puts str % ["CLUSTER", OpenNebulaHelper.cluster_str(host['CLUSTER'])] puts str % ["STATE", host.state_str] puts str % ["IM_MAD", host['IM_MAD']] puts str % ["VM_MAD", host['VM_MAD']] puts str % ["VN_MAD", host['VN_MAD']] - puts str % ["TM_MAD", host['TM_MAD']] puts str % ["LAST MONITORING TIME", host['LAST_MON_TIME']] puts diff --git a/src/cli/one_helper/oneimage_helper.rb b/src/cli/one_helper/oneimage_helper.rb index 27a4910ec1..e73866f1a3 100644 --- a/src/cli/one_helper/oneimage_helper.rb +++ b/src/cli/one_helper/oneimage_helper.rb @@ -45,10 +45,6 @@ class OneImageHelper < OpenNebulaHelper::OneHelper d["ID"] end - column :NAME, "Name of the Image", :left, :size=>12 do |d| - d["NAME"] - end - column :USER, "Username of the Virtual Machine owner", :left, :size=>8 do |d| helper.user_name(d, options) @@ -59,6 +55,14 @@ class OneImageHelper < OpenNebulaHelper::OneHelper helper.group_name(d, options) end + column :NAME, "Name of the Image", :left, :size=>12 do |d| + d["NAME"] + end + + column :DATASTORE, "Name of the Image", :left, :size=>10 do |d| + d["DATASTORE"] + end + column :TYPE, 
"Type of the Image", :size=>4 do |d,e| OneImageHelper.type_to_str(d["TYPE"]) end @@ -67,7 +71,7 @@ class OneImageHelper < OpenNebulaHelper::OneHelper :size=>20 do |d| OpenNebulaHelper.time_to_str(d["REGTIME"]) end - + column :PERSISTENT, "Whether the Image is persistent or not", :size=>3 do |d| OpenNebulaHelper.boolean_to_str(d["PERSISTENT"]) @@ -87,7 +91,7 @@ class OneImageHelper < OpenNebulaHelper::OneHelper OpenNebulaHelper.unit_to_str(d['SIZE'].to_i,options,"M") end - default :ID, :USER, :GROUP, :NAME, :SIZE, :TYPE, :REGTIME, + default :ID, :USER, :GROUP, :NAME, :DATASTORE, :SIZE, :TYPE, :PERSISTENT , :STAT, :RVMS end @@ -118,6 +122,7 @@ class OneImageHelper < OpenNebulaHelper::OneHelper puts str % ["NAME", image.name] puts str % ["USER", image['UNAME']] puts str % ["GROUP",image['GNAME']] + puts str % ["DATASTORE",image['DATASTORE']] puts str % ["TYPE", image.type_str] puts str % ["REGISTER TIME", OpenNebulaHelper.time_to_str(image['REGTIME'])] diff --git a/src/cli/one_helper/oneuser_helper.rb b/src/cli/one_helper/oneuser_helper.rb index 82a060b44e..2b1a532919 100644 --- a/src/cli/one_helper/oneuser_helper.rb +++ b/src/cli/one_helper/oneuser_helper.rb @@ -34,7 +34,7 @@ class OneUserHelper < OpenNebulaHelper::OneHelper begin password = File.read(arg).split("\n").first rescue - return -1, "Can not read file: #{arg}" + return -1, "Cannot read file: #{arg}" end else password = arg.dup diff --git a/src/cli/one_helper/onevnet_helper.rb b/src/cli/one_helper/onevnet_helper.rb index 72fb32346c..39907836b1 100644 --- a/src/cli/one_helper/onevnet_helper.rb +++ b/src/cli/one_helper/onevnet_helper.rb @@ -39,11 +39,6 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper d["ID"] end - column :NAME, "Name of the Virtual Network", :left, - :size=>15 do |d| - d["NAME"] - end - column :USER, "Username of the Virtual Network owner", :left, :size=>8 do |d| helper.user_name(d, options) @@ -54,11 +49,20 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper helper.group_name(d, 
options) end + column :NAME, "Name of the Virtual Network", :left, + :size=>15 do |d| + d["NAME"] + end + + column :CLUSTER, "Name of the Cluster", :left, :size=>8 do |d| + OpenNebulaHelper.cluster_str(d["CLUSTER"]) + end + column :TYPE, "Type of Virtual Network", :size=>6 do |d| OneVNetHelper.type_to_str(d["TYPE"]) end - column :SIZE, "Size of the Virtual Network", :size=>6 do |d| + column :SIZE, "Size of the Virtual Network", :size=>5 do |d| d["SIZE"] end @@ -68,7 +72,7 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper end column :LEASES, "Number of this Virtual Network's given leases", - :size=>7 do |d| + :size=>6 do |d| d["TOTAL_LEASES"] end @@ -103,6 +107,7 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper puts str % ["NAME", vn['NAME']] puts str % ["USER", vn['UNAME']] puts str % ["GROUP", vn['GNAME']] + puts str % ["CLUSTER", OpenNebulaHelper.cluster_str(vn['CLUSTER'])] puts str % ["TYPE", vn.type_str] puts str % ["BRIDGE", vn["BRIDGE"]] puts str % ["VLAN", OpenNebulaHelper.boolean_to_str(vn['VLAN'])] diff --git a/src/cli/onecluster b/src/cli/onecluster new file mode 100755 index 0000000000..8cb5ac0dd3 --- /dev/null +++ b/src/cli/onecluster @@ -0,0 +1,176 @@ +#!/usr/bin/env ruby + +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +ONE_LOCATION=ENV["ONE_LOCATION"] + +if !ONE_LOCATION + RUBY_LIB_LOCATION="/usr/lib/one/ruby" +else + RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" +end + +$: << RUBY_LIB_LOCATION +$: << RUBY_LIB_LOCATION+"/cli" + +require 'command_parser' +require 'one_helper/onecluster_helper' + +cmd=CommandParser::CmdParser.new(ARGV) do + usage "`onecluster` [] []" + version OpenNebulaHelper::ONE_VERSION + + helper = OneClusterHelper.new + + ######################################################################## + # Global Options + ######################################################################## + set :option, CommandParser::OPTIONS + + list_options = CLIHelper::OPTIONS + list_options << OpenNebulaHelper::XML + list_options << OpenNebulaHelper::NUMERIC + + ######################################################################## + # Formatters for arguments + ######################################################################## + set :format, :clusterid, OneClusterHelper.to_id_desc do |arg| + helper.to_id(arg) + end + + set :format, :clusterid_list, OneClusterHelper.list_to_id_desc do |arg| + helper.list_to_id(arg) + end + + set :format, :vnetid, OpenNebulaHelper.rname_to_id_desc("VNET") do |arg| + OpenNebulaHelper.rname_to_id(arg, "VNET") + end + + set :format, :hostid, OpenNebulaHelper.rname_to_id_desc("HOST") do |arg| + OpenNebulaHelper.rname_to_id(arg, "HOST") + end + + set :format, :datastoreid, OpenNebulaHelper.rname_to_id_desc("DATASTORE") do |arg| + OpenNebulaHelper.rname_to_id(arg, "DATASTORE") + end + + ######################################################################## + # Commands + ######################################################################## + + create_desc = <<-EOT.unindent + Creates a new Cluster + EOT + + command :create, create_desc, :name do + helper.create_resource(options) do |cluster| + cluster.allocate(args[0]) + end + end + + delete_desc = 
<<-EOT.unindent + Deletes the given Cluster + EOT + + command :delete, delete_desc, [:range, :clusterid_list] do + helper.perform_actions(args[0],options,"deleted") do |obj| + obj.delete + end + end + + list_desc = <<-EOT.unindent + Lists Clusters in the pool + EOT + + command :list, list_desc, :options=>list_options do + helper.list_pool(options) + end + + show_desc = <<-EOT.unindent + Shows information for the given Cluster + EOT + + command :show, show_desc,:clusterid, :options=>OpenNebulaHelper::XML do + helper.show_resource(args[0],options) + end + + addhost_desc = <<-EOT.unindent + Adds a Host to the given Cluster + EOT + + # TODO: allow the second param to be [:range, :hostid_list] + command :addhost, addhost_desc,:clusterid, :hostid do + helper.perform_action(args[0],options,"updated") do |cluster| + cluster.addhost(args[1].to_i) + end + end + + delhost_desc = <<-EOT.unindent + Deletes a Host from the given Cluster + EOT + + # TODO: allow the second param to be [:range, :hostid_list] + command :delhost, delhost_desc, :clusterid, :hostid do + helper.perform_action(args[0],options,"updated") do |cluster| + cluster.delhost(args[1].to_i) + end + end + + adddatastore_desc = <<-EOT.unindent + Adds a Datastore to the given Cluster + EOT + + # TODO: allow the second param to be [:range, :datastoreid_list] + command :adddatastore, adddatastore_desc,:clusterid, :datastoreid do + helper.perform_action(args[0],options,"updated") do |cluster| + cluster.adddatastore(args[1].to_i) + end + end + + deldatastore_desc = <<-EOT.unindent + Deletes a Datastore from the given Cluster + EOT + + # TODO: allow the second param to be [:range, :datastoreid_list] + command :deldatastore, deldatastore_desc, :clusterid, :datastoreid do + helper.perform_action(args[0],options,"updated") do |cluster| + cluster.deldatastore(args[1].to_i) + end + end + + addvnet_desc = <<-EOT.unindent + Adds a Virtual Network to the given Cluster + EOT + + # TODO: allow the second param to be [:range, 
:vnetid_list] + command :addvnet, addvnet_desc,:clusterid, :vnetid do + helper.perform_action(args[0],options,"updated") do |cluster| + cluster.addvnet(args[1].to_i) + end + end + + delvnet_desc = <<-EOT.unindent + Deletes a Virtual Network from the given Cluster + EOT + + # TODO: allow the second param to be [:range, :vnetid_list] + command :delvnet, delvnet_desc,:clusterid, :vnetid do + helper.perform_action(args[0],options,"updated") do |cluster| + cluster.delvnet(args[1].to_i) + end + end +end diff --git a/src/cli/onedatastore b/src/cli/onedatastore new file mode 100755 index 0000000000..02dbab9189 --- /dev/null +++ b/src/cli/onedatastore @@ -0,0 +1,163 @@ +#!/usr/bin/env ruby + +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +ONE_LOCATION=ENV["ONE_LOCATION"] + +if !ONE_LOCATION + RUBY_LIB_LOCATION="/usr/lib/one/ruby" +else + RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby" +end + +$: << RUBY_LIB_LOCATION +$: << RUBY_LIB_LOCATION+"/cli" + +require 'command_parser' +require 'one_helper/onedatastore_helper' +require 'one_helper/onecluster_helper' + +cmd=CommandParser::CmdParser.new(ARGV) do + usage "`onedatastore` [] []" + version OpenNebulaHelper::ONE_VERSION + + helper = OneDatastoreHelper.new + + ######################################################################## + # Global Options + ######################################################################## + set :option, CommandParser::OPTIONS + + list_options = CLIHelper::OPTIONS + list_options << OpenNebulaHelper::XML + list_options << OpenNebulaHelper::NUMERIC + + ######################################################################## + # Formatters for arguments + ######################################################################## + set :format, :datastoreid, OneDatastoreHelper.to_id_desc do |arg| + helper.to_id(arg) + end + + set :format, :datastoreid_list, OneDatastoreHelper.list_to_id_desc do |arg| + helper.list_to_id(arg) + end + + set :format, :clusterid, OpenNebulaHelper.rname_to_id_desc("CLUSTER") do |arg| + OpenNebulaHelper.rname_to_id(arg, "CLUSTER") + end + + set :format, :groupid, OpenNebulaHelper.rname_to_id_desc("GROUP") do |arg| + OpenNebulaHelper.rname_to_id(arg, "GROUP") + end + + set :format, :userid, OpenNebulaHelper.rname_to_id_desc("USER") do |arg| + OpenNebulaHelper.rname_to_id(arg, "USER") + end + + ######################################################################## + # Commands + ######################################################################## + + create_desc = <<-EOT.unindent + Creates a new Datastore from the given template file + EOT + + command :create, create_desc, :file, 
:options=>[OneClusterHelper::CLUSTER] do + + cid = options[:cluster] || ClusterPool::NONE_CLUSTER_ID + + helper.create_resource(options) do |datastore| + begin + template=File.read(args[0]) + datastore.allocate(template, cid) + rescue =>e + STDERR.puts e.message + exit -1 + end + end + end + + delete_desc = <<-EOT.unindent + Deletes the given Datastore + EOT + + command :delete, delete_desc, [:range, :datastoreid_list] do + helper.perform_actions(args[0],options,"deleted") do |obj| + obj.delete + end + end + + chgrp_desc = <<-EOT.unindent + Changes the Datastore group + EOT + + command :chgrp, chgrp_desc,[:range, :datastoreid_list], :groupid do + helper.perform_actions(args[0],options,"Group changed") do |obj| + obj.chown(-1, args[1].to_i) + end + end + + chown_desc = <<-EOT.unindent + Changes the Datastore owner and group + EOT + + command :chown, chown_desc, [:range, :datastoreid_list], :userid, + [:groupid,nil] do + gid = args[2].nil? ? -1 : args[2].to_i + helper.perform_actions(args[0],options,"Owner/Group changed") do |obj| + obj.chown(args[1].to_i, gid) + end + end + + chmod_desc = <<-EOT.unindent + Changes the Datastore permissions + EOT + + command :chmod, chmod_desc, [:range, :datastoreid_list], :octet do + helper.perform_actions(args[0],options, "Permissions changed") do |obj| + obj.chmod_octet(args[1]) + end + end + + list_desc = <<-EOT.unindent + Lists Datastores in the pool + EOT + + command :list, list_desc, :options=>list_options do + helper.list_pool(options) + end + + show_desc = <<-EOT.unindent + Shows information for the given Datastore + EOT + + command :show, show_desc, :datastoreid, :options=>OpenNebulaHelper::XML do + helper.show_resource(args[0],options) + end + + update_desc = <<-EOT.unindent + Launches the system editor to modify and update the template contents + EOT + + command :update, update_desc, :datastoreid do + helper.perform_action(args[0],options,"modified") do |obj| + str = OpenNebulaHelper.update_template(args[0], obj) + 
obj.update(str) + end + end +end diff --git a/src/cli/onehost b/src/cli/onehost index 40dbfe737e..3b7d89fa63 100755 --- a/src/cli/onehost +++ b/src/cli/onehost @@ -29,6 +29,7 @@ $: << RUBY_LIB_LOCATION+"/cli" require 'command_parser' require 'one_helper/onehost_helper' +require 'one_helper/onecluster_helper' cmd=CommandParser::CmdParser.new(ARGV) do usage "`onehost` [] []" @@ -41,9 +42,36 @@ cmd=CommandParser::CmdParser.new(ARGV) do ######################################################################## set :option, CommandParser::OPTIONS + IM = { + :name => "im", + :short => "-i im_mad", + :large => "--im im_mad" , + :description => "Set the information driver for the host", + :format => String + } + + VMM = { + :name => "vmm", + :short => "-v vmm_mad", + :large => "--vm vmm_mad" , + :description => "Set the virtualization driver for the host", + :format => String + } + + VNET = { + :name => "vnm", + :short => "-n vnet_mad", + :large => "--net vnet_mad" , + :description => "Set the network driver for the host", + :format => String + } + + CREAT_OPTIONS = [ IM, VMM, VNET, OneClusterHelper::CLUSTER ] + ######################################################################## # Formatters for arguments ######################################################################## + set :format, :hostid, OneHostHelper.to_id_desc do |arg| helper.to_id(arg) end @@ -60,10 +88,23 @@ cmd=CommandParser::CmdParser.new(ARGV) do Creates a new Host EOT - command :create, create_desc, :hostname, :im_mad, :vmm_mad, - :tm_mad, :vnm_mad do + command :create, create_desc, :hostname, :options=>CREAT_OPTIONS do + if options[:im].nil? or options[:vmm].nil? or options[:vnm].nil? 
+ STDERR.puts "Drivers are mandatory to create a host:" + STDERR.puts "\t -i information driver" + STDERR.puts "\t -v hypervisor driver" + STDERR.puts "\t -n network driver" + exit -1 + end + + cid = options[:cluster] || ClusterPool::NONE_CLUSTER_ID + helper.create_resource(options) do |host| - host.allocate(args[0], args[1], args[2], args[4], args[3]) + host.allocate(args[0], + options[:im], + options[:vmm], + options[:vnm], + cid) end end diff --git a/src/cli/oneimage b/src/cli/oneimage index 4647ec1319..fca548c8ce 100755 --- a/src/cli/oneimage +++ b/src/cli/oneimage @@ -29,6 +29,7 @@ $: << RUBY_LIB_LOCATION+"/cli" require 'command_parser' require 'one_helper/oneimage_helper' +require 'one_helper/onedatastore_helper' cmd=CommandParser::CmdParser.new(ARGV) do usage "`oneimage` [] []" @@ -45,6 +46,8 @@ cmd=CommandParser::CmdParser.new(ARGV) do list_options << OpenNebulaHelper::XML list_options << OpenNebulaHelper::NUMERIC + CREATE_OPTIONS = [OneDatastoreHelper::DATASTORE] + ######################################################################## # Formatters for arguments ######################################################################## @@ -76,10 +79,20 @@ cmd=CommandParser::CmdParser.new(ARGV) do Creates a new Image from the given template file EOT - command :create, create_desc, :file do + command :create, create_desc, :file, :options=>CREATE_OPTIONS do + if options[:datastore].nil? 
+ STDERR.puts "Datastore to save the image is mandatory: " + STDERR.puts "\t -d datastore_id" + exit -1 + end helper.create_resource(options) do |image| - template=File.read(args[0]) - image.allocate(template) + begin + template=File.read(args[0]) + image.allocate(template, options[:datastore] ) + rescue => e + STDERR.puts e.message + exit -1 + end end end diff --git a/src/cli/onevnet b/src/cli/onevnet index 720673db27..2c82e5b1fe 100755 --- a/src/cli/onevnet +++ b/src/cli/onevnet @@ -29,6 +29,7 @@ $: << RUBY_LIB_LOCATION+"/cli" require 'command_parser' require 'one_helper/onevnet_helper' +require 'one_helper/onecluster_helper' cmd=CommandParser::CmdParser.new(ARGV) do usage "`onevnet` [] []" @@ -41,6 +42,8 @@ cmd=CommandParser::CmdParser.new(ARGV) do ######################################################################## set :option, CommandParser::OPTIONS + CREATE_OPTIONS = [OneClusterHelper::CLUSTER] + ######################################################################## # Formatters for arguments ######################################################################## @@ -72,10 +75,17 @@ cmd=CommandParser::CmdParser.new(ARGV) do Creates a new Virtual Network from the given template file EOT - command :create, create_desc, :file do + command :create, create_desc, :file, :options=>CREATE_OPTIONS do + cid = options[:cluster] || ClusterPool::NONE_CLUSTER_ID + helper.create_resource(options) do |vn| - template=File.read(args[0]) - vn.allocate(template) + begin + template=File.read(args[0]) + vn.allocate(template, cid) + rescue => e + STDERR.puts e.message + exit -1 + end end end diff --git a/src/cloud/common/CloudServer.rb b/src/cloud/common/CloudServer.rb index 9e3883ad1a..904e072ec0 100755 --- a/src/cloud/common/CloudServer.rb +++ b/src/cloud/common/CloudServer.rb @@ -37,7 +37,7 @@ class CloudServer ########################################################################## # Public attributes
########################################################################## - attr_reader :config, :logger + attr_reader :config # Initializes the Cloud server based on a config file # config_file:: _String_ for the server. MUST include the following @@ -47,9 +47,18 @@ class CloudServer # XMLRPC def initialize(config, logger=nil) # --- Load the Cloud Server configuration file --- - @config = config - @logger = logger + @config = config + @@logger = logger end + + def self.logger + return @@logger + end + + def logger + return @@logger + end + # # Prints the configuration of the server # @@ -101,15 +110,27 @@ module CloudLogger DATE_FORMAT = "%a %b %d %H:%M:%S %Y" # Patch logger class to be compatible with Rack::CommonLogger - class ::Logger + class CloudLogger < Logger + + def initialize(path) + super(path) + end + def write(msg) info msg.chop end + + def add(severity, message = nil, progname = nil, &block) + rc = super(severity, message, progname, &block) + @logdev.dev.flush + + rc + end end def enable_logging(path=nil, debug_level=3) path ||= $stdout - logger = ::Logger.new(path) + logger = CloudLogger.new(path) logger.level = DEBUG_LEVEL[debug_level] logger.formatter = proc do |severity, datetime, progname, msg| MSG_FORMAT % [ diff --git a/src/cloud/ec2/bin/econe-server b/src/cloud/ec2/bin/econe-server index 2071c168d7..5b1d23a9f2 100755 --- a/src/cloud/ec2/bin/econe-server +++ b/src/cloud/ec2/bin/econe-server @@ -53,7 +53,7 @@ setup() start() { if [ ! -f "$ECONE_SERVER" ]; then - echo "Can not find $ECONE_SERVER." + echo "Cannot find $ECONE_SERVER." 
exit 1 fi diff --git a/src/cloud/ec2/etc/econe.conf b/src/cloud/ec2/etc/econe.conf index f9ba61e670..02e352aa1b 100644 --- a/src/cloud/ec2/etc/econe.conf +++ b/src/cloud/ec2/etc/econe.conf @@ -37,6 +37,9 @@ # 0 = ERROR, 1 = WARNING, 2 = INFO, 3 = DEBUG :debug_level: 3 +:cluster_id: +:datastore_id: + # VM types allowed and its template file (inside templates directory) :instance_types: :m1.small: diff --git a/src/cloud/ec2/lib/EC2QueryServer.rb b/src/cloud/ec2/lib/EC2QueryServer.rb index 66c14ad2a0..9b02650a9c 100644 --- a/src/cloud/ec2/lib/EC2QueryServer.rb +++ b/src/cloud/ec2/lib/EC2QueryServer.rb @@ -79,7 +79,7 @@ class EC2QueryServer < CloudServer return OpenNebula::Error.new('Unsupported'), 400 end - rc = image.allocate(template) + rc = image.allocate(template, @config[:datastore_id]||1) if OpenNebula.is_error?(rc) return OpenNebula::Error.new('Unsupported'), 400 end diff --git a/src/cloud/occi/bin/occi-server b/src/cloud/occi/bin/occi-server index c7306756c7..5218f42e77 100755 --- a/src/cloud/occi/bin/occi-server +++ b/src/cloud/occi/bin/occi-server @@ -53,7 +53,7 @@ setup() start() { if [ ! -x "$OCCI_SERVER" ]; then - echo "Can not find $OCCI_SERVER." + echo "Cannot find $OCCI_SERVER." 
exit 1 fi diff --git a/src/cloud/occi/etc/occi-server.conf b/src/cloud/occi/etc/occi-server.conf index 5ab9849e09..971326cd20 100644 --- a/src/cloud/occi/etc/occi-server.conf +++ b/src/cloud/occi/etc/occi-server.conf @@ -37,6 +37,9 @@ # 0 = ERROR, 1 = WARNING, 2 = INFO, 3 = DEBUG :debug_level: 3 +:cluster_id: +:datastore_id: + # VM types allowed and its template file (inside templates directory) :instance_types: :small: diff --git a/src/cloud/occi/lib/OCCIClient.rb b/src/cloud/occi/lib/OCCIClient.rb index 4b30b9fb2c..c600599a18 100755 --- a/src/cloud/occi/lib/OCCIClient.rb +++ b/src/cloud/occi/lib/OCCIClient.rb @@ -131,7 +131,7 @@ module OCCIClient file_path="/"+m[1] end elsif !image_info.elements['TYPE'] == "DATABLOCK" - return CloudClient::Error.new("Can not find URL") + return CloudClient::Error.new("Cannot find URL") end if curb @@ -316,7 +316,7 @@ module OCCIClient end if info.elements['ID'] == nil - return CloudClient::Error.new("Can not find RESOURCE ID") + return CloudClient::Error.new("Cannot find RESOURCE ID") end resource_id = info.elements['ID'].text diff --git a/src/cloud/occi/lib/OCCIServer.rb b/src/cloud/occi/lib/OCCIServer.rb index a3102da56c..1bc9b6eaee 100755 --- a/src/cloud/occi/lib/OCCIServer.rb +++ b/src/cloud/occi/lib/OCCIServer.rb @@ -342,7 +342,7 @@ class OCCIServer < CloudServer template = network.to_one_template return template, 500 if OpenNebula.is_error?(template) - rc = network.allocate(template) + rc = network.allocate(template, @config[:cluster_id]||ClusterPool::NONE_CLUSTER_ID) if OpenNebula.is_error?(rc) return rc, CloudServer::HTTP_ERROR_CODE[rc.errno] end @@ -446,7 +446,7 @@ class OCCIServer < CloudServer template = image.to_one_template return template, 500 if OpenNebula.is_error?(template) - rc = image.allocate(template) + rc = image.allocate(template, @config[:datastore_id]||1) if OpenNebula.is_error?(rc) return rc, CloudServer::HTTP_ERROR_CODE[rc.errno] end diff --git a/src/cloud/occi/lib/ui/public/css/layout.css 
b/src/cloud/occi/lib/ui/public/css/layout.css index 5fcdfcb292..624e1e39a3 100644 --- a/src/cloud/occi/lib/ui/public/css/layout.css +++ b/src/cloud/occi/lib/ui/public/css/layout.css @@ -125,14 +125,44 @@ background-image: -moz-linear-gradient( padding: 0; } -#navigation li { +.navigation li.topTab { line-height: 2em; - text-align: right; - padding-right: 10px; + text-align: left; + padding-left: 15px; } -#navigation li a { +.navigation li.subTab { + line-height: 1.8em; + font-size: 12px; + text-align: left; + padding-left: 30px; +} + +.navigation li.subsubTab { + line-height: 1.7em; + font-size: 11px; + text-align: left; + padding-left: 40px; +} + +.navigation li.topTab span.plusIcon, +.navigation li.subTab span.plusIcon { + display : none; + float: right; + margin-right: 1em; +} + +.navigation li.topTab span.plusIcon { + margin-top: 5px; +} + +.navigation li.subTab span.plusIcon { + margin-top: 3px; +} + +#navigation li { color: #ffffff; + cursor: pointer; } #navigation li:hover, .navigation-active-li { @@ -163,10 +193,10 @@ background-image: -moz-linear-gradient( ); */ } -.navigation-active-li-a { +.navigation-active-li { font-weight: bold; } -#navigation li:hover a, .navigation-active-li-a { +#navigation li:hover { color: #ffffff !important; } @@ -181,3 +211,48 @@ background-image: -moz-linear-gradient( width: 100px; height: 22px; } + +/* top menu css */ +#menutop_container{ + margin:0px 171px; + color:#FFFFFF; + font-size:13px; + font-weight:bold; +} +#menutop_navbar{ + float:left; + height:25px; + font-size:13px; +} +#menutop_navbar ul{ + float:left; + height:25px; + color:#000000; + margin: 0 0; + padding-left: 1px; +} +#menutop_navbar ul{ + background-color: #353735; +} +#menutop_navbar ul li{ + float:left; + min-width:72px; + margin:0px 0 0 0; + height:22px; + display: inline; + text-align:center; + padding-left:5px; + padding-right: 5px; + padding-top: 4px; + padding-bottom: 4px; + border-left:1px solid white; + cursor:pointer; + color: white; +} + 
+#menutop_navbar ul li:hover { + background-color: #E69138; + +} + +/* end top menu css */ \ No newline at end of file diff --git a/src/cloud/occi/lib/ui/public/js/layout.js b/src/cloud/occi/lib/ui/public/js/layout.js index 7b315b443c..2fee0f93e9 100644 --- a/src/cloud/occi/lib/ui/public/js/layout.js +++ b/src/cloud/occi/lib/ui/public/js/layout.js @@ -14,16 +14,21 @@ /* limitations under the License. */ /* -------------------------------------------------------------------------- */ +//This file is mostly a copy of layout.js from Sunstone. +//Instead of opening a south panel, it opens an east panel. +//Apart from document.ready() modifications, the rest of different lines are +//marked with MODIFIED + var activeTab; var outerLayout, innerLayout; function hideDialog(){ - innerLayout.close("east"); + innerLayout.close("east");//MODIFIED } function popDialog(content){ $("#dialog").html(content); - innerLayout.open("east"); + innerLayout.open("east");//MODIFIED } function popDialogLoading(){ @@ -31,45 +36,98 @@ function popDialogLoading(){ popDialog(loading); } -function showTab(tabname){ - activeTab = tabname; +function showTab(tabname,highlight_tab){ + //Since menu items no longer have an element + //we no longer expect #tab_id here, but simply tab_id + //So safety check - remove # from #tab_id if present to ensure compatibility + if (tabname.indexOf('#') == 0) + tabname = tabname.substring(1); + if (highlight_tab && highlight_tab.indexOf('#') == 0) + highlight_tab = highlight_tab.substring(1); + + var activeTab = tabname; + + if (!highlight_tab) highlight_tab = activeTab; //clean selected menu $("#navigation li").removeClass("navigation-active-li"); - $("#navigation li a").removeClass("navigation-active-li-a"); + $("div#header ul#menutop_ul li").removeClass("navigation-active-li"); - //select menu - var li = $("#navigation li:has(a[href='"+activeTab+"'])") - var li_a = $("#navigation li a[href='"+activeTab+"']") + //select tab in left menu + var li = $("#navigation 
li#li_"+highlight_tab) li.addClass("navigation-active-li"); - li_a.addClass("navigation-active-li-a"); + + //select tab in top menu + var top_li = $("div#header ul#menutop_ul li#top_"+highlight_tab); + top_li.addClass("navigation-active-li"); + //show tab $(".tab").hide(); - $(activeTab).show(); - //~ if (activeTab == '#dashboard') { - //~ emptyDashboard(); - //~ preloadTables(); - //~ } - innerLayout.close("south"); -} + $('#'+activeTab).show(); +// innerLayout.close("south");//MODIFIED commented +}; + +function setupTabs(){ + + var topTabs = $(".outer-west ul li.topTab"); + var subTabs = $(".outer-west ul li.subTab"); + + subTabs.live("click",function(){ + //leave floor to topTab listener in case of tabs with both classes + if ($(this).hasClass('topTab')) return false; + + var tab = $(this).attr('id').substring(3); + showTab(tab); + return false; + }); + + topTabs.live("click",function(e){ + var tab = $(this).attr('id').substring(3); + //Subtabs have a class with the name of this tab + var subtabs = $('div#menu li.'+tab); + + //toggle subtabs only when clicking on the icon or when clicking on an + //already selected menu + if ($(e.target).is('span') || + $(this).hasClass("navigation-active-li")){ + //for each subtab, we hide the subsubtabs + subtabs.each(function(){ + //for each subtab, hide its subtabs + var subsubtabs = $(this).attr('id').substr(3); + //subsubtabs class + subsubtabs = $('div#menu li.'+subsubtabs); + subsubtabs.hide(); + }); + //hide subtabs and reset icon to + position, since all subsubtabs + //are hidden + subtabs.fadeToggle('fast'); + $('span',subtabs).removeClass('ui-icon-circle-minus'); + $('span',subtabs).addClass('ui-icon-circle-plus'); + //toggle icon on this tab + $('span',this).toggleClass('ui-icon-circle-plus ui-icon-circle-minus'); + }; + //if we are clicking on the icon only, do not show the tab + if ($(e.target).is('span')) return false; + + showTab(tab); + return false; + }); + +}; + +function setupTopMenu(){ + $('div#header 
ul#menutop_ul li').live('click',function(){ + var tab = "#" + $(this).attr('id').substring(4); + showTab(tab); + }); +}; $(document).ready(function () { $(".tab").hide(); - $(".outer-west ul li.subTab").live("click",function(){ - var tab = $('a',this).attr('href'); - showTab(tab); - return false; - }); - - $(".outer-west ul li.topTab").live("click",function(){ - var tab = $('a',this).attr('href'); - //toggle subtabs trick - $('li.'+tab.substr(1)).toggle(); - showTab(tab); - return false; - }); + setupTabs(); + //setupTopMenu(); outerLayout = $('body').layout({ applyDefaultStyles: false diff --git a/src/cluster/Cluster.cc b/src/cluster/Cluster.cc new file mode 100644 index 0000000000..78b93ddd28 --- /dev/null +++ b/src/cluster/Cluster.cc @@ -0,0 +1,269 @@ +/* ------------------------------------------------------------------------ */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. 
*/ +/* ------------------------------------------------------------------------ */ + +#include +#include + +#include +#include + +#include "Cluster.h" +#include "GroupPool.h" + +const char * Cluster::table = "cluster_pool"; + +const char * Cluster::db_names = + "oid, name, body, uid, gid, owner_u, group_u, other_u"; + +const char * Cluster::db_bootstrap = "CREATE TABLE IF NOT EXISTS cluster_pool (" + "oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, " + "gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, " + "UNIQUE(name))"; + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + +int Cluster::check_drop(string& error_msg) +{ + ostringstream oss; + + if ( hosts.get_collection_size() > 0 ) + { + oss << "Cluster " << oid << " is not empty, it contains " + << hosts.get_collection_size() << " hosts."; + + goto error_common; + } + + if ( datastores.get_collection_size() > 0 ) + { + oss << "Cluster " << oid << " is not empty, it contains " + << datastores.get_collection_size() << " datastores."; + + goto error_common; + } + + if ( vnets.get_collection_size() > 0 ) + { + oss << "Cluster " << oid << " is not empty, it contains " + << vnets.get_collection_size() << " vnets."; + + goto error_common; + } + + return 0; + +error_common: + error_msg = oss.str(); + + return -1; +} + +/* ************************************************************************ */ +/* Cluster :: Database Access Functions */ +/* ************************************************************************ */ + +int Cluster::insert_replace(SqlDB *db, bool replace, string& error_str) +{ + ostringstream oss; + + int rc; + string xml_body; + + char * sql_name; + char * sql_xml; + + // Set the owner and group to oneadmin + set_user(0, ""); + set_group(GroupPool::ONEADMIN_ID, GroupPool::ONEADMIN_NAME); + + // Update the Cluster + + sql_name = db->escape_str(name.c_str()); + 
+ if ( sql_name == 0 ) + { + goto error_name; + } + + sql_xml = db->escape_str(to_xml(xml_body).c_str()); + + if ( sql_xml == 0 ) + { + goto error_body; + } + + if ( validate_xml(sql_xml) != 0 ) + { + goto error_xml; + } + + if ( replace ) + { + oss << "REPLACE"; + } + else + { + oss << "INSERT"; + } + + // Construct the SQL statement to Insert or Replace + + oss <<" INTO "<exec(oss); + + db->free_str(sql_name); + db->free_str(sql_xml); + + return rc; + +error_xml: + db->free_str(sql_name); + db->free_str(sql_xml); + + error_str = "Error transforming the Cluster to XML."; + + goto error_common; + +error_body: + db->free_str(sql_name); + goto error_generic; + +error_name: + goto error_generic; + +error_generic: + error_str = "Error inserting Cluster in DB."; +error_common: + return -1; +} + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + +string& Cluster::to_xml(string& xml) const +{ + ostringstream oss; + string host_collection_xml; + string ds_collection_xml; + string vnet_collection_xml; + + oss << + "" << + "" << oid << "" << + "" << name << "" << + + hosts.to_xml(host_collection_xml) << + datastores.to_xml(ds_collection_xml) << + vnets.to_xml(vnet_collection_xml) << + + ""; + + xml = oss.str(); + + return xml; +} + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + +int Cluster::from_xml(const string& xml) +{ + int rc = 0; + vector content; + + // Initialize the internal XML object + update_from_str(xml); + + // Get class base attributes + rc += xpath(oid, "/CLUSTER/ID", -1); + rc += xpath(name,"/CLUSTER/NAME", "not_found"); + + // Set oneadmin as the owner + set_user(0,""); + + // Set the Cluster ID as the cluster it belongs to + set_group(oid, name); + + // ------------------------------------------------------------------------- + // Get 
associated hosts + // ------------------------------------------------------------------------- + ObjectXML::get_nodes("/CLUSTER/HOSTS", content); + + if (content.empty()) + { + return -1; + } + + // Set of IDs + rc += hosts.from_xml_node(content[0]); + + ObjectXML::free_nodes(content); + content.clear(); + + // ------------------------------------------------------------------------- + // Get associated datastores + // ------------------------------------------------------------------------- + ObjectXML::get_nodes("/CLUSTER/DATASTORES", content); + + if (content.empty()) + { + return -1; + } + + // Set of IDs + rc += datastores.from_xml_node(content[0]); + + ObjectXML::free_nodes(content); + content.clear(); + + // ------------------------------------------------------------------------- + // Get associated vnets + // ------------------------------------------------------------------------- + ObjectXML::get_nodes("/CLUSTER/VNETS", content); + + if (content.empty()) + { + return -1; + } + + // Set of IDs + rc += vnets.from_xml_node(content[0]); + + ObjectXML::free_nodes(content); + content.clear(); + + if (rc != 0) + { + return -1; + } + + return 0; +} + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + diff --git a/src/cluster/ClusterPool.cc b/src/cluster/ClusterPool.cc new file mode 100644 index 0000000000..32d549b1dc --- /dev/null +++ b/src/cluster/ClusterPool.cc @@ -0,0 +1,133 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. 
You may obtain   */
+/* a copy of the License at                                                  */
+/*                                                                           */
+/* http://www.apache.org/licenses/LICENSE-2.0                                */
+/*                                                                           */
+/* Unless required by applicable law or agreed to in writing, software       */
+/* distributed under the License is distributed on an "AS IS" BASIS,         */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  */
+/* See the License for the specific language governing permissions and       */
+/* limitations under the License.                                            */
+/* -------------------------------------------------------------------------- */
+
+#include "ClusterPool.h"
+#include "Nebula.h"
+#include "NebulaLog.h"
+
+#include <stdexcept>
+
+/* -------------------------------------------------------------------------- */
+/* There is a default cluster boostrapped by the core:                        */
+/* The first 100 cluster IDs are reserved for system clusters.                */
+/* Regular ones start from ID 100                                             */
+/* -------------------------------------------------------------------------- */
+
+const string ClusterPool::NONE_CLUSTER_NAME = "";
+const int    ClusterPool::NONE_CLUSTER_ID   = -1;
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+ClusterPool::ClusterPool(SqlDB * db):PoolSQL(db, Cluster::table)
+{
+    ostringstream oss;
+    string        error_str;
+
+    if (get_lastOID() == -1) //lastOID is set in PoolSQL::init_cb
+    {
+        set_update_lastOID(99);
+    }
+
+    return;
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+int ClusterPool::allocate(string name, int * oid, string& error_str)
+{
+    Cluster *     cluster;
+    ostringstream oss;
+
+    if ( name.empty() )
+    {
+        goto error_name;
+    }
+
+    if ( name.length() > 128 )
+    {
+        goto error_name_length;
+    }
+
+    // Check for duplicates
+    cluster = get(name, false);
+
+    if( cluster != 0 )
+    {
+        goto error_duplicated;
+    }
+
+    // Build a new Cluster object
+    
cluster = new Cluster(-1, name);
+
+    // Insert the Object in the pool
+    *oid = PoolSQL::allocate(cluster, error_str);
+
+    return *oid;
+
+error_name:
+    oss << "NAME cannot be empty.";
+    goto error_common;
+
+error_name_length:
+    oss << "NAME is too long; max length is 128 chars.";
+    goto error_common;
+
+error_duplicated:
+    oss << "NAME is already taken by CLUSTER " << cluster->get_oid() << ".";
+
+error_common:
+    *oid = -1;
+    error_str = oss.str();
+
+    return *oid;
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+int ClusterPool::drop(PoolObjectSQL * objsql, string& error_msg)
+{
+    Cluster * cluster = static_cast<Cluster *>(objsql);
+
+    int rc;
+
+    // Return error if the cluster is a default one.
+    if( cluster->get_oid() < 100 )
+    {
+        error_msg = "System Clusters (ID < 100) cannot be deleted.";
+        NebulaLog::log("CLUSTER", Log::ERROR, error_msg);
+        return -2;
+    }
+
+    if ( cluster->check_drop(error_msg) < 0 )
+    {
+        NebulaLog::log("CLUSTER", Log::ERROR, error_msg);
+
+        return -3;
+    }
+
+    rc = cluster->drop(db);
+
+    if( rc != 0 )
+    {
+        error_msg = "SQL DB error";
+        rc = -1;
+    }
+
+    return rc;
+}
diff --git a/src/tm_mad/shared/tm_shared.conf b/src/cluster/SConstruct
similarity index 84%
rename from src/tm_mad/shared/tm_shared.conf
rename to src/cluster/SConstruct
index fdd1ed0852..c088beebc3 100644
--- a/src/tm_mad/shared/tm_shared.conf
+++ b/src/cluster/SConstruct
@@ -1,3 +1,5 @@
+# SConstruct for src/cluster
+
 # -------------------------------------------------------------------------- #
 # Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org)              #
 #                                                                            #
@@ -14,10 +16,15 @@
 # limitations under the License. 
# #--------------------------------------------------------------------------- #
-CLONE   = shared/tm_clone.sh
-LN      = shared/tm_ln.sh
-MKSWAP  = shared/tm_mkswap.sh
-MKIMAGE = shared/tm_mkimage.sh
-DELETE  = shared/tm_delete.sh
-MV      = shared/tm_mv.sh
-CONTEXT = shared/tm_context.sh
+Import('env')
+
+lib_name='nebula_cluster'
+
+# Sources to generate the library
+source_files=[
+    'ClusterPool.cc',
+    'Cluster.cc'
+]
+
+# Build library
+env.StaticLibrary(lib_name, source_files)
diff --git a/src/common/Attribute.cc b/src/common/Attribute.cc
index 1c70602f11..bce9e58eb8 100644
--- a/src/common/Attribute.cc
+++ b/src/common/Attribute.cc
@@ -201,3 +201,27 @@ string VectorAttribute::vector_value(const char *name) const
     }
 }
 
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+int VectorAttribute::vector_value(const char *name, int & value) const
+{
+    map<string, string>::const_iterator it;
+
+    it = attribute_value.find(name);
+
+    if ( it == attribute_value.end() )
+    {
+        return -1;
+    }
+
+    if ( it->second.empty() )
+    {
+        return -1;
+    }
+
+    istringstream iss(it->second);
+    iss >> value;
+
+    return 0;
+}
diff --git a/src/datastore/Datastore.cc b/src/datastore/Datastore.cc
new file mode 100644
index 0000000000..ea7b1d50e8
--- /dev/null
+++ b/src/datastore/Datastore.cc
@@ -0,0 +1,373 @@
+/* ------------------------------------------------------------------------ */
+/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org)           */
+/*                                                                          */
+/* Licensed under the Apache License, Version 2.0 (the "License"); you may  */
+/* not use this file except in compliance with the License. 
You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* ------------------------------------------------------------------------ */ + +#include "Datastore.h" +#include "GroupPool.h" +#include "NebulaLog.h" +#include "Nebula.h" + +const char * Datastore::table = "datastore_pool"; + +const char * Datastore::db_names = + "oid, name, body, uid, gid, owner_u, group_u, other_u"; + +const char * Datastore::db_bootstrap = + "CREATE TABLE IF NOT EXISTS datastore_pool (" + "oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, " + "gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, " + "UNIQUE(name))"; + +/* ************************************************************************ */ +/* Datastore :: Constructor/Destructor */ +/* ************************************************************************ */ + +Datastore::Datastore( + int uid, + int gid, + const string& uname, + const string& gname, + DatastoreTemplate* ds_template, + int cluster_id, + const string& cluster_name): + PoolObjectSQL(-1,DATASTORE,"",uid,gid,uname,gname,table), + ObjectCollection("IMAGES"), + Clusterable(cluster_id, cluster_name), + ds_mad(""), + tm_mad(""), + base_path("") +{ + group_u = 1; + + if (ds_template != 0) + { + obj_template = ds_template; + } + else + { + obj_template = new DatastoreTemplate; + } +} + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + +int Datastore::disk_attribute(VectorAttribute * disk) +{ + ostringstream oss; + + oss << oid; + + 
disk->replace("DATASTORE", get_name()); + disk->replace("DATASTORE_ID", oss.str()); + disk->replace("TM_MAD", get_tm_mad()); + + if ( get_cluster_id() != ClusterPool::NONE_CLUSTER_ID ) + { + oss.str(""); + oss << get_cluster_id(); + + disk->replace("CLUSTER_ID", oss.str()); + } + + return 0; +} + +/* ************************************************************************ */ +/* Datastore :: Database Access Functions */ +/* ************************************************************************ */ + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + +int Datastore::insert(SqlDB *db, string& error_str) +{ + int rc; + ostringstream oss; + + Nebula& nd = Nebula::instance(); + + // --------------------------------------------------------------------- + // Check default datastore attributes + // --------------------------------------------------------------------- + + erase_template_attribute("NAME", name); + // NAME is checked in DatastorePool::allocate + + get_template_attribute("DS_MAD", ds_mad); + + if ( ds_mad.empty() == true ) + { + goto error_ds; + } + + get_template_attribute("TM_MAD", tm_mad); + + if ( tm_mad.empty() == true ) + { + goto error_tm; + } + + oss << nd.get_ds_location() << oid; + + base_path = oss.str(); + + //-------------------------------------------------------------------------- + // Insert the Datastore + //-------------------------------------------------------------------------- + + rc = insert_replace(db, false, error_str); + + return rc; + +error_ds: + error_str = "No DS_MAD in template."; + goto error_common; + +error_tm: + error_str = "No TM_MAD in template."; + goto error_common; + +error_common: + NebulaLog::log("DATASTORE", Log::ERROR, error_str); + return -1; +} + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + 
+int Datastore::insert_replace(SqlDB *db, bool replace, string& error_str)
+{
+    ostringstream oss;
+
+    int    rc;
+    string xml_body;
+
+    char * sql_name;
+    char * sql_xml;
+
+    // Update the Datastore
+
+    sql_name = db->escape_str(name.c_str());
+
+    if ( sql_name == 0 )
+    {
+        goto error_name;
+    }
+
+    sql_xml = db->escape_str(to_xml(xml_body).c_str());
+
+    if ( sql_xml == 0 )
+    {
+        goto error_body;
+    }
+
+    if ( validate_xml(sql_xml) != 0 )
+    {
+        goto error_xml;
+    }
+
+    if ( replace )
+    {
+        oss << "REPLACE";
+    }
+    else
+    {
+        oss << "INSERT";
+    }
+
+    // Construct the SQL statement to Insert or Replace
+
+    oss <<" INTO "<< table <<" ("<< db_names <<") VALUES ("
+        <<          oid                 << ","
+        << "'" <<   sql_name            << "',"
+        << "'" <<   sql_xml             << "')";
+
+    rc = db->
exec(oss);
+
+    db->free_str(sql_name);
+    db->free_str(sql_xml);
+
+    return rc;
+
+error_xml:
+    db->free_str(sql_name);
+    db->free_str(sql_xml);
+
+    error_str = "Error transforming the Datastore to XML.";
+
+    goto error_common;
+
+error_body:
+    db->free_str(sql_name);
+    goto error_generic;
+
+error_name:
+    goto error_generic;
+
+error_generic:
+    error_str = "Error inserting Datastore in DB.";
+error_common:
+    return -1;
+}
+
+/* ------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------ */
+
+string& Datastore::to_xml(string& xml) const
+{
+    ostringstream oss;
+    string        collection_xml;
+    string        template_xml;
+    string        perms_xml;
+
+    ObjectCollection::to_xml(collection_xml);
+
+    oss <<
+    "<DATASTORE>"       <<
+        "<ID>"          << oid          << "</ID>"          <<
+        "<UID>"         << uid          << "</UID>"         <<
+        "<GID>"         << gid          << "</GID>"         <<
+        "<UNAME>"       << uname        << "</UNAME>"       <<
+        "<GNAME>"       << gname        << "</GNAME>"       <<
+        "<NAME>"        << name         << "</NAME>"        <<
+        perms_to_xml(perms_xml)         <<
+        "<DS_MAD>"      << ds_mad       << "</DS_MAD>"      <<
+        "<TM_MAD>"      << tm_mad       << "</TM_MAD>"      <<
+        "<BASE_PATH>"   << base_path    << "</BASE_PATH>"   <<
+        "<CLUSTER_ID>"  << cluster_id   << "</CLUSTER_ID>"  <<
+        "<CLUSTER>"     << cluster      << "</CLUSTER>"     <<
+        collection_xml                  <<
+        obj_template->to_xml(template_xml) <<
+    "</DATASTORE>";
+
+    xml = oss.str();
+
+    return xml;
+}
+
+/* ------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------ */
+
+int Datastore::from_xml(const string& xml)
+{
+    int rc = 0;
+    vector<xmlNodePtr> content;
+
+    // Initialize the internal XML object
+    update_from_str(xml);
+
+    // Get class base attributes
+    rc += xpath(oid,       "/DATASTORE/ID",        -1);
+    rc += xpath(uid,       "/DATASTORE/UID",       -1);
+    rc += xpath(gid,       "/DATASTORE/GID",       -1);
+    rc += xpath(uname,     "/DATASTORE/UNAME",     "not_found");
+    rc += xpath(gname,     "/DATASTORE/GNAME",     "not_found");
+    rc += xpath(name,      "/DATASTORE/NAME",      "not_found");
+    rc += xpath(ds_mad,    "/DATASTORE/DS_MAD",    "not_found");
+    rc += xpath(tm_mad,    "/DATASTORE/TM_MAD",    "not_found");
+    rc += xpath(base_path, "/DATASTORE/BASE_PATH", "not_found");
+
+    
rc += xpath(cluster_id, "/DATASTORE/CLUSTER_ID", -1); + rc += xpath(cluster, "/DATASTORE/CLUSTER", "not_found"); + + // Permissions + rc += perms_from_xml(); + + // Get associated classes + ObjectXML::get_nodes("/DATASTORE/IMAGES", content); + + if (content.empty()) + { + return -1; + } + + // Set of IDs + rc += ObjectCollection::from_xml_node(content[0]); + + ObjectXML::free_nodes(content); + content.clear(); + + // Get associated classes + ObjectXML::get_nodes("/DATASTORE/TEMPLATE", content); + + if (content.empty()) + { + return -1; + } + + rc += obj_template->from_xml_node(content[0]); + + ObjectXML::free_nodes(content); + + if (rc != 0) + { + return -1; + } + + return 0; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int Datastore::replace_template(const string& tmpl_str, string& error) +{ + string new_ds_mad; + string new_tm_mad; + + int rc; + + rc = PoolObjectSQL::replace_template(tmpl_str, error); + + if ( rc != 0 ) + { + return rc; + } + + get_template_attribute("DS_MAD", new_ds_mad); + + if ( !new_ds_mad.empty() ) + { + ds_mad = new_ds_mad; + } + else + { + replace_template_attribute("DS_MAD", ds_mad); + } + + get_template_attribute("TM_MAD", new_tm_mad); + + if ( !new_tm_mad.empty() ) + { + tm_mad = new_tm_mad; + } + else + { + replace_template_attribute("TM_MAD", tm_mad); + } + + return 0; +} + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + diff --git a/src/datastore/DatastorePool.cc b/src/datastore/DatastorePool.cc new file mode 100644 index 0000000000..f98396ca38 --- /dev/null +++ b/src/datastore/DatastorePool.cc @@ -0,0 +1,245 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache 
License, Version 2.0 (the "License"); you may   */
+/* not use this file except in compliance with the License. You may obtain    */
+/* a copy of the License at                                                   */
+/*                                                                            */
+/* http://www.apache.org/licenses/LICENSE-2.0                                 */
+/*                                                                            */
+/* Unless required by applicable law or agreed to in writing, software        */
+/* distributed under the License is distributed on an "AS IS" BASIS,          */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   */
+/* See the License for the specific language governing permissions and        */
+/* limitations under the License.                                             */
+/* -------------------------------------------------------------------------- */
+
+#include "DatastorePool.h"
+#include "Nebula.h"
+#include "NebulaLog.h"
+
+#include <stdexcept>
+
+/* -------------------------------------------------------------------------- */
+/* There is a default datastore boostrapped by the core:                      */
+/* The first 100 IDs are reserved for system datastores. Regular ones start   */
+/* from ID 100                                                                */
+/* -------------------------------------------------------------------------- */
+
+const string DatastorePool::SYSTEM_DS_NAME = "system";
+const int    DatastorePool::SYSTEM_DS_ID   = 0;
+
+const string DatastorePool::DEFAULT_DS_NAME = "default";
+const int    DatastorePool::DEFAULT_DS_ID   = 1;
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+DatastorePool::DatastorePool(SqlDB * db):
+                        PoolSQL(db, Datastore::table)
+{
+    ostringstream oss;
+    string        error_str;
+
+    if (get_lastOID() == -1) //lastOID is set in PoolSQL::init_cb
+    {
+        DatastoreTemplate * ds_tmpl;
+        Datastore *         ds;
+
+        int rc;
+
+        // ---------------------------------------------------------------------
+        // Create the system datastore
+        // ---------------------------------------------------------------------
+
+        oss << "NAME = " << SYSTEM_DS_NAME << endl
+            << "DS_MAD = -" << endl
+            << "TM_MAD = shared";
+
+        ds_tmpl = new 
DatastoreTemplate; + rc = ds_tmpl->parse_str_or_xml(oss.str(), error_str); + + if( rc < 0 ) + { + goto error_bootstrap; + } + + allocate(UserPool::ONEADMIN_ID, + GroupPool::ONEADMIN_ID, + UserPool::oneadmin_name, + GroupPool::ONEADMIN_NAME, + ds_tmpl, + &rc, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + error_str); + + if( rc < 0 ) + { + goto error_bootstrap; + } + + // --------------------------------------------------------------------- + // Create the default datastore + // --------------------------------------------------------------------- + oss.str(""); + + oss << "NAME = " << DEFAULT_DS_NAME << endl + << "DS_MAD = fs" << endl + << "TM_MAD = shared"; + + ds_tmpl = new DatastoreTemplate; + rc = ds_tmpl->parse_str_or_xml(oss.str(), error_str); + + if( rc < 0 ) + { + goto error_bootstrap; + } + + allocate(UserPool::ONEADMIN_ID, + GroupPool::ONEADMIN_ID, + UserPool::oneadmin_name, + GroupPool::ONEADMIN_NAME, + ds_tmpl, + &rc, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + error_str); + + if( rc < 0 ) + { + goto error_bootstrap; + } + + ds = get(rc, true); + + ds->set_permissions( + -1,-1,-1, + -1,-1,-1, + 1,-1,-1, + error_str); + + update(ds); + + ds->unlock(); + + // User created datastores will start from ID 100 + set_update_lastOID(99); + } + + return; + +error_bootstrap: + oss.str(""); + oss << "Error trying to create default datastore: " << error_str; + NebulaLog::log("DATASTORE",Log::ERROR,oss); + + throw runtime_error(oss.str()); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int DatastorePool::allocate( + int uid, + int gid, + const string& uname, + const string& gname, + DatastoreTemplate * ds_template, + int * oid, + int cluster_id, + const string& cluster_name, + string& error_str) +{ + Datastore * ds; + Datastore * ds_aux = 0; + string name; + ostringstream oss; + + ds = new 
Datastore(uid, gid, uname, gname,
+                       ds_template, cluster_id, cluster_name);
+
+    // -------------------------------------------------------------------------
+    // Check name & duplicates
+    // -------------------------------------------------------------------------
+
+    ds->get_template_attribute("NAME", name);
+
+    if ( name.empty() )
+    {
+        goto error_name;
+    }
+
+    if ( name.length() > 128 )
+    {
+        goto error_name_length;
+    }
+
+    ds_aux = get(name,false);
+
+    if( ds_aux != 0 )
+    {
+        goto error_duplicated;
+    }
+
+    *oid = PoolSQL::allocate(ds, error_str);
+
+    return *oid;
+
+error_name:
+    oss << "NAME cannot be empty.";
+    goto error_common;
+
+error_name_length:
+    oss << "NAME is too long; max length is 128 chars.";
+    goto error_common;
+
+error_duplicated:
+    oss << "NAME is already taken by DATASTORE " << ds_aux->get_oid() << ".";
+
+error_common:
+    delete ds;
+
+    *oid = -1;
+    error_str = oss.str();
+
+    return *oid;
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+
+int DatastorePool::drop(PoolObjectSQL * objsql, string& error_msg)
+{
+    Datastore * datastore = static_cast<Datastore *>(objsql);
+
+    int rc;
+
+    // Return error if the datastore is a default one. 
+ if( datastore->get_oid() < 100 ) + { + error_msg = "System Datastores (ID < 100) cannot be deleted."; + NebulaLog::log("DATASTORE", Log::ERROR, error_msg); + return -2; + } + + if( datastore->get_collection_size() > 0 ) + { + ostringstream oss; + oss << "Datastore " << datastore->get_oid() << " is not empty."; + error_msg = oss.str(); + NebulaLog::log("DATASTORE", Log::ERROR, error_msg); + + return -3; + } + + rc = datastore->drop(db); + + if( rc != 0 ) + { + error_msg = "SQL DB error"; + rc = -1; + } + + return rc; +} diff --git a/src/tm_mad/lvm/tm_lvm.conf b/src/datastore/SConstruct old mode 100755 new mode 100644 similarity index 83% rename from src/tm_mad/lvm/tm_lvm.conf rename to src/datastore/SConstruct index 7662ae32b1..36514ca0b9 --- a/src/tm_mad/lvm/tm_lvm.conf +++ b/src/datastore/SConstruct @@ -1,3 +1,5 @@ +# SConstruct for src/datastore + # -------------------------------------------------------------------------- # # Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # # # @@ -14,10 +16,15 @@ # limitations under the License. # #--------------------------------------------------------------------------- # -CLONE = lvm/tm_clone.sh -LN = lvm/tm_ln.sh -MKSWAP = lvm/tm_mkswap.sh -MKIMAGE = lvm/tm_mkimage.sh -DELETE = lvm/tm_delete.sh -MV = lvm/tm_mv.sh -CONTEXT = lvm/tm_context.sh +Import('env') + +lib_name='nebula_datastore' + +# Sources to generate the library +source_files=[ + 'DatastorePool.cc', + 'Datastore.cc' +] + +# Build library +env.StaticLibrary(lib_name, source_files) diff --git a/src/image_mad/one_image b/src/datastore_mad/one_datastore similarity index 90% rename from src/image_mad/one_image rename to src/datastore_mad/one_datastore index 9f62176cf8..ba5653805b 100755 --- a/src/image_mad/one_image +++ b/src/datastore_mad/one_datastore @@ -20,21 +20,15 @@ DRIVER_NAME=`basename $0 | cut -d. 
-f1` if [ -z "${ONE_LOCATION}" ]; then - DRIVERRC=/etc/one/${DRIVER_NAME}/${DRIVER_NAME}rc MADCOMMON=/usr/lib/one/mads/madcommon.sh VAR_LOCATION=/var/lib/one else - DRIVERRC=$ONE_LOCATION/etc/${DRIVER_NAME}/${DRIVER_NAME}rc MADCOMMON=$ONE_LOCATION/lib/mads/madcommon.sh VAR_LOCATION=$ONE_LOCATION/var fi . $MADCOMMON -# Export the im_mad specific rc - -export_rc_vars $DRIVERRC - # Go to var directory ONE_LOCATION/var or /var/lib/one cd $VAR_LOCATION diff --git a/src/datastore_mad/one_datastore.rb b/src/datastore_mad/one_datastore.rb new file mode 100755 index 0000000000..2ed7afc99b --- /dev/null +++ b/src/datastore_mad/one_datastore.rb @@ -0,0 +1,177 @@ +#!/usr/bin/env ruby + +# -------------------------------------------------------------------------- */ +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# Licensed under the Apache License, Version 2.0 (the "License"); you may */ +# not use this file except in compliance with the License. You may obtain */ +# a copy of the License at */ +# */ +# http://www.apache.org/licenses/LICENSE-2.0 */ +# */ +# Unless required by applicable law or agreed to in writing, software */ +# distributed under the License is distributed on an "AS IS" BASIS, */ +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +# See the License for the specific language governing permissions and */ +# limitations under the License. 
*/ +# -------------------------------------------------------------------------- */ + +# ---------------------------------------------------------------------------- +# Set up the environment for the driver +# ---------------------------------------------------------------------------- + +ONE_LOCATION = ENV["ONE_LOCATION"] + +if !ONE_LOCATION + RUBY_LIB_LOCATION = "/usr/lib/one/ruby" + VAR_LOCATION = "/var/lib/one" +else + RUBY_LIB_LOCATION = ONE_LOCATION + "/lib/ruby" + VAR_LOCATION = ONE_LOCATION + "/var" +end + +$: << RUBY_LIB_LOCATION + +require "OpenNebulaDriver" +require 'getoptlong' +require 'base64' +require 'rexml/document' + +# This class provides basic messaging and logging functionality +# to implement Datastore Drivers. A datastore driver +# is a program (or a set of) that specialize the OpenNebula behavior +# by interfacing with specific infrastructure storage solutions. +class DatastoreDriver < OpenNebulaDriver + + # Image Driver Protocol constants + ACTION = { + :mv => "MV", + :cp => "CP", + :rm => "RM", + :mkfs => "MKFS", + :log => "LOG" + } + + # Register default actions for the protocol + def initialize(ds_type, options={}) + @options={ + :concurrency => 10, + :threaded => true, + :retries => 0, + :local_actions => { + ACTION[:mv] => nil, + ACTION[:cp] => nil, + ACTION[:rm] => nil, + ACTION[:mkfs] => nil + } + }.merge!(options) + + super("datastore/", @options) + + if ds_type == nil + @types = Dir["#{@local_scripts_path}/*/"].map do |d| + d.split('/')[-1] + end + elsif ds_type.class == String + @types = [ds_type] + else + @types = ds_type + end + +# register_action(ACTION[:mv].to_sym, method("mv")) + register_action(ACTION[:cp].to_sym, method("cp")) + register_action(ACTION[:rm].to_sym, method("rm")) + register_action(ACTION[:mkfs].to_sym, method("mkfs")) + end + + ############################################################################ + # Image Manager Protocol Actions (generic implementation) + 
############################################################################ +# TODO: Integrate this with TM +# def mv(id, ds, src, dst) +# do_image_action(id, ds, :mv, "'#{src}' '#{dst}' '#{id}'") +# end + + def cp(id, drv_message) + ds = get_ds_type(drv_message) + do_image_action(id, ds, :cp, "#{drv_message} #{id}") + end + + def rm(id, drv_message) + ds = get_ds_type(drv_message) + do_image_action(id, ds, :rm, "#{drv_message} #{id}") + end + + def mkfs(id, drv_message) + ds = get_ds_type(drv_message) + do_image_action(id, ds, :mkfs, "#{drv_message} #{id}") + end + + private + + def is_available?(ds, id, action) + if @types.include?(ds) + return true + else + send_message(ACTION[action], RESULT[:failure], id, + "Datastore driver '#{ds}' not available") + return false + end + end + + def do_image_action(id, ds, action, arguments) + return if not is_available?(ds,id,action) + + path = File.join(@local_scripts_path, ds) + cmd = File.join(path, ACTION[action].downcase) + + cmd << " " << arguments + + rc = LocalCommand.run(cmd, log_method(id)) + + result, info = get_info_from_execution(rc) + + + PP.pp([ACTION[action], result, id, info],STDERR) + send_message(ACTION[action], result, id, info) + end + + def get_ds_type(drv_message) + message = Base64.decode64(drv_message) + xml_doc = REXML::Document.new(message) + + dsxml = xml_doc.root.elements['/DS_DRIVER_ACTION_DATA/DATASTORE/DS_MAD'] + dstxt = dsxml.text if dsxml + + return dstxt + end +end + +################################################################################ +################################################################################ +# DatastoreDriver Main program +################################################################################ +################################################################################ + +opts = GetoptLong.new( + [ '--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT ], + [ '--ds-types', '-d', GetoptLong::OPTIONAL_ARGUMENT ] +) + +ds_type = nil +threads = 15 + 
+begin + opts.each do |opt, arg| + case opt + when '--threads' + threads = arg.to_i + when '--ds-types' + ds_type = arg.split(',').map {|a| a.strip } + end + end +rescue Exception => e + exit(-1) +end + +ds_driver = DatastoreDriver.new(ds_type, :concurrency => threads) +ds_driver.start_driver diff --git a/src/tm_mad/dummy/tm_dummyrc b/src/datastore_mad/remotes/dummy/cp old mode 100644 new mode 100755 similarity index 97% rename from src/tm_mad/dummy/tm_dummyrc rename to src/datastore_mad/remotes/dummy/cp index 1e12168116..d915146f42 --- a/src/tm_mad/dummy/tm_dummyrc +++ b/src/datastore_mad/remotes/dummy/cp @@ -1,3 +1,5 @@ +#!/bin/sh + # -------------------------------------------------------------------------- # # Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # # # @@ -13,3 +15,5 @@ # See the License for the specific language governing permissions and # # limitations under the License. # #--------------------------------------------------------------------------- # + +echo "dummy_path 1024" diff --git a/src/tm_mad/ssh/tm_sshrc b/src/datastore_mad/remotes/dummy/mkfs old mode 100644 new mode 100755 similarity index 97% rename from src/tm_mad/ssh/tm_sshrc rename to src/datastore_mad/remotes/dummy/mkfs index 1e12168116..d915146f42 --- a/src/tm_mad/ssh/tm_sshrc +++ b/src/datastore_mad/remotes/dummy/mkfs @@ -1,3 +1,5 @@ +#!/bin/sh + # -------------------------------------------------------------------------- # # Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # # # @@ -13,3 +15,5 @@ # See the License for the specific language governing permissions and # # limitations under the License. 
# #--------------------------------------------------------------------------- # + +echo "dummy_path 1024" diff --git a/src/tm_mad/dummy/tm_dummy.sh b/src/datastore_mad/remotes/dummy/rm similarity index 100% rename from src/tm_mad/dummy/tm_dummy.sh rename to src/datastore_mad/remotes/dummy/rm diff --git a/src/datastore_mad/remotes/fs/cp b/src/datastore_mad/remotes/fs/cp new file mode 100755 index 0000000000..81a6b07679 --- /dev/null +++ b/src/datastore_mad/remotes/fs/cp @@ -0,0 +1,91 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +############################################################################### +# This script is used to copy a VM image (SRC) to the image repository as DST +# Several SRC types are supported +############################################################################### + +# -------- Set up the environment to source common tools & conf ------------ + +if [ -z "${ONE_LOCATION}" ]; then + LIB_LOCATION=/usr/lib/one +else + LIB_LOCATION=$ONE_LOCATION/lib +fi + +. 
$LIB_LOCATION/sh/scripts_common.sh + +DRIVER_PATH=$(dirname $0) +source ${DRIVER_PATH}/../libfs.sh + +# -------- Get cp and datastore arguments from OpenNebula core ------------ + +DRV_ACTION=$1 +ID=$2 + +XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION" + +unset i XPATH_ELEMENTS + +while IFS= read -r -d '' element; do + XPATH_ELEMENTS[i++]="$element" +done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \ + /DS_DRIVER_ACTION_DATA/IMAGE/PATH) + +BASE_PATH="${XPATH_ELEMENTS[0]}" +RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}" +SAFE_DIRS="${XPATH_ELEMENTS[2]}" +UMASK="${XPATH_ELEMENTS[3]}" +SRC="${XPATH_ELEMENTS[4]}" + +mkdir -p "$BASE_PATH" +set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK" + +DST=`generate_image_path` + +# ------------ Copy the image to the repository ------------- + +case $SRC in +http://*) + log "Downloading $SRC to the image repository" + + exec_and_log "$WGET -O $DST $SRC" "Error downloading $SRC" + ;; + +*) + if [ `check_restricted $SRC` -eq 1 ]; then + log_error "Not allowed to copy images from $RESTRICTED_DIRS" + error_message "Not allowed to copy image file $SRC" + exit -1 + fi + + log "Copying local image $SRC to the image repository" + + exec_and_log "cp -f $SRC $DST" "Error copying $SRC to $DST" + ;; +esac + +# ---------------- Get the size of the image ------------ + +SIZE=`fs_du $DST` + +echo "$DST $SIZE" diff --git a/src/datastore_mad/remotes/fs/mkfs b/src/datastore_mad/remotes/fs/mkfs new file mode 100755 index 0000000000..d6a3d8d484 --- /dev/null +++ b/src/datastore_mad/remotes/fs/mkfs @@ -0,0 +1,84 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# 
not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +############################################################################### +# This script is used to create a VM image (SRC) of size (SIZE) and formatted +# as (FS) +############################################################################### + +# -------- Set up the environment to source common tools & conf ------------ + +if [ -z "${ONE_LOCATION}" ]; then + LIB_LOCATION=/usr/lib/one +else + LIB_LOCATION=$ONE_LOCATION/lib +fi + +. 
$LIB_LOCATION/sh/scripts_common.sh + +DRIVER_PATH=$(dirname $0) +source ${DRIVER_PATH}/../libfs.sh + +# -------- Get mkfs and datastore arguments from OpenNebula core ------------ + +DRV_ACTION=$1 +ID=$2 + +XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION" + +unset i XPATH_ELEMENTS + +while IFS= read -r -d '' element; do + XPATH_ELEMENTS[i++]="$element" +done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \ + /DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE \ + /DS_DRIVER_ACTION_DATA/IMAGE/SIZE) + + +BASE_PATH="${XPATH_ELEMENTS[0]}" +RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}" +SAFE_DIRS="${XPATH_ELEMENTS[2]}" +UMASK="${XPATH_ELEMENTS[3]}" +FSTYPE="${XPATH_ELEMENTS[4]}" +SIZE="${XPATH_ELEMENTS[5]}" + +mkdir -p "$BASE_PATH" +set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK" + +DST=`generate_image_path` + +# ------------ Image to save_as disk, no need to create a FS ------------ + +if [ "$FSTYPE" = "save_as" ]; then + echo "$DST $SIZE" + exit 0 +fi + +# ------------ Create the image to the repository ------------ + +MKFS_CMD=`mkfs_command $DST $FSTYPE` + +exec_and_log "$DD if=/dev/zero of=$DST bs=1 count=1 seek=${SIZE}M" \ + "Could not create image $DST" +exec_and_log "$MKFS_CMD" \ + "Unable to create filesystem $FSTYPE in $DST" + +echo "$DST $SIZE" diff --git a/src/image_mad/remotes/fs/rm b/src/datastore_mad/remotes/fs/rm similarity index 83% rename from src/image_mad/remotes/fs/rm rename to src/datastore_mad/remotes/fs/rm index 4517caeecf..2284c21289 100755 --- a/src/image_mad/remotes/fs/rm +++ b/src/datastore_mad/remotes/fs/rm @@ -17,10 +17,10 @@ #--------------------------------------------------------------------------- # ############################################################################### -# This script is used to remove a VM image (SRC) from the image repository +# This 
script is used to remove a VM image (SRC) from the image repository ############################################################################### -# ------------ Set up the environment to source common tools ------------ +# ------------ Set up the environment to source common tools ------------ if [ -z "${ONE_LOCATION}" ]; then LIB_LOCATION=/usr/lib/one @@ -29,11 +29,19 @@ else fi . $LIB_LOCATION/sh/scripts_common.sh -source $(dirname $0)/fsrc -# ------------ Remove the image to the repository ------------ +DRIVER_PATH=$(dirname $0) +source ${DRIVER_PATH}/../libfs.sh -SRC=$1 +# -------- Get rm and datastore arguments from OpenNebula core ------------ + +DRV_ACTION=$1 +ID=$2 + +XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION" +SRC=`$XPATH /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE` + +# ------------ Remove the image from the repository ------------ if [ -e $SRC ] ; then log "Removing $SRC from the image repository" diff --git a/src/datastore_mad/remotes/iscsi/cp b/src/datastore_mad/remotes/iscsi/cp new file mode 100755 index 0000000000..e7de096d11 --- /dev/null +++ b/src/datastore_mad/remotes/iscsi/cp @@ -0,0 +1,113 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +############################################################################### +# This script is used to copy a VM image (SRC) to the image repository as DST +# Several SRC types are supported +############################################################################### + +# -------- Set up the environment to source common tools & conf ------------ + +if [ -z "${ONE_LOCATION}" ]; then + LIB_LOCATION=/usr/lib/one +else + LIB_LOCATION=$ONE_LOCATION/lib +fi + +. $LIB_LOCATION/sh/scripts_common.sh + +DRIVER_PATH=$(dirname $0) +source ${DRIVER_PATH}/../libfs.sh +source ${DRIVER_PATH}/iscsi.conf + +# -------- Get cp and datastore arguments from OpenNebula core ------------ + +DRV_ACTION=$1 +ID=$2 + +XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION" + +unset i XPATH_ELEMENTS + +while IFS= read -r -d '' element; do + XPATH_ELEMENTS[i++]="$element" +done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/HOST \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VG_NAME \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BASE_IQN \ + /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BASE_TID \ + /DS_DRIVER_ACTION_DATA/IMAGE/PATH) + + +BASE_PATH="${XPATH_ELEMENTS[0]}" +RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}" +SAFE_DIRS="${XPATH_ELEMENTS[2]}" +UMASK="${XPATH_ELEMENTS[3]}" +DST_HOST="${XPATH_ELEMENTS[4]}" +VG_NAME="${XPATH_ELEMENTS[5]:-$VG_NAME}" +BASE_IQN="${XPATH_ELEMENTS[6]:-$BASE_IQN}" +BASE_TID="${XPATH_ELEMENTS[7]:-$BASE_TID}" +SRC="${XPATH_ELEMENTS[8]}" + +set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK" + +SIZE=`fs_du $SRC` +LV_NAME="lv-one-${ID}" +IQN="$BASE_IQN:$DST_HOST.$VG_NAME.$LV_NAME" +DEV="/dev/$VG_NAME/$LV_NAME" + +let TID=ID+BASE_TID + +REGISTER_CMD=$(cat < 
e + exit(-1) +end + +values = "" + +tmp = Base64::decode64(tmp64) +xml = REXML::Document.new(tmp).root + +ARGV.each do |xpath| + element = xml.elements[xpath] + values << element.text.to_s if !element.nil? + values << "\0" +end + +puts values + +exit 0 diff --git a/src/dm/DispatchManagerActions.cc b/src/dm/DispatchManagerActions.cc index 7294dad902..4907ed7194 100644 --- a/src/dm/DispatchManagerActions.cc +++ b/src/dm/DispatchManagerActions.cc @@ -616,16 +616,15 @@ int DispatchManager::finalize( case VirtualMachine::PENDING: case VirtualMachine::HOLD: case VirtualMachine::STOPPED: + vm->release_network_leases(); + vm->release_disk_images(); + vm->set_exit_time(time(0)); vm->set_state(VirtualMachine::LCM_INIT); vm->set_state(VirtualMachine::DONE); vmpool->update(vm); - vm->release_network_leases(); - - vm->release_disk_images(); - vm->log("DiM", Log::INFO, "New VM state is DONE."); break; @@ -665,7 +664,7 @@ int DispatchManager::resubmit(int vid) { case VirtualMachine::SUSPENDED: NebulaLog::log("DiM",Log::ERROR, - "Can not resubmit a suspended VM. Resume it first"); + "Cannot resubmit a suspended VM. Resume it first"); rc = -2; break; @@ -689,7 +688,7 @@ int DispatchManager::resubmit(int vid) break; case VirtualMachine::DONE: NebulaLog::log("DiM",Log::ERROR, - "Can not resubmit a VM already in DONE state"); + "Cannot resubmit a VM already in DONE state"); rc = -2; break; } diff --git a/src/group/GroupPool.cc b/src/group/GroupPool.cc index a476669b06..b12cda200f 100644 --- a/src/group/GroupPool.cc +++ b/src/group/GroupPool.cc @@ -22,7 +22,7 @@ /* -------------------------------------------------------------------------- */ /* There are two default groups boostrapped by the core: */ -/* - oneadmin can not be removed */ +/* - oneadmin cannot be removed */ /* - users to place regular users by default */ /* The first 100 group IDs are reserved for system groups. 
Regular ones start */ /* from ID 100 */ diff --git a/src/group/test/GroupPoolTest.cc b/src/group/test/GroupPoolTest.cc index a96a1c7d56..68bf2da767 100644 --- a/src/group/test/GroupPoolTest.cc +++ b/src/group/test/GroupPoolTest.cc @@ -231,7 +231,7 @@ public: { Group *group_oid, *group_name; int oid_0; - int uid_0; + //int uid_0; string name_0; oid_0 = allocate(0); @@ -244,7 +244,7 @@ public: CPPUNIT_ASSERT(group_oid != 0); name_0 = group_oid->get_name(); - uid_0 = group_oid->get_uid(); + //uid_0 = group_oid->get_uid(); group_oid->unlock(); diff --git a/src/group/test/SConstruct b/src/group/test/SConstruct index a953bac660..2439c9babc 100644 --- a/src/group/test/SConstruct +++ b/src/group/test/SConstruct @@ -23,6 +23,7 @@ env.Prepend(LIBS=[ 'nebula_im', 'nebula_hm', 'nebula_rm', + 'nebula_datastore', 'nebula_dm', 'nebula_tm', 'nebula_um', @@ -33,6 +34,7 @@ env.Prepend(LIBS=[ 'nebula_template', 'nebula_image', 'nebula_pool', + 'nebula_cluster', 'nebula_host', 'nebula_vnm', 'nebula_vm', diff --git a/src/hm/HookManager.cc b/src/hm/HookManager.cc index 8555d1c4ad..f49a21ea92 100644 --- a/src/hm/HookManager.cc +++ b/src/hm/HookManager.cc @@ -49,12 +49,15 @@ void HookManager::load_mads(int uid) { HookManagerDriver * hm_mad; ostringstream oss; - const VectorAttribute * vattr; + const VectorAttribute * vattr = 0; int rc; NebulaLog::log("HKM",Log::INFO,"Loading Hook Manager driver."); - vattr = static_cast(mad_conf[0]); + if ( mad_conf.size() > 0 ) + { + vattr = static_cast(mad_conf[0]); + } if ( vattr == 0 ) { diff --git a/src/host/Host.cc b/src/host/Host.cc index 0fdf0530f0..14afbb4ca1 100644 --- a/src/host/Host.cc +++ b/src/host/Host.cc @@ -34,13 +34,14 @@ Host::Host( const string& _im_mad_name, const string& _vmm_mad_name, const string& _vnm_mad_name, - const string& _tm_mad_name): + int _cluster_id, + const string& _cluster_name): PoolObjectSQL(id,HOST,_hostname,-1,-1,"","",table), + Clusterable(_cluster_id, _cluster_name), state(INIT), im_mad_name(_im_mad_name), 
vmm_mad_name(_vmm_mad_name), vnm_mad_name(_vnm_mad_name), - tm_mad_name(_tm_mad_name), last_monitored(0) { obj_template = new HostTemplate; @@ -205,8 +206,9 @@ string& Host::to_xml(string& xml) const "" << im_mad_name << "" << "" << vmm_mad_name << "" << "" << vnm_mad_name << "" << - "" << tm_mad_name << "" << "" << last_monitored << "" << + "" << cluster_id << "" << + "" << cluster << "" << host_share.to_xml(share_xml) << obj_template->to_xml(template_xml) << ""; @@ -237,10 +239,12 @@ int Host::from_xml(const string& xml) rc += xpath(im_mad_name, "/HOST/IM_MAD", "not_found"); rc += xpath(vmm_mad_name, "/HOST/VM_MAD", "not_found"); rc += xpath(vnm_mad_name, "/HOST/VN_MAD", "not_found"); - rc += xpath(tm_mad_name, "/HOST/TM_MAD", "not_found"); rc += xpath(last_monitored, "/HOST/LAST_MON_TIME", 0); + rc += xpath(cluster_id, "/HOST/CLUSTER_ID", -1); + rc += xpath(cluster, "/HOST/CLUSTER", "not_found"); + state = static_cast( int_state ); // Set the owner and group to oneadmin diff --git a/src/host/HostPool.cc b/src/host/HostPool.cc index 75ba5c5109..3ead792f4c 100644 --- a/src/host/HostPool.cc +++ b/src/host/HostPool.cc @@ -35,7 +35,7 @@ HostPool::HostPool(SqlDB* db, const string& remotes_location) : PoolSQL(db,Host::table) { - // ------------------ Initialize Hooks fot the pool ---------------------- + // ------------------ Initialize Hooks for the pool ---------------------- const VectorAttribute * vattr; @@ -153,7 +153,8 @@ int HostPool::allocate ( const string& im_mad_name, const string& vmm_mad_name, const string& vnm_mad_name, - const string& tm_mad_name, + int cluster_id, + const string& cluster_name, string& error_str) { Host * host; @@ -184,11 +185,6 @@ int HostPool::allocate ( goto error_vnm; } - if ( tm_mad_name.empty() ) - { - goto error_tm; - } - host = get(hostname,false); if ( host !=0) @@ -198,8 +194,14 @@ int HostPool::allocate ( // Build a new Host object - host = new Host(-1, hostname, im_mad_name, vmm_mad_name, vnm_mad_name, - tm_mad_name); + host 
= new Host( + -1, + hostname, + im_mad_name, + vmm_mad_name, + vnm_mad_name, + cluster_id, + cluster_name); // Insert the Object in the pool @@ -207,7 +209,6 @@ int HostPool::allocate ( return *oid; - error_name: oss << "NAME cannot be empty."; goto error_common; @@ -228,10 +229,6 @@ error_vnm: oss << "VNM_MAD_NAME cannot be empty."; goto error_common; -error_tm: - oss << "TM_MAD_NAME cannot be empty."; - goto error_common; - error_duplicated: oss << "NAME is already taken by HOST " << host->get_oid() << "."; diff --git a/src/host/test/HostHookTest.cc b/src/host/test/HostHookTest.cc index e65877c27c..f3d8932073 100644 --- a/src/host/test/HostHookTest.cc +++ b/src/host/test/HostHookTest.cc @@ -96,7 +96,15 @@ public: { string err; - hpool->allocate(&oid, "host_test", "im_mad", "vmm_mad", "vnm_mad", "tm_mad", err); + hpool->allocate(&oid, + "host_test", + "im_mad", + "vmm_mad", + "vnm_mad", + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); + CPPUNIT_ASSERT( oid >= 0 ); sleep(1); @@ -114,7 +122,15 @@ public: { string err; - hpool->allocate(&oid, "host_test", "im_mad", "vmm_mad", "vnm_mad", "tm_mad", err); + + hpool->allocate(&oid, + "host_test", + "im_mad", + "vmm_mad", + "vnm_mad", + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); CPPUNIT_ASSERT( oid >= 0 ); host = hpool->get(oid, true); @@ -140,7 +156,14 @@ public: { string err; - hpool->allocate(&oid, "host_test", "im_mad", "vmm_mad", "vnm_mad", "tm_mad", err); + hpool->allocate(&oid, + "host_test", + "im_mad", + "vmm_mad", + "vnm_mad", + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); CPPUNIT_ASSERT( oid >= 0 ); host = hpool->get(oid, true); @@ -166,7 +189,15 @@ public: { string err; - hpool->allocate(&oid, "host_test", "im_mad", "vmm_mad", "vnm_mad", "tm_mad", err); + hpool->allocate(&oid, + "host_test", + "im_mad", + "vmm_mad", + "vnm_mad", + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); + CPPUNIT_ASSERT( oid >= 0 ); 
host = hpool->get(oid, true); diff --git a/src/host/test/HostPoolTest.cc b/src/host/test/HostPoolTest.cc index dbc4a0b419..9f02041dfc 100644 --- a/src/host/test/HostPoolTest.cc +++ b/src/host/test/HostPoolTest.cc @@ -26,14 +26,13 @@ using namespace std; const string im_mad = "im_mad"; const string vmm_mad = "vmm_mad"; const string vnm_mad = "vnm_mad"; -const string tm_mad = "tm_mad"; const string names[] = {"Host one", "Second host"}; const string xmls[] = { "0Host one0" - "im_madvmm_madvnm_madtm_mad" + "im_madvmm_madvnm_mad" "0" "000" "000" @@ -42,7 +41,7 @@ const string xmls[] = "0", "1Second host0" - "im_madvmm_madvnm_madtm_mad" + "im_madvmm_madvnm_mad" "0" "000" "000" @@ -54,35 +53,35 @@ const string xmls[] = // This xml dump result has the LAST_MON_TIMEs modified to 0000000000 const string xml_dump = "0a0im_madvmm_madvnm_madtm_mad0" + "M_MAD>vmm_madvnm_mad0" "0000000000000" "1a name0im_madvmm_madvnm_madtm_mad0vmm_madvnm_mad000000" "000000002a_name0im_madvmm_madvnm_madtm_mad0vnm_mad000000000000003another " "name0im_madvmm_mad" - "vnm_madtm_mad0vnm_mad000000000000004host0im_madvmm_madvnm_madtm_mad" - "0" + "ATE>0im_madvmm_madvnm_mad" + "0" "000" "0000" "000vmm_madvnm_madtm_mad0" + "M_MAD>vmm_madvnm_mad0" "0000000000000" "1a name0im_madvmm_madvnm_madtm_mad0vmm_madvnm_mad000000" "000000002a_name0im_madvmm_madvnm_madtm_mad0vnm_mad000000000000003another " "name0im_madvmm_mad" - "vnm_madtm_mad0vnm_mad00000000000Host one0im_madvmm_madvnm_madtm_mad00000000000000"; + "0Host one0im_madvmm_madvnm_mad00000000000000"; const string host_0_cluster = - "0Host one0im_madvmm_madvnm_madtm_mad0cluster_a0000000000000"; + "0Host one0im_madvmm_madvnm_mad0cluster_a0000000000000"; /* ************************************************************************* */ /* ************************************************************************* */ @@ -164,7 +163,9 @@ protected: int oid; string err; return ((HostPool*)pool)->allocate(&oid, names[index], im_mad, - vmm_mad, vnm_mad, tm_mad, err); + 
vmm_mad, vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME,err); }; void check(int index, PoolObjectSQL* obj) @@ -239,28 +240,49 @@ public: Host * host; string err; - string tm_mad_2 = "another_tm_mad"; + string im_mad_2 = "another_im_mad"; // If we try to allocate two hosts with the same name and drivers, // should fail - rc = hp->allocate(&oid_0, names[0], im_mad, vmm_mad, vnm_mad, tm_mad, err); + rc = hp->allocate(&oid_0, + names[0], + im_mad, + vmm_mad, + vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); CPPUNIT_ASSERT( oid_0 == 0 ); CPPUNIT_ASSERT( rc == oid_0 ); - rc = hp->allocate(&oid_1, names[0], im_mad, vmm_mad, vnm_mad, tm_mad, err); + rc = hp->allocate(&oid_1, + names[0], + im_mad, + vmm_mad, + vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); CPPUNIT_ASSERT( oid_1 == -1 ); CPPUNIT_ASSERT( rc == oid_1 ); - // the hostname can not be repeated if the drivers change - rc = hp->allocate(&oid_1, names[0], im_mad, vmm_mad, vnm_mad, tm_mad_2, err); + // the hostname cannot be repeated if the drivers change + rc = hp->allocate(&oid_1, + names[0], + im_mad_2, + vmm_mad, + vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); CPPUNIT_ASSERT( oid_1 == -1 ); CPPUNIT_ASSERT( rc == oid_1 ); // Get the hosts and check them host = hp->get(oid_0, false); CPPUNIT_ASSERT( host != 0 ); - CPPUNIT_ASSERT( host->get_tm_mad() == tm_mad ); + CPPUNIT_ASSERT( host->get_im_mad() == im_mad ); } /* ********************************************************************* */ @@ -273,8 +295,14 @@ public: for(int i=0; i<5; i++) { - ((HostPool*)pool)->allocate(&oid, names[i], - im_mad, vmm_mad, vnm_mad, tm_mad, err); + ((HostPool*)pool)->allocate(&oid, + names[i], + im_mad, + vmm_mad, + vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); } ostringstream oss; @@ -285,13 +313,13 @@ public: string result = oss.str(); // A little help for 
debugging -//* +/* if( result != xml_dump ) { cout << endl << result << endl << "========" << endl << xml_dump << endl; } -//*/ +*/ CPPUNIT_ASSERT( result == xml_dump ); } @@ -306,26 +334,30 @@ public: for(int i=0; i<5; i++) { - ((HostPool*)pool)->allocate(&oid, names[i], - im_mad, vmm_mad, vnm_mad, tm_mad, err); + ((HostPool*)pool)->allocate(&oid, + names[i], + im_mad, + vmm_mad, + vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); } - ostringstream oss; rc = ((HostPool*)pool)->dump(oss, "name LIKE 'a%'"); CPPUNIT_ASSERT(rc == 0); - string result = oss.str(); // A little help for debugging -//* +/* if( result != xml_dump_like_a ) { cout << endl << result << endl << "========" << endl << xml_dump_like_a << endl; } -//*/ +*/ CPPUNIT_ASSERT( result == xml_dump_like_a ); } @@ -348,7 +380,14 @@ public: { oss << "host" << i; - hp->allocate(&oid, oss.str().c_str(), im_mad, vmm_mad, vnm_mad, tm_mad, err); + hp->allocate(&oid, + oss.str(), + im_mad, + vmm_mad, + vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); CPPUNIT_ASSERT(oid == i); if (i >=8 ) @@ -406,7 +445,14 @@ public: { oss << "host" << j; - hp->allocate(&oid, oss.str().c_str(),im_mad,vmm_mad,vnm_mad,tm_mad,err); + hp->allocate(&oid, + oss.str(), + im_mad, + vmm_mad, + vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); } the_time2 = time(0) - the_time; @@ -435,7 +481,14 @@ public: for (i=10000,oss.str(""); i<30000 ; i++,oss.str("")) { oss << "host" << i; - hp->allocate(&oid,oss.str().c_str(),im_mad,vmm_mad,vnm_mad,tm_mad,err); + hp->allocate(&oid, + oss.str(), + im_mad, + vmm_mad, + vnm_mad, + ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, + err); host = hp->get(oid, false); @@ -501,7 +554,7 @@ public: HostPool * hp = static_cast(pool); Host *host_oid, *host_name; int oid_0; - int uid_0; + //int uid_0; string name_0; oid_0 = allocate(0); @@ -514,7 +567,7 @@ public: CPPUNIT_ASSERT(host_oid != 0); name_0 
= host_oid->get_name(); - uid_0 = host_oid->get_uid(); + //uid_0 = host_oid->get_uid(); host_oid->unlock(); diff --git a/src/host/test/SConstruct b/src/host/test/SConstruct index 86fda21c95..c1359e3518 100644 --- a/src/host/test/SConstruct +++ b/src/host/test/SConstruct @@ -19,6 +19,7 @@ Import('env') env.Prepend(LIBS=[ + 'nebula_cluster', 'nebula_host', 'nebula_core_test', 'nebula_vmm', @@ -26,6 +27,7 @@ env.Prepend(LIBS=[ 'nebula_im', 'nebula_hm', 'nebula_rm', + 'nebula_datastore', 'nebula_dm', 'nebula_tm', 'nebula_um', diff --git a/src/image/Image.cc b/src/image/Image.cc index 181300ce56..e05c4b8ffd 100644 --- a/src/image/Image.cc +++ b/src/image/Image.cc @@ -47,7 +47,9 @@ Image::Image(int _uid, fs_type(""), size_mb(0), state(INIT), - running_vms(0) + running_vms(0), + ds_id(-1), + ds_name("") { if (_image_template != 0) { @@ -93,24 +95,10 @@ int Image::insert(SqlDB *db, string& error_str) string persistent_attr; string dev_prefix; string source_attr; - string aname; + string saved_id; ostringstream oss; - // ------------------------------------------------------------------------ - // Check template for restricted attributes - // ------------------------------------------------------------------------ - - if ( uid != 0 && gid != GroupPool::ONEADMIN_ID ) - { - ImageTemplate *img_template = static_cast(obj_template); - - if (img_template->check(aname)) - { - goto error_restricted; - } - } - // --------------------------------------------------------------------- // Check default image attributes // --------------------------------------------------------------------- @@ -149,7 +137,6 @@ int Image::insert(SqlDB *db, string& error_str) { SingleAttribute * dev_att = new SingleAttribute("DEV_PREFIX", ImagePool::default_dev_prefix()); - obj_template->set(dev_att); } @@ -158,20 +145,43 @@ int Image::insert(SqlDB *db, string& error_str) erase_template_attribute("PATH", path); erase_template_attribute("SOURCE", source); - // The template should contain PATH or SOURCE - 
if ( source.empty() && path.empty() ) + if (!isSaving()) //Not a saving image + { + if ( source.empty() && path.empty() ) + { + string size_attr; + istringstream iss; + + erase_template_attribute("SIZE", size_attr); + erase_template_attribute("FSTYPE", fs_type); + + // DATABLOCK image needs SIZE and FSTYPE + if (type != DATABLOCK || size_attr.empty() || fs_type.empty()) + { + goto error_no_path; + } + + iss.str(size_attr); + + iss >> size_mb; + + if (iss.fail() == true) + { + goto error_size_format; + } + } + else if ( !source.empty() && !path.empty() ) + { + goto error_path_and_source; + } + } + else { string size_attr; istringstream iss; + fs_type = "save_as"; erase_template_attribute("SIZE", size_attr); - erase_template_attribute("FSTYPE", fs_type); - - // DATABLOCK image needs SIZE and FSTYPE - if (type != DATABLOCK || size_attr.empty() || fs_type.empty()) - { - goto error_no_path; - } iss.str(size_attr); @@ -182,10 +192,6 @@ int Image::insert(SqlDB *db, string& error_str) goto error_size_format; } } - else if ( !source.empty() && !path.empty() ) - { - goto error_path_and_source; - } state = LOCKED; //LOCKED till the ImageManager copies it to the Repository @@ -221,11 +227,6 @@ error_path_and_source: error_str = "Template malformed, PATH and SOURCE are mutually exclusive."; goto error_common; -error_restricted: - oss << "Template includes a restricted attribute " << aname << "."; - error_str = oss.str(); - goto error_common; - error_common: NebulaLog::log("IMG", Log::ERROR, error_str); return -1; @@ -352,6 +353,8 @@ string& Image::to_xml(string& xml) const "" << size_mb << "" << "" << state << "" << "" << running_vms << "" << + "" << ds_id << ""<< + "" << ds_name << "" << obj_template->to_xml(template_xml) << ""; @@ -393,6 +396,9 @@ int Image::from_xml(const string& xml) rc += xpath(int_state, "/IMAGE/STATE", 0); rc += xpath(running_vms, "/IMAGE/RUNNING_VMS", -1); + rc += xpath(ds_id, "/IMAGE/DATASTORE_ID", -1); + rc += xpath(ds_name,"/IMAGE/DATASTORE", 
"not_found"); + // Permissions rc += perms_from_xml(); diff --git a/src/image/ImageManager.cc b/src/image/ImageManager.cc index e0fc6789ff..52c3ab9df8 100644 --- a/src/image/ImageManager.cc +++ b/src/image/ImageManager.cc @@ -50,12 +50,15 @@ void ImageManager::load_mads(int uid) { ImageManagerDriver * imagem_mad; ostringstream oss; - const VectorAttribute * vattr; + const VectorAttribute * vattr = 0; int rc; NebulaLog::log("ImM",Log::INFO,"Loading Image Manager driver."); - vattr = static_cast(mad_conf[0]); + if ( mad_conf.size() > 0 ) + { + vattr = static_cast(mad_conf[0]); + } if ( vattr == 0 ) { diff --git a/src/image/ImageManagerActions.cc b/src/image/ImageManagerActions.cc index 99e3c61dfb..ad1b8e2627 100644 --- a/src/image/ImageManagerActions.cc +++ b/src/image/ImageManagerActions.cc @@ -17,11 +17,12 @@ #include "ImageManager.h" #include "NebulaLog.h" #include "ImagePool.h" +#include "SSLTools.h" /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -Image * ImageManager::acquire_image(int image_id) +Image * ImageManager::acquire_image(int image_id, string& error) { Image * img; int rc; @@ -30,10 +31,14 @@ Image * ImageManager::acquire_image(int image_id) if ( img == 0 ) { + ostringstream oss; + oss << "Image with ID: " << image_id << " does not exists"; + + error = oss.str(); return 0; } - rc = acquire_image(img); + rc = acquire_image(img, error); if ( rc != 0 ) { @@ -46,7 +51,7 @@ Image * ImageManager::acquire_image(int image_id) /* -------------------------------------------------------------------------- */ -Image * ImageManager::acquire_image(const string& name, int uid) +Image * ImageManager::acquire_image(const string& name, int uid, string& error) { Image * img; int rc; @@ -55,10 +60,14 @@ Image * ImageManager::acquire_image(const string& name, int uid) if ( img == 0 ) { + ostringstream oss; + oss << "Image " << name << " does not exists for 
user " << uid; + + error = oss.str(); return 0; } - rc = acquire_image(img); + rc = acquire_image(img, error); if ( rc != 0 ) { @@ -71,7 +80,7 @@ Image * ImageManager::acquire_image(const string& name, int uid) /* -------------------------------------------------------------------------- */ -int ImageManager::acquire_image(Image *img) +int ImageManager::acquire_image(Image *img, string& error) { int rc = 0; @@ -86,7 +95,8 @@ int ImageManager::acquire_image(Image *img) case Image::USED: if (img->isPersistent()) { - rc = -1; + error = "Cannot acquire persistent image, it is already in use"; + rc = -1; } else { @@ -96,10 +106,19 @@ int ImageManager::acquire_image(Image *img) break; case Image::DISABLED: + error = "Cannot acquire image, it is disabled"; + rc = -1; + break; case Image::LOCKED: + error = "Cannot acquire image, it is locked"; + rc = -1; + break; case Image::ERROR: + error = "Cannot acquire image, it is in an error state"; + rc = -1; + break; default: - rc = -1; + rc = -1; break; } @@ -109,84 +128,14 @@ int ImageManager::acquire_image(Image *img) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -void ImageManager::move_image(Image *img, const string& source) -{ - const ImageManagerDriver* imd = get(); - ostringstream oss; - - if ( imd == 0 ) - { - NebulaLog::log("ImM",Log::ERROR, - "Could not get driver to update repository"); - return; - } - - oss << "Moving disk " << source << " to repository image " - << img->get_oid(); - - imd->mv(img->get_oid(),source,img->get_source()); - - NebulaLog::log("ImM",Log::INFO,oss); -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - -void ImageManager::disk_to_image(const string& disk_path, - int disk_num, - const string& save_id) -{ - int sid; - - istringstream iss; - Image * img; - - 
ostringstream disk_file; - - iss.str(save_id); - - iss >> sid; - - img = ipool->get(sid,true); - - if ( img == 0 ) - { - NebulaLog::log("ImM",Log::ERROR,"Could not get image to saveas disk."); - } - else - { - disk_file << disk_path << "/disk." << disk_num; - - move_image(img,disk_file.str()); - } - - img->unlock(); -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - -void ImageManager::release_image(int iid, - const string& disk_path, - int disk_num, - const string& save_id) +void ImageManager::release_image(int iid, bool failed) { int rvms; - int sid = -1; - - istringstream iss; - Image * img; + Image * img; ostringstream disk_file; - if ( save_id.empty() == false ) - { - iss.str(save_id); - - iss >> sid; - } - img = ipool->get(iid,true); if ( img == 0 ) @@ -199,54 +148,52 @@ void ImageManager::release_image(int iid, case Image::USED: rvms = img->dec_running(); - if ( img->isPersistent() && !disk_path.empty() ) + if (img->isPersistent()) { - disk_file << disk_path << "/disk." << disk_num; - - img->set_state(Image::LOCKED); - - move_image(img,disk_file.str()); - - ipool->update(img); - - img->unlock(); + if (failed == true) + { + img->set_state(Image::ERROR); + } + else + { + img->set_state(Image::READY); + } } - else + else if ( rvms == 0 ) { - if ( rvms == 0) + img->set_state(Image::READY); + } + + ipool->update(img); + + img->unlock(); + break; + + case Image::LOCKED: //SAVE_AS images are LOCKED till released + if ( img->isSaving() ) + { + if (failed == true) + { + img->set_state(Image::ERROR); + } + else { img->set_state(Image::READY); } ipool->update(img); - - img->unlock(); - - if ( sid != -1 ) - { - img = ipool->get(sid,true); - - if ( img == 0 ) - { - NebulaLog::log("ImM",Log::ERROR, - "Could not get image to saveas disk."); - } - else - { - disk_file << disk_path << "/disk." 
<< disk_num; - - move_image(img,disk_file.str()); - } - - img->unlock(); - } } - break; + else + { + NebulaLog::log("ImM",Log::ERROR, + "Trying to release image in wrong state."); + } + img->unlock(); + break; case Image::DISABLED: case Image::READY: case Image::ERROR: - case Image::LOCKED: NebulaLog::log("ImM",Log::ERROR, "Trying to release image in wrong state."); default: @@ -309,10 +256,12 @@ int ImageManager::enable_image(int iid, bool to_enable) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int ImageManager::delete_image(int iid) +int ImageManager::delete_image(int iid, const string& ds_data) { - Image * img; - string source; + Image * img; + string source; + string img_tmpl; + string * drv_msg; img = ipool->get(iid,true); @@ -327,13 +276,13 @@ int ImageManager::delete_image(int iid) if ( img->get_running() != 0 ) { img->unlock(); - return -1; //Can not remove images in use + return -1; //Cannot remove images in use } break; case Image::USED: img->unlock(); - return -1; //Can not remove images in use + return -1; //Cannot remove images in use break; case Image::INIT: @@ -350,7 +299,8 @@ int ImageManager::delete_image(int iid) return -1; } - source = img->get_source(); + drv_msg = format_message(img->to_xml(img_tmpl), ds_data); + source = img->get_source(); if (source.empty()) { @@ -367,29 +317,34 @@ int ImageManager::delete_image(int iid) } else { - imd->rm(img->get_oid(),img->get_source()); + imd->rm(img->get_oid(), *drv_msg); } img->unlock(); + delete drv_msg; + return 0; } /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int ImageManager::register_image(int iid) +int ImageManager::register_image(int iid, const string& ds_data) { const ImageManagerDriver* imd = get(); - ostringstream oss; - string path; - Image* img; + 
ostringstream oss; + Image * img; + + string path; + string img_tmpl; + string * drv_msg; + if ( imd == 0 ) { - NebulaLog::log("ImM",Log::ERROR, - "Could not get driver to update repository"); + NebulaLog::log("ImM",Log::ERROR, "Could not get datastore driver"); return -1; } @@ -400,37 +355,33 @@ int ImageManager::register_image(int iid) return -1; } - path = img->get_path(); + drv_msg = format_message(img->to_xml(img_tmpl), ds_data); + path = img->get_path(); - if ( path.empty() == true ) //NO PATH -> USE SOURCE OR MKFS FOR DATABLOCK + if ( path.empty() == true ) //NO PATH { - if ( img->get_type() == Image::DATABLOCK) - { - string fs = img->get_fstype(); - int size = img->get_size(); + string source = img->get_source(); - imd->mkfs(img->get_oid(), fs, size); + if ( img->isSaving() || img->get_type() == Image::DATABLOCK ) + { + imd->mkfs(img->get_oid(), *drv_msg); - oss << "Creating disk at " << img->get_source() << " of " - << size << "Mb with format " << fs; + oss << "Creating disk at " << source + << " of "<< img->get_size() + << "Mb (type: " << img->get_fstype() << ")"; } - else + else if ( !source.empty() ) //Source in Template { - string source = img->get_source(); + img->set_state(Image::READY); + ipool->update(img); - if (source != "-") //SAVE_AS IMAGE DO NOT ENABLE THE IMAGE - { - img->set_state(Image::READY); - ipool->update(img); - - oss << "Using source " << img->get_source() - << " from template for image " << img->get_name(); - } + oss << "Using source " << source + << " from template for image " << img->get_name(); } } else //PATH -> COPY TO REPOSITORY AS SOURCE { - imd->cp(img->get_oid(), path); + imd->cp(img->get_oid(), *drv_msg); oss << "Copying " << path <<" to repository for image "<get_oid(); } @@ -438,9 +389,28 @@ int ImageManager::register_image(int iid) img->unlock(); + delete drv_msg; + return 0; } /* -------------------------------------------------------------------------- */ /* 
-------------------------------------------------------------------------- */ +string * ImageManager::format_message( + const string& img_data, + const string& ds_data) +{ + ostringstream oss; + + oss << "" + << img_data + << ds_data + << ""; + + return SSLTools::base64_encode(oss.str()); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + diff --git a/src/image/ImageManagerDriver.cc b/src/image/ImageManagerDriver.cc index 7c1862fb65..43eae93c5a 100644 --- a/src/image/ImageManagerDriver.cc +++ b/src/image/ImageManagerDriver.cc @@ -27,11 +27,11 @@ /* ************************************************************************** */ void ImageManagerDriver::cp(int oid, - const string& source) const + const string& drv_msg) const { ostringstream os; - os << "CP " << oid << " " << source << endl; + os << "CP " << oid << " " << drv_msg << endl; write(os); } @@ -52,22 +52,22 @@ void ImageManagerDriver::mv(int oid, /* -------------------------------------------------------------------------- */ void ImageManagerDriver::mkfs(int oid, - const string& fs, - int size_mb) const + const string& drv_msg) const { ostringstream os; - os << "MKFS " << oid << " " << fs << " " << size_mb << endl; + os << "MKFS " << oid << " " << drv_msg << endl; + write(os); } /* -------------------------------------------------------------------------- */ -void ImageManagerDriver::rm(int oid, const string& destination) const +void ImageManagerDriver::rm(int oid, const string& drv_msg) const { ostringstream os; - os << "RM " << oid << " " << destination << endl; + os << "RM " << oid << " " << drv_msg << endl; write(os); } @@ -179,40 +179,71 @@ void ImageManagerDriver::protocol( goto error_cp; } } - else if ( action == "MV" ) - { - if ( result == "SUCCESS" ) - { - if (image->get_source() == "-") - { - image->set_source(source); - } - - image->set_size(size_mb); - - 
image->set_state(Image::READY); - - ipool->update(image); - - NebulaLog::log("ImM", Log::INFO, "Image saved and ready to use."); - } - else - { - goto error_mv; - } - } else if ( action == "MKFS" ) { if ( result == "SUCCESS" ) { - image->set_source(source); - image->set_size(size_mb); + bool is_saving = image->isSaving(); - image->set_state(Image::READY); + string disk_id; + string vm_id; + int rc; + + image->set_source(source); + + if (is_saving) + { + image->get_template_attribute("SAVED_DISK_ID",disk_id); + image->get_template_attribute("SAVED_VM_ID", vm_id); + } + else + { + image->set_size(size_mb); + + image->set_state(Image::READY); + + NebulaLog::log("ImM", Log::INFO, + "Image created and ready to use"); + } ipool->update(image); - NebulaLog::log("ImM", Log::INFO, "Image created and ready to use"); + image->unlock(); + + if (is_saving) + { + Nebula& nd = Nebula::instance(); + + VirtualMachinePool * vmpool = nd.get_vmpool(); + + VirtualMachine * vm; + istringstream iss(vm_id); + + int vm_id_i; + + iss >> vm_id_i; + + vm = vmpool->get(vm_id_i, true); + + if ( vm == 0 ) + { + goto error_save_no_vm; + } + + rc = vm->save_disk(disk_id, source, id); + + if ( rc == -1 ) + { + vm->unlock(); + goto error_save_state_vm; + } + + vmpool->update(vm); + + vm->unlock(); + } + + return; } else { @@ -255,22 +286,17 @@ error_cp: os << "Error copying image in the repository"; goto error_common; -error_mv: - os.str(""); - os << "Error saving image to the repository"; - goto error_common; - error_mkfs: os.str(""); os << "Error creating datablock"; goto error_common; error_rm: - image->unlock(); - os.str(""); - os << "Error removing image from repository. Remove file " << source - << " to completely delete image."; + os << "Error removing image from repository. 
Remove file " << image->get_source() + << " to completely delete image."; + + image->unlock(); getline(is,info); @@ -280,9 +306,28 @@ error_rm: } NebulaLog::log("ImM", Log::ERROR, os); - return; +error_save_no_vm: + os.str(""); + os << "Image created for SAVE_AS, but the associated VM does not exist."; + + goto error_save_common; + +error_save_state_vm: + os.str(""); + os << "Image created for SAVE_AS, but VM is no longer running"; + + goto error_save_common; + +error_save_common: + image = ipool->get(id, true); + + if (image == 0 ) + { + return; + } + error_common: getline(is,info); diff --git a/src/image/ImagePool.cc b/src/image/ImagePool.cc index 7e023b3868..61c42671e3 100644 --- a/src/image/ImagePool.cc +++ b/src/image/ImagePool.cc @@ -65,6 +65,9 @@ int ImagePool::allocate ( const string& uname, const string& gname, ImageTemplate* img_template, + int ds_id, + const string& ds_name, + const string& ds_data, int * oid, string& error_str) { @@ -75,7 +78,9 @@ int ImagePool::allocate ( img = new Image(uid, gid, uname, gname, img_template); - // Check name + // ------------------------------------------------------------------------- + // Check name & duplicates + // ------------------------------------------------------------------------- img->get_template_attribute("NAME", name); if ( name.empty() ) @@ -88,7 +93,6 @@ int ImagePool::allocate ( goto error_name_length; } - // Check for duplicates img_aux = get(name,uid,false); if( img_aux != 0 ) @@ -96,6 +100,9 @@ int ImagePool::allocate ( goto error_duplicated; } + img->ds_name = ds_name; + img->ds_id = ds_id; + // --------------------------------------------------------------------- // Insert the Object in the pool & Register the image in the repository // --------------------------------------------------------------------- @@ -106,7 +113,7 @@ int ImagePool::allocate ( Nebula& nd = Nebula::instance(); ImageManager * imagem = nd.get_imagem(); - if ( imagem->register_image(*oid) == -1 ) + if ( 
imagem->register_image(*oid, ds_data) == -1 ) { error_str = "Failed to copy image to repository. " "Image left in ERROR state."; @@ -118,7 +125,6 @@ int ImagePool::allocate ( error_name: oss << "NAME cannot be empty."; - goto error_common; error_name_length: @@ -128,6 +134,7 @@ error_name_length: error_duplicated: oss << "NAME is already taken by IMAGE " << img_aux->get_oid() << "."; + goto error_common; error_common: delete img; @@ -209,16 +216,18 @@ int ImagePool::disk_attribute(VectorAttribute * disk, int * index, Image::ImageType * img_type, int uid, - int& image_id) + int& image_id, + string& error_str) { string source; Image * img = 0; int rc = 0; + int datastore_id; ostringstream oss; - Nebula& nd = Nebula::instance(); - ImageManager * imagem = nd.get_imagem(); + Nebula& nd = Nebula::instance(); + ImageManager * imagem = nd.get_imagem(); if (!(source = disk->vector_value("IMAGE")).empty()) { @@ -226,10 +235,11 @@ int ImagePool::disk_attribute(VectorAttribute * disk, if ( uiid == -1) { + error_str = "Cannot get user set in IMAGE_UID or IMAGE_UNAME."; return -1; } - img = imagem->acquire_image(source, uiid); + img = imagem->acquire_image(source, uiid, error_str); if ( img == 0 ) { @@ -242,10 +252,11 @@ int ImagePool::disk_attribute(VectorAttribute * disk, if ( iid == -1) { + error_str = "Wrong ID set in IMAGE_ID"; return -1; } - img = imagem->acquire_image(iid); + img = imagem->acquire_image(iid, error_str); if ( img == 0 ) { @@ -278,13 +289,29 @@ int ImagePool::disk_attribute(VectorAttribute * disk, if ( img != 0 ) { + DatastorePool * ds_pool = nd.get_dspool(); + Datastore * ds; + img->disk_attribute(disk, index, img_type); - image_id = img->get_oid(); - + image_id = img->get_oid(); + datastore_id = img->get_ds_id(); + update(img); img->unlock(); + + ds = ds_pool->get(datastore_id, true); + + if ( ds == 0 ) + { + error_str = "Associated datastore for the image does not exist"; + return -1; + } + + ds->disk_attribute(disk); + + ds->unlock(); } oss << disk_id; 
diff --git a/src/image/ImageTemplate.cc b/src/image/ImageTemplate.cc index 60c1fb623a..3ef5145020 100644 --- a/src/image/ImageTemplate.cc +++ b/src/image/ImageTemplate.cc @@ -21,5 +21,7 @@ vector ImageTemplate::restricted_attributes; +string ImageTemplate::saving_attribute = "SAVE_AS"; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ diff --git a/src/image/test/ImagePoolTest.cc b/src/image/test/ImagePoolTest.cc index 8427243e70..11f1d82003 100644 --- a/src/image/test/ImagePoolTest.cc +++ b/src/image/test/ImagePoolTest.cc @@ -51,20 +51,20 @@ const string templates[] = const string xmls[] = { -"001oneoneadminImage one110000000010000000000/tmp/image_test040", +"001oneoneadminImage one110000000010000000000/tmp/image_test0400none", -"111twooneadminSecond Image110000000000000000000/tmp/image_second_test040", +"111twooneadminSecond Image110000000000000000000/tmp/image_second_test0400none", -"021threeusersThe third image110000000000000000000/tmp/image_test040", +"021threeusersThe third image110000000000000000000/tmp/image_test0400none", }; // This xml dump result has the STIMEs modified to 0000000000 const string xml_dump = -"001oneoneadminImage one110000000010000000000/tmp/image_test040111twooneadminSecond Image110000000000000000000/tmp/image_second_test040221threeusersThe third image110000000000000000000/tmp/image_test040"; +"001oneoneadminImage one110000000010000000000/tmp/image_test0400none111twooneadminSecond Image110000000000000000000/tmp/image_second_test0400none221threeusersThe third image110000000000000000000/tmp/image_test0400none"; const string xml_dump_where = -"001oneoneadminImage one110000000010000000000/tmp/image_test040111twooneadminSecond Image110000000000000000000/tmp/image_second_test040"; +"001oneoneadminImage one110000000010000000000/tmp/image_test0400none111twooneadminSecond Image110000000000000000000/tmp/image_second_test0400none"; /* 
************************************************************************* */ /* ************************************************************************* */ @@ -80,6 +80,7 @@ public: need_image_pool = true; need_imagem = true; + need_datastore_pool = true; } }; @@ -113,7 +114,8 @@ public: string uname = unames[uid]; string gname = gnames[uid]; - return ImagePool::allocate(uid, 1, uname, gname, img_template, oid, err); + return ImagePool::allocate(uid, 1, uname, gname, + img_template, 0,"none", "", oid, err); } else { @@ -595,7 +597,7 @@ public: VectorAttribute * disk; int oid_0, oid_1, index, img_id; - string value; + string value, error; Image::ImageType img_type; // --------------------------------------------------------------------- @@ -630,7 +632,7 @@ public: disk = new VectorAttribute("DISK"); disk->replace("IMAGE_ID", "0"); - ((ImagePool*)imp)->disk_attribute(disk, 0, &index, &img_type,0, img_id); + ((ImagePool*)imp)->disk_attribute(disk, 0, &index, &img_type,0, img_id,error); value = ""; value = disk->vector_value("TARGET"); @@ -644,7 +646,7 @@ public: disk = new VectorAttribute("DISK"); disk->replace("IMAGE_ID", "1"); - ((ImagePool*)imp)->disk_attribute(disk, 0, &index, &img_type,0, img_id); + ((ImagePool*)imp)->disk_attribute(disk, 0, &index, &img_type,0, img_id,error); value = ""; value = disk->vector_value("TARGET"); diff --git a/src/image/test/SConstruct b/src/image/test/SConstruct index 7c8883a3ee..b1f7b9ac8a 100644 --- a/src/image/test/SConstruct +++ b/src/image/test/SConstruct @@ -23,6 +23,7 @@ env.Prepend(LIBS=[ 'nebula_im', 'nebula_hm', 'nebula_rm', + 'nebula_datastore', 'nebula_dm', 'nebula_tm', 'nebula_um', @@ -33,6 +34,7 @@ env.Prepend(LIBS=[ 'nebula_template', 'nebula_image', 'nebula_pool', + 'nebula_cluster', 'nebula_host', 'nebula_vnm', 'nebula_vm', diff --git a/src/image_mad/one_image.rb b/src/image_mad/one_image.rb deleted file mode 100755 index 520b6e515c..0000000000 --- a/src/image_mad/one_image.rb +++ /dev/null @@ -1,122 +0,0 @@ 
-#!/usr/bin/env ruby - -# -------------------------------------------------------------------------- */ -# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # -# Licensed under the Apache License, Version 2.0 (the "License"); you may */ -# not use this file except in compliance with the License. You may obtain */ -# a copy of the License at */ -# */ -# http://www.apache.org/licenses/LICENSE-2.0 */ -# */ -# Unless required by applicable law or agreed to in writing, software */ -# distributed under the License is distributed on an "AS IS" BASIS, */ -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ -# See the License for the specific language governing permissions and */ -# limitations under the License. */ -# -------------------------------------------------------------------------- */ - -# ---------------------------------------------------------------------------- -# Set up the environment for the driver -# ---------------------------------------------------------------------------- - -ONE_LOCATION = ENV["ONE_LOCATION"] - -if !ONE_LOCATION - RUBY_LIB_LOCATION = "/usr/lib/one/ruby" - VAR_LOCATION = "/var/lib/one" -else - RUBY_LIB_LOCATION = ONE_LOCATION + "/lib/ruby" - VAR_LOCATION = ONE_LOCATION + "/var" -end - -$: << RUBY_LIB_LOCATION - -require "OpenNebulaDriver" -require 'getoptlong' - -# This class provides basic messaging and logging functionality -# to implement Image Repository Drivers. A image repository driver -# is a program (or a set of) that specialize the OpenNebula behavior -# by interfacing with specific infrastructure storage solutions. 
-class ImageDriver < OpenNebulaDriver - - # Image Driver Protocol constants - ACTION = { - :mv => "MV", - :cp => "CP", - :rm => "RM", - :mkfs => "MKFS", - :log => "LOG" - } - - # Register default actions for the protocol - def initialize(fs_type, options={}) - @options={ - :concurrency => 10, - :threaded => true, - :retries => 0, - :local_actions => { - 'MV' => nil, - 'CP' => nil, - 'RM' => nil, - 'MKFS' => nil - } - }.merge!(options) - - super("image/#{fs_type}", @options) - - register_action(ACTION[:mv].to_sym, method("mv")) - register_action(ACTION[:cp].to_sym, method("cp")) - register_action(ACTION[:rm].to_sym, method("rm")) - register_action(ACTION[:mkfs].to_sym, method("mkfs")) - end - - # Image Manager Protocol Actions (generic implementation - def mv(id, src, dst) - do_action("#{src} #{dst} #{id}", id, nil, - ACTION[:mv]) - end - - def cp(id, src) - do_action("#{src} #{id}", id, nil, ACTION[:cp]) - end - - def rm(id, dst) - do_action("#{dst} #{id}", id, nil, ACTION[:rm]) - end - - def mkfs(id, fs, size) - do_action("#{fs} #{size} #{id}", id, nil, - ACTION[:mkfs]) - end -end - - -# ImageDriver Main program - -opts = GetoptLong.new( - [ '--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT ] -) - -fs_type = '' -threads = 15 - -begin - opts.each do |opt, arg| - case opt - when '--threads' - threads = arg.to_i - end - end -rescue Exception => e - exit(-1) -end - -if ARGV.length >= 1 - fs_type = ARGV.shift -else - exit(-1) -end - -image_driver = ImageDriver.new(fs_type, :concurrency => threads) -image_driver.start_driver diff --git a/src/image_mad/remotes/fs/fs.conf b/src/image_mad/remotes/fs/fs.conf deleted file mode 100644 index 6cb6cfdb8b..0000000000 --- a/src/image_mad/remotes/fs/fs.conf +++ /dev/null @@ -1,38 +0,0 @@ -# -------------------------------------------------------------------------- # -# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this 
file except in compliance with the License. You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -#--------------------------------------------------------------------------- # - -# PRESERVE BASH SYNTAX - -#******************************************************************************* -# Configuration File for File-System based Image Repositories -#------------------------------------------------------------------------------- -# IMAGE_REPOSITORY: Path where the images will be stored. If not defined -# defaults to /var/lib/one/images or $ONE_LOCATION/var/images -# -# RESTRICTED_DIRS: Paths that can not be used to register images. A space -# separated list of paths. This prevents users to access important files like -# oned.db or /etc/shadow. OpenNebula will automatically add its configuration -# dirs:/var/lib/one, /etc/one and oneadmin's home ($HOME). -# -# SAFE_DIRS: Paths that are safe to specify image paths. 
A space separated list -# of paths.This will allow you to open specific paths within RESTRICTED_DIRS -#******************************************************************************* - -#IMAGE_REPOSITORY_PATH=/var/lib/one/images - -RESTRICTED_DIRS="/etc/" - -SAFE_DIRS="$HOME/public/" diff --git a/src/image_mad/remotes/fs/fsrc b/src/image_mad/remotes/fs/fsrc deleted file mode 100644 index a55682115b..0000000000 --- a/src/image_mad/remotes/fs/fsrc +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash - -# -------------------------------------------------------------------------- # -# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this file except in compliance with the License. You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. 
# -#--------------------------------------------------------------------------- # - -#------------------------------------------------------------------------------ -# Configuration File for File-System based Image Repositories -# - IMAGE_REPOSITORY: Path where the images will be stored -# - RESTRICTED_DIRS: Paths that can not be used to register images -# - SAFE_DIRS: Paths that are safe to specify image paths -#------------------------------------------------------------------------------ -if [ -z "${ONE_LOCATION}" ]; then - VAR_LOCATION=/var/lib/one/ - ETC_LOCATION=/etc/one/ -else - VAR_LOCATION=$ONE_LOCATION/var/ - ETC_LOCATION=$ONE_LOCATION/etc/ -fi - -CONF_FILE=$ETC_LOCATION/image/fs.conf - -source $CONF_FILE - -if [ -z "${IMAGE_REPOSITORY_PATH}" ]; then - if [ -z "${ONE_LOCATION}" ]; then - IMAGE_REPOSITORY_PATH=/var/lib/one/images - else - IMAGE_REPOSITORY_PATH=$ONE_LOCATION/var/images - fi -fi - -RESTRICTED_DIRS="$VAR_LOCATION $ETC_LOCATION $HOME/ $RESTRICTED_DIRS" - -export IMAGE_REPOSITORY_PATH -export RESTRICTED_DIRS -export SAFE_DIRS - -#------------------------------------------------------------------------------ -# Function used to generate Image names, you should not need to override this -#------------------------------------------------------------------------------ -function generate_image_path { - -CANONICAL_STR="`$DATE +%s`:$ID" - -CANONICAL_MD5=$($MD5SUM - << EOF -$CANONICAL_STR -EOF -) - -echo "$IMAGE_REPOSITORY_PATH/`echo $CANONICAL_MD5 | cut -d ' ' -f1`" -} - -function fs_du { - if [ -d "$1" ]; then - SIZE=`du -sb "$1" | cut -f1` - error=$? - else - SIZE=`stat -c %s "$1"` - error=$? 
- fi - - if [ $error -ne 0 ]; then - SIZE=0 - else - SIZE=$(($SIZE/1048576)) - fi - - echo "$SIZE" -} - -function check_restricted { - for path in $SAFE_DIRS ; do - if [ -n "`readlink -f $1 | grep -E "^$path"`" ] ; then - echo 0 - return - fi - done - - for path in $RESTRICTED_DIRS ; do - if [ -n "`readlink -f $1 | grep -E "^$path"`" ] ; then - echo 1 - return - fi - done - - echo 0 -} diff --git a/src/lcm/test/LifeCycleManagerTest.cc b/src/lcm/test/LifeCycleManagerTest.cc index 50a43f2275..03fd372352 100644 --- a/src/lcm/test/LifeCycleManagerTest.cc +++ b/src/lcm/test/LifeCycleManagerTest.cc @@ -48,8 +48,6 @@ static int hid = 123; static string hostname = "test_hostname"; static string vmm_mad = "vmm_mad"; static string vnm_mad = "vnm_mad"; -static string tm_mad = "tm_mad"; -static string vmdir = "vmdir"; class LifeCycleManagerTest : public OneUnitTest { @@ -226,7 +224,7 @@ private: vm->lock(); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -490,7 +488,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -574,7 +572,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -595,7 +593,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -616,7 +614,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -638,7 
+636,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -662,7 +660,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -686,7 +684,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -750,7 +748,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -773,7 +771,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -796,7 +794,7 @@ public: { vm = allocate_running(0); - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); diff --git a/src/lcm/test/SConstruct b/src/lcm/test/SConstruct index e222b0002f..bd5dc5bfd2 100644 --- a/src/lcm/test/SConstruct +++ b/src/lcm/test/SConstruct @@ -25,6 +25,7 @@ env.Prepend(LIBS=[ 'nebula_im', 'nebula_hm', 'nebula_rm', + 'nebula_datastore', 'nebula_dm', 'nebula_tm', 'nebula_um', @@ -35,6 +36,7 @@ env.Prepend(LIBS=[ 'nebula_template', 'nebula_image', 'nebula_pool', + 'nebula_cluster', 'nebula_host', 'nebula_vnm', 'nebula_vm', diff --git a/src/mad/Mad.cc b/src/mad/Mad.cc index 98388228ee..e33c04cfdb 100644 --- a/src/mad/Mad.cc +++ b/src/mad/Mad.cc @@ -239,13 +239,13 @@ int Mad::start() error_exec: oss.str(""); - oss << "Can 
not load driver " << executable << ", " << strerror(errno); + oss << "Cannot load driver " << executable << ", " << strerror(errno); NebulaLog::log("MAD", Log::ERROR, oss); exit(-1); error_dup2: oss.str(""); - oss << "Can not duplicate descriptors, " << strerror(errno); + oss << "Cannot duplicate descriptors, " << strerror(errno); NebulaLog::log("MAD", Log::ERROR, oss); exit(-1); @@ -275,7 +275,7 @@ error_attributes: error_pipes: oss.str(""); - oss << "Can not create driver pipes, " << strerror(errno); + oss << "Cannot create driver pipes, " << strerror(errno); NebulaLog::log("MAD", Log::ERROR, oss); return -1; } diff --git a/src/mad/sh/scripts_common.sh b/src/mad/sh/scripts_common.sh index ea3d484b21..addd2ba59c 100755 --- a/src/mad/sh/scripts_common.sh +++ b/src/mad/sh/scripts_common.sh @@ -22,23 +22,50 @@ CUT=cut DATE=date DD=dd DU=du +GREP=grep +ISCSIADM=iscsiadm LVCREATE=lvcreate LVREMOVE=lvremove LVS=lvs +LN=ln MD5SUM=md5sum MKFS=mkfs MKISOFS=mkisofs MKSWAP=mkswap +QEMU_IMG=qemu-img +READLINK=readlink SCP=scp SED=sed SSH=ssh SUDO=sudo +TAR=tar +TGTADM=tgtadm WGET=wget -READLINK=readlink + +if [ "x$(uname -s)" = "xLinux" ]; then + SED="$SED -r" +else + SED="/usr/bin/sed -E" +fi # Used for log messages SCRIPT_NAME=`basename $0` +# ------------------------------------------------------------------------------ +# Path manipulation functions +# ------------------------------------------------------------------------------ + +# Takes out unneeded slashes. Repeated and final directory slashes: +# /some//path///somewhere/ -> /some/path/somewhere +function fix_dir_slashes +{ + dirname "$1/file" | $SED 's/\/+/\//g' +} + +# ------------------------------------------------------------------------------ +# Log functions +# ------------------------------------------------------------------------------ + # Formats date for logs function log_date { @@ -92,23 +119,22 @@ function error_message function exec_and_log { message=$2 - output=`$1 2>&1 1>/dev/null` - code=$? 
- if [ "x$code" != "x0" ]; then - log_error "Command \"$1\" failed." - log_error "$output" - if [ -z "$message" ]; then - error_message "$output" + + EXEC_LOG_ERR=`$1 2>&1 1>/dev/null` + EXEC_LOG_RC=$? + + if [ $EXEC_LOG_RC -ne 0 ]; then + log_error "Command \"$1\" failed: $EXEC_LOG_ERR" + + if [ -n "$2" ]; then + error_message "$2" else - error_message "$message" + error_message "Error executing $1: $EXEC_LOG_ERR" fi - exit $code + exit $EXEC_LOG_RC fi - log "Executed \"$1\"." } - - # Like exec_and_log but the first argument is the number of seconds # before here is timeout and kills the command # @@ -168,6 +194,14 @@ function mkfs_command { "jfs") OPTS="-q" ;; + "raw") + echo "" + return 0 + ;; + "swap") + echo "$MKSWAP $DST" + return 0 + ;; *) OPTS="" ;; @@ -175,3 +209,126 @@ function mkfs_command { echo "$MKFS -t $FSTYPE $OPTS $DST" } + +#This function executes $2 at $1 host and report error $3 +function ssh_exec_and_log +{ + SSH_EXEC_ERR=`$SSH $1 sh -s 2>&1 1>/dev/null <&1 1>/dev/null < vm_restricted_attrs; vector img_restricted_attrs; + clpool = new ClusterPool(db); + nebula_configuration->get("VM_HOOK", vm_hooks); nebula_configuration->get("HOST_HOOK", host_hooks); @@ -311,6 +315,8 @@ void Nebula::start() img_restricted_attrs); tpool = new VMTemplatePool(db); + + dspool = new DatastorePool(db); } catch (exception&) { @@ -546,7 +552,7 @@ void Nebula::start() { vector image_mads; - nebula_configuration->get("IMAGE_MAD", image_mads); + nebula_configuration->get("DATASTORE_MAD", image_mads); imagem = new ImageManager(ipool,image_mads); } diff --git a/src/nebula/NebulaTemplate.cc b/src/nebula/NebulaTemplate.cc index 83ad962b83..db998d2a17 100644 --- a/src/nebula/NebulaTemplate.cc +++ b/src/nebula/NebulaTemplate.cc @@ -91,8 +91,7 @@ void OpenNebulaTemplate::set_conf_default() # HOST_MONITORING_INTERVAL # HOST_PER_INTERVAL # VM_POLLING_INTERVAL -# VM_PER_INTERVAL -# VM_DIR +# VM_PER_INTERVAL # PORT # DB # VNC_BASE_PORT @@ -123,10 +122,6 @@ void 
OpenNebulaTemplate::set_conf_default() attribute = new SingleAttribute("VM_PER_INTERVAL",value); conf_default.insert(make_pair(attribute->name(),attribute)); - //VM_DIR - attribute = new SingleAttribute("VM_DIR",var_location); - conf_default.insert(make_pair(attribute->name(),attribute)); - //XML-RPC Server PORT value = "2633"; @@ -180,12 +175,18 @@ void OpenNebulaTemplate::set_conf_default() /* #******************************************************************************* -# Image Repository Configuration +# Datastore Configuration #******************************************************************************* +# DATASTORE_LOCATION # DEFAULT_IMAGE_TYPE # DEFAULT_DEVICE_PREFIX #******************************************************************************* */ + //DATASTORE_LOCATION + attribute = new SingleAttribute("DATASTORE_LOCATION", + var_location + "/datastores"); + conf_default.insert(make_pair(attribute->name(),attribute)); + //DEFAULT_IMAGE_TYPE value = "OS"; @@ -198,7 +199,6 @@ void OpenNebulaTemplate::set_conf_default() attribute = new SingleAttribute("DEFAULT_DEVICE_PREFIX",value); conf_default.insert(make_pair(attribute->name(),attribute)); /* - #******************************************************************************* # Auth Manager Configuration #******************************************************************************* diff --git a/src/nebula/SConstruct b/src/nebula/SConstruct index 0f52378d2a..270373332c 100644 --- a/src/nebula/SConstruct +++ b/src/nebula/SConstruct @@ -41,6 +41,7 @@ env.Prepend(LIBS=[ 'nebula_dm', 'nebula_tm', 'nebula_um', + 'nebula_datastore', 'nebula_group', 'nebula_authm', 'nebula_acl', @@ -49,6 +50,7 @@ env.Prepend(LIBS=[ 'nebula_image', 'nebula_pool', 'nebula_host', + 'nebula_cluster', 'nebula_vnm', 'nebula_vm', 'nebula_vmtemplate', diff --git a/src/nebula/oned.cc b/src/nebula/oned.cc index 3076c9f4fe..cfe7968607 100644 --- a/src/nebula/oned.cc +++ b/src/nebula/oned.cc @@ -127,7 +127,7 @@ int main(int argc, char 
**argv) if( fd == -1) { - cerr<< "Error: Can not start oned, opening lock file " << lockfile + cerr<< "Error: Cannot start oned, opening lock file " << lockfile << endl; exit(-1); @@ -186,7 +186,7 @@ int main(int argc, char **argv) return 0; error_chdir: - cerr << "Error: can not change to dir " << wd << "\n"; + cerr << "Error: cannot change to dir " << wd << "\n"; unlink(lockfile.c_str()); exit(-1); diff --git a/src/oca/java/src/org/opennebula/client/host/Host.java b/src/oca/java/src/org/opennebula/client/host/Host.java index d264d79130..13fbe45117 100644 --- a/src/oca/java/src/org/opennebula/client/host/Host.java +++ b/src/oca/java/src/org/opennebula/client/host/Host.java @@ -75,7 +75,6 @@ public class Host extends PoolElement{ * @param vnm The name of the virtual network manager mad name * (vnm_mad_name), this values are taken from the oned.conf with the * tag name VN_MAD (name) - * @param tm The transfer manager mad name to be used with this host * @return If successful the message contains the associated * id generated for this host */ @@ -83,10 +82,9 @@ public class Host extends PoolElement{ String hostname, String im, String vmm, - String vnm, - String tm) + String vnm) { - return client.call(ALLOCATE, hostname, im, vmm, vnm, tm); + return client.call(ALLOCATE, hostname, im, vmm, vnm); } /** diff --git a/src/oca/java/test/HostTest.java b/src/oca/java/test/HostTest.java index 431b3ac11e..4126a259eb 100644 --- a/src/oca/java/test/HostTest.java +++ b/src/oca/java/test/HostTest.java @@ -63,8 +63,7 @@ public class HostTest @Before public void setUp() throws Exception { - res = Host.allocate(client, name, "im_dummy", "vmm_dummy", "vnm_dummy", - "tm_dummy"); + res = Host.allocate(client, name, "im_dummy", "vmm_dummy", "vnm_dummy"); int hid = !res.isError() ? 
Integer.parseInt(res.getMessage()) : -1; host = new Host(hid, client); @@ -84,8 +83,7 @@ public class HostTest { String name = "allocate_test"; - res = Host.allocate(client, name, "im_dummy", "vmm_dummy", "vmm_dummy", - "tm_dummy"); + res = Host.allocate(client, name, "im_dummy", "vmm_dummy", "vmm_dummy"); assertTrue( !res.isError() ); // assertTrue( res.getMessage().equals("0") ); diff --git a/src/oca/java/test/oned.conf b/src/oca/java/test/oned.conf index de3fdf51df..7e88254166 100644 --- a/src/oca/java/test/oned.conf +++ b/src/oca/java/test/oned.conf @@ -6,7 +6,7 @@ # Daemon configuration attributes #------------------------------------------------------------------------------- # MANAGER_TIMER: Time in seconds the core uses to evaluate periodical functions. -# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL can not have smaller values +# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL cannot have smaller values # than MANAGER_TIMER. # # HOST_MONITORING_INTERVAL: Time in seconds between host monitorization. 
diff --git a/src/oca/ruby/OpenNebula.rb b/src/oca/ruby/OpenNebula.rb index 6bb4a4916c..525a18d521 100644 --- a/src/oca/ruby/OpenNebula.rb +++ b/src/oca/ruby/OpenNebula.rb @@ -42,6 +42,10 @@ require 'OpenNebula/Group' require 'OpenNebula/GroupPool' require 'OpenNebula/Acl' require 'OpenNebula/AclPool' +require 'OpenNebula/Datastore' +require 'OpenNebula/DatastorePool' +require 'OpenNebula/Cluster' +require 'OpenNebula/ClusterPool' module OpenNebula diff --git a/src/oca/ruby/OpenNebula/Acl.rb b/src/oca/ruby/OpenNebula/Acl.rb index 054a8f6648..de423a1627 100644 --- a/src/oca/ruby/OpenNebula/Acl.rb +++ b/src/oca/ruby/OpenNebula/Acl.rb @@ -52,7 +52,9 @@ module OpenNebula "IMAGE" => 0x8000000000, "USER" => 0x10000000000, "TEMPLATE" => 0x20000000000, - "GROUP" => 0x40000000000 + "GROUP" => 0x40000000000, + "DATASTORE" => 0x100000000000, + "CLUSTER" => 0x200000000000 } RIGHTS = diff --git a/src/oca/ruby/OpenNebula/Cluster.rb b/src/oca/ruby/OpenNebula/Cluster.rb new file mode 100644 index 0000000000..cdec2f5703 --- /dev/null +++ b/src/oca/ruby/OpenNebula/Cluster.rb @@ -0,0 +1,236 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + + +require 'OpenNebula/Pool' + +module OpenNebula + class Cluster < PoolElement + ####################################################################### + # Constants and Class Methods + ####################################################################### + + CLUSTER_METHODS = { + :info => "cluster.info", + :allocate => "cluster.allocate", + :delete => "cluster.delete", + :addhost => "cluster.addhost", + :delhost => "cluster.delhost", + :adddatastore => "cluster.adddatastore", + :deldatastore => "cluster.deldatastore", + :addvnet => "cluster.addvnet", + :delvnet => "cluster.delvnet" + } + + # Creates a Cluster description with just its identifier + # this method should be used to create plain Cluster objects. + # +id+ the id of the host + # + # Example: + # cluster = Cluster.new(Cluster.build_xml(3),rpc_client) + # + def Cluster.build_xml(pe_id=nil) + if pe_id + cluster_xml = "#{pe_id}" + else + cluster_xml = "" + end + + XMLElement.build_xml(cluster_xml,'CLUSTER') + end + + # Class constructor + def initialize(xml, client) + super(xml,client) + end + + ####################################################################### + # XML-RPC Methods for the Cluster Object + ####################################################################### + + # Retrieves the information of the given Cluster. + def info() + super(CLUSTER_METHODS[:info], 'CLUSTER') + end + + # Allocates a new Cluster in OpenNebula + # + # +clustername+ A string containing the name of the Cluster. 
+ def allocate(clustername) + super(CLUSTER_METHODS[:allocate], clustername) + end + + # Deletes the Cluster + def delete() + super(CLUSTER_METHODS[:delete]) + end + + # Adds a Host to this Cluster + # @param hid [Integer] Host ID + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def addhost(hid) + return Error.new('ID not defined') if !@pe_id + + rc = @client.call(CLUSTER_METHODS[:addhost], @pe_id, hid) + rc = nil if !OpenNebula.is_error?(rc) + + return rc + end + + # Deletes a Host from this Cluster + # @param hid [Integer] Host ID + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def delhost(hid) + return Error.new('ID not defined') if !@pe_id + + rc = @client.call(CLUSTER_METHODS[:delhost], @pe_id, hid) + rc = nil if !OpenNebula.is_error?(rc) + + return rc + end + + # Adds a Datastore to this Cluster + # @param ds_id [Integer] Datastore ID + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def adddatastore(ds_id) + return Error.new('ID not defined') if !@pe_id + + rc = @client.call(CLUSTER_METHODS[:adddatastore], @pe_id, ds_id) + rc = nil if !OpenNebula.is_error?(rc) + + return rc + end + + # Deletes a Datastore from this Cluster + # @param ds_id [Integer] Datastore ID + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def deldatastore(ds_id) + return Error.new('ID not defined') if !@pe_id + + rc = @client.call(CLUSTER_METHODS[:deldatastore], @pe_id, ds_id) + rc = nil if !OpenNebula.is_error?(rc) + + return rc + end + + # Adds a VNet to this Cluster + # @param vnet_id [Integer] VNet ID + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def addvnet(vnet_id) + return Error.new('ID not defined') if !@pe_id + + rc = @client.call(CLUSTER_METHODS[:addvnet], @pe_id, vnet_id) + rc = nil if !OpenNebula.is_error?(rc) + + return rc + end + + # Deletes a VNet from this Cluster + # @param vnet_id [Integer] VNet ID 
+ # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def delvnet(vnet_id) + return Error.new('ID not defined') if !@pe_id + + rc = @client.call(CLUSTER_METHODS[:delvnet], @pe_id, vnet_id) + rc = nil if !OpenNebula.is_error?(rc) + + return rc + end + + # --------------------------------------------------------------------- + # Helpers to get information + # --------------------------------------------------------------------- + + # Returns whether or not the host with 'id' is part of this cluster + # @param id [Integer|Array] host ID + # @return [Boolean] true if found + def contains_host?(id) + contains_resource?('HOSTS/ID', id) + end + + # Returns an array with the numeric host ids + # @return [Array] + def host_ids + array = Array.new + + self.each("HOSTS/ID") do |id| + array << id.text.to_i + end + + return array + end + + # Returns whether or not the datastore with 'id' is part of this cluster + # @param id [Integer|Array] datastore ID + # @return [Boolean] true if found + def contains_datastore?(id) + contains_resource?('DATASTORES/ID', id) + end + + # Returns an array with the numeric datastore ids + # @return [Array] + def datastore_ids + array = Array.new + + self.each("DATASTORES/ID") do |id| + array << id.text.to_i + end + + return array + end + + # Returns whether or not the vnet with 'id' is part of this cluster + # @param id [Integer|Arrray] vnet ID + # @return [Boolean] true if found + def contains_vnet?(id) + contains_resource?('VNETS/ID', id) + end + + # Returns an array with the numeric vnet ids + # @return [Array] + def vnet_ids + array = Array.new + + self.each("VNETS/ID") do |id| + array << id.text.to_i + end + + return array + end + + private + + def contains_resource?(xpath, id) + id_array = retrieve_elements(xpath) + + return false if id_array.nil? 
+ + id = [id] if id.class != Array + + id.each { |i| + return false if !id_array.include?(i.to_s) + } + + return true + end + end +end diff --git a/src/oca/ruby/OpenNebula/ClusterPool.rb b/src/oca/ruby/OpenNebula/ClusterPool.rb new file mode 100644 index 0000000000..780b72a347 --- /dev/null +++ b/src/oca/ruby/OpenNebula/ClusterPool.rb @@ -0,0 +1,56 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + + +require 'OpenNebula/Pool' + +module OpenNebula + class ClusterPool < Pool + ####################################################################### + # Constants and Class attribute accessors + ####################################################################### + + NONE_CLUSTER_ID = -1 + DEFAULT_CLUSTER_ID = 0 + + CLUSTER_POOL_METHODS = { + :info => "clusterpool.info" + } + + ####################################################################### + # Class constructor & Pool Methods + ####################################################################### + + # +client+ a Client object that represents a XML-RPC connection + def initialize(client) + super('CLUSTER_POOL','CLUSTER',client) + end + + # Factory method to create Cluster objects + def factory(element_xml) + OpenNebula::Cluster.new(element_xml,@client) + end + + ####################################################################### + # XML-RPC Methods for the Cluster Object + ####################################################################### + + # Retrieves all the Clusters in the pool. + def info() + super(CLUSTER_POOL_METHODS[:info]) + end + end +end diff --git a/src/oca/ruby/OpenNebula/Datastore.rb b/src/oca/ruby/OpenNebula/Datastore.rb new file mode 100644 index 0000000000..6dbb10b1f4 --- /dev/null +++ b/src/oca/ruby/OpenNebula/Datastore.rb @@ -0,0 +1,147 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + + +require 'OpenNebula/Pool' + +module OpenNebula + class Datastore < PoolElement + ####################################################################### + # Constants and Class Methods + ####################################################################### + + DATASTORE_METHODS = { + :info => "datastore.info", + :allocate => "datastore.allocate", + :delete => "datastore.delete", + :update => "datastore.update", + :chown => "datastore.chown", + :chmod => "datastore.chmod" + } + + # Creates a Datastore description with just its identifier + # this method should be used to create plain Datastore objects. + # +id+ the id of the user + # + # Example: + # datastore = Datastore.new(Datastore.build_xml(3),rpc_client) + # + def Datastore.build_xml(pe_id=nil) + if pe_id + datastore_xml = "#{pe_id}" + else + datastore_xml = "" + end + + XMLElement.build_xml(datastore_xml,'DATASTORE') + end + + # Class constructor + def initialize(xml, client) + super(xml,client) + end + + ####################################################################### + # XML-RPC Methods for the Datastore Object + ####################################################################### + + # Retrieves the information of the given Datastore. + def info() + super(DATASTORE_METHODS[:info], 'DATASTORE') + end + + # Allocates a new Datastore in OpenNebula + # + # @param description [String] The template of the Datastore. 
+ # @param cluster_id [Integer] Id of the cluster + # + # @return [Integer, OpenNebula::Error] the new ID in case of + # success, error otherwise + def allocate(description, cluster_id=ClusterPool::NONE_CLUSTER_ID) + super(DATASTORE_METHODS[:allocate], description, cluster_id) + end + + # Deletes the Datastore + def delete() + super(DATASTORE_METHODS[:delete]) + end + + # Replaces the template contents + # + # @param new_template [String] New template contents + # + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def update(new_template) + super(DATASTORE_METHODS[:update], new_template) + end + + # Changes the owner/group + # + # @param uid [Integer] the new owner id. Set to -1 to leave the current one + # @param gid [Integer] the new group id. Set to -1 to leave the current one + # + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def chown(uid, gid) + super(DATASTORE_METHODS[:chown], uid, gid) + end + + # Changes the datastore permissions. + # + # @param octet [String] Permissions octed , e.g. 640 + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def chmod_octet(octet) + super(DATASTORE_METHODS[:chmod], octet) + end + + # Changes the datastore permissions. 
+ # Each [Integer] argument must be 1 to allow, 0 deny, -1 do not change + # + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise + def chmod(owner_u, owner_m, owner_a, group_u, group_m, group_a, other_u, + other_m, other_a) + super(DATASTORE_METHODS[:chmod], owner_u, owner_m, owner_a, group_u, + group_m, group_a, other_u, other_m, other_a) + end + + # --------------------------------------------------------------------- + # Helpers to get information + # --------------------------------------------------------------------- + + # Returns whether or not the image with id 'id' is part of this datastore + def contains(id) + #This doesn't work in ruby 1.8.5 + #return self["IMAGES/ID[.=#{id}]"] != nil + + id_array = retrieve_elements('IMAGES/ID') + return id_array != nil && id_array.include?(id.to_s) + end + + # Returns an array with the numeric image ids + def img_ids + array = Array.new + + self.each("IMAGES/ID") do |id| + array << id.text.to_i + end + + return array + end + end +end diff --git a/src/oca/ruby/OpenNebula/DatastorePool.rb b/src/oca/ruby/OpenNebula/DatastorePool.rb new file mode 100644 index 0000000000..b491d394fe --- /dev/null +++ b/src/oca/ruby/OpenNebula/DatastorePool.rb @@ -0,0 +1,53 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + + +require 'OpenNebula/Pool' + +module OpenNebula + class DatastorePool < Pool + ####################################################################### + # Constants and Class attribute accessors + ####################################################################### + + DATASTORE_POOL_METHODS = { + :info => "datastorepool.info" + } + + ####################################################################### + # Class constructor & Pool Methods + ####################################################################### + + # +client+ a Client object that represents a XML-RPC connection + def initialize(client) + super('DATASTORE_POOL','DATASTORE',client) + end + + # Factory method to create Datastore objects + def factory(element_xml) + OpenNebula::Datastore.new(element_xml,@client) + end + + ####################################################################### + # XML-RPC Methods for the Datastore Object + ####################################################################### + + # Retrieves all the Datastores in the pool. + def info() + super(DATASTORE_POOL_METHODS[:info]) + end + end +end diff --git a/src/oca/ruby/OpenNebula/Group.rb b/src/oca/ruby/OpenNebula/Group.rb index 74d56ddbf3..3fa5a9a41c 100644 --- a/src/oca/ruby/OpenNebula/Group.rb +++ b/src/oca/ruby/OpenNebula/Group.rb @@ -69,7 +69,7 @@ module OpenNebula # Creates ACLs for the group. The ACL rules are described in a file def create_acls(filename = GROUP_DEFAULT) if !File.readable?(filename) - return -1, "Can not read deafult ACL file for group" + return -1, "Cannot read default ACL file for group" end msg = String.new diff --git a/src/oca/ruby/OpenNebula/Host.rb b/src/oca/ruby/OpenNebula/Host.rb index b0f183c6c4..f0c6286939 100644 --- a/src/oca/ruby/OpenNebula/Host.rb +++ b/src/oca/ruby/OpenNebula/Host.rb @@ -79,14 +79,15 @@ module OpenNebula # Allocates a new Host in OpenNebula # # @param hostname [String] Name of the new Host. 
- # @param im [String] Name of the im_driver - # @param vmm [String] Name of the vmm_driver - # @param tm [String] Name of the tm_driver + # @param im [String] Name of the im_driver (information/monitoring) + # @param vmm [String] Name of the vmm_driver (hypervisor) + # @param vnm [String] Name of the vnm_driver (networking) + # @param cluster_id [Integer] Id of the cluster + # - # @return [Integer, OpenNebula::Error] the new VM ID in case of + # @return [Integer, OpenNebula::Error] the new ID in case of # success, error otherwise - def allocate(hostname,im,vmm,vnm,tm) - super(HOST_METHODS[:allocate],hostname,im,vmm,vnm,tm) + def allocate(hostname,im,vmm,vnm,cluster_id=ClusterPool::NONE_CLUSTER_ID) + super(HOST_METHODS[:allocate],hostname,im,vmm,vnm,cluster_id) end # Deletes the Host diff --git a/src/oca/ruby/OpenNebula/Image.rb b/src/oca/ruby/OpenNebula/Image.rb index 348b4c4df1..878115ef3d 100644 --- a/src/oca/ruby/OpenNebula/Image.rb +++ b/src/oca/ruby/OpenNebula/Image.rb @@ -91,9 +91,13 @@ module OpenNebula # Allocates a new Image in OpenNebula # - # +description+ A string containing the template of the Image. - def allocate(description) - super(IMAGE_METHODS[:allocate],description) + # @param description [String] A string containing the template of the Image. + # @param ds_id [Integer] the target datastore ID + # + # @return [Integer, OpenNebula::Error] the new Image ID in case of + # success, error otherwise + def allocate(description, ds_id) + super(IMAGE_METHODS[:allocate],description, ds_id) end # Replaces the template contents diff --git a/src/oca/ruby/OpenNebula/VirtualNetwork.rb b/src/oca/ruby/OpenNebula/VirtualNetwork.rb index 134e0a038c..2b82cc1622 100644 --- a/src/oca/ruby/OpenNebula/VirtualNetwork.rb +++ b/src/oca/ruby/OpenNebula/VirtualNetwork.rb @@ -78,9 +78,13 @@ module OpenNebula # Allocates a new VirtualNetwork in OpenNebula # - # +description+ A string containing the template of the VirtualNetwork. 
- def allocate(description) - super(VN_METHODS[:allocate],description) + # @param description [String] The template of the VirtualNetwork. + # @param cluster_id [Integer] Id of the cluster + # + # @return [Integer, OpenNebula::Error] the new ID in case of + # success, error otherwise + def allocate(description,cluster_id=ClusterPool::NONE_CLUSTER_ID) + super(VN_METHODS[:allocate], description, cluster_id) end # Replaces the template contents @@ -158,9 +162,12 @@ module OpenNebula end # Changes the owner/group - # uid:: _Integer_ the new owner id. Set to -1 to leave the current one - # gid:: _Integer_ the new group id. Set to -1 to leave the current one - # [return] nil in case of success or an Error object + # + # @param uid [Integer] the new owner id. Set to -1 to leave the current one + # @param gid [Integer] the new group id. Set to -1 to leave the current one + # + # @return [nil, OpenNebula::Error] nil in case of success, Error + # otherwise def chown(uid, gid) super(VN_METHODS[:chown], uid, gid) end diff --git a/src/onedb/3.3.0_to_3.3.80.rb b/src/onedb/3.3.0_to_3.3.80.rb new file mode 100644 index 0000000000..3fd8ee4631 --- /dev/null +++ b/src/onedb/3.3.0_to_3.3.80.rb @@ -0,0 +1,306 @@ +# -------------------------------------------------------------------------- * +# Copyright 2002-2011, OpenNebula Project Leads (OpenNebula.org) # +# Licensed under the Apache License, Version 2.0 (the "License"); you may * +# not use this file except in compliance with the License. You may obtain * +# a copy of the License at * +# * +# http://www.apache.org/licenses/LICENSE-2.0 * +# * +# Unless required by applicable law or agreed to in writing, software * +# distributed under the License is distributed on an "AS IS" BASIS, * +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * +# See the License for the specific language governing permissions and * +# limitations under the License. 
* +# -------------------------------------------------------------------------- * + +require "rexml/document" +include REXML + +module Migrator + def db_version + "3.3.80" + end + + def one_version + "OpenNebula 3.3.80" + end + + SHORT_VM_STATES=%w{init pend hold actv stop susp done fail} + + SHORT_LCM_STATES=%w{prol boot runn migr save save save migr prol, + epil epil shut shut fail clea unkn} + + def up + + header_done = false + + @db.fetch("SELECT oid,name,state,lcm_state FROM vm_pool WHERE ( state <> 1 AND state <> 6 )") do |row| + if ( !header_done ) + puts "You can't have active VMs. Please shutdown or delete the following VMs:" + puts + puts " ID STAT NAME" + + header_done = true + end + + if row[:state] != 3 + state_str = SHORT_VM_STATES[row[:state]] + else + state_str = SHORT_LCM_STATES[row[:lcm_state]] + end + + puts "#{'%6.6s' % row[:oid].to_s} #{state_str} #{row[:name]}" + end + + if ( header_done ) + puts + return false + end + + one_location = ENV["ONE_LOCATION"] + + if !one_location + var_location = "/var/lib/one" + else + var_location = one_location + "/var" + end + + ######################################################################## + # Get oneadmin user and group names + ######################################################################## + + oneadmin_uname = nil + + @db.fetch("SELECT name FROM user_pool WHERE oid=0") do |row| + oneadmin_uname = row[:name] + end + + if oneadmin_uname == nil + puts "Error trying to read oneadmin's user name ('SELECT name FROM user_pool WHERE oid=0')" + return false + end + + oneadmin_gname = nil + + @db.fetch("SELECT name FROM group_pool WHERE oid=0") do |row| + oneadmin_gname = row[:name] + end + + if oneadmin_gname == nil + puts "Error trying to read oneadmin's group name ('SELECT name FROM group_pool WHERE oid=0')" + return false + end + + ######################################################################## + # Create the cluster and datastore tables + 
######################################################################## + + # New table for Clusters + @db.run "CREATE TABLE cluster_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + # New table for Datastores + @db.run "CREATE TABLE datastore_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + # Insert system datastore + + xml = + "" << + " 0" << + " 0" << + " 0" << + " #{oneadmin_uname}" << + " #{oneadmin_gname}" << + " system" << + " " << + " 1" << + " 1" << + " 0" << + " 1" << + " 0" << + " 0" << + " 0" << + " 0" << + " 0" << + " " << + " -" << + " shared" << + " #{var_location}/datastores/0" << + " -1" << + " " << + " " << + " " << + "" + + @db[:datastore_pool].insert( + :oid => 0, + :name => 'system', + :body => xml, + :uid => 0, + :gid => 0, + :owner_u => 1, + :group_u => 1, + :other_u => 0) + + # Last oid for cluster_pool and datastore_pool + + @db[:pool_control].insert( + :tablename => 'cluster_pool', + :last_oid => 99) + + @db[:pool_control].insert( + :tablename => 'datastore_pool', + :last_oid => 99) + + ######################################################################## + # Add each Host to Cluster -1 (none) + ######################################################################## + + @db.run "ALTER TABLE host_pool RENAME TO old_host_pool;" + @db.run "CREATE TABLE host_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, state INTEGER, last_mon_time INTEGER, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name));" + + @db.fetch("SELECT * FROM old_host_pool") do |row| + doc = Document.new(row[:body]) + + # Delete TM_MAD elem + doc.root.delete_element("TM_MAD") + + # Add Cluster elements + doc.root.add_element("CLUSTER_ID").text = "-1" + doc.root.add_element("CLUSTER").text = "" + + 
@db[:host_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :state => row[:state], + :last_mon_time => row[:last_mon_time], + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + + @db.run "DROP TABLE old_host_pool;" + + ######################################################################## + # Add each VNet to Cluster -1 (none) + ######################################################################## + + @db.run "ALTER TABLE network_pool RENAME TO old_network_pool;" + @db.run "CREATE TABLE network_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid));" + + @db.fetch("SELECT * FROM old_network_pool") do |row| + doc = Document.new(row[:body]) + + # Add Cluster elements + doc.root.add_element("CLUSTER_ID").text = "-1" + doc.root.add_element("CLUSTER").text = "" + + @db[:network_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + + @db.run "DROP TABLE old_network_pool;" + + ######################################################################## + # Add each Image to Datastore 1 (default) + ######################################################################## + + images_element = "" + + @db.run "ALTER TABLE image_pool RENAME TO old_image_pool;" + @db.run "CREATE TABLE image_pool (oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, UNIQUE(name,uid) );" + + @db.fetch("SELECT * FROM old_image_pool") do |row| + doc = Document.new(row[:body]) + + # Add Cluster elements + doc.root.add_element("DATASTORE_ID").text = "1" + doc.root.add_element("DATASTORE").text = "default" + + images_element << "#{row[:oid]}" 
+ + # Update SOURCE + doc.root.each_element("SOURCE") { |e| + previous_source = e.text + hash = previous_source.split('/')[-1] + + if ( hash.length == 32 && hash =~ /^[0-9A-F]+$/i ) + e.text = "#{var_location}/datastores/1/#{hash}" + + `ln -s #{previous_source} #{e.text}` + end + } + + @db[:image_pool].insert( + :oid => row[:oid], + :name => row[:name], + :body => doc.root.to_s, + :uid => row[:uid], + :gid => row[:gid], + :owner_u => row[:owner_u], + :group_u => row[:group_u], + :other_u => row[:other_u]) + end + + @db.run "DROP TABLE old_image_pool;" + + images_element << "" + + # Insert default datastore + + xml = + "" << + " 1" << + " 0" << + " 0" << + " #{oneadmin_uname}" << + " #{oneadmin_gname}" << + " default" << + " " << + " 1" << + " 1" << + " 0" << + " 1" << + " 0" << + " 0" << + " 0" << + " 0" << + " 0" << + " " << + " fs" << + " shared" << + " #{var_location}/datastores/1" << + " -1" << + " " << + images_element << + " " << + "" + + @db[:datastore_pool].insert( + :oid => 1, + :name => 'default', + :body => xml, + :uid => 0, + :gid => 0, + :owner_u => 1, + :group_u => 1, + :other_u => 0) + + return true + end +end diff --git a/src/onedb/onedb.rb b/src/onedb/onedb.rb index a6a041ad38..967b27962b 100644 --- a/src/onedb/onedb.rb +++ b/src/onedb/onedb.rb @@ -107,43 +107,57 @@ class OneDB backup(ops[:backup], ops) end - result = nil - i = 0 + begin + result = nil + i = 0 - while ( matches.size > 0 ) - if ( matches.size > 1 ) - raise "There are more than one file that match \ - \"#{RUBY_LIB_LOCATION}/onedb/#{version}_to_*.rb\"" + while ( matches.size > 0 ) + if ( matches.size > 1 ) + raise "There are more than one file that match \ + \"#{RUBY_LIB_LOCATION}/onedb/#{version}_to_*.rb\"" + end + + file = matches[0] + + puts " > Running migrator #{file}" if ops[:verbose] + + load(file) + @backend.extend Migrator + result = @backend.up + + if !result + raise "Error while upgrading from #{version} to " << + " #{@backend.db_version}" + end + + puts " > Done" if 
ops[:verbose] + puts "" if ops[:verbose] + + matches = Dir.glob( + "#{RUBY_LIB_LOCATION}/onedb/#{@backend.db_version}_to_*.rb") end - file = matches[0] - - puts " > Running migrator #{file}" if ops[:verbose] - - load(file) - @backend.extend Migrator - result = @backend.up - - if !result - raise "Error while upgrading from #{version} to " << - " #{@backend.db_version}" + # Modify db_versioning table + if result != nil + @backend.update_db_version(version) + else + puts "Database already uses version #{version}" end - puts " > Done" if ops[:verbose] - puts "" if ops[:verbose] + return 0 - matches = Dir.glob( - "#{RUBY_LIB_LOCATION}/onedb/#{@backend.db_version}_to_*.rb") + rescue Exception => e + puts e.message + + puts + puts "The database will be restored" + + ops[:force] = true + + restore(ops[:backup], ops) + + return -1 end - - # Modify db_versioning table - if result != nil - @backend.update_db_version(version) - else - puts "Database already uses version #{version}" - end - - return 0 end private diff --git a/src/onedb/test/oned_mysql.conf b/src/onedb/test/oned_mysql.conf index 164abeaf5d..7275333b2c 100644 --- a/src/onedb/test/oned_mysql.conf +++ b/src/onedb/test/oned_mysql.conf @@ -6,7 +6,7 @@ # Daemon configuration attributes #------------------------------------------------------------------------------- # MANAGER_TIMER: Time in seconds the core uses to evaluate periodical functions. -# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL can not have smaller values +# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL cannot have smaller values # than MANAGER_TIMER. # # HOST_MONITORING_INTERVAL: Time in seconds between host monitorization. @@ -390,7 +390,7 @@ HM_MAD = [ #-------------------------------- ebtables Hook--------------------------------- # You can use these two hooks to isolate networks at the ethernet level so the -# traffic generated in different virtual networks can not be seen in others. 
+# traffic generated in different virtual networks cannot be seen in others. # # All the network configuration will be done in the cluster nodes, these are the # additional requisites: diff --git a/src/onedb/test/oned_sqlite.conf b/src/onedb/test/oned_sqlite.conf index e8a6aca4f9..09f4189ba0 100644 --- a/src/onedb/test/oned_sqlite.conf +++ b/src/onedb/test/oned_sqlite.conf @@ -6,7 +6,7 @@ # Daemon configuration attributes #------------------------------------------------------------------------------- # MANAGER_TIMER: Time in seconds the core uses to evaluate periodical functions. -# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL can not have smaller values +# HOST_MONITORING_INTERVAL and VM_POLLING_INTERVAL cannot have smaller values # than MANAGER_TIMER. # # HOST_MONITORING_INTERVAL: Time in seconds between host monitorization. @@ -390,7 +390,7 @@ HM_MAD = [ #-------------------------------- ebtables Hook--------------------------------- # You can use these two hooks to isolate networks at the ethernet level so the -# traffic generated in different virtual networks can not be seen in others. +# traffic generated in different virtual networks cannot be seen in others. 
# # All the network configuration will be done in the cluster nodes, these are the # additional requisites: diff --git a/src/ozones/Client/bin/onevdc b/src/ozones/Client/bin/onevdc index 1fb688a2b2..2872771b1a 100755 --- a/src/ozones/Client/bin/onevdc +++ b/src/ozones/Client/bin/onevdc @@ -50,6 +50,32 @@ cmd=CommandParser::CmdParser.new(ARGV) do :description => "Force the usage of Hosts in more than one VDC" } + HOST={ + :name => "hosts", + :short => "-s 1,2,3", + :large => "--hosts 1,2,3", + :description => "Host IDs", + :format => Array + } + + DS={ + :name => "datastores", + :short => "-d 1,2,3", + :large => "--datastores 1,2,3", + :description => "Datastore IDs", + :format => Array + } + + NET={ + :name => "networks", + :short => "-n 1,2,3", + :large => "--networks 1,2,3", + :description => "Network IDs", + :format => Array + } + + ADD_DEL_OPTIONS=[HOST, DS, NET] + begin helper = VDCHelper.new "vdc" rescue Exception => e @@ -74,13 +100,28 @@ cmd=CommandParser::CmdParser.new(ARGV) do helper.delete_resource(args[0],options) end - command :addhost, 'Adds the set of hosts to the VDC', - :vdcid, :range, :options=>[FORCE] do - helper.addhost(args[0], args[1], options) + command :add, 'Adds the set of resources to the VDC', :vdcid, + :options=>[FORCE].concat(ADD_DEL_OPTIONS) do + if options[:hosts].nil? and options[:datastores].nil? and + options[:networks].nil? + STDERR.puts "At least one resource type must be specified: " \ + "network (-n), host (-s) or datastore (-d) " + exit 1 + end + + helper.add(args[0], options) end - command :delhost, 'Deletes the set of hosts from the VDC', - :vdcid, :range do - helper.delhost(args[0], args[1], options) + command :del, 'Deletes the set of resources from the VDC', :vdcid, + :options => ADD_DEL_OPTIONS do + + if options[:hosts].nil? and options[:datastores].nil? and + options[:networks].nil? 
+ STDERR.puts "At least one resource type must be specified: " \ + "network (-n), host (-s) or datastore (-d) " + exit 1 + end + + helper.del(args[0], options) end end diff --git a/src/ozones/Client/bin/onezone b/src/ozones/Client/bin/onezone index 5eae35012e..ccef51e094 100755 --- a/src/ozones/Client/bin/onezone +++ b/src/ozones/Client/bin/onezone @@ -65,7 +65,8 @@ cmd=CommandParser::CmdParser.new(ARGV) do show_desc = <<-EOT.unindent Show information of a particular Zone - Available resources: host, vm, image, vnet, vmtemplate, user + Available resources: host, vm, image, vnet, vmtemplate, + user, cluster, datastore Examples: onezone show 4 onezone show 4 host @@ -95,6 +96,10 @@ cmd=CommandParser::CmdParser.new(ARGV) do aux_helper = OneTemplateHelper.new when "user" aux_helper = OneUserHelper.new + when "cluster" + aux_helper = OneClusterHelper.new + when "datastore" + aux_helper = OneDatastoreHelper.new else puts "\n:!: Pool #{args[1]} doesn't exist or is not supported\n\n" next 0 @@ -102,7 +107,6 @@ cmd=CommandParser::CmdParser.new(ARGV) do pool_hash_data = helper.get_resource_pool("zone", args[0], args[1], options) - if pool_hash_data[0] != 0 puts "\nError retrieving information for pool #{args[1]}. 
Reason: " + pool_hash_data[1] + "\n\n" next 0 @@ -123,6 +127,10 @@ cmd=CommandParser::CmdParser.new(ARGV) do pool_hash_data[1].each{|hash| hash.replace(Hash.transform_keys_to_strings(hash))} end + puts + str_h1="%-61s" + CLIHelper.print_header(str_h1 % ["ZONE VIEW - #{args[1]}"],false) + table = aux_helper.format_pool(options) table.show(pool_hash_data[1]) diff --git a/src/ozones/Client/lib/cli/ozones_helper/vdc_helper.rb b/src/ozones/Client/lib/cli/ozones_helper/vdc_helper.rb index cf0970e24c..8f8a98b026 100644 --- a/src/ozones/Client/lib/cli/ozones_helper/vdc_helper.rb +++ b/src/ozones/Client/lib/cli/ozones_helper/vdc_helper.rb @@ -17,7 +17,14 @@ require 'cli/ozones_helper' require 'cli/one_helper' +require 'zona' + class VDCHelper < OZonesHelper::OZHelper + NAME_REG = /[\w\d_-]+/ + VAR_REG = /\s*(#{NAME_REG})\s*=\s*/ + + SVAR_REG = /^#{VAR_REG}([^\[]+?)(#.*)?$/ + def initialize(kind, user=nil, pass=nil, endpoint_str=nil, timeout=nil, debug_flag=true) @vdc_str = kind @@ -25,13 +32,30 @@ class VDCHelper < OZonesHelper::OZHelper end def create_resource(template, options) - tmpl_str = File.read(template) + tmpl_str = File.read(template) + tmpl_hash = Hash.new - if options[:force] - tmpl_str << "FORCE=YES\n" + tmpl_str.scan(SVAR_REG) do | m | + key = m[0].strip.upcase + value = m[1].strip + + tmpl_hash[key] = value end - rc = @client.post_resource_str(@vdc_str, tmpl_str) + hosts = tmpl_hash.delete("HOSTS") + ds = tmpl_hash.delete("DATASTORES") + nets = tmpl_hash.delete("NETWORKS") + + tmpl_hash["RESOURCES"] = { "HOSTS" => eval("[#{hosts}]"), + "DATASTORES" => eval("[#{ds}]"), + "NETWORKS" => eval("[#{nets}]") } + if options[:force] + tmpl_hash["FORCE"] = "YES" + end + + vdc = { "#{@vdc_str.upcase}" => tmpl_hash } + + rc = @client.post_resource(@vdc_str,Zona::OZonesJSON.to_json(vdc)) if Zona::is_error?(rc) [-1, rc.message] @@ -53,71 +77,89 @@ class VDCHelper < OZonesHelper::OZHelper super(@vdc_str,id, options) end - def addhost(id, host_array, options) - rc = 
@client.get_resource(@vdc_str, id) + def add(id, options) + vdc = Zona::VDC.new(Zona::VDC.build_json(id), @client) + rc = vdc.info + + return [-1, rc.message] if Zona::is_error?(rc) + + exit_code = 0 + message = "" + + rc = vdc.add_hosts(options[:hosts], :FORCE => options[:force]) if Zona::is_error?(rc) - return [-1, rc.message] - else - vdc = Zona::OZonesJSON.parse_json(rc.body, @vdc_str.upcase) + message << "Error adding hosts to VDC:\n\t#{rc.message}\n" + exit_code = -1 end - hosts = vdc[:HOSTS].split(',').collect!{|x| x.to_i} - host_array.concat(hosts).uniq! - - new_host = host_array.join(',') - template = "ID=#{id}\nHOSTS=#{new_host}\n" - - if options[:force] - template << "FORCE=YES\n" - end - - rc = @client.put_resource_str(@vdc_str, id, template) + rc = vdc.add_networks(options[:networks]) if Zona::is_error?(rc) - return [-1, rc.message] + message << "Error adding networks to VDC:\n#{rc.message}\n" + exit_code = -1 end - [0, ""] + rc = vdc.add_datastores(options[:datastores]) + + if Zona::is_error?(rc) + message << "Error adding datastores to VDC:\n\t#{rc.message}\n" + exit_code = -1 + end + + return [exit_code, message] end - def delhost(id, host_array, options) - rc = @client.get_resource(@vdc_str, id) + def del(id, options) + vdc = Zona::VDC.new(Zona::VDC.build_json(id), @client) + rc = vdc.info + + return [-1, rc.message] if Zona::is_error?(rc) + + exit_code = 0 + message = "" + + rc = vdc.del_hosts(options[:hosts]) if Zona::is_error?(rc) - return [-1, rc.message] - else - vdc = Zona::OZonesJSON.parse_json(rc.body, @vdc_str.upcase) + message << "Error deleting to VDC:\n\t#{rc.message}\n" + exit_code = -1 end - hosts = vdc[:HOSTS].split(',').collect!{|x| x.to_i} + rc = vdc.del_networks(options[:networks]) - new_host = (hosts - host_array).join(',') - template = "ID=#{id}\nHOSTS=#{new_host}\n" - - rc = @client.put_resource_str(@vdc_str, id, template) - - if Zona.is_error?(rc) - return [-1, rc.message] + if Zona::is_error?(rc) + message << "Error deleting 
networks to VDC:\n#{rc.message}\n" + exit_code = -1 end - [0, ""] + rc = vdc.del_datastores(options[:datastores]) + + if Zona::is_error?(rc) + message << "Error deleting datastores to VDC:\n\t#{rc.message}\n" + exit_code = -1 + end + + return [exit_code, message] end private def format_resource(vdc, options) str_h1="%-60s" - str="%-10s: %-20s" + str="%-12s: %-20s" CLIHelper.print_header(str_h1 % ["VDC #{vdc['name']} INFORMATION"]) - puts str % ["ID ", vdc[:ID].to_s] - puts str % ["NAME ", vdc[:NAME].to_s] - puts str % ["GROUP_ID ", vdc[:GROUP_ID].to_s] - puts str % ["ZONEID ", vdc[:ZONES_ID].to_s] - puts str % ["VDCADMIN ", vdc[:VDCADMINNAME].to_s] - puts str % ["HOST IDs ", vdc[:HOSTS].to_s] + puts str % ["ID ", vdc[:ID].to_s] + puts str % ["NAME ", vdc[:NAME].to_s] + puts str % ["ZONE_ID ", vdc[:ZONES_ID].to_s] + puts str % ["CLUSTER_ID ", vdc[:CLUSTER_ID].to_s] + puts str % ["GROUP_ID ", vdc[:GROUP_ID].to_s] + puts str % ["VDCADMIN ", vdc[:VDCADMINNAME].to_s] + puts str % ["HOSTS ", vdc[:RESOURCES][:HOSTS].to_s] + puts str % ["DATASTORES ", vdc[:RESOURCES][:DATASTORES].to_s] + puts str % ["NETWORKS ", vdc[:RESOURCES][:NETWORKS].to_s] puts return 0 @@ -129,16 +171,36 @@ class VDCHelper < OZonesHelper::OZHelper d[:ID] end - column :NAME, "Name of the VDC", :right, :size=>15 do |d,e| + column :NAME, "Name of the VDC", :left, :size=>15 do |d,e| d[:NAME] end - column :ZONEID, "Id of the Zone where it belongs", - :right, :size=>40 do |d,e| + column :ZONE, "Id of the Zone where it belongs", + :right, :size=>5 do |d,e| d[:ZONES_ID] end - default :ID, :NAME, :ZONEID + column :CLUSTER, "Cluster where it belongs", + :right, :size=>7 do |d,e| + d[:CLUSTER_ID] + end + + column :HOSTS, "Number of hosts in the VDC", + :right, :size=>5 do |d,e| + d[:RESOURCES][:HOSTS].size + end + + column :DATASTORES, "Number of datastores in the VDC", + :right, :size=>10 do |d,e| + d[:RESOURCES][:DATASTORES].size + end + + column :NETWORKS, "Number of networks in the VDC", + :right, :size=>8 
do |d,e| + d[:RESOURCES][:NETWORKS].size + end + + default :ID, :ZONE, :CLUSTER, :NAME, :HOSTS, :NETWORKS, :DATASTORES end st.show(pool[:VDC], options) diff --git a/src/ozones/Client/lib/cli/ozones_helper/zones_helper.rb b/src/ozones/Client/lib/cli/ozones_helper/zones_helper.rb index a4bd18b220..71d85c7610 100644 --- a/src/ozones/Client/lib/cli/ozones_helper/zones_helper.rb +++ b/src/ozones/Client/lib/cli/ozones_helper/zones_helper.rb @@ -5,7 +5,8 @@ require 'cli/one_helper/oneimage_helper' require 'cli/one_helper/onevnet_helper' require 'cli/one_helper/onetemplate_helper' require 'cli/one_helper/oneuser_helper' - +require 'cli/one_helper/onecluster_helper' +require 'cli/one_helper/onedatastore_helper' class ZonesHelper < OZonesHelper::OZHelper def initialize(kind, user=nil, pass=nil, endpoint_str=nil, @@ -50,14 +51,14 @@ class ZonesHelper < OZonesHelper::OZHelper return [0, zone] end - CLIHelper.print_header(str_h1 % ["VDCS INFORMATION"]) + CLIHelper.print_header(str_h1 % ["VDCS INFORMATION"], false) st=CLIHelper::ShowTable.new(nil) do - column :ID, "Identifier for VDC", :size=>4 do |d,e| + column :ID, "Identifier for VDC", :size=>5 do |d,e| d[:ID] end - column :NAME, "Name of the VDC", :right, :size=>15 do |d,e| + column :NAME, "Name of the VDC", :left, :size=>15 do |d,e| d[:NAME] end diff --git a/src/ozones/Client/lib/zona.rb b/src/ozones/Client/lib/zona.rb index 7aae83c1ab..a7af045d60 100644 --- a/src/ozones/Client/lib/zona.rb +++ b/src/ozones/Client/lib/zona.rb @@ -224,12 +224,8 @@ EOT return Client.parse_error(res, kind) end - - private - - # Starts an http connection and calls the block provided. SSL flag # is set if needed. 
def self.http_start(url, timeout, &block) @@ -274,29 +270,25 @@ EOT def self.parse_error(value, kind) if Zona.is_error?(value) return value - else - if Zona.is_http_error?(value) - str = "Operating with #{kind} failed with HTTP error" - str += " code: #{value.code}\n" - if value.body - # Try to extract error message - begin - str << "Body: " << - OZonesJSON.parse_json(value.body, - "error")[:message] - rescue - str.gsub!("\nBody:","") - end - end - return Error.new(str) - end end + + if Zona.is_http_error?(value) + str = "Operating with #{kind} failed with HTTP error" + str += " code: #{value.code}\n" + + if value.body + ehash = OZonesJSON.parse_json(value.body,"error") + + str << ehash[:message] if !ehash.nil? and !Zona.is_error?(ehash) + end + + return Error.new(str) + end + value # If it is not an error, return it as-is end - end - # Parses a OpenNebula template string and turns it into a JSON string # @param [String] kind element kind # @param [String] tmpl_str template diff --git a/src/ozones/Client/lib/zona/VDCElement.rb b/src/ozones/Client/lib/zona/VDCElement.rb index 730e6db0e9..33fd06e65e 100644 --- a/src/ozones/Client/lib/zona/VDCElement.rb +++ b/src/ozones/Client/lib/zona/VDCElement.rb @@ -73,7 +73,6 @@ module Zona super(VDC_KIND) end - # Adds hosts to a VDC. The specified hosts are added to the VDC's # current ones. # @param [Array<#to_i>] hosts_array array of hosts IDs @@ -82,17 +81,63 @@ module Zona # @option options [Boolean] :force allows hosts to add hosts # which already belong to other VDCs # @return [Zona::Error] nil or Error - def addhosts(hosts_array,options={}) + def add_hosts(hosts_array,options={}) + addresource(:HOSTS, hosts_array, options) + end + + def add_networks(net_array,options={}) + addresource(:NETWORKS, net_array, options) + end + + def add_datastores(ds_array,options={}) + addresource(:DATASTORES, ds_array, options) + end + + # Delete hosts from a VDC. The specified hosts are removed from the VDC. 
+ # @param [Array<#to_i>] hosts_array array of the VDC's hosts IDs + # to be removed. If a host is not in the VDC, then it is ignored. + # @return [Zona::Error] nil or Error + def del_hosts(hosts_array) + delresource(:HOSTS, hosts_array) + end + + def del_networks(net_array) + delresource(:NETWORKS, net_array) + end + + def del_datastores(ds_array) + delresource(:DATASTORES, ds_array) + end + + alias :add_host :add_hosts + alias :del_host :del_hosts + + alias :add_network :add_networks + alias :del_network :del_networks + + alias :add_datastore :add_datastores + alias :del_datastore :del_datastores + + private + + def addresource(type, rsrc_array, options={}) + return nil if rsrc_array.nil? + return nil if rsrc_array.empty? return Error.new('VDC not info-ed') if !@json_hash - # array of hosts, integers - hosts = self[:HOSTS].split(',').collect!{|x| x.to_i} - hosts.concat(hosts_array).uniq! + orig_resources = self[:RESOURCES][type].clone + + rsrc_array.map! {|i| i.to_i} + self[:RESOURCES][type].concat(rsrc_array).uniq! + + return nil if self[:RESOURCES][type] == orig_resources + + template = { + :ID => @pe_id, + :RESOURCES => self[:RESOURCES] + } - new_hosts = hosts.join(',') - template = {:ID => @pe_id, :HOSTS => new_hosts} template[:FORCE] = "YES" if options[:FORCE] - template = {:VDC => template} rc = @client.put_resource(VDC_KIND,@pe_id,template.to_json) @@ -100,25 +145,28 @@ module Zona nil end - # Delete hosts from a VDC. The specified hosts are removed from the VDC. - # @param [Array<#to_i>] hosts_array array of the VDC's hosts IDs - # to be removed. If a host is not in the VDC, then it is ignored. - # @return [Zona::Error] nil or Error - def delhosts(hosts_array) + def delresource(type, rsrc_array) + return nil if rsrc_array.nil? + return nil if rsrc_array.empty? 
return Error.new('VDC not info-ed') if !@json_hash - hosts = self[:HOSTS].split(',').collect!{|x| x.to_i} + orig_resources = self[:RESOURCES][type].clone - new_hosts = (hosts - hosts_array).join(',') - template = {:VDC => {:ID => @pe_id, :HOSTS => new_hosts}} + rsrc_array.map! {|i| i.to_i} + self[:RESOURCES][type] = self[:RESOURCES][type] - rsrc_array + + return nil if self[:RESOURCES][type] == orig_resources + + template = { + :VDC => { + :ID => @pe_id, + :RESOURCES => self[:RESOURCES] + } + } rc = @client.put_resource(VDC_KIND,@pe_id,template.to_json) return rc if Zona.is_error?(rc) nil end - - alias :addhost :addhosts - alias :delhost :delhosts - end end diff --git a/src/ozones/Server/bin/ozones-server b/src/ozones/Server/bin/ozones-server index 9a637cafbb..720aad1742 100755 --- a/src/ozones/Server/bin/ozones-server +++ b/src/ozones/Server/bin/ozones-server @@ -54,7 +54,7 @@ setup() start() { if [ ! -f "$OZONES_SERVER" ]; then - echo "Can not find $OZONES_SERVER." + echo "Cannot find $OZONES_SERVER." exit 1 fi diff --git a/src/tm_mad/dummy/tm_dummy.conf b/src/ozones/Server/lib/OZones/AggregatedClusters.rb old mode 100755 new mode 100644 similarity index 83% rename from src/tm_mad/dummy/tm_dummy.conf rename to src/ozones/Server/lib/OZones/AggregatedClusters.rb index 5bb570cb61..608cb739ff --- a/src/tm_mad/dummy/tm_dummy.conf +++ b/src/ozones/Server/lib/OZones/AggregatedClusters.rb @@ -14,10 +14,17 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # -CLONE = dummy/tm_dummy.sh -LN = dummy/tm_dummy.sh -MKSWAP = dummy/tm_dummy.sh -MKIMAGE = dummy/tm_dummy.sh -DELETE = dummy/tm_dummy.sh -MV = dummy/tm_dummy.sh -CONTEXT = dummy/tm_dummy.sh +module OZones + + class AggregatedClusters < AggregatedPool + + def initialize + super("ZONE_POOL") + end + + def factory(client) + OpenNebulaJSON::ClusterPoolJSON.new(client) + end + end + +end diff --git a/src/ozones/Server/lib/OZones/AggregatedDatastores.rb b/src/ozones/Server/lib/OZones/AggregatedDatastores.rb new file mode 100644 index 0000000000..8b2200e70a --- /dev/null +++ b/src/ozones/Server/lib/OZones/AggregatedDatastores.rb @@ -0,0 +1,30 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +module OZones + + class AggregatedDatastores < AggregatedPool + + def initialize + super("ZONE_POOL") + end + + def factory(client) + OpenNebulaJSON::DatastorePoolJSON.new(client) + end + end + +end diff --git a/src/ozones/Server/lib/OZones/VDC.rb b/src/ozones/Server/lib/OZones/VDC.rb index b3b89bb905..46ec6de56f 100644 --- a/src/ozones/Server/lib/OZones/VDC.rb +++ b/src/ozones/Server/lib/OZones/VDC.rb @@ -14,49 +14,78 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # +require 'OzonesServer' + module OZones + # VDC class represents a virtual datacenter abstraction. It is defined by an + # ID, NAME, the GROUP backing the VDC, the admin credentials and the VDC + # resources. VDC resources are stored in JSON document in the DB and used as + # a hash in the VDC. class Vdc include DataMapper::Resource include OpenNebulaJSON::JSONUtils - extend OpenNebulaJSON::JSONUtils + extend OpenNebulaJSON::JSONUtils property :ID, Serial property :NAME, String, :required => true, :unique => true property :GROUP_ID, Integer property :VDCADMINNAME, String, :required => true property :VDCADMIN_ID, Integer - property :ACLS, Text - property :HOSTS, Text + property :CLUSTER_ID, Integer + property :RESOURCES, Text belongs_to :zones + def resources + rsrc_json = self.RESOURCES + + parser = JSON.parser.new(rsrc_json, {:symbolize_names => true}) + parser.parse + end + + def resources=(rsrc_hash) + self.RESOURCES = JSON.generate(rsrc_hash) + end + def self.to_hash zonePoolHash = Hash.new - zonePoolHash["VDC_POOL"] = Hash.new + + zonePoolHash["VDC_POOL"] = Hash.new zonePoolHash["VDC_POOL"]["VDC"] = Array.new unless self.all.empty? - self.all.each{|vdc| - # Hack! zones_ID does not respect the - # "all capital letters" policy + + self.all.each{ |vdc| + # Hack! zones_ID does not respect the "all capital letters" policy attrs = vdc.attributes.clone + attrs[:ZONES_ID] = vdc.attributes[:zones_ID] attrs.delete(:zones_ID) + rsrc_json = attrs.delete(:RESOURCES) + parser = JSON.parser.new(rsrc_json, {:symbolize_names=>true}) + attrs[:RESOURCES] = parser.parse + zonePoolHash["VDC_POOL"]["VDC"] << attrs } + return zonePoolHash end def to_hash vdc_attributes = Hash.new - # Hack! zones_ID does not respect the - # "all capital letters" policy + # Hack! 
zones_ID does not respect the "all capital letters" policy attrs = attributes.clone + attrs[:ZONES_ID] = attributes[:zones_ID] attrs.delete(:zones_ID) + rsrc_json = attrs.delete(:RESOURCES) + parser = JSON.parser.new(rsrc_json, {:symbolize_names=>true}) + attrs[:RESOURCES] = parser.parse + vdc_attributes["VDC"] = attrs + return vdc_attributes end end @@ -70,14 +99,17 @@ module OZones ####################################################################### # Constants ####################################################################### - VDC_ATTRS = [:VDCADMINNAME, :VDCADMINPASS, :NAME, :HOSTS] + VDC_ATTRS = [:VDCADMINNAME, + :VDCADMINPASS, + :NAME, + :CLUSTER_ID, + :RESOURCES] attr_reader :vdc attr_reader :zone #Creates an OpenNebula VDC, using its ID, vdcid and the associated zone def initialize(vdcid, zone = nil) - if vdcid != -1 @vdc = Vdc.get(vdcid) @@ -102,21 +134,33 @@ module OZones # ####################################################################### def create(vdc_data) - #Check and prepare VDC data + OzonesServer::logger.debug {"Creating new VDC #{vdc_data}"} + + #Check and prepare VDC data and preserve RESOURCES VDC_ATTRS.each { |param| if !vdc_data[param] return OZones::Error.new("Error: Couldn't create vdc." 
\ - "Mandatory attribute '#{param}' is missing.") + "Mandatory attribute '#{param}' is missing.") end } - #Create a vdc record + rsrc = vdc_data.delete(:RESOURCES) + vdcpass = vdc_data.delete(:VDCADMINPASS) + + #------------------------------------------------------------------- + # Create a vdc record & check cluster consistency + #------------------------------------------------------------------- @vdc = Vdc.new - vdcpass = vdc_data.delete(:VDCADMINPASS) @vdc.attributes = vdc_data + rc = resources_in_cluster?(rsrc) + + return rc if OpenNebula.is_error?(rc) + + #------------------------------------------------------------------- # Create a group in the zone with the VDC name + #------------------------------------------------------------------- group = OpenNebula::Group.new(OpenNebula::Group.build_xml, @client) rc = group.allocate(@vdc.NAME) @@ -124,7 +168,11 @@ module OZones @vdc.GROUP_ID = group.id + OzonesServer::logger.debug {"Group #{group.id} created"} + + #------------------------------------------------------------------- # Create the VDC admin user in the Zone + #------------------------------------------------------------------- user = OpenNebula::User.new(OpenNebula::User.build_xml, @client) rc = user.allocate(@vdc.VDCADMINNAME, vdcpass) @@ -132,31 +180,47 @@ module OZones @vdc.VDCADMIN_ID = user.id + OzonesServer::logger.debug {"VDC admin user #{user.id} created"} + + #------------------------------------------------------------------- # Change primary group of the admin user to the VDC group + #------------------------------------------------------------------- rc = user.chgrp(group.id) return rollback(group, user, nil, rc) if OpenNebula.is_error?(rc) + #------------------------------------------------------------------- # Add ACLs - aclp = OpenNebula::AclPool.new(@client) - rules = get_acls + #------------------------------------------------------------------- + rules = get_acls(rsrc) - rc, acls_str = create_acls(rules) - return rollback(group, 
user,acls_str,rc) if OpenNebula.is_error?(rc) + OzonesServer::logger.debug {"Creating ACLs #{rules}..."} - @vdc.ACLS = acls_str + rc, acl_ids = create_acls(rules) + return rollback(group, user, acl_ids,rc) if OpenNebula.is_error?(rc) + + OzonesServer::logger.debug {"ACLs #{acl_ids} created"} + + rsrc[:ACLS] = acl_ids + @vdc.resources = rsrc return true end + ####################################################################### + # + ####################################################################### def destroy + #------------------------------------------------------------------- # Delete the resources from the VDC + #------------------------------------------------------------------- delete_images delete_templates delete_vms - delete_vns delete_acls - # Delete users from a group + #------------------------------------------------------------------- + # Delete users from a group and the group + #------------------------------------------------------------------- up = OpenNebula::UserPool.new(@client) up.info @@ -166,13 +230,14 @@ module OZones end } - # Delete the group OpenNebula::Group.new_with_id(@vdc.GROUP_ID, @client).delete return @vdc.destroy end - #Cleans bootstrap operations in a zone + ####################################################################### + # Cleans bootstrap operations in a zone + ####################################################################### def clean_bootstrap delete_acls @@ -180,33 +245,46 @@ module OZones OpenNebula::Group.new_with_id(@vdc.GROUP_ID, @client).delete end - def update(host_list) + ####################################################################### + # + ####################################################################### + def update(rsrc_hash) + #------------------------------------------------------------------- + # Check cluster consistency + #------------------------------------------------------------------- + rc = resources_in_cluster?(rsrc_hash) + + return rc if OpenNebula.is_error?(rc) + 
+ # ------------------------------------------------------------------ # Delete existing host ACLs - delete_host_acls + # ------------------------------------------------------------------ + delete_resource_acls - if @vdc.ACLS =~ /((\d+,){#{HOST_ACL_FIRST_ID}}).*/ - newacls = $1.chop - else - newacls = @vdc.ACLS.clone - end + acls = @vdc.resources[:ACLS] + acls.slice!(RESOURCE_ACL_FIRST_ID..-1) + + # ------------------------------------------------------------------ # Create new ACLs. TODO Rollback ACL creation - if !host_list.empty? - host_acls = get_host_acls(host_list) - rc, acls_str = create_acls(host_acls) + # ------------------------------------------------------------------ + if !rsrc_hash.nil? + rsrc_acls = get_resource_acls(rsrc_hash) + rc, acls_ids = create_acls(rsrc_acls) return rc if OpenNebula.is_error?(rc) - #Create the new acl string. - newacls << "," << acls_str + acls.concat(acls_ids) end + rsrc_hash[:ACLS] = acls + # ------------------------------------------------------------------ #Update the VDC Record + # ------------------------------------------------------------------ begin @vdc.raise_on_save_failure = true - @vdc.HOSTS = host_list - @vdc.ACLS = newacls + @vdc.resources = rsrc_hash @vdc.save rescue => e @@ -220,63 +298,69 @@ module OZones ####################################################################### # Functions to generate ACL Strings ####################################################################### - # The ID of the first host ACL - HOST_ACL_FIRST_ID = 3 + # The ID of the first resource ACL + RESOURCE_ACL_FIRST_ID = 3 # This method returns an Array of ACL strings to create them # in the target zone - def get_acls + def get_acls(rsrc_hash) rule_str = Array.new # Grant permissions to the group - rule_str << "@#{@vdc.GROUP_ID} VM+NET+IMAGE+TEMPLATE/* CREATE" + rule_str << "@#{@vdc.GROUP_ID} VM+IMAGE+TEMPLATE/* CREATE" # Grant permissions to the vdc admin rule_str << "##{@vdc.VDCADMIN_ID} USER/* CREATE" rule_str << 
"##{@vdc.VDCADMIN_ID} USER/@#{@vdc.GROUP_ID} " \ "USE+MANAGE+ADMIN" + rule_str << "##{@vdc.VDCADMIN_ID} VM+IMAGE+TEMPLATE/@#{@vdc.GROUP_ID} " \ + "USE+MANAGE" - ############################################################### - #When more rules are added the class constant HOST_ACL_FIRST_ID + #################################################################### + #When more rules are added the class constant RESOURCE_ACL_FIRST_ID #must be modified - ############################################################### + #################################################################### - rule_str.concat(get_host_acls) + rule_str.concat(get_resource_acls(rsrc_hash)) end - def get_host_acls(host_list = nil) - rule_str = Array.new - - if host_list == nil - host_list = @vdc.HOSTS - end + def get_resource_acls(rsrc_hash) + rule_str = Array.new # Grant permissions to use the vdc hosts - host_list.split(',').each{|hostid| + rsrc_hash[:HOSTS].each{ |hostid| rule_str << "@#{@vdc.GROUP_ID} HOST/##{hostid} MANAGE" } + # Grant permissions to use the vdc datastores + rsrc_hash[:DATASTORES].each{ |dsid| + rule_str << "@#{@vdc.GROUP_ID} DATASTORE/##{dsid} USE" + } + + # Grant permissions to use the vdc networks + rsrc_hash[:NETWORKS].each{ |netid| + rule_str << "@#{@vdc.GROUP_ID} NET/##{netid} USE" + } + return rule_str end ####################################################################### # Functions to delete resources associated to the VDC ####################################################################### - # Deletes ACLs for the hosts - def delete_host_acls - host_acls = @vdc.ACLS.split(',')[HOST_ACL_FIRST_ID..-1] - - if host_acls - host_acls.each{|acl| - OpenNebula::Acl.new_with_id(acl.to_i, @client).delete - } - end + # Deletes ACLs for the resources + def delete_resource_acls + delete_acls(RESOURCE_ACL_FIRST_ID) end # Delete ACLs - def delete_acls - @vdc.ACLS.split(",").each{|acl| - OpenNebula::Acl.new_with_id(acl.to_i, @client).delete + def delete_acls(first = 0) + 
rsrc = @vdc.resources + + return if rsrc[:ACLS].nil? + + rsrc[:ACLS][first..-1].each { |acl_id| + OpenNebula::Acl.new_with_id(acl_id, @client).delete } end @@ -310,16 +394,6 @@ module OZones } end - # Deletes VNs - def delete_vns - vnp = OpenNebula::VirtualNetworkPool.new(@client) - vnp.info - - vnp.each{|vn| - vnp.delete if vn['GID'].to_i == @vdc.GROUP_ID - } - end - ####################################################################### # Misc helper functions for the class ####################################################################### @@ -330,9 +404,8 @@ module OZones user.delete if user if acls - acls.chop - acls.split(",").each{|acl| - OpenNebula::Acl.new_with_id(acl.to_i, @client).delete + acls.each{|acl_id| + OpenNebula::Acl.new_with_id(acl_id, @client).delete } end @@ -342,7 +415,7 @@ module OZones # Creates an acl array of acl strings. Returns true or error and # a comma-separated list with the new acl ids def create_acls(acls) - acls_str = "" + acls_ids = Array.new rc = true acls.each{|rule| @@ -351,10 +424,36 @@ module OZones break if OpenNebula.is_error?(rc) - acls_str << acl.id.to_s << "," + acls_ids << acl.id } - return rc, acls_str.chop + return rc, acls_ids + end + + # + # + # + def resources_in_cluster?(rsrc_hash) + cluster = OpenNebula::Cluster.new_with_id(@vdc.CLUSTER_ID, @client) + rc = cluster.info + + if OpenNebula.is_error?(rc) + return OpenNebula::Error.new("Error getting cluster: #{rc.message}") + end + + if !cluster.contains_datastore?(rsrc_hash[:DATASTORES]) + return OpenNebula::Error.new("Some Datastores are not in cluster") + end + + if !cluster.contains_host?(rsrc_hash[:HOSTS]) + return OpenNebula::Error.new("Some Hosts are not in cluster") + end + + if !cluster.contains_vnet?(rsrc_hash[:NETWORKS]) + return OpenNebula::Error.new("Some Networks are not in cluster") + end + + return true end end end diff --git a/src/ozones/Server/lib/OZones/Zones.rb b/src/ozones/Server/lib/OZones/Zones.rb index 6e9aae9db7..4ec6fac7bd 100644 --- 
a/src/ozones/Server/lib/OZones/Zones.rb +++ b/src/ozones/Server/lib/OZones/Zones.rb @@ -189,6 +189,10 @@ module OZones OpenNebulaJSON::VirtualNetworkPoolJSON.new(@client) when "template","vmtemplate" then OpenNebulaJSON::TemplatePoolJSON.new(@client) + when "cluster" then + OpenNebulaJSON::ClusterPoolJSON.new(@client) + when "datastore" then + OpenNebulaJSON::DatastorePoolJSON.new(@client) else error = OZones::Error.new("Error: Pool #{pool_kind} not " \ "supported for zone view") @@ -214,6 +218,10 @@ module OZones OZones::AggregatedVirtualNetworks.new when "template","vmtemplate" then OZones::AggregatedTemplates.new + when "cluster" then + OZones::AggregatedCluster.new + when "datastore" then + OZones::AggregatedDatastore.new else error = OZones::Error.new("Error: Pool #{pool_kind} not" \ " supported for aggregated zone view") diff --git a/src/ozones/Server/models/OzonesServer.rb b/src/ozones/Server/models/OzonesServer.rb index 1b43ba887b..2777fe6532 100644 --- a/src/ozones/Server/models/OzonesServer.rb +++ b/src/ozones/Server/models/OzonesServer.rb @@ -15,10 +15,8 @@ #--------------------------------------------------------------------------- # require 'CloudServer' - require 'JSONUtils' - class OzonesServer < CloudServer include OpenNebulaJSON::JSONUtils @@ -92,31 +90,31 @@ class OzonesServer < CloudServer "Reason: #{data.message}.").to_json] end + #----------------------------------------------------------------------- #Get the Zone that will host the VDC. And check resouces - zoneid = vdc_data.delete(:ZONEID) + #----------------------------------------------------------------------- + zoneid = vdc_data.delete(:ZONE_ID) force = vdc_data.delete(:FORCE) if !zoneid - return [400, - OZones::Error.new("Error: Couldn't create vdc. " \ - "Mandatory attribute zoneid missing.").to_json] + return [400, OZones::Error.new("Error: Couldn't create vdc. 
" \ + "Mandatory attribute zoneid missing.").to_json] end zone = OZones::Zones.get(zoneid) if !zone return [404, OZones::Error.new("Error: Couldn't create vdc. " \ - "Zone #{zoneid} not found.").to_json] + "Zone #{zoneid} not found.").to_json] end - if (!force or force.upcase!="YES") and - !host_uniqueness?(zone, vdc_data[:HOSTS]) - - return [403, - OZones::Error.new("Error: Couldn't create vdc. " \ - "Hosts are not unique, use force to override").to_json] + if (!force or force.upcase != "YES") and !host_unique?(zone, vdc_data) + return [403, OZones::Error.new("Error: Couldn't create vdc. " \ + "Hosts are not unique, use force to override").to_json] end + #----------------------------------------------------------------------- # Create de VDC + #----------------------------------------------------------------------- vdc = OZones::OpenNebulaVdc.new(-1,zone) rc = vdc.create(vdc_data) @@ -125,18 +123,20 @@ class OzonesServer < CloudServer "Reason: #{rc.message}").to_json] end + #----------------------------------------------------------------------- #Update the zone and save the vdc + #----------------------------------------------------------------------- zone.raise_on_save_failure = true zone.vdcs << vdc.vdc begin zone.save rescue => e - vdc.clean_bootstrap + #vdc.clean_bootstrap + logger.error {"create_vdc: #{e.resource.errors.inspect}"} - return [400, - OZones::Error.new("Error: Couldn't create " \ - "vdc. Zone could not be saved: #{e.message}").to_json] + return [400, OZones::Error.new("Error: Couldn't create " \ + "vdc. Zone could not be saved: #{e.message}").to_json] end pr.update # Rewrite proxy conf file @@ -172,18 +172,16 @@ class OzonesServer < CloudServer if OpenNebula.is_error?(vdc_data) return [400, OZones::Error.new("Error: Couldn't update vdc. 
" \ "Reason: #{data.message}.").to_json] - end - - hosts = vdc_data.delete(:HOSTS) - force = vdc_data.delete(:FORCE) - - # Check parameters - if !hosts + end + + #----------------------------------------------------------------------- + # Check parameters & VDC + #----------------------------------------------------------------------- + if !vdc_data[:RESOURCES] return [400, OZones::Error.new("Error: Couldn't update vdc. " \ - "Missing HOSTS.").to_json] + "Missing RESOURCES.").to_json] end - # Check if the referenced Vdc exists begin vdc = OZones::OpenNebulaVdc.new(vdc_id) rescue => e @@ -191,15 +189,17 @@ class OzonesServer < CloudServer "#{e.message}").to_json] end - if (!force or force.upcase != "YES") and - !host_uniqueness?(vdc.zone, hosts, vdc_id.to_i) + vdc_data[:CLUSTER_ID] = vdc.vdc.CLUSTER_ID + vdc_data[:ID] = vdc.vdc.ID - return [403, - OZones::Error.new("Error: Couldn't update vdc. " \ - "Hosts are not unique, use force to override").to_json] + force = vdc_data.delete(:FORCE) + + if (!force or force.upcase!="YES") and !host_unique?(vdc.zone, vdc_data) + return [403, OZones::Error.new("Error: Couldn't update vdc. " \ + "Hosts are not unique, use force to override").to_json] end - rc = vdc.update(hosts) + rc = vdc.update(vdc_data[:RESOURCES]) if !OpenNebula.is_error?(rc) return [200, rc] @@ -217,7 +217,7 @@ class OzonesServer < CloudServer vdc = OZones::OpenNebulaVdc.new(id) rc = vdc.destroy rescue => e - return [404, OZones::Error.new("Error: Can not delete vdc. " \ + return [404, OZones::Error.new("Error: Cannot delete vdc. " \ "Reason: #{e.message}").to_json] end @@ -236,9 +236,8 @@ class OzonesServer < CloudServer if zone rc = zone.destroy else - return [404, - OZones::Error.new("Error: Can not delete " \ - "zone. Reason: zone #{id} not found").to_json] + return [404, OZones::Error.new("Error: Cannot delete " \ + "zone. 
Reason: zone #{id} not found").to_json] end if !rc @@ -256,20 +255,25 @@ class OzonesServer < CloudServer private # Check if hosts are already include in any Vdc of the zone - def host_uniqueness?(zone, host_list, vdc_id = -1) - return true if host_list.empty? + def host_unique?(zone, vdc_data) - all_hosts = "" - zone.vdcs.all.each{|vdc| - if vdc.HOSTS != nil and !vdc.HOSTS.empty? and vdc.id != vdc_id - all_hosts << ',' << vdc.HOSTS + hosts = vdc_data[:RESOURCES][:HOSTS] + c_id = vdc_data[:CLUSTER_ID] + + return true if hosts.empty? + + all_hosts = Array.new + + zone.vdcs.all(:CLUSTER_ID =>c_id).each{ |vdc| + rsrc = vdc.resources + + if !rsrc[:HOSTS].empty? and vdc.ID != vdc_data[:ID] + all_hosts.concat(rsrc[:HOSTS]) end } - all_hosts = all_hosts.split(',') - - host_list.split(",").each{|host| - return false if all_hosts.include?(host) + hosts.each{|hid| + return false if all_hosts.include?(hid) } return true diff --git a/src/ozones/Server/public/css/layout.css b/src/ozones/Server/public/css/layout.css index 99812fc2b0..5a3762eb53 100644 --- a/src/ozones/Server/public/css/layout.css +++ b/src/ozones/Server/public/css/layout.css @@ -136,9 +136,31 @@ background-image: -moz-linear-gradient( padding-left: 30px; } +.navigation li.subsubTab { + line-height: 1.7em; + font-size: 11px; + text-align: left; + padding-left: 40px; +} -.navigation li a { +.navigation li.topTab span.plusIcon, +.navigation li.subTab span.plusIcon { + display : none; + float: right; + margin-right: 1em; +} + +.navigation li.topTab span.plusIcon { + margin-top: 5px; +} + +.navigation li.subTab span.plusIcon { + margin-top: 3px; +} + +.navigation li { color: #ffffff; + cursor: pointer; } .navigation li:hover, .navigation-active-li { @@ -169,9 +191,54 @@ background-image: -moz-linear-gradient( ); */ } -.navigation-active-li-a { +.navigation-active-li { font-weight: bold; } -.navigation li:hover a, .navigation-active-li-a { +.navigation li:hover { color: #ffffff !important; } + +/* top menu css */ 
+#menutop_container{ + margin:0px 171px; + color:#FFFFFF; + font-size:13px; + font-weight:bold; +} +#menutop_navbar{ + float:left; + height:25px; + font-size:13px; +} +#menutop_navbar ul{ + float:left; + height:25px; + color:#000000; + margin: 0 0; + padding-left: 1px; +} +#menutop_navbar ul{ + background-color: #353735; +} +#menutop_navbar ul li{ + float:left; + min-width:72px; + margin:0px 0 0 0; + height:22px; + display: inline; + text-align:center; + padding-left:5px; + padding-right: 5px; + padding-top: 4px; + padding-bottom: 4px; + border-left:1px solid white; + cursor:pointer; + color: white; +} + +#menutop_navbar ul li:hover { + background-color: #E69138; + +} + +/* end top menu css */ \ No newline at end of file diff --git a/src/pool/PoolObjectSQL.cc b/src/pool/PoolObjectSQL.cc index 2a1706b171..fc45b17dd1 100644 --- a/src/pool/PoolObjectSQL.cc +++ b/src/pool/PoolObjectSQL.cc @@ -172,7 +172,7 @@ int PoolObjectSQL::replace_template(const string& tmpl_str, string& error) if ( new_tmpl == 0 ) { - error = "Can not allocate a new template"; + error = "Cannot allocate a new template"; return -1; } diff --git a/src/rm/Request.cc b/src/rm/Request.cc index 4b8b929b96..bd5b9a9f82 100644 --- a/src/rm/Request.cc +++ b/src/rm/Request.cc @@ -177,6 +177,10 @@ string Request::object_name(PoolObjectSQL::ObjectType ob) return "group"; case PoolObjectSQL::ACL: return "ACL"; + case PoolObjectSQL::DATASTORE: + return "datastore"; + case PoolObjectSQL::CLUSTER: + return "cluster"; default: return "-"; } @@ -293,3 +297,31 @@ string Request::allocate_error (const string& error) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ + +int Request::get_info( + PoolSQL * pool, + int id, + PoolObjectSQL::ObjectType type, + RequestAttributes& att, + PoolObjectAuth& perms, + string& name) +{ + PoolObjectSQL * ob; + + if ((ob = pool->get(id,true)) == 0 ) + { + 
failure_response(NO_EXISTS, get_error(object_name(type), id), att); + return -1; + } + + ob->get_permissions(perms); + + name = ob->get_name(); + + ob->unlock(); + + return 0; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ diff --git a/src/rm/RequestManager.cc b/src/rm/RequestManager.cc index c0f2be8dce..9d2099ea18 100644 --- a/src/rm/RequestManager.cc +++ b/src/rm/RequestManager.cc @@ -33,6 +33,7 @@ #include "RequestManagerImage.h" #include "RequestManagerUser.h" #include "RequestManagerAcl.h" +#include "RequestManagerCluster.h" #include #include @@ -113,7 +114,7 @@ int RequestManager::setup_socket() { ostringstream oss; - oss << "Can not open server socket: " << strerror(errno); + oss << "Cannot open server socket: " << strerror(errno); NebulaLog::log("ReM",Log::ERROR,oss); return -1; @@ -125,7 +126,7 @@ int RequestManager::setup_socket() { ostringstream oss; - oss << "Can not set socket options: " << strerror(errno); + oss << "Cannot set socket options: " << strerror(errno); NebulaLog::log("ReM",Log::ERROR,oss); close(socket_fd); @@ -145,7 +146,7 @@ int RequestManager::setup_socket() { ostringstream oss; - oss << "Can not bind to port " << port << " : " << strerror(errno); + oss << "Cannot bind to port " << port << " : " << strerror(errno); NebulaLog::log("ReM",Log::ERROR,oss); close(socket_fd); @@ -254,6 +255,7 @@ void RequestManager::register_xml_methods() xmlrpc_c::methodPtr host_update(new HostUpdateTemplate()); xmlrpc_c::methodPtr vn_update(new VirtualNetworkUpdateTemplate()); xmlrpc_c::methodPtr user_update(new UserUpdateTemplate()); + xmlrpc_c::methodPtr datastore_update(new DatastoreUpdateTemplate()); // Allocate Methods xmlrpc_c::methodPtr vm_allocate(new VirtualMachineAllocate()); @@ -263,6 +265,8 @@ void RequestManager::register_xml_methods() xmlrpc_c::methodPtr template_allocate(new TemplateAllocate()); xmlrpc_c::methodPtr 
host_allocate(new HostAllocate()); xmlrpc_c::methodPtr user_allocate(new UserAllocate()); + xmlrpc_c::methodPtr datastore_allocate(new DatastoreAllocate()); + xmlrpc_c::methodPtr cluster_allocate(new ClusterAllocate()); // Delete Methods xmlrpc_c::methodPtr host_delete(new HostDelete()); @@ -271,6 +275,8 @@ void RequestManager::register_xml_methods() xmlrpc_c::methodPtr vn_delete(new VirtualNetworkDelete()); xmlrpc_c::methodPtr user_delete(new UserDelete()); xmlrpc_c::methodPtr image_delete(new ImageDelete()); + xmlrpc_c::methodPtr datastore_delete(new DatastoreDelete()); + xmlrpc_c::methodPtr cluster_delete(new ClusterDelete()); // Info Methods xmlrpc_c::methodPtr vm_info(new VirtualMachineInfo()); @@ -280,17 +286,19 @@ void RequestManager::register_xml_methods() xmlrpc_c::methodPtr vn_info(new VirtualNetworkInfo()); xmlrpc_c::methodPtr user_info(new UserInfo()); xmlrpc_c::methodPtr image_info(new ImageInfo()); + xmlrpc_c::methodPtr datastore_info(new DatastoreInfo()); + xmlrpc_c::methodPtr cluster_info(new ClusterInfo()); // PoolInfo Methods xmlrpc_c::methodPtr hostpool_info(new HostPoolInfo()); xmlrpc_c::methodPtr grouppool_info(new GroupPoolInfo()); xmlrpc_c::methodPtr userpool_info(new UserPoolInfo()); - - // PoolInfo Methods with Filtering + xmlrpc_c::methodPtr datastorepool_info(new DatastorePoolInfo()); xmlrpc_c::methodPtr vm_pool_info(new VirtualMachinePoolInfo()); xmlrpc_c::methodPtr template_pool_info(new TemplatePoolInfo()); xmlrpc_c::methodPtr vnpool_info(new VirtualNetworkPoolInfo()); xmlrpc_c::methodPtr imagepool_info(new ImagePoolInfo()); + xmlrpc_c::methodPtr clusterpool_info(new ClusterPoolInfo()); // Host Methods xmlrpc_c::methodPtr host_enable(new HostEnable()); @@ -306,18 +314,28 @@ void RequestManager::register_xml_methods() xmlrpc_c::methodPtr vn_chown(new VirtualNetworkChown()); xmlrpc_c::methodPtr image_chown(new ImageChown()); xmlrpc_c::methodPtr user_chown(new UserChown()); + xmlrpc_c::methodPtr datastore_chown(new DatastoreChown()); // 
Chmod Methods xmlrpc_c::methodPtr vm_chmod(new VirtualMachineChmod()); xmlrpc_c::methodPtr template_chmod(new TemplateChmod()); xmlrpc_c::methodPtr vn_chmod(new VirtualNetworkChmod()); xmlrpc_c::methodPtr image_chmod(new ImageChmod()); + xmlrpc_c::methodPtr datastore_chmod(new DatastoreChmod()); // ACL Methods xmlrpc_c::methodPtr acl_addrule(new AclAddRule()); xmlrpc_c::methodPtr acl_delrule(new AclDelRule()); xmlrpc_c::methodPtr acl_info(new AclInfo()); + // Cluster Methods + xmlrpc_c::methodPtr cluster_addhost(new ClusterAddHost()); + xmlrpc_c::methodPtr cluster_delhost(new ClusterDelHost()); + xmlrpc_c::methodPtr cluster_addds(new ClusterAddDatastore()); + xmlrpc_c::methodPtr cluster_delds(new ClusterDelDatastore()); + xmlrpc_c::methodPtr cluster_addvnet(new ClusterAddVNet()); + xmlrpc_c::methodPtr cluster_delvnet(new ClusterDelVNet()); + /* VM related methods */ RequestManagerRegistry.addMethod("one.vm.deploy", vm_deploy); RequestManagerRegistry.addMethod("one.vm.action", vm_action); @@ -399,6 +417,30 @@ void RequestManager::register_xml_methods() RequestManagerRegistry.addMethod("one.acl.addrule", acl_addrule); RequestManagerRegistry.addMethod("one.acl.delrule", acl_delrule); RequestManagerRegistry.addMethod("one.acl.info", acl_info); + + /* Datastore related methods */ + RequestManagerRegistry.addMethod("one.datastore.allocate",datastore_allocate); + RequestManagerRegistry.addMethod("one.datastore.delete", datastore_delete); + RequestManagerRegistry.addMethod("one.datastore.info", datastore_info); + RequestManagerRegistry.addMethod("one.datastore.update", datastore_update); + RequestManagerRegistry.addMethod("one.datastore.chown", datastore_chown); + RequestManagerRegistry.addMethod("one.datastore.chmod", datastore_chmod); + + RequestManagerRegistry.addMethod("one.datastorepool.info",datastorepool_info); + + /* Cluster related methods */ + RequestManagerRegistry.addMethod("one.cluster.allocate",cluster_allocate); + 
RequestManagerRegistry.addMethod("one.cluster.delete", cluster_delete); + RequestManagerRegistry.addMethod("one.cluster.info", cluster_info); + + RequestManagerRegistry.addMethod("one.cluster.addhost", cluster_addhost); + RequestManagerRegistry.addMethod("one.cluster.delhost", cluster_delhost); + RequestManagerRegistry.addMethod("one.cluster.adddatastore", cluster_addds); + RequestManagerRegistry.addMethod("one.cluster.deldatastore", cluster_delds); + RequestManagerRegistry.addMethod("one.cluster.addvnet", cluster_addvnet); + RequestManagerRegistry.addMethod("one.cluster.delvnet", cluster_delvnet); + + RequestManagerRegistry.addMethod("one.clusterpool.info",clusterpool_info); }; /* -------------------------------------------------------------------------- */ diff --git a/src/rm/RequestManagerAllocate.cc b/src/rm/RequestManagerAllocate.cc index 051463f1dc..e5c70bda4a 100644 --- a/src/rm/RequestManagerAllocate.cc +++ b/src/rm/RequestManagerAllocate.cc @@ -23,8 +23,10 @@ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -bool RequestManagerAllocate::allocate_authorization(Template * tmpl, - RequestAttributes& att) +bool RequestManagerAllocate::allocate_authorization( + Template * tmpl, + RequestAttributes& att, + PoolObjectAuth * cluster_perms) { if ( att.uid == 0 ) { @@ -42,6 +44,11 @@ bool RequestManagerAllocate::allocate_authorization(Template * tmpl, ar.add_create_auth(auth_object, tmpl_str); + if ( cluster_perms->oid != ClusterPool::NONE_CLUSTER_ID ) + { + ar.add_auth(AuthRequest::ADMIN, *cluster_perms); // ADMIN CLUSTER + } + if (UserPool::authorize(ar) == -1) { failure_response(AUTHORIZATION, @@ -57,8 +64,10 @@ bool RequestManagerAllocate::allocate_authorization(Template * tmpl, /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -bool 
VirtualMachineAllocate::allocate_authorization(Template * tmpl, - RequestAttributes& att) +bool VirtualMachineAllocate::allocate_authorization( + Template * tmpl, + RequestAttributes& att, + PoolObjectAuth * cluster_perms) { if ( att.uid == 0 ) { @@ -67,9 +76,28 @@ bool VirtualMachineAllocate::allocate_authorization(Template * tmpl, AuthRequest ar(att.uid, att.gid); string t64; + string aname; VirtualMachineTemplate * ttmpl = static_cast(tmpl); + // Check template for restricted attributes + + if ( att.uid != 0 && att.gid != GroupPool::ONEADMIN_ID ) + { + if (ttmpl->check(aname)) + { + ostringstream oss; + + oss << "VM Template includes a restricted attribute " << aname; + + failure_response(AUTHORIZATION, + authorization_error(oss.str(), att), + att); + + return false; + } + } + ar.add_create_auth(auth_object, tmpl->to_xml(t64)); VirtualMachine::set_auth_request(att.uid, ar, ttmpl); @@ -89,6 +117,39 @@ bool VirtualMachineAllocate::allocate_authorization(Template * tmpl, /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +bool ImageAllocate::allocate_authorization( + Template * tmpl, + RequestAttributes& att, + PoolObjectAuth * cluster_perms) +{ + string aname; + + ImageTemplate * itmpl = static_cast(tmpl); + + // Check template for restricted attributes + + if ( att.uid != 0 && att.gid != GroupPool::ONEADMIN_ID ) + { + if (itmpl->check(aname)) + { + ostringstream oss; + + oss << "Template includes a restricted attribute " << aname; + + failure_response(AUTHORIZATION, + authorization_error(oss.str(), att), + att); + + return false; + } + } + + return RequestManagerAllocate::allocate_authorization(tmpl, att, cluster_perms); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + void RequestManagerAllocate::request_execute(xmlrpc_c::paramList 
const& params, RequestAttributes& att) { @@ -97,6 +158,11 @@ void RequestManagerAllocate::request_execute(xmlrpc_c::paramList const& params, string error_str; int rc, id; + Cluster * cluster = 0; + int cluster_id = ClusterPool::NONE_CLUSTER_ID; + string cluster_name = ClusterPool::NONE_CLUSTER_NAME; + PoolObjectAuth cluster_perms; + if ( do_template == true ) { string str_tmpl = xmlrpc_c::value_string(params.getString(1)); @@ -114,19 +180,77 @@ void RequestManagerAllocate::request_execute(xmlrpc_c::paramList const& params, } } - if ( allocate_authorization(tmpl, att) == false ) + cluster_id = get_cluster_id(params); + + if ( cluster_id != ClusterPool::NONE_CLUSTER_ID ) { + rc = get_info(clpool, cluster_id, PoolObjectSQL::CLUSTER, att, + cluster_perms, cluster_name); + + if ( rc != 0 ) + { + delete tmpl; + return; + } + } + else + { + cluster_perms.oid = ClusterPool::NONE_CLUSTER_ID; + } + + if ( allocate_authorization(tmpl, att, &cluster_perms) == false ) + { + delete tmpl; return; } - rc = pool_allocate(params, tmpl, id, error_str, att); + rc = pool_allocate(params, tmpl, id, error_str, att, cluster_id, cluster_name); if ( rc < 0 ) { failure_response(INTERNAL, allocate_error(error_str), att); return; } - + + if ( cluster_id != ClusterPool::NONE_CLUSTER_ID ) + { + cluster = clpool->get(cluster_id, true); + + if ( cluster == 0 ) + { + failure_response( + NO_EXISTS, + get_error(object_name(PoolObjectSQL::CLUSTER), cluster_id), + att); + return; + } + + rc = add_to_cluster(cluster, id, error_str); + + if ( rc < 0 ) + { + string drop_err; + PoolObjectSQL * obj = 0; + + cluster->unlock(); + + obj = pool->get(id, true); + + if ( obj != 0 ) + { + pool->drop(obj, drop_err); + obj->unlock(); + } + + failure_response(INTERNAL, allocate_error(error_str), att); + return; + } + + clpool->update(cluster); + + cluster->unlock(); + } + success_response(id, att); } @@ -150,33 +274,141 @@ int VirtualMachineAllocate::pool_allocate(xmlrpc_c::paramList const& paramList, /* 
-------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int VirtualNetworkAllocate::pool_allocate(xmlrpc_c::paramList const& _paramList, - Template * tmpl, - int& id, - string& error_str, - RequestAttributes& att) +int VirtualNetworkAllocate::pool_allocate( + xmlrpc_c::paramList const& paramList, + Template * tmpl, + int& id, + string& error_str, + RequestAttributes& att, + int cluster_id, + const string& cluster_name) { VirtualNetworkPool * vpool = static_cast(pool); VirtualNetworkTemplate * vtmpl=static_cast(tmpl); return vpool->allocate(att.uid, att.gid, att.uname, att.gname, vtmpl, &id, - error_str); + cluster_id, cluster_name, error_str); } /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int ImageAllocate::pool_allocate(xmlrpc_c::paramList const& _paramList, - Template * tmpl, - int& id, - string& error_str, - RequestAttributes& att) +void ImageAllocate::request_execute(xmlrpc_c::paramList const& params, + RequestAttributes& att) { - ImagePool * ipool = static_cast(pool); - ImageTemplate * itmpl = static_cast(tmpl); + string error_str; + string ds_name; + string ds_data; + int rc, id; - return ipool->allocate(att.uid, att.gid, att.uname, att.gname, itmpl, &id, - error_str); + PoolObjectAuth ds_perms; + + string str_tmpl = xmlrpc_c::value_string(params.getString(1)); + int ds_id = xmlrpc_c::value_int(params.getInt(2)); + + Nebula& nd = Nebula::instance(); + + DatastorePool * dspool = nd.get_dspool(); + ImagePool * ipool = static_cast(pool); + + ImageTemplate * tmpl = new ImageTemplate; + Datastore * ds; + + // ------------------------- Parse image template -------------------------- + + rc = tmpl->parse_str_or_xml(str_tmpl, error_str); + + if ( rc != 0 ) + { + failure_response(INTERNAL, allocate_error(error_str), att); + + delete tmpl; + return; 
+ } + + // ------------------------- Check Datastore exists ------------------------ + + if ( ds_id == DatastorePool::SYSTEM_DS_ID ) + { + ostringstream oss; + + oss << "New images cannot be allocated in the system datastore."; + failure_response(INTERNAL, allocate_error(oss.str()), att); + + delete tmpl; + return; + } + + if ((ds = dspool->get(ds_id,true)) == 0 ) + { + failure_response(NO_EXISTS, + get_error(object_name(PoolObjectSQL::DATASTORE), ds_id), + att); + + delete tmpl; + return; + } + + ds->get_permissions(ds_perms); + + ds_name = ds->get_name(); + + ds->to_xml(ds_data); + + ds->unlock(); + + // ------------- Set authorization request for non-oneadmin's -------------- + + if ( att.uid != 0 ) + { + AuthRequest ar(att.uid, att.gid); + string tmpl_str = ""; + + tmpl->to_xml(tmpl_str); + + ar.add_create_auth(auth_object, tmpl_str); // CREATE IMAGE + + ar.add_auth(AuthRequest::USE, ds_perms); // USE DATASTORE + + if (UserPool::authorize(ar) == -1) + { + failure_response(AUTHORIZATION, + authorization_error(ar.message, att), + att); + + delete tmpl; + return; + } + } + + rc = ipool->allocate(att.uid, + att.gid, + att.uname, + att.gname, + tmpl, + ds_id, + ds_name, + ds_data, + &id, + error_str); + if ( rc < 0 ) + { + failure_response(INTERNAL, allocate_error(error_str), att); + return; + } + + ds = dspool->get(ds_id, true); + + if ( ds != 0 ) // TODO: error otherwise or leave image in ERROR? 
+ { + ds->add_image(id); + + dspool->update(ds); + + ds->unlock(); + } + + success_response(id, att); } /* -------------------------------------------------------------------------- */ @@ -199,24 +431,30 @@ int TemplateAllocate::pool_allocate(xmlrpc_c::paramList const& _paramList, /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int HostAllocate::pool_allocate(xmlrpc_c::paramList const& paramList, - Template * tmpl, - int& id, - string& error_str, - RequestAttributes& att) +int HostAllocate::pool_allocate( + xmlrpc_c::paramList const& paramList, + Template * tmpl, + int& id, + string& error_str, + RequestAttributes& att, + int cluster_id, + const string& cluster_name) { string host = xmlrpc_c::value_string(paramList.getString(1)); string im_mad = xmlrpc_c::value_string(paramList.getString(2)); string vmm_mad = xmlrpc_c::value_string(paramList.getString(3)); string vnm_mad = xmlrpc_c::value_string(paramList.getString(4)); - string tm_mad = xmlrpc_c::value_string(paramList.getString(5)); HostPool * hpool = static_cast(pool); - return hpool->allocate(&id, host, im_mad, vmm_mad, vnm_mad, tm_mad, - error_str); + return hpool->allocate(&id, host, im_mad, vmm_mad, vnm_mad, + cluster_id, cluster_name, error_str); + } +/* -------------------------------------------------------------------------- */ + + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -265,3 +503,38 @@ int GroupAllocate::pool_allocate(xmlrpc_c::paramList const& paramList, return gpool->allocate(gname, &id, error_str); } +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int DatastoreAllocate::pool_allocate( + xmlrpc_c::paramList const& paramList, + Template * tmpl, + 
int& id, + string& error_str, + RequestAttributes& att, + int cluster_id, + const string& cluster_name) +{ + DatastorePool * dspool = static_cast(pool); + + DatastoreTemplate * ds_tmpl = static_cast(tmpl); + + return dspool->allocate(att.uid, att.gid, att.uname, att.gname, + ds_tmpl, &id, cluster_id, cluster_name, error_str); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +int ClusterAllocate::pool_allocate(xmlrpc_c::paramList const& paramList, + Template * tmpl, + int& id, + string& error_str, + RequestAttributes& att) +{ + string name = xmlrpc_c::value_string(paramList.getString(1)); + + ClusterPool * clpool = static_cast(pool); + + return clpool->allocate(name, &id, error_str); +} diff --git a/src/rm/RequestManagerChown.cc b/src/rm/RequestManagerChown.cc index 4ecbf9ee48..768a87c5e8 100644 --- a/src/rm/RequestManagerChown.cc +++ b/src/rm/RequestManagerChown.cc @@ -23,33 +23,6 @@ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int RequestManagerChown::get_info (PoolSQL * pool, - int id, - PoolObjectSQL::ObjectType type, - RequestAttributes& att, - PoolObjectAuth& perms, - string& name) -{ - PoolObjectSQL * ob; - - if ((ob = pool->get(id,true)) == 0 ) - { - failure_response(NO_EXISTS, get_error(object_name(type), id), att); - return -1; - } - - ob->get_permissions(perms); - - name = ob->get_name(); - - ob->unlock(); - - return 0; -} - -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ - void RequestManagerChown::request_execute(xmlrpc_c::paramList const& paramList, RequestAttributes& att) { diff --git a/src/rm/RequestManagerCluster.cc b/src/rm/RequestManagerCluster.cc new file mode 100644 index 0000000000..73bcd3da75 
--- /dev/null +++ b/src/rm/RequestManagerCluster.cc @@ -0,0 +1,194 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------- */ + +#include "RequestManagerCluster.h" + +using namespace std; + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +void RequestManagerCluster::add_generic( + int cluster_id, + int object_id, + RequestAttributes& att, + PoolSQL * pool, + PoolObjectSQL::ObjectType type) +{ + int rc; + + string cluster_name; + string obj_name; + string err_msg; + + Cluster * cluster; + Clusterable * cluster_obj = 0; + PoolObjectSQL * object = 0; + + + PoolObjectAuth c_perms; + PoolObjectAuth obj_perms; + + int old_cluster_id; + string old_cluster_name; + + if ( cluster_id != ClusterPool::NONE_CLUSTER_ID ) + { + rc = get_info(clpool, cluster_id, PoolObjectSQL::CLUSTER, att, c_perms, cluster_name); + + if ( rc == -1 ) + { + return; + } + } + else + { + cluster_name = ClusterPool::NONE_CLUSTER_NAME; + } + + rc = get_info(pool, object_id, type, att, obj_perms, obj_name); + + if ( rc == -1 ) + { + return; + } + + if ( att.uid != 0 ) + { + AuthRequest ar(att.uid, att.gid); + + if ( 
cluster_id != ClusterPool::NONE_CLUSTER_ID ) + { + ar.add_auth(auth_op, c_perms); // ADMIN CLUSTER + } + + ar.add_auth(AuthRequest::ADMIN, obj_perms); // ADMIN OBJECT + + if (UserPool::authorize(ar) == -1) + { + failure_response(AUTHORIZATION, + authorization_error(ar.message, att), + att); + + return; + } + } + + // ------------- Set new cluster id in object --------------------- + get(object_id, true, &object, &cluster_obj); + + if ( object == 0 ) + { + failure_response(NO_EXISTS, + get_error(object_name(type), object_id), + att); + + return; + } + + old_cluster_id = cluster_obj->get_cluster_id(); + old_cluster_name = cluster_obj->get_cluster_name(); + + if ( old_cluster_id == cluster_id ) + { + object->unlock(); + success_response(cluster_id, att); + return; + } + + cluster_obj->set_cluster(cluster_id, cluster_name); + + pool->update(object); + + object->unlock(); + + // ------------- Add object to new cluster --------------------- + if ( cluster_id != ClusterPool::NONE_CLUSTER_ID ) + { + cluster = clpool->get(cluster_id, true); + + if ( cluster == 0 ) + { + failure_response(NO_EXISTS, + get_error(object_name(PoolObjectSQL::CLUSTER),cluster_id), + att); + + // Rollback + get(object_id, true, &object, &cluster_obj); + + if ( object != 0 ) + { + cluster_obj->set_cluster(old_cluster_id, old_cluster_name); + + pool->update(object); + + object->unlock(); + } + + return; + } + + if ( add_object(cluster, object_id, err_msg) < 0 ) + { + cluster->unlock(); + + failure_response(INTERNAL, + request_error("Cannot add object to cluster", err_msg), + att); + + return; + } + + clpool->update(cluster); + + cluster->unlock(); + } + + // ------------- Remove host from old cluster --------------------- + + if ( old_cluster_id != ClusterPool::NONE_CLUSTER_ID ) + { + cluster = clpool->get(old_cluster_id, true); + + if ( cluster == 0 ) + { + // This point should be unreachable. 
+ // The old cluster is not empty (at least has the host_id), + // so it cannot be deleted + success_response(cluster_id, att); + return; + } + + if ( del_object(cluster, object_id, err_msg) < 0 ) + { + cluster->unlock(); + + failure_response(INTERNAL, + request_error("Cannot remove object from cluster", err_msg), + att); + + return; + } + + clpool->update(cluster); + + cluster->unlock(); + } + + success_response(cluster_id, att); + + return; +} diff --git a/src/rm/RequestManagerDelete.cc b/src/rm/RequestManagerDelete.cc index be06c4f929..34ec4d0393 100644 --- a/src/rm/RequestManagerDelete.cc +++ b/src/rm/RequestManagerDelete.cc @@ -91,7 +91,7 @@ void RequestManagerDelete::request_execute(xmlrpc_c::paramList const& paramList, if ( rc != 0 ) { failure_response(INTERNAL, - request_error("Can not delete "+object_name(auth_object),error_msg), + request_error("Cannot delete "+object_name(auth_object),error_msg), att); return; } @@ -101,19 +101,95 @@ void RequestManagerDelete::request_execute(xmlrpc_c::paramList const& paramList, return; } +/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ -int ImageDelete::drop(int oid, PoolObjectSQL * object, string& error_msg) +int RequestManagerDelete::drop( + int oid, + PoolObjectSQL * object, + string& error_msg) { - Nebula& nd = Nebula::instance(); - ImageManager * imagem = nd.get_imagem(); + int cluster_id = get_cluster_id(object); + + int rc = pool->drop(object, error_msg); object->unlock(); - int rc = imagem->delete_image(oid); + + if ( cluster_id != ClusterPool::NONE_CLUSTER_ID && rc == 0 ) + { + Cluster * cluster = clpool->get(cluster_id, true); + + if( cluster != 0 ) + { + rc = del_from_cluster(cluster, oid, error_msg); + + if ( rc < 0 ) + { + cluster->unlock(); + return rc; + } + + clpool->update(cluster); + + cluster->unlock(); + } + } return rc; } +/* 
------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +int ImageDelete::drop(int oid, PoolObjectSQL * object, string& error_msg) +{ + Nebula& nd = Nebula::instance(); + + ImageManager * imagem = nd.get_imagem(); + DatastorePool * dspool = nd.get_dspool(); + + Datastore * ds; + Image * img; + + int ds_id, rc; + string ds_data; + + img = static_cast(object); + ds_id = img->get_ds_id(); + + img->unlock(); + + ds = dspool->get(ds_id, true); + + if ( ds == 0 ) + { + error_msg = "Datastore no longer exists cannot remove image"; + return -1; + } + + ds->to_xml(ds_data); + + ds->unlock(); + + rc = imagem->delete_image(oid, ds_data); + + if ( rc == 0 ) + { + ds = dspool->get(ds_id, true); + + if ( ds != 0 ) + { + ds->del_image(oid); + dspool->update(ds); + + ds->unlock(); + } + } + + return rc; +} + +/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ int UserDelete::drop(int oid, PoolObjectSQL * object, string& error_msg) @@ -123,7 +199,7 @@ int UserDelete::drop(int oid, PoolObjectSQL * object, string& error_msg) if (oid == 0) { - error_msg = "oneadmin can not be deleted."; + error_msg = "oneadmin cannot be deleted."; object->unlock(); return -1; diff --git a/src/rm/RequestManagerPoolInfoFilter.cc b/src/rm/RequestManagerPoolInfoFilter.cc index c7f9897150..e40db5da8d 100644 --- a/src/rm/RequestManagerPoolInfoFilter.cc +++ b/src/rm/RequestManagerPoolInfoFilter.cc @@ -121,6 +121,26 @@ void UserPoolInfo::request_execute( /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ +void DatastorePoolInfo::request_execute( + xmlrpc_c::paramList const& paramList, + RequestAttributes& att) +{ + dump(att, ALL, -1, -1, "", ""); +} + +/* 
------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + +void ClusterPoolInfo::request_execute( + xmlrpc_c::paramList const& paramList, + RequestAttributes& att) +{ + dump(att, ALL, -1, -1, "", ""); +} + +/* ------------------------------------------------------------------------- */ +/* ------------------------------------------------------------------------- */ + void RequestManagerPoolInfoFilter::dump( RequestAttributes& att, int filter_flag, diff --git a/src/rm/RequestManagerUpdateTemplate.cc b/src/rm/RequestManagerUpdateTemplate.cc index 07226eb0b6..1c8988ccbb 100644 --- a/src/rm/RequestManagerUpdateTemplate.cc +++ b/src/rm/RequestManagerUpdateTemplate.cc @@ -54,7 +54,7 @@ void RequestManagerUpdateTemplate::request_execute( if ( rc != 0 ) { failure_response(INTERNAL, - request_error("Can not update template",error_str), + request_error("Cannot update template",error_str), att); object->unlock(); diff --git a/src/rm/RequestManagerVMTemplate.cc b/src/rm/RequestManagerVMTemplate.cc index a62aa2f56d..d354d5a037 100644 --- a/src/rm/RequestManagerVMTemplate.cc +++ b/src/rm/RequestManagerVMTemplate.cc @@ -39,6 +39,7 @@ void VMTemplateInstantiate::request_execute(xmlrpc_c::paramList const& paramList VMTemplate * rtmpl; string error_str; + string aname; rtmpl = tpool->get(id,true); @@ -57,6 +58,26 @@ void VMTemplateInstantiate::request_execute(xmlrpc_c::paramList const& paramList rtmpl->unlock(); + // Check template for restricted attributes, but only if the Template owner + // is not oneadmin + + if ( perms.uid != 0 && perms.gid != GroupPool::ONEADMIN_ID ) + { + if (tmpl->check(aname)) + { + ostringstream oss; + + oss << "VM Template includes a restricted attribute " << aname; + + failure_response(AUTHORIZATION, + authorization_error(oss.str(), att), + att); + + delete tmpl; + return; + } + } + tmpl->erase("NAME"); tmpl->set(new SingleAttribute("NAME",name)); diff 
--git a/src/rm/RequestManagerVirtualMachine.cc b/src/rm/RequestManagerVirtualMachine.cc index 084b31c274..59ef25b9ef 100644 --- a/src/rm/RequestManagerVirtualMachine.cc +++ b/src/rm/RequestManagerVirtualMachine.cc @@ -24,7 +24,8 @@ bool RequestManagerVirtualMachine::vm_authorization(int oid, ImageTemplate * tmpl, RequestAttributes& att, - PoolObjectAuth * host_perm) + PoolObjectAuth * host_perm, + PoolObjectAuth * ds_perm) { PoolObjectSQL * object; PoolObjectAuth vm_perms; @@ -57,13 +58,19 @@ bool RequestManagerVirtualMachine::vm_authorization(int oid, { ar.add_auth(AuthRequest::MANAGE, *host_perm); } - else if (tmpl != 0) + + if (tmpl != 0) { string t_xml; ar.add_create_auth(PoolObjectSQL::IMAGE, tmpl->to_xml(t_xml)); } + if ( ds_perm != 0 ) + { + ar.add_auth(AuthRequest::USE, *ds_perm); + } + if (UserPool::authorize(ar) == -1) { failure_response(AUTHORIZATION, @@ -83,7 +90,6 @@ int RequestManagerVirtualMachine::get_host_information(int hid, string& name, string& vmm, string& vnm, - string& tm, RequestAttributes& att, PoolObjectAuth& host_perms) { @@ -106,7 +112,6 @@ int RequestManagerVirtualMachine::get_host_information(int hid, name = host->get_name(); vmm = host->get_vmm_mad(); vnm = host->get_vnm_mad(); - tm = host->get_tm_mad(); host->get_permissions(host_perms); @@ -141,26 +146,21 @@ int RequestManagerVirtualMachine::add_history(VirtualMachine * vm, const string& hostname, const string& vmm_mad, const string& vnm_mad, - const string& tm_mad, RequestAttributes& att) { - Nebula& nd = Nebula::instance(); string vmdir; - int rc; VirtualMachinePool * vmpool = static_cast(pool); - nd.get_configuration_attribute("VM_DIR",vmdir); - - vm->add_history(hid,hostname,vmdir,vmm_mad,vnm_mad,tm_mad); + vm->add_history(hid,hostname,vmm_mad,vnm_mad); rc = vmpool->update_history(vm); if ( rc != 0 ) { failure_response(INTERNAL, - request_error("Can not update virtual machine history",""), + request_error("Cannot update virtual machine history",""), att); return -1; @@ -184,7 
+184,7 @@ void VirtualMachineAction::request_execute(xmlrpc_c::paramList const& paramList, Nebula& nd = Nebula::instance(); DispatchManager * dm = nd.get_dm(); - if ( vm_authorization(id,0,att,0) == false ) + if ( vm_authorization(id, 0, att, 0, 0) == false ) { return; } @@ -278,19 +278,18 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, string hostname; string vmm_mad; string vnm_mad; - string tm_mad; int id = xmlrpc_c::value_int(paramList.getInt(1)); int hid = xmlrpc_c::value_int(paramList.getInt(2)); bool auth = false; - if (get_host_information(hid,hostname,vmm_mad,vnm_mad,tm_mad, att, host_perms) != 0) + if (get_host_information(hid,hostname,vmm_mad,vnm_mad,att, host_perms) != 0) { return; } - auth = vm_authorization(id,0,att,&host_perms); + auth = vm_authorization(id, 0, att, &host_perms, 0); if ( auth == false ) { @@ -312,7 +311,7 @@ void VirtualMachineDeploy::request_execute(xmlrpc_c::paramList const& paramList, return; } - if ( add_history(vm,hid,hostname,vmm_mad,vnm_mad,tm_mad,att) != 0) + if ( add_history(vm,hid,hostname,vmm_mad,vnm_mad,att) != 0) { vm->unlock(); return; @@ -340,7 +339,6 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList string hostname; string vmm_mad; string vnm_mad; - string tm_mad; int id = xmlrpc_c::value_int(paramList.getInt(1)); int hid = xmlrpc_c::value_int(paramList.getInt(2)); @@ -348,12 +346,12 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList bool auth = false; - if (get_host_information(hid,hostname,vmm_mad,vnm_mad,tm_mad, att, host_perms) != 0) + if (get_host_information(hid,hostname,vmm_mad,vnm_mad,att, host_perms) != 0) { return; } - auth = vm_authorization(id,0,att,&host_perms); + auth = vm_authorization(id, 0, att, &host_perms, 0); if ( auth == false ) { @@ -377,7 +375,7 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList return; } - if ( 
add_history(vm,hid,hostname,vmm_mad,vnm_mad,tm_mad,att) != 0) + if ( add_history(vm,hid,hostname,vmm_mad,vnm_mad,att) != 0) { vm->unlock(); return; @@ -403,8 +401,10 @@ void VirtualMachineMigrate::request_execute(xmlrpc_c::paramList const& paramList void VirtualMachineSaveDisk::request_execute(xmlrpc_c::paramList const& paramList, RequestAttributes& att) { - Nebula& nd = Nebula::instance(); - ImagePool * ipool = nd.get_ipool(); + Nebula& nd = Nebula::instance(); + + ImagePool * ipool = nd.get_ipool(); + DatastorePool * dspool = nd.get_dspool(); int id = xmlrpc_c::value_int(paramList.getInt(1)); int disk_id = xmlrpc_c::value_int(paramList.getInt(2)); @@ -412,24 +412,100 @@ void VirtualMachineSaveDisk::request_execute(xmlrpc_c::paramList const& paramLis string img_type = xmlrpc_c::value_string(paramList.getString(4)); VirtualMachine * vm; - string vm_owner; - int iid; - ImageTemplate * itemplate; + int iid_orig; + + Image * img; + Datastore * ds; int rc; - ostringstream oss; string error_str; - // ------------------ Template for the new image ------------------ + // ------------------------------------------------------------------------- + // Prepare and check the VM/DISK to be saved_as + // ------------------------------------------------------------------------- + + if ( (vm = get_vm(id, att)) == 0 ) + { + failure_response(NO_EXISTS, + get_error(object_name(PoolObjectSQL::VM), id), + att); + return; + } - oss << "NAME= \"" << img_name << "\"" << endl; - oss << "PUBLIC = NO " << endl; - oss << "SOURCE = - " << endl; - oss << "SAVED_DISK_ID = " << disk_id << endl; - oss << "SAVED_VM_ID = " << id << endl; + iid_orig = vm->get_image_from_disk(disk_id, error_str); - if ( img_type != "" ) + pool->update(vm); + + vm->unlock(); + + if ( iid_orig == -1 ) + { + failure_response(INTERNAL, + request_error("Cannot use selected DISK", error_str), + att); + return; + } + + // ------------------------------------------------------------------------- + // Get the data of the Image 
to be saved + // ------------------------------------------------------------------------- + + img = ipool->get(iid_orig, true); + + if ( img == 0 ) + { + failure_response(NO_EXISTS, + get_error(object_name(PoolObjectSQL::IMAGE), iid_orig), + att); + return; + } + + int ds_id = img->get_ds_id(); + string ds_name = img->get_ds_name(); + int size = img->get_size(); + + Image::ImageType type = img->get_type(); + + img->unlock(); + + if ((ds = dspool->get(ds_id, true)) == 0 ) + { + failure_response(NO_EXISTS, + get_error(object_name(PoolObjectSQL::DATASTORE), ds_id), + att); + return; + } + + // ------------------------------------------------------------------------- + // Get the data of the DataStore for the new image + // ------------------------------------------------------------------------- + string ds_data; + PoolObjectAuth ds_perms; + + ds->get_permissions(ds_perms); + ds->to_xml(ds_data); + + ds->unlock(); + + // ------------------------------------------------------------------------- + // Create a template for the new Image + // ------------------------------------------------------------------------- + ImageTemplate * itemplate; + ostringstream oss; + + oss << "NAME = \"" << img_name << "\"" << endl; + oss << "SIZE = " << size << endl; + + oss << "SAVED_IMAGE_ID = " << iid_orig << endl; + oss << "SAVED_DISK_ID = " << disk_id << endl; + oss << "SAVED_VM_ID = " << id << endl; + + if ( img_type.empty() ) + { + oss << "TYPE = " << Image::type_to_str(type) << endl; + } + else { oss << "TYPE = " << img_type << endl; } @@ -438,18 +514,32 @@ void VirtualMachineSaveDisk::request_execute(xmlrpc_c::paramList const& paramLis itemplate->parse_str_or_xml(oss.str(), error_str); - // ------------------ Authorize the operation ------------------ + itemplate->set_saving(); - if ( vm_authorization(id,itemplate,att,0) == false ) + // ------------------------------------------------------------------------- + // Authorize the operation + // 
------------------------------------------------------------------------- + + if ( vm_authorization(id, itemplate, att, 0, &ds_perms) == false ) { + delete itemplate; return; } - // ------------------ Create the image ------------------ - - rc = ipool->allocate(att.uid, att.gid, att.uname, att.gname, itemplate, - &iid, error_str); + // ------------------------------------------------------------------------- + // Create the image + // ------------------------------------------------------------------------- + rc = ipool->allocate(att.uid, + att.gid, + att.uname, + att.gname, + itemplate, + ds_id, + ds_name, + ds_data, + &iid, + error_str); if (rc < 0) { failure_response(INTERNAL, @@ -457,50 +547,6 @@ void VirtualMachineSaveDisk::request_execute(xmlrpc_c::paramList const& paramLis return; } - // ------------------ Store image id to save the disk ------------------ - - if ( (vm = get_vm(id, att)) == 0 ) - { - Image * img; - - if ( (img = ipool->get(iid,true)) != 0 ) - { - string tmp_error; - - ipool->drop(img, tmp_error); - img->unlock(); - } - - return; - } - - rc = vm->save_disk(disk_id, iid, error_str); - - if ( rc == 0 ) - { - pool->update(vm); - } - - vm->unlock(); - - if ( rc == -1 ) - { - Image * img; - - if ( (img = ipool->get(iid,true)) != 0 ) - { - string tmp_error; - - ipool->drop(img, tmp_error); - img->unlock(); - } - - failure_response(INTERNAL, - request_error("Can not save_as disk",error_str), - att); - return; - } - // Return the new allocated Image ID success_response(iid, att); } diff --git a/src/rm/SConstruct b/src/rm/SConstruct index cc692552f9..5c565c5b73 100644 --- a/src/rm/SConstruct +++ b/src/rm/SConstruct @@ -38,6 +38,7 @@ source_files=[ 'RequestManagerChown.cc', 'RequestManagerAcl.cc', 'RequestManagerChmod.cc', + 'RequestManagerCluster.cc' ] # Build library diff --git a/src/sunstone/bin/sunstone-server b/src/sunstone/bin/sunstone-server index 155ff26260..171a8a6938 100755 --- a/src/sunstone/bin/sunstone-server +++ 
b/src/sunstone/bin/sunstone-server @@ -53,7 +53,7 @@ setup() start() { if [ ! -f "$SUNSTONE_SERVER" ]; then - echo "Can not find $SUNSTONE_SERVER." + echo "Cannot find $SUNSTONE_SERVER." exit 1 fi diff --git a/src/sunstone/etc/sunstone-plugins.yaml b/src/sunstone/etc/sunstone-plugins.yaml index ce33cfff64..bc724898ba 100644 --- a/src/sunstone/etc/sunstone-plugins.yaml +++ b/src/sunstone/etc/sunstone-plugins.yaml @@ -9,27 +9,15 @@ :user: :group: oneadmin: false -- plugins/hosts-tab.js: +- plugins/config-tab.js: + :ALL: true + :user: + :group: +- plugins/system-tab.js: :ALL: false :user: :group: oneadmin: true -- plugins/vms-tab.js: - :ALL: true - :user: - :group: -- plugins/templates-tab.js: - :ALL: true - :user: - :group: -- plugins/vnets-tab.js: - :ALL: true - :user: - :group: -- plugins/images-tab.js: - :ALL: true - :user: - :group: - plugins/users-tab.js: :ALL: false :user: @@ -45,7 +33,41 @@ :user: :group: oneadmin: true -- plugins/config-tab.js: +- plugins/vresources-tab.js: :ALL: true :user: :group: +- plugins/vms-tab.js: + :ALL: true + :user: + :group: +- plugins/templates-tab.js: + :ALL: true + :user: + :group: +- plugins/images-tab.js: + :ALL: true + :user: + :group: +- plugins/infra-tab.js: + :ALL: true + :user: + :group: +- plugins/hosts-tab.js: + :ALL: false + :user: + :group: + oneadmin: true +- plugins/datastores-tab.js: + :ALL: true + :user: + :group: +- plugins/vnets-tab.js: + :ALL: true + :user: + :group: +- plugins/clusters-tab.js: + :ALL: false + :user: + :group: + oneadmin: true diff --git a/src/sunstone/models/OpenNebulaJSON.rb b/src/sunstone/models/OpenNebulaJSON.rb index ad5c493785..7055b7a302 100644 --- a/src/sunstone/models/OpenNebulaJSON.rb +++ b/src/sunstone/models/OpenNebulaJSON.rb @@ -19,6 +19,7 @@ include OpenNebula require 'OpenNebulaJSON/GroupJSON' require 'OpenNebulaJSON/HostJSON' +require 'OpenNebulaJSON/ClusterJSON' require 'OpenNebulaJSON/ImageJSON' require 'OpenNebulaJSON/TemplateJSON' require 'OpenNebulaJSON/JSONUtils' @@ -27,6 
+28,7 @@ require 'OpenNebulaJSON/UserJSON' require 'OpenNebulaJSON/VirtualMachineJSON' require 'OpenNebulaJSON/VirtualNetworkJSON' require 'OpenNebulaJSON/AclJSON' +require 'OpenNebulaJSON/DatastoreJSON' module OpenNebula class Error diff --git a/src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb b/src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb new file mode 100644 index 0000000000..b834218a38 --- /dev/null +++ b/src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb @@ -0,0 +1,77 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +require 'OpenNebulaJSON/JSONUtils' + +module OpenNebulaJSON + class ClusterJSON < OpenNebula::Cluster + include JSONUtils + + def create(template_json) + cluster_hash = parse_json(template_json, 'cluster') + if OpenNebula.is_error?(cluster_hash) + return cluster_hash + end + + self.allocate(cluster_hash['name']) + end + + def perform_action(template_json) + action_hash = parse_json(template_json, 'action') + if OpenNebula.is_error?(action_hash) + return action_hash + end + + rc = case action_hash['perform'] + when "addhost" then self.addhost(action_hash['params']) + when "delhost" then self.delhost(action_hash['params']) + when "adddatastore" then self.adddatastore(action_hash['params']) + when "deldatastore" then self.deldatastore(action_hash['params']) + when "addvnet" then self.addvnet(action_hash['params']) + when "delvnet" then self.delvnet(action_hash['params']) + + else + error_msg = "#{action_hash['perform']} action not " << + " available for this resource" + OpenNebula::Error.new(error_msg) + end + end + + def addhost(params=Hash.new) + super(params['host_id'].to_i) + end + + def delhost(params=Hash.new) + super(params['host_id'].to_i) + end + + def adddatastore(params=Hash.new) + super(params['ds_id'].to_i) + end + + def deldatastore(params=Hash.new) + super(params['ds_id'].to_i) + end + + def addvnet(params=Hash.new) + super(params['vnet_id'].to_i) + end + + def delvnet(params=Hash.new) + super(params['vnet_id'].to_i) + end + end +end diff --git a/src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb b/src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb new file mode 100644 index 0000000000..095f30d61a --- /dev/null +++ b/src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb @@ -0,0 +1,72 @@ +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +require 'OpenNebulaJSON/JSONUtils' + +module OpenNebulaJSON + class DatastoreJSON < OpenNebula::Datastore + include JSONUtils + + def create(template_json) + ds_hash = parse_json(template_json, 'datastore') + if OpenNebula.is_error?(ds_hash) + return ds_hash + end + + cluster_id = parse_json(template_json, 'cluster_id') + if OpenNebula.is_error?(cluster_id) + return cluster_id + end + + if ds_hash['datastore_raw'] + template = ds_hash['datastore_raw'] + else + template = template_to_str(ds_hash) + end + + self.allocate(template,cluster_id.to_i) + end + + def perform_action(template_json) + action_hash = parse_json(template_json, 'action') + if OpenNebula.is_error?(action_hash) + return action_hash + end + + rc = case action_hash['perform'] + when "update" then self.update(action_hash['params']) + when "chown" then self.chown(action_hash['params']) + when "chmod" then self.chmod_octet(action_hash['params']) + else + error_msg = "#{action_hash['perform']} action not " << + " available for this resource" + OpenNebula::Error.new(error_msg) + end + end + + def update(params=Hash.new) + super(params['template_raw']) + end + + def chown(params=Hash.new) + super(params['owner_id'].to_i,params['group_id'].to_i) + end + + def chmod_octet(params=Hash.new) + super(params['octet']) + end + end +end diff --git a/src/sunstone/models/OpenNebulaJSON/HostJSON.rb 
b/src/sunstone/models/OpenNebulaJSON/HostJSON.rb index b0dbac71b8..3ca11ecb0b 100644 --- a/src/sunstone/models/OpenNebulaJSON/HostJSON.rb +++ b/src/sunstone/models/OpenNebulaJSON/HostJSON.rb @@ -30,7 +30,7 @@ module OpenNebulaJSON host_hash['im_mad'], host_hash['vm_mad'], host_hash['vnm_mad'], - host_hash['tm_mad']) + host_hash['cluster_id'].to_i) end def delete diff --git a/src/sunstone/models/OpenNebulaJSON/ImageJSON.rb b/src/sunstone/models/OpenNebulaJSON/ImageJSON.rb index 8a5f0a0d35..99a0bdf04b 100644 --- a/src/sunstone/models/OpenNebulaJSON/ImageJSON.rb +++ b/src/sunstone/models/OpenNebulaJSON/ImageJSON.rb @@ -26,13 +26,18 @@ module OpenNebulaJSON return image_hash end + ds_id = parse_json(template_json, 'ds_id') + if OpenNebula.is_error?(ds_id) + return ds_id + end + if image_hash['image_raw'] template = image_hash['image_raw'] else template = template_to_str(image_hash) end - self.allocate(template) + self.allocate(template,ds_id.to_i) end def perform_action(template_json) diff --git a/src/sunstone/models/OpenNebulaJSON/PoolJSON.rb b/src/sunstone/models/OpenNebulaJSON/PoolJSON.rb index fbca6fe2a4..791b723645 100644 --- a/src/sunstone/models/OpenNebulaJSON/PoolJSON.rb +++ b/src/sunstone/models/OpenNebulaJSON/PoolJSON.rb @@ -25,4 +25,6 @@ module OpenNebulaJSON class GroupPoolJSON < OpenNebula::GroupPool; include JSONUtils; end class UserPoolJSON < OpenNebula::UserPool; include JSONUtils; end class AclPoolJSON < OpenNebula::AclPool; include JSONUtils; end + class ClusterPoolJSON < OpenNebula::ClusterPool; include JSONUtils; end + class DatastorePoolJSON < OpenNebula::DatastorePool; include JSONUtils; end end diff --git a/src/sunstone/models/SunstoneServer.rb b/src/sunstone/models/SunstoneServer.rb index 6696d7f436..2739d9b2b4 100644 --- a/src/sunstone/models/SunstoneServer.rb +++ b/src/sunstone/models/SunstoneServer.rb @@ -48,6 +48,7 @@ class SunstoneServer < CloudServer pool = case kind when "group" then GroupPoolJSON.new(@client) + when "cluster" then 
ClusterPoolJSON.new(@client) when "host" then HostPoolJSON.new(@client) when "image" then ImagePoolJSON.new(@client, user_flag) when "vmtemplate" then TemplatePoolJSON.new(@client, user_flag) @@ -55,6 +56,7 @@ class SunstoneServer < CloudServer when "vnet" then VirtualNetworkPoolJSON.new(@client, user_flag) when "user" then UserPoolJSON.new(@client) when "acl" then AclPoolJSON.new(@client) + when "datastore" then DatastorePoolJSON.new(@client) else error = Error.new("Error: #{kind} resource not supported") return [404, error.to_json] @@ -100,6 +102,7 @@ class SunstoneServer < CloudServer def create_resource(kind, template) resource = case kind when "group" then GroupJSON.new(Group.build_xml, @client) + when "cluster" then ClusterJSON.new(Group.build_xml, @client) when "host" then HostJSON.new(Host.build_xml, @client) when "image" then ImageJSON.new(Image.build_xml, @client) when "vmtemplate" then TemplateJSON.new(Template.build_xml, @client) @@ -107,6 +110,7 @@ class SunstoneServer < CloudServer when "vnet" then VirtualNetworkJSON.new(VirtualNetwork.build_xml, @client) when "user" then UserJSON.new(User.build_xml, @client) when "acl" then AclJSON.new(Acl.build_xml, @client) + when "datastore" then DatastoreJSON.new(Acl.build_xml, @client) else error = Error.new("Error: #{kind} resource not supported") return [404, error.to_json] @@ -126,9 +130,22 @@ class SunstoneServer < CloudServer ############################################################################ def upload(template, file_path) image_hash = parse_json(template, 'image') + if OpenNebula.is_error?(image_hash) + return [500, image_hash.to_json] + end + image_hash['PATH'] = file_path - new_template = {:image => image_hash}.to_json + ds_id = parse_json(template, 'ds_id') + if OpenNebula.is_error?(ds_id) + return [500, ds_id.to_json] + end + + new_template = { + :image => image_hash, + :ds_id => ds_id, + }.to_json + image = ImageJSON.new(Image.build_xml, @client) rc = image.create(new_template) @@ -274,14 
+291,16 @@ class SunstoneServer < CloudServer ############################################################################ def retrieve_resource(kind, id) resource = case kind - when "group" then GroupJSON.new_with_id(id, @client) - when "host" then HostJSON.new_with_id(id, @client) - when "image" then ImageJSON.new_with_id(id, @client) + when "group" then GroupJSON.new_with_id(id, @client) + when "cluster" then ClusterJSON.new_with_id(id, @client) + when "host" then HostJSON.new_with_id(id, @client) + when "image" then ImageJSON.new_with_id(id, @client) when "vmtemplate" then TemplateJSON.new_with_id(id, @client) - when "vm" then VirtualMachineJSON.new_with_id(id, @client) - when "vnet" then VirtualNetworkJSON.new_with_id(id, @client) - when "user" then UserJSON.new_with_id(id, @client) - when "acl" then AclJSON.new_with_id(id, @client) + when "vm" then VirtualMachineJSON.new_with_id(id, @client) + when "vnet" then VirtualNetworkJSON.new_with_id(id, @client) + when "user" then UserJSON.new_with_id(id, @client) + when "acl" then AclJSON.new_with_id(id, @client) + when "datastore" then DatastoreJSON.new_with_id(id, @client) else error = Error.new("Error: #{kind} resource not supported") return error diff --git a/src/sunstone/public/css/application.css b/src/sunstone/public/css/application.css index 2f4bbcc15b..c4fca87454 100644 --- a/src/sunstone/public/css/application.css +++ b/src/sunstone/public/css/application.css @@ -31,6 +31,11 @@ select, button { padding: 2px; } +.inline-icon { + display:inline-block; + vertical-align:middle; +} + h2 { float:left; font-size:20px; @@ -46,19 +51,34 @@ h3 { margin: 0 0; } -table#dashboard_table{ +table.dashboard_table{ width:100%; margin: 0; } -table#dashboard_table tr { +table.dashboard_table tr { vertical-align: top; } -table#dashboard_table > tbody > tr > td{ +table.dashboard_table > tbody > tr > td{ width:50%; } +table.dashboard_table .inline-icon { + margin-left: 40px; +} + + +.dashboard_p { + color: #353735; + 
text-align:justify; +} + + +.clusterElemLi { + list-style: circle; +} + div.panel { background-color: #ffffff; padding:0; diff --git a/src/sunstone/public/css/layout.css b/src/sunstone/public/css/layout.css index f7b14cf703..4c28a08978 100644 --- a/src/sunstone/public/css/layout.css +++ b/src/sunstone/public/css/layout.css @@ -38,6 +38,10 @@ body { padding: 5px 10px 0 10px; } +.hidden { + display:none; +} + body { font-family: Arial, Verdana, Geneva, Helvetica, sans-serif; font-size: 13px; @@ -120,19 +124,50 @@ background-image: -moz-linear-gradient( rgb(53,55,53) 100% ); } -#navigation { + +.navigation { list-style: none; padding: 0; } -#navigation li { +.navigation li.topTab { line-height: 2em; - text-align: right; - padding-right: 10px; + text-align: left; + padding-left: 15px; } -#navigation li a { +.navigation li.subTab { + line-height: 1.8em; + font-size: 12px; + text-align: left; + padding-left: 30px; +} + +.navigation li.subsubTab { + line-height: 1.7em; + font-size: 11px; + text-align: left; + padding-left: 40px; +} + +.navigation li.topTab span.plusIcon, +.navigation li.subTab span.plusIcon { + display : none; + float: right; + margin-right: 1em; +} + +.navigation li.topTab span.plusIcon { + margin-top: 5px; +} + +.navigation li.subTab span.plusIcon { + margin-top: 3px; +} + +#navigation li { color: #ffffff; + cursor: pointer; } #navigation li:hover, .navigation-active-li { @@ -163,9 +198,55 @@ background-image: -moz-linear-gradient( ); */ } -.navigation-active-li-a { +.navigation-active-li { font-weight: bold; } -#navigation li:hover a, .navigation-active-li-a { +#navigation li:hover { color: #ffffff !important; } + + +/* top menu css */ +#menutop_container{ + margin:0px 171px; + color:#FFFFFF; + font-size:13px; + font-weight:bold; +} +#menutop_navbar{ + float:left; + height:25px; + font-size:13px; +} +#menutop_navbar ul{ + float:left; + height:25px; + color:#000000; + margin: 0 0; + padding-left: 1px; +} +#menutop_navbar ul{ + background-color: #353735; 
+} +#menutop_navbar ul li{ + float:left; + min-width:72px; + margin:0px 0 0 0; + height:22px; + display: inline; + text-align:center; + padding-left:5px; + padding-right: 5px; + padding-top: 4px; + padding-bottom: 4px; + border-left:1px solid white; + cursor:pointer; + color: white; +} + +#menutop_navbar ul li:hover { + background-color: #E69138; + +} + +/* end top menu css */ \ No newline at end of file diff --git a/src/sunstone/public/js/layout.js b/src/sunstone/public/js/layout.js index b783835236..c655a50b10 100644 --- a/src/sunstone/public/js/layout.js +++ b/src/sunstone/public/js/layout.js @@ -31,51 +31,104 @@ function popDialogLoading(){ popDialog(loading); } -function showTab(tabname){ - activeTab = tabname; +function showTab(tabname,highlight_tab){ + //Since menu items no longer have an element + //we no longer expect #tab_id here, but simply tab_id + //So safety check - remove # from #tab_id if present to ensure compatibility + if (tabname.indexOf('#') == 0) + tabname = tabname.substring(1); + if (highlight_tab && highlight_tab.indexOf('#') == 0) + highlight_tab == highlight.substring(1); + + var activeTab = tabname; + + if (!highlight_tab) highlight_tab = activeTab; //clean selected menu $("#navigation li").removeClass("navigation-active-li"); - $("#navigation li a").removeClass("navigation-active-li-a"); + $("div#header ul#menutop_ul li").removeClass("navigation-active-li"); - //select menu - var li = $("#navigation li:has(a[href='"+activeTab+"'])") - var li_a = $("#navigation li a[href='"+activeTab+"']") + //select tab in left menu + var li = $("#navigation li#li_"+highlight_tab) li.addClass("navigation-active-li"); - li_a.addClass("navigation-active-li-a"); + + //select tab in top menu + var top_li = $("div#header ul#menutop_ul li#top_"+highlight_tab); + top_li.addClass("navigation-active-li"); + //show tab $(".tab").hide(); - $(activeTab).show(); - //~ if (activeTab == '#dashboard') { - //~ emptyDashboard(); - //~ preloadTables(); - //~ } + 
$('#'+activeTab).show(); innerLayout.close("south"); } +function setupTabs(){ + + var topTabs = $(".outer-west ul li.topTab"); + var subTabs = $(".outer-west ul li.subTab"); + + subTabs.live("click",function(){ + //leave floor to topTab listener in case of tabs with both classes + if ($(this).hasClass('topTab')) return false; + + var tab = $(this).attr('id').substring(3); + showTab(tab); + return false; + }); + + topTabs.live("click",function(e){ + var tab = $(this).attr('id').substring(3); + //Subtabs have a class with the name of this tab + var subtabs = $('div#menu li.'+tab); + + //toggle subtabs only when clicking on the icon or when clicking on an + //already selected menu + if ($(e.target).is('span') || + $(this).hasClass("navigation-active-li")){ + //for each subtab, we hide the subsubtabs + subtabs.each(function(){ + //for each subtab, hide its subtabs + var subsubtabs = $(this).attr('id').substr(3); + //subsubtabs class + subsubtabs = $('div#menu li.'+subsubtabs); + subsubtabs.hide(); + }); + //hide subtabs and reset icon to + position, since all subsubtabs + //are hidden + subtabs.fadeToggle('fast'); + $('span',subtabs).removeClass('ui-icon-circle-minus'); + $('span',subtabs).addClass('ui-icon-circle-plus'); + //toggle icon on this tab + $('span',this).toggleClass('ui-icon-circle-plus ui-icon-circle-minus'); + }; + //if we are clicking on the icon only, do not show the tab + if ($(e.target).is('span')) return false; + + showTab(tab); + return false; + }); + +}; + +function setupTopMenu(){ + $('div#header ul#menutop_ul li').live('click',function(){ + var tab = "#" + $(this).attr('id').substring(4); + showTab(tab); + }); +}; + $(document).ready(function () { $(".tab").hide(); - $(".outer-west ul li.subTab").live("click",function(){ - var tab = $('a',this).attr('href'); - showTab(tab); - return false; - }); - - $(".outer-west ul li.topTab").live("click",function(){ - var tab = $('a',this).attr('href'); - //toggle subtabs trick - 
$('li.'+tab.substr(1)).toggle(); - showTab(tab); - return false; - }); + setupTabs(); + setupTopMenu(); outerLayout = $('body').layout({ applyDefaultStyles: false , center__paneSelector: ".outer-center" , west__paneSelector: ".outer-west" - , west__size: 133 + , west__size: 181 , north__size: 26 , south__size: 26 , spacing_open: 0 // ALL panes @@ -106,4 +159,3 @@ $(document).ready(function () { }); }); - diff --git a/src/sunstone/public/js/opennebula.js b/src/sunstone/public/js/opennebula.js index 9552c3563e..eb4f149d30 100644 --- a/src/sunstone/public/js/opennebula.js +++ b/src/sunstone/public/js/opennebula.js @@ -852,5 +852,91 @@ var OpenNebula = { "list" : function(params){ OpenNebula.Action.list(params,OpenNebula.Acl.resource); } - } + }, + + "Cluster" : { + "resource" : "CLUSTER", + + "create" : function(params){ + OpenNebula.Action.create(params,OpenNebula.Cluster.resource); + }, + "delete" : function(params){ + OpenNebula.Action.delete(params,OpenNebula.Cluster.resource); + }, + "list" : function(params){ + OpenNebula.Action.list(params,OpenNebula.Cluster.resource); + }, + "show" : function(params){ + OpenNebula.Action.show(params,OpenNebula.Cluster.resource); + }, + "addhost" : function(params){ + var action_obj = { "host_id": params.data.extra_param }; + OpenNebula.Action.simple_action(params,OpenNebula.Cluster.resource, + "addhost",action_obj); + }, + "delhost" : function(params){ + var action_obj = { "host_id": params.data.extra_param }; + OpenNebula.Action.simple_action(params,OpenNebula.Cluster.resource, + "delhost",action_obj); + }, + "adddatastore" : function(params){ + var action_obj = { "ds_id": params.data.extra_param }; + OpenNebula.Action.simple_action(params,OpenNebula.Cluster.resource, + "adddatastore",action_obj); + }, + "deldatastore" : function(params){ + var action_obj = { "ds_id": params.data.extra_param }; + OpenNebula.Action.simple_action(params,OpenNebula.Cluster.resource, + "deldatastore",action_obj); + }, + "addvnet" : 
function(params){ + var action_obj = { "vnet_id": params.data.extra_param }; + OpenNebula.Action.simple_action(params,OpenNebula.Cluster.resource, + "addvnet",action_obj); + }, + "delvnet" : function(params){ + var action_obj = { "vnet_id": params.data.extra_param }; + OpenNebula.Action.simple_action(params,OpenNebula.Cluster.resource, + "delvnet",action_obj); + }, + }, + "Datastore" : { + "resource" : "DATASTORE", + + "create" : function(params){ + OpenNebula.Action.create(params,OpenNebula.Datastore.resource); + }, + "delete" : function(params){ + OpenNebula.Action.delete(params,OpenNebula.Datastore.resource); + }, + "list" : function(params){ + OpenNebula.Action.list(params,OpenNebula.Datastore.resource); + }, + "show" : function(params){ + OpenNebula.Action.show(params,OpenNebula.Datastore.resource); + }, + "chown" : function(params){ + OpenNebula.Action.chown(params,OpenNebula.Datastore.resource); + }, + "chgrp" : function(params){ + OpenNebula.Action.chgrp(params,OpenNebula.Datastore.resource); + }, + "chmod" : function(params){ + var action_obj = params.data.extra_param; + OpenNebula.Action.simple_action(params, + OpenNebula.Datastore.resource, + "chmod", + action_obj); + }, + "update" : function(params){ + var action_obj = {"template_raw" : params.data.extra_param }; + OpenNebula.Action.simple_action(params, + OpenNebula.Datastore.resource, + "update", + action_obj); + }, + "fetch_template" : function(params){ + OpenNebula.Action.show(params,OpenNebula.Datastore.resource,"template"); + }, + }, } diff --git a/src/sunstone/public/js/plugins/acls-tab.js b/src/sunstone/public/js/plugins/acls-tab.js index 9216424a7b..6a78b08d4f 100644 --- a/src/sunstone/public/js/plugins/acls-tab.js +++ b/src/sunstone/public/js/plugins/acls-tab.js @@ -45,8 +45,10 @@ var create_acl_tmpl = \ \
\ - \ + \ '+tr("Hosts")+'
\ + '+tr("Clusters")+'
\ + '+tr("Datastores")+'
\ '+tr("Virtual Machines")+'
\ '+tr("Virtual Networks")+'
\ '+tr("Images")+'
\ @@ -154,7 +156,9 @@ var acl_buttons = { var acls_tab = { title: tr("ACLs"), content: acls_tab_content, - buttons: acl_buttons + buttons: acl_buttons, + tabClass: 'subTab', + parentTab: 'system_tab' } Sunstone.addActions(acl_actions); @@ -247,6 +251,12 @@ function parseAclString(string) { case "GROUP": resources_str+=tr("Groups")+", "; break; + case "CLUSTER": + resources_str+=tr("Clusters")+", "; + break; + case "DATASTORE": + resources_str+=tr("Datastores")+", "; + break; }; }; //remove ", " from end @@ -295,6 +305,7 @@ function updateAclsView(request,list){ }); updateView(list_array,dataTable_acls); updateDashboard("acls",list); + updateSystemDashboard("acls",list); } function setupCreateAclDialog(){ diff --git a/src/sunstone/public/js/plugins/clusters-tab.js b/src/sunstone/public/js/plugins/clusters-tab.js new file mode 100644 index 0000000000..b0cccd6aaa --- /dev/null +++ b/src/sunstone/public/js/plugins/clusters-tab.js @@ -0,0 +1,788 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------- */ + +/*Cluster tab plugin*/ + + +var clusters_tab_content = +'
\ +
\ +
\ +
\ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
' + tr("All") + '' + tr("id") + '' + tr("Name") + '
\ +'; + +var create_cluster_tmpl = +'
\ +
\ + \ +
\ +
\ +
\ +
\ +
\ +
\ +
\ +
'; + +var clusters_select=""; +var dataTable_clusters; +var $create_cluster_dialog; + +//Setup actions +var cluster_actions = { + + "Cluster.create" : { + type: "create", + call : OpenNebula.Cluster.create, + callback : addClusterElement, + error : onError, + notify: true + }, + + "Cluster.create_dialog" : { + type: "custom", + call: popUpCreateClusterDialog + }, + + "Cluster.list" : { + type: "list", + call: OpenNebula.Cluster.list, + callback: updateClustersView, + error: onError + }, + + "Cluster.show" : { + type: "single", + call: OpenNebula.Cluster.show, + callback: updateClusterElement, + error: onError + }, + +/* + + "Cluster.showinfo" : { + type: "single", + call: OpenNebula.Cluster.show, + callback: updateClusterInfo, + error: onError + }, +*/ + + "Cluster.refresh" : { + type: "custom", + call: function(){ + waitingNodes(dataTable_clusters); + Sunstone.runAction("Cluster.list"); + }, + error: onError + }, + + "Cluster.autorefresh" : { + type: "custom", + call : function() { + OpenNebula.Cluster.list({timeout: true, success: updateClustersView,error: onError}); + } + }, + + "Cluster.addhost" : { + type: "single", + call : OpenNebula.Cluster.addhost, + callback : function (req) { + Sunstone.runAction("Host.show",req.request.data[0][1].host_id); + }, + error : onError, + }, + + "Cluster.delhost" : { + type: "single", + call : OpenNebula.Cluster.delhost, + callback : function (req) { + //Sunstone.runAction("Cluster.show",req.request.data[0]); + }, + error : onError, + notify: true + }, + + "Cluster.adddatastore" : { + type: "single", + call : OpenNebula.Cluster.adddatastore, + callback : function (req) { + Sunstone.runAction("Datastore.show",req.request.data[0][1].ds_id); + //Sunstone.runAction("Cluster.show",req.request.data[0]); + }, + error : onError, + }, + + "Cluster.deldatastore" : { + type: "single", + call : OpenNebula.Cluster.deldatastore, + callback : function (req) { + //Sunstone.runAction("Cluster.show",req.request.data[0]); + }, + error : onError, 
+ }, + + "Cluster.addvnet" : { + type: "single", + call : OpenNebula.Cluster.addvnet, + callback : function (req) { + Sunstone.runAction("Network.show",req.request.data[0][1].vnet_id); + }, + error : onError, + }, + + "Cluster.delvnet" : { + type: "single", + call : OpenNebula.Cluster.delvnet, + callback : function (req) { + //Sunstone.runAction("Cluster.show",req.request.data[0]); + }, + error : onError, + notify: true + }, + + "Cluster.delete" : { + type: "multiple", + call : OpenNebula.Cluster.delete, + callback : deleteClusterElement, + elements: clusterElements, + error : onError, + notify:true + }, +}; + +var cluster_buttons = { + "Cluster.refresh" : { + type: "image", + text: tr("Refresh list"), + img: "images/Refresh-icon.png" + }, + "Cluster.create_dialog" : { + type: "create_dialog", + text: tr("+ New") + }, + "Cluster.delete" : { + type: "confirm", + text: tr("Delete") + } +}; + +/* +var host_info_panel = { + "host_info_tab" : { + title: tr("Host information"), + content:"" + }, + + "host_template_tab" : { + title: tr("Host template"), + content: "" + }, + "host_monitoring_tab": { + title: tr("Monitoring information"), + content: "" + } +}; +*/ + + +var clusters_tab = { + title: tr("Clusters"), + content: clusters_tab_content, + buttons: cluster_buttons, + showOnTopMenu: false, + tabClass: "topTab subTab", + parentTab: "infra_tab", +}; + +Sunstone.addActions(cluster_actions); +Sunstone.addMainTab('clusters_tab',clusters_tab); +//Sunstone.addInfoPanel("host_info_panel",host_info_panel); + + +function clusterElements(){ + return getSelectedNodes(dataTable_clusters); +} + +function clusterElementArray(element_json){ + + var element = element_json.CLUSTER; + + return [ + '', + element.ID, + element.NAME, + ]; +} + +/* +//Listen to clicks on the tds of the tables and shows the info dialogs. +function hostInfoListener(){ + $('#tbodyhosts tr',dataTable_hosts).live("click",function(e){ + //do nothing if we are clicking a checkbox! 
+ if ($(e.target).is('input')) {return true;} + + var aData = dataTable_hosts.fnGetData(this); + var id = $(aData[0]).val(); + if (!id) return true; + + popDialogLoading(); + Sunstone.runAction("Host.showinfo",id); + return false; + }); +} +*/ + +//updates the host select by refreshing the options in it +function updateClusterSelect(){ + clusters_select = ''; + clusters_select += makeSelectOptions(dataTable_clusters, + 1,//id_col + 2,//name_col + [],//status_cols + [],//bad_st + true + ); +} + +//callback for an action affecting a host element +function updateClusterElement(request, element_json){ + var id = element_json.CLUSTER.ID; + var element = clusterElementArray(element_json); + updateSingleElement(element,dataTable_clusters,'#cluster_'+id); + updateClusterSelect(); +} + +//callback for actions deleting a host element +function deleteClusterElement(req){ + deleteElement(dataTable_clusters,'#cluster_'+req.request.data); + updateClusterSelect(); +} + +//call back for actions creating a host element +function addClusterElement(request,element_json){ + var id = element_json.CLUSTER.ID; + var element = clusterElementArray(element_json); + addElement(element,dataTable_clusters); + updateClusterSelect(); +} + +//callback to update the list of hosts. 
+function updateClustersView (request,list){ + var list_array = []; + + $.each(list,function(){ + //Grab table data from the host_list + list_array.push(clusterElementArray(this)); + }); + + removeClusterMenus(); + + updateView(list_array,dataTable_clusters); + updateClusterSelect(); + //dependency with the dashboard plugin + updateDashboard("clusters",list); + updateInfraDashboard("clusters",list); + newClusterMenu(list); +}; + + +function clusterTabContent(cluster_json) { + var cluster = cluster_json.CLUSTER; + var hosts_n = 0; + var dss_n = 0; + var vnets_n = 0; + + if (cluster.DATASTORES.ID && + cluster.DATASTORES.ID.constructor == Array){ + dss_n = cluster.DATASTORES.ID.length; + } else if (cluster.DATASTORES.ID) + dss_n = 1; + + if (cluster.HOSTS.ID && + cluster.HOSTS.ID.constructor == Array){ + hosts_n = cluster.HOSTS.ID.length; + } else if (cluster.HOSTS.ID) + hosts_n = 1; + + if (cluster.VNETS.ID && + cluster.VNETS.ID.constructor == Array){ + vnets_n = cluster.VNETS.ID.length; + } else if (cluster.VNETS.ID) + vnets_n = 1; + +/* + var dss_list = '
  • '+tr("No datastores in this cluster")+'
  • '; + if (cluster.DATASTORES.ID && + cluster.DATASTORES.ID.constructor == Array){ + dss_list = ''; + for (var i=0; i'; + }; + } else if (cluster.DATASTORES.ID) + dss_list = '
  • '+cluster.DATASTORES.ID+' - '+getDatastoreName(cluster.DATASTORES.ID)+'
  • '; + + var hosts_list = '
  • '+tr("No hosts in this cluster")+'
  • '; + if (cluster.HOSTS.ID && + cluster.HOSTS.ID.constructor == Array){ + hosts_list = ''; + for (var i=0; i'; + }; + } else if (cluster.HOSTS.ID) + hosts_list = '
  • '+cluster.HOSTS.ID+' - '+getHostName(cluster.HOSTS.ID)+'
  • '; + + var vnets_list = '
  • '+tr("No virtual networks in this cluster")+'
  • '; + if (cluster.VNETS.ID && + cluster.VNETS.ID.constructor == Array){ + vnets_list = ''; + for (var i=0; i'; + }; + } else if (cluster.VNETS.ID) + vnets_list = '
  • '+cluster.VNETS.ID+' - '+getVNetName(cluster.VNETS.ID)+'
  • '; +*/ + + + + var html_code = '\ +\ +\ +\ +\ +
    \ +\ + \ + \ + \ + \ + \ + \ +
    \ +
    \ +

    ' + tr("Cluster information") + '

    \ +
    \ +\ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
    ' + tr("ID") + ''+cluster.ID+'
    ' + tr("Name") + ''+cluster.NAME+'
    \ +\ +
    \ +
    \ +
    \ +
    \ +

    ' + tr("Hosts") + '

    \ +
    \ +\ +


    '+tr("Current number of hosts in this cluster")+': '+hosts_n+'.

    \ + '+tr("Create new host")+'
    \ + '+tr("Manage cluster hosts")+'

    \ +\ +
    \ +
    \ +
    \ +\ + \ + \ + \ + \ + \ + \ +
    \ +
    \ +

    ' + tr("Datastores") + '

    \ +
    \ +\ +


    '+tr("Current number of datastores in this cluster")+': '+dss_n+'.

    \ +\ + '+tr("Create new datastore")+'
    \ + '+tr("Manage cluster datastores")+'

    \ +
    \ +
    \ +
    \ +
    \ +

    ' + tr("Virtual Networks") + '

    \ +
    \ +\ +


    '+tr("Current number of virtual networks in this cluster")+': '+vnets_n+'.

    \ +\ + '+tr("Create new virtual network")+'
    \ + '+tr("Manage cluster virtual networks")+'

    \ +
    \ +
    \ +
    \ +
    \ +'; + + return html_code; +}; + +function removeClusterMenus(){ + var data = dataTable_clusters.fnGetData(); + +// Sunstone.removeMainTab('cluster_vnets_tab_n',true); +// Sunstone.removeMainTab('cluster_datastores_tab_n',true); +// Sunstone.removeMainTab('cluster_hosts_tab_n',true); + Sunstone.removeMainTab('cluster_tab_n',true); + + for (var i=0; i < data.length; i++){ + var id = data[i][1]; +// Sunstone.removeMainTab('cluster_vnets_tab_'+id,true); +// Sunstone.removeMainTab('cluster_datastores_tab_'+id,true); +// Sunstone.removeMainTab('cluster_hosts_tab_'+id,true); + Sunstone.removeMainTab('cluster_tab_'+id,true); + }; +}; + + +function newClusterMenu(list){ + var cluster_none = { + 'CLUSTER' : { + 'NAME' : 'None', + 'ID' : '-', + 'DATASTORES' : [], + 'HOSTS' : [], + 'VNETS' : [] + } + }; + + newClusterMenuElement(cluster_none); + + for (var i=0; i < list.length; i++){ + newClusterMenuElement(list[i]); + }; + $('div#menu li#li_clusters_tab span').removeClass('ui-icon-circle-minus'); + $('div#menu li#li_clusters_tab span').addClass('ui-icon-circle-plus'); +}; + +function newClusterMenuElement(element){ + var cluster = element.CLUSTER; + var menu_name = cluster.NAME.length > 10 ? + cluster.NAME.substring(0,9)+'...' 
: cluster.NAME; + + var menu_cluster = { + title: menu_name + ' (id ' + cluster.ID + ')', + content: clusterTabContent(element), + tabClass: 'subTab subsubTab', + parentTab: 'clusters_tab' +// buttons: null + }; +/* + var submenu_hosts = { + title: tr("Hosts"), + content: '', + tabClass: "subTab clusterHosts subsubTab", + parentTab: "cluster_tab_" + cluster.ID + }; + + var submenu_datastores = { + title: tr("Datastores"), + content: '', + tabClass: "subTab clusterDatastores subsubTab", + parentTab: "cluster_tab_" + cluster.ID + }; + + var submenu_vnets = { + title: tr("Virtual Networks"), + content: '', + tabClass: "subTab clusterVnets subsubTab", + parentTab: "cluster_tab_" + cluster.ID + }; +*/ + Sunstone.addMainTab('cluster_tab_'+cluster.ID,menu_cluster,true); +// Sunstone.addMainTab('cluster_hosts_tab_'+cluster.ID,submenu_hosts,true); +// Sunstone.addMainTab('cluster_datastores_tab_'+cluster.ID,submenu_datastores,true); +// Sunstone.addMainTab('cluster_vnets_tab_'+cluster.ID,submenu_vnets,true); +}; + +function clusterResourceViewListeners(){ + //hack the menu selection + $('.show_tab_button').live('click',function(){ + var dest = $(this).attr('href').substring(1); + var filter_id = $(this).attr('filter_id'); + switch (dest) { + case 'hosts_tab': + dataTable_hosts.fnFilter(getClusterName(filter_id),3,false,true,false,true); + break; + case 'datastores_tab': + dataTable_datastores.fnFilter(getClusterName(filter_id),5,false,true,false,true); + break; + case 'vnets_tab': + dataTable_vNetworks.fnFilter(getClusterName(filter_id),5,false,true,false,true); + break; + }; + showTab(dest,'li_cluster_tab'+filter_id); + return false; + }); +/* + $('div#menu li.clusterHosts').live('click',function(){ + var id = $(this).attr('id'); + id = id.split('_'); + id = id[id.length-1]; + dataTable_hosts.fnFilter(getClusterName(id),3,false,true,false,true); + showTab('#hosts_tab',$(this).attr('id').substring(3)); + return false; + }); + + $('div#menu 
li.clusterDatastores').live('click',function(){ + var id = $(this).attr('id'); + id = id.split('_'); + id = id[id.length-1]; + dataTable_datastores.fnFilter(getClusterName(id),5,false,true,false,true); + showTab('#datastores_tab',$(this).attr('id').substring(3)); + return false; + }); + + $('div#menu li.clusterVnets').live('click',function(){ + var id = $(this).attr('id'); + id = id.split('_'); + id = id[id.length-1]; + dataTable_vNetworks.fnFilter(getClusterName(id),5,false,true,false,true); + showTab('#vnets_tab',$(this).attr('id').substring(3)); + return false; + }); +*/ +}; + +/* +//Updates the host info panel tab's content and pops it up +function updateHostInfo(request,host){ + var host_info = host.HOST; + + //Information tab + var info_tab = { + title : tr("Host information"), + content : + '\ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
    ' + tr("Host information") + ' - '+host_info.NAME+'
    ' + tr("id") + ''+host_info.ID+'
    ' + tr("Name") + ''+host_info.NAME+'
    ' + tr("Cluster") + ''+host_info.CLUSTER+'
    ' + tr("State") + ''+tr(OpenNebula.Helper.resource_state("host",host_info.STATE))+'
    ' + tr("IM MAD") + ''+host_info.IM_MAD+'
    ' + tr("VM MAD") + ''+host_info.VM_MAD+'
    '+ tr("VN MAD") +''+host_info.VN_MAD+'
    '+ tr("TM MAD") +''+host_info.TM_MAD+'
    \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
    ' + tr("Host shares") + '
    ' + tr("Max Mem") + ''+humanize_size(host_info.HOST_SHARE.MAX_MEM)+'
    ' + tr("Used Mem (real)") + ''+humanize_size(host_info.HOST_SHARE.USED_MEM)+'
    ' + tr("Used Mem (allocated)") + ''+humanize_size(host_info.HOST_SHARE.MAX_USAGE)+'
    ' + tr("Used CPU (real)") + ''+host_info.HOST_SHARE.USED_CPU+'
    ' + tr("Used CPU (allocated)") + ''+host_info.HOST_SHARE.CPU_USAGE+'
    ' + tr("Running VMs") + ''+host_info.HOST_SHARE.RUNNING_VMS+'
    ' + } + + //Template tab + var template_tab = { + title : tr("Host template"), + content : + '\ + '+ + prettyPrintJSON(host_info.TEMPLATE)+ + '
    ' + tr("Host template") + '
    ' + } + + var monitor_tab = { + title: tr("Monitoring information"), + content : generateMonitoringDivs(host_graphs,"host_monitor_") + } + + //Sunstone.updateInfoPanelTab(info_panel_name,tab_name, new tab object); + Sunstone.updateInfoPanelTab("host_info_panel","host_info_tab",info_tab); + Sunstone.updateInfoPanelTab("host_info_panel","host_template_tab",template_tab); + Sunstone.updateInfoPanelTab("host_info_panel","host_monitoring_tab",monitor_tab); + + Sunstone.popUpInfoPanel("host_info_panel"); + //pop up panel while we retrieve the graphs + for (var i=0; i'); + $create_cluster_dialog = $('div#create_cluster_dialog'); + var dialog = $create_cluster_dialog; + + dialog.html(create_cluster_tmpl); + dialog.dialog({ + autoOpen: false, + modal: true, + width: 500 + }); + + $('button',dialog).button(); + + //Handle the form submission + $('#create_cluster_form',dialog).submit(function(){ + if (!($('#name',this).val().length)){ + notifyError(tr("Cluster name missing!")); + return false; + } + + var cluster_json = { + "cluster": { + "name": $('#name',this).val(), + } + }; + + //Create the OpenNebula.Host. + //If it's successfull we refresh the list. 
+ Sunstone.runAction("Cluster.create",cluster_json); + $create_cluster_dialog.dialog('close'); + return false; + }); +} + +//Open creation dialogs +function popUpCreateClusterDialog(){ + $create_cluster_dialog.dialog('open'); + return false; +} + +//Prepares the autorefresh for hosts +function setClusterAutorefresh() { + setInterval(function(){ + var selected_menu = $('div#menu li.navigation-active-li'); + var inSubMenu = selected_menu.attr('id').indexOf('cluster') > 0; + + var checked = $('input.check_item:checked',dataTable_clusters); + var filter = $("#datatable_clusters_filter input",dataTable_clusters.parents('#datatable_clusters_wrapper')).attr('value'); + if (!checked.length && !filter.length && !inSubMenu){ + Sunstone.runAction("Cluster.autorefresh"); + } + },INTERVAL+someTime()); +} + + +function clusters_sel() { + return clusters_select; +} + +//This is executed after the sunstone.js ready() is run. +//Here we can basicly init the host datatable, preload it +//and add specific listeners +$(document).ready(function(){ + + //prepare host datatable + dataTable_clusters = $("#datatable_clusters",main_tabs_context).dataTable({ + "bJQueryUI": true, + "bSortClasses": false, + "bAutoWidth":false, + "sPaginationType": "full_numbers", + "aoColumnDefs": [ + { "bSortable": false, "aTargets": ["check"] }, + { "sWidth": "60px", "aTargets": [0] }, + { "sWidth": "35px", "aTargets": [1] }, + ], + "oLanguage": (datatable_lang != "") ? 
+ { + sUrl: "locale/"+lang+"/"+datatable_lang + } : "" + }); + + //preload it + dataTable_clusters.fnClearTable(); + addElement([ + spinner, + '','',''],dataTable_clusters); + Sunstone.runAction("Cluster.list"); + + setupCreateClusterDialog(); + + setClusterAutorefresh(); + + clusterResourceViewListeners(); + + initCheckAllBoxes(dataTable_clusters); + tableCheckboxesListener(dataTable_clusters); +// clusterInfoListener(); +}); diff --git a/src/sunstone/public/js/plugins/config-tab.js b/src/sunstone/public/js/plugins/config-tab.js index 303961fb60..7b2ec99427 100644 --- a/src/sunstone/public/js/plugins/config-tab.js +++ b/src/sunstone/public/js/plugins/config-tab.js @@ -58,8 +58,10 @@ var config_actions = { var config_tab = { title: tr("Configuration"), - content: config_tab_content -} + content: config_tab_content, + tabClass: "subTab", + parentTab: "dashboard_tab", +}; Sunstone.addActions(config_actions); Sunstone.addMainTab('config_tab',config_tab); diff --git a/src/sunstone/public/js/plugins/dashboard-tab.js b/src/sunstone/public/js/plugins/dashboard-tab.js index 362c51e347..e333870e9e 100644 --- a/src/sunstone/public/js/plugins/dashboard-tab.js +++ b/src/sunstone/public/js/plugins/dashboard-tab.js @@ -43,7 +43,7 @@ var graph4 = { }; var dashboard_tab_content = -'\ +'
    \ \ \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
    \ \ @@ -58,29 +58,37 @@ var dashboard_tab_content = \ \ \ + \ + \ + \ + \ \ \ \ \ \ - \ - \ + \ + \ \ \ \ \ \ \ - \ - \ + \ + \ \ \ - \ - \ + \ + \ + \ + \ + \ + \ \ \ \ @@ -100,18 +108,20 @@ var dashboard_tab_content = \ \ @@ -148,7 +158,8 @@ var dashboard_tab_content = var dashboard_tab = { title: tr("Dashboard"), - content: dashboard_tab_content + content: dashboard_tab_content, + showOnTopMenu: false, } Sunstone.addMainTab('dashboard_tab',dashboard_tab); @@ -199,13 +210,6 @@ function plot_global_graph(data,info){ $.plot($('#'+id+'_graph',context),series,options); } -function quickstart_setup(){ - - $('#dashboard_table #quickstart_form input',main_tabs_context).click(function(){ - Sunstone.runAction($(this).val()); - }); -} - function graph_autorefresh(){ setInterval(function(){ refresh_graphs(); @@ -221,17 +225,8 @@ function refresh_graphs(){ } $(document).ready(function(){ - //Dashboard link listener - $("#dashboard_table h3 a",main_tabs_context).live("click", function (){ - var tab = $(this).attr('href'); - showTab(tab); - return false; - }); - emptyDashboard(); - quickstart_setup(); - refresh_graphs(); graph_autorefresh(); @@ -278,14 +273,8 @@ function updateDashboard(what,json_info){ $('#failed_vms',db).html(failed_vms); break; case "vnets": - var public_vnets=0; var total_vnets=json_info.length; - $.each(json_info,function(){ - if (parseInt(this.VNET.PUBLIC)){ - public_vnets++;} - }); - $('#total_vnets',db).html(total_vnets+' / '); - $('#public_vnets',db).html(public_vnets); + $('#total_vnets',db).html(total_vnets); break; case "users": var total_users=json_info.length; @@ -293,28 +282,23 @@ function updateDashboard(what,json_info){ break; case "images": var total_images=json_info.length; - var public_images=0; - $.each(json_info,function(){ - if (parseInt(this.IMAGE.PUBLIC)){ - public_images++;} - }); - $('#total_images',db).html(total_images+' / '); - $('#public_images',db).html(public_images); + $('#total_images',db).html(total_images); break; case 
"templates": var total_templates=json_info.length; - var public_templates=0; - $.each(json_info,function(){ - if (parseInt(this.VMTEMPLATE.PUBLIC)){ - public_templates++; - } - }); - $('#total_templates',db).html(total_templates+' / '); - $('#public_templates',db).html(public_templates); + $('#total_templates',db).html(total_templates); break; case "acls": var total_acls=json_info.length; $('#total_acls',db).html(total_acls); break; + case "clusters": + var total_clusters=json_info.length; + $('#total_clusters',db).html(total_clusters); + break; + case "datastores": + var total_datastores=json_info.length; + $('#total_datastores',db).html(total_datastores); + break; } -} +}; diff --git a/src/sunstone/public/js/plugins/dashboard-users-tab.js b/src/sunstone/public/js/plugins/dashboard-users-tab.js index 4e602814be..93c7367d45 100644 --- a/src/sunstone/public/js/plugins/dashboard-users-tab.js +++ b/src/sunstone/public/js/plugins/dashboard-users-tab.js @@ -43,7 +43,7 @@ var graph4 = { }; var dashboard_tab_content = -'
    ' + tr("Hosts (total/active)") + '
    ' + tr("Clusters") + '
    ' + tr("Groups") + '
    ' + tr("VM Templates (total/public)") + '' + tr("VM Templates") + '
    ' + - tr("VM Instances")+ ' (' + + tr("VM Instances")+ ' (' + tr("total") + '/' + - tr("running") + '/' + + tr("running") + '/' + tr("failed") + ')
    ' + tr("Virtual Networks (total/public)") + '' + tr("Virtual Networks") + '
    ' + tr("Images (total/public)") + '' + tr("Datastores") + '
    ' + tr("Images") + '
    ' + tr("Users")+'\
    \

    ' + tr("Quickstart") + '

    \ -
    \ -
    \ -\ - ' + tr("Host") + '
    \ - ' + tr("VM Instance") + '
    \ - ' + tr("VM Template") + '
    \ - ' + tr("Virtual Network") + '
    \ - ' + tr("Image") + '
    \ - ' + tr("User") + '
    \ - ' + tr("Group") + '
    \ - ' + tr("Acl") + '
    \ -
    \ + \
    \
    \ +'
    \ \ \ \ @@ -127,7 +131,8 @@ var dashboard_tab_content = var dashboard_tab = { title: tr("Dashboard"), - content: dashboard_tab_content + content: dashboard_tab_content, + showOnTopMenu: false, } Sunstone.addMainTab('dashboard_tab',dashboard_tab); @@ -179,13 +184,6 @@ function plot_global_graph(data,info){ $.plot($('#'+id+'_graph',context),series,options); } -function quickstart_setup(){ - - $('#dashboard_table #quickstart_form input',main_tabs_context).click(function(){ - Sunstone.runAction($(this).val()); - }); -} - function graph_autorefresh(){ setInterval(function(){ refresh_graphs(); @@ -201,17 +199,8 @@ function refresh_graphs(){ } $(document).ready(function(){ - //Dashboard link listener - $("#dashboard_table h3 a",main_tabs_context).live("click", function (){ - var tab = $(this).attr('href'); - showTab(tab); - return false; - }); - emptyDashboard(); - quickstart_setup(); - refresh_graphs(); graph_autorefresh(); @@ -258,14 +247,8 @@ function updateDashboard(what,json_info){ $('#failed_vms',db).html(failed_vms); break; case "vnets": - var public_vnets=0; var total_vnets=json_info.length; - $.each(json_info,function(){ - if (parseInt(this.VNET.PUBLIC)){ - public_vnets++;} - }); - $('#total_vnets',db).html(total_vnets+' / '); - $('#public_vnets',db).html(public_vnets); + $('#total_vnets',db).html(total_vnets); break; case "users": var total_users=json_info.length; @@ -273,28 +256,23 @@ function updateDashboard(what,json_info){ break; case "images": var total_images=json_info.length; - var public_images=0; - $.each(json_info,function(){ - if (parseInt(this.IMAGE.PUBLIC)){ - public_images++;} - }); - $('#total_images',db).html(total_images+' / '); - $('#public_images',db).html(public_images); + $('#total_images',db).html(total_images); break; case "templates": var total_templates=json_info.length; - var public_templates=0; - $.each(json_info,function(){ - if (parseInt(this.VMTEMPLATE.PUBLIC)){ - public_templates++; - } - }); - 
$('#total_templates',db).html(total_templates+' / '); - $('#public_templates',db).html(public_templates); + $('#total_templates',db).html(total_templates); break; case "acls": var total_acls=json_info.length; $('#total_acls',db).html(total_acls); break; + case "clusters": + var total_clusters=json_info.length; + $('#total_clusters',db).html(total_clusters); + break; + case "datastores": + var total_datastores=json_info.length; + $('#total_datastores',db).html(total_datastores); + break; } -} \ No newline at end of file +}; diff --git a/src/sunstone/public/js/plugins/datastores-tab.js b/src/sunstone/public/js/plugins/datastores-tab.js new file mode 100644 index 0000000000..854bbba145 --- /dev/null +++ b/src/sunstone/public/js/plugins/datastores-tab.js @@ -0,0 +1,694 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------- */ + +/*Datastore tab plugin*/ + + +var datastores_tab_content = +'\ +
    \ +
    \ +
    \ \ @@ -55,8 +55,8 @@ var dashboard_tab_content = \
    \ \ - \ - \ + \ + \ \ \ \ \ \ - \ - \ + \ + \ \ \ - \ - \ + \ + \ + \ + \ + \ + \ \
    '+tr("VM Templates (total/public)")+''+tr("VM Templates")+'
    '+tr("VM Instances")+' ('+ @@ -66,12 +66,16 @@ var dashboard_tab_content =
    '+tr("Virtual Networks (total/public)")+''+tr("Virtual Networks")+'
    '+tr("Images (total/public)")+'' + tr("Datastores") + '
    '+tr("Images")+'
    \ \ @@ -83,14 +87,14 @@ var dashboard_tab_content =
    \
    \

    '+tr("Quickstart")+'

    \ -
    \ -
    \ - \ - '+tr("VM Template")+'
    \ - '+tr("VM Instance")+'
    \ - '+tr("Virtual Network")+'
    \ - '+tr("Image")+'
    \ -
    \ + \
    \
    \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
    ' + tr("All") + ''+tr("ID")+''+tr("Owner")+''+tr("Group")+''+tr("Name")+''+tr("Cluster")+'
    \ +'; + +var create_datastore_tmpl = +'
    \ +
    \ + \ + \ + \ + \ + \ + \ + \ + \ +
    \ +
    \ +
    \ +
    \ +
    \ +
    \ +
    \ +
    '; + +var update_datastore_tmpl = + '
    \ +

    '+tr("Please, choose and modify the datastore you want to update")+':

    \ +
    \ + \ + \ +
    \ +
    \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
    '+tr("Permissions")+':'+tr("Use")+''+tr("Manage")+''+tr("Admin")+'
    '+tr("Owner")+'
    '+tr("Group")+'
    '+tr("Other")+'
    \ +
    \ + \ +
    \ + \ +
    \ +
    \ +
    \ + \ +
    \ +
    \ +
    '; + +var datastores_select=""; +var dataTable_datastores; +var $create_datastore_dialog; + +//Setup actions +var datastore_actions = { + + "Datastore.create" : { + type: "create", + call : OpenNebula.Datastore.create, + callback : addDatastoreElement, + error : onError, + notify: true + }, + + "Datastore.create_dialog" : { + type: "custom", + call: popUpCreateDatastoreDialog + }, + + "Datastore.list" : { + type: "list", + call: OpenNebula.Datastore.list, + callback: updateDatastoresView, + error: onError + }, + + "Datastore.show" : { + type: "single", + call: OpenNebula.Datastore.show, + callback: updateDatastoreElement, + error: onError + }, + + "Datastore.showinfo" : { + type: "single", + call: OpenNebula.Datastore.show, + callback: updateDatastoreInfo, + error: onError + }, + + "Datastore.refresh" : { + type: "custom", + call: function(){ + waitingNodes(dataTable_datastores); + Sunstone.runAction("Datastore.list"); + }, + error: onError + }, + + "Datastore.fetch_template" : { + type: "single", + call: OpenNebula.Datastore.fetch_template, + callback: function (request,response) { + $('#datastore_template_update_dialog #datastore_template_update_textarea').val(response.template); + }, + error: onError + }, + + "Datastore.fetch_permissions" : { + type: "single", + call: OpenNebula.Datastore.show, + callback: function(request,element_json){ + var dialog = $('#datastore_template_update_dialog form'); + var ds = element_json.DATASTORE; + setPermissionsTable(ds,dialog); + }, + error: onError + }, + + "Datastore.update_dialog" : { + type: "custom", + call: popUpDatastoreTemplateUpdateDialog, + }, + + "Datastore.update" : { + type: "single", + call: OpenNebula.Datastore.update, + callback: function() { + notifyMessage(tr("Datastore updated correctly")); + }, + error: onError + }, + + "Datastore.autorefresh" : { + type: "custom", + call : function() { + OpenNebula.Datastore.list({timeout: true, success: updateDatastoresView,error: onError}); + } + }, + + 
"Datastore.delete" : { + type: "multiple", + call : OpenNebula.Datastore.delete, + callback : deleteDatastoreElement, + elements: datastoreElements, + error : onError, + notify:true + }, + + "Datastore.chown" : { + type: "multiple", + call: OpenNebula.Datastore.chown, + callback: function (req) { + Sunstone.runAction("Datastore.show",req.request.data[0][0]); + }, + elements: datastoreElements, + error: onError, + notify: true + }, + + "Datastore.chgrp" : { + type: "multiple", + call: OpenNebula.Datastore.chgrp, + callback: function (req) { + Sunstone.runAction("Datastore.show",req.request.data[0][0]); + }, + elements: datastoreElements, + error: onError, + notify: true + }, + + "Datastore.chmod" : { + type: "single", + call: OpenNebula.Datastore.chmod, +// callback + error: onError, + notify: true + }, + + "Datastore.addtocluster" : { + type: "multiple", + call: function(params){ + var cluster = params.data.extra_param; + var ds = params.data.id; + Sunstone.runAction("Cluster.adddatastore",cluster,ds); + }, + elements: datastoreElements, + notify:true, + }, + +}; + +var datastore_buttons = { + "Datastore.refresh" : { + type: "image", + text: tr("Refresh list"), + img: "images/Refresh-icon.png" + }, + "Datastore.create_dialog" : { + type: "create_dialog", + text: tr("+ New"), + condition: mustBeAdmin, + }, + "Datastore.update_dialog" : { + type: "action", + text: tr("Update properties"), + alwaysActive: true + }, + "Datastore.addtocluster" : { + type: "confirm_with_select", + text: tr("Select cluster"), + select: clusters_sel, + tip: tr("Select the destination cluster:"), + condition: mustBeAdmin, + }, + "Datastore.chown" : { + type: "confirm_with_select", + text: tr("Change owner"), + select: users_sel, + tip: tr("Select the new owner")+":", + condition: mustBeAdmin + }, + "Datastore.chgrp" : { + type: "confirm_with_select", + text: tr("Change group"), + select: groups_sel, + tip: tr("Select the new group")+":", + condition: mustBeAdmin + }, + "Datastore.delete" : 
{ + type: "confirm", + text: tr("Delete") + } +} + +var datastore_info_panel = { + "datastore_info_tab" : { + title: tr("Datastore information"), + content: "" + }, + "datastore_template_tab" : { + title: tr("Datastore template"), + content: "" + }, +} + +var datastores_tab = { + title: tr("Datastores"), + content: datastores_tab_content, + buttons: datastore_buttons, + tabClass: "subTab", + parentTab: "infra_tab", + showOnTopMenu: false, +} + +Sunstone.addActions(datastore_actions); +Sunstone.addMainTab('datastores_tab',datastores_tab); +Sunstone.addInfoPanel('datastore_info_panel',datastore_info_panel); + + +function datastoreElements() { + return getSelectedNodes(dataTable_datastores); +} + +function vmShow(req) { + Sunstone.runAction("Datastore.show",req.request.data[0]); +} + +function datastoreElementArray(element_json){ + var element = element_json.DATASTORE; + + return [ + '', + element.ID, + element.UNAME, + element.GNAME, + element.NAME, + element.CLUSTER.length ? element.CLUSTER : "-" + ]; +} + +function datastoreInfoListener(){ + + $('#tbodydatastores tr',dataTable_datastores).live("click", function(e){ + if ($(e.target).is('input') || $(e.target).is('a img')) {return true;} + + var aData = dataTable_datastores.fnGetData(this); + var id = $(aData[0]).val(); + if (!id) return true; + + popDialogLoading(); + Sunstone.runAction("Datastore.showinfo",id); + return false; + }); +}; + +function updateDatastoreSelect(){ + datastores_select = makeSelectOptions(dataTable_datastores, + 1, + 4, + [], + [] + ); +}; + +function updateDatastoreElement(request, element_json){ + var id = element_json.DATASTORE.ID; + var element = datastoreElementArray(element_json); + updateSingleElement(element,dataTable_datastores,'#datastore_'+id) + updateDatastoreSelect(); +} + +function deleteDatastoreElement(request){ + deleteElement(dataTable_datastores,'#datastore_'+request.request.data); + updateDatastoreSelect(); +} + +function addDatastoreElement(request,element_json){ + var 
id = element_json.DATASTORE.ID; + var element = datastoreElementArray(element_json); + addElement(element,dataTable_datastores); + updateDatastoreSelect(); +} + + +function updateDatastoresView(request, list){ + var list_array = []; + + $.each(list,function(){ + list_array.push( datastoreElementArray(this)); + }); + + updateView(list_array,dataTable_datastores); + updateDatastoreSelect(); + updateDashboard("datastores",list); + updateInfraDashboard("datastores",list); +} + + +function updateDatastoreInfo(request,ds){ + var info = ds.DATASTORE; + var images_str = ""; + if (info.IMAGES.ID && + info.IMAGES.ID.constructor == Array){ + for (var i=0; i\ +
    '+tr("Datastore information")+' - '+info.NAME+'
    '+tr("ID")+''+info.ID+'
    '+tr("Name")+''+info.NAME+'
    '+tr("Owner")+''+info.UNAME+'
    '+tr("Group")+''+info.GNAME+'
    '+tr("Cluster")+''+(element.CLUSTER.length ? element.CLUSTER : "-")+'
    '+tr("DS Mad")+''+info.DS_MAD+'
    '+tr("TM Mad")+''+ info.TM_MAD +'
    '+tr("Base path")+''+info.BASE_PATH+'
    '+tr("Images")+''+images_str+'
    '+tr("Permissions")+'
         '+tr("Owner")+''+ownerPermStr(info)+'
         '+tr("Group")+''+groupPermStr(info)+'
         '+tr("Other")+''+otherPermStr(info)+'
    ' + } + + var template_tab = { + title: tr("Datastore Template"), + content: + '\ + '+ + prettyPrintJSON(info.TEMPLATE)+ + '
    '+tr("Datastore template")+'
    ' + } + + Sunstone.updateInfoPanelTab("datastore_info_panel","datastore_info_tab",info_tab); + Sunstone.updateInfoPanelTab("datastore_info_panel","datastore_template_tab",template_tab); + Sunstone.popUpInfoPanel("datastore_info_panel"); +} + +// Sets up the create-template dialog and all the processing associated to it, +// which is a lot. +function setupCreateDatastoreDialog(){ + + dialogs_context.append('
    '); + //Insert HTML in place + $create_datastore_dialog = $('#create_datastore_dialog') + var dialog = $create_datastore_dialog; + dialog.html(create_datastore_tmpl); + + //Prepare jquery dialog + dialog.dialog({ + autoOpen: false, + modal: true, + width: 400 + }); + + $('button',dialog).button(); + setupTips(dialog); + + $('#create_datastore_form',dialog).submit(function(){ + var name = $('#name',this).val(); + var cluster_id = $('#cluster_id',this).val(); + var ds_mad = $('#ds_mad',this).val(); + var tm_mad = $('#tm_mad',this).val(); + + if (!name){ + notifyError("Please provide a name"); + return false; + }; + + var ds_obj = { + "datastore" : { + "name" : name, + "ds_mad" : ds_mad, + "tm_mad" : tm_mad + }, + "cluster_id" : cluster_id + }; + + Sunstone.runAction("Datastore.create",ds_obj); + + $create_datastore_dialog.dialog('close'); + return false; + }); +} + +function popUpCreateDatastoreDialog(){ + $('select#cluster_id',$create_datastore_dialog).html(clusters_sel()); + $create_datastore_dialog.dialog('open'); +} + +function setupDatastoreTemplateUpdateDialog(){ + //Append to DOM + dialogs_context.append('
    '); + var dialog = $('#datastore_template_update_dialog',dialogs_context); + + //Put HTML in place + dialog.html(update_datastore_tmpl); + + var height = Math.floor($(window).height()*0.8); //set height to a percentage of the window + + //Convert into jQuery + dialog.dialog({ + autoOpen:false, + width:500, + modal:true, + height:height, + resizable:true, + }); + + $('button',dialog).button(); + + $('#datastore_template_update_select',dialog).change(function(){ + var id = $(this).val(); + $('.permissions_table input',dialog).removeAttr('checked'); + $('.permissions_table',dialog).removeAttr('update'); + if (id && id.length){ + var dialog = $('#datastore_template_update_dialog'); + $('#template_template_update_textarea',dialog).val(tr("Loading")+"..."); + Sunstone.runAction("Datastore.fetch_template",id); + Sunstone.runAction("Datastore.fetch_permissions",id); + } else { + $('#datastore_template_update_textarea',dialog).val(""); + }; + }); + + $('.permissions_table input',dialog).change(function(){ + $(this).parents('table').attr('update','update'); + }); + + $('form',dialog).submit(function(){ + var dialog = $(this); + var new_template = $('#datastore_template_update_textarea',dialog).val(); + var id = $('#datastore_template_update_select',dialog).val(); + if (!id || !id.length) { + $(this).parents('#datastore_template_update_dialog').dialog('close'); + return false; + }; + + var permissions = $('.permissions_table',dialog); + if (permissions.attr('update')){ + var perms = { + octet : buildOctet(permissions) + }; + Sunstone.runAction("Datastore.chmod",id,perms); + }; + + Sunstone.runAction("Datastore.update",id,new_template); + $(this).parents('#datastore_template_update_dialog').dialog('close'); + return false; + }); +}; + +function popUpDatastoreTemplateUpdateDialog(){ + var select = makeSelectOptions(dataTable_datastores, + 1,//id_col + 4,//name_col + [], + [] + ); + var sel_elems = getSelectedNodes(dataTable_datastores); + + + var dialog = 
$('#datastore_template_update_dialog'); + $('#datastore_template_update_select',dialog).html(select); + $('#datastore_template_update_textarea',dialog).val(""); + $('.permissions_table input',dialog).removeAttr('checked'); + $('.permissions_table',dialog).removeAttr('update'); + + if (sel_elems.length >= 1){ //several items in the list are selected + //grep them + var new_select= sel_elems.length > 1? '' : ""; + $('option','').each(function(){ + var val = $(this).val(); + if ($.inArray(val,sel_elems) >= 0){ + new_select+=''; + }; + }); + $('#datastore_template_update_select',dialog).html(new_select); + if (sel_elems.length == 1) { + $('#datastore_template_update_select option',dialog).attr('selected','selected'); + $('#datastore_template_update_select',dialog).trigger("change"); + }; + }; + + dialog.dialog('open'); + return false; +}; + +//Prepares autorefresh +function setDatastoreAutorefresh(){ + setInterval(function(){ + var checked = $('input.check_item:checked',dataTable_datastores); + var filter = $("#datatable_datastores_filter input", + dataTable_datastores.parents('#datatable_datastores_wrapper')).attr('value'); + if (!checked.length && !filter.length){ + Sunstone.runAction("Datastore.autorefresh"); + }; + },INTERVAL+someTime()); +} + + +$(document).ready(function(){ + + dataTable_datastores = $("#datatable_datastores",main_tabs_context).dataTable({ + "bJQueryUI": true, + "bSortClasses": false, + "sPaginationType": "full_numbers", + "bAutoWidth":false, + "aoColumnDefs": [ + { "bSortable": false, "aTargets": ["check"] }, + { "sWidth": "60px", "aTargets": [0] }, + { "sWidth": "35px", "aTargets": [1] }, + { "sWidth": "100px", "aTargets": [2,3,5] } + ], + "oLanguage": (datatable_lang != "") ? 
+ { + sUrl: "locale/"+lang+"/"+datatable_lang + } : "" + }); + + dataTable_datastores.fnClearTable(); + addElement([ + spinner, + '','','','',''],dataTable_datastores); + Sunstone.runAction("Datastore.list"); + + setupCreateDatastoreDialog(); + setupDatastoreTemplateUpdateDialog(); + setDatastoreAutorefresh(); + + initCheckAllBoxes(dataTable_datastores); + tableCheckboxesListener(dataTable_datastores); + datastoreInfoListener(); + + $('div#menu li#li_datastores_tab').live('click',function(){ + dataTable_datastores.fnFilter('',5); + }); +}) \ No newline at end of file diff --git a/src/sunstone/public/js/plugins/groups-tab.js b/src/sunstone/public/js/plugins/groups-tab.js index 981ed8f3c7..bd267b42e7 100644 --- a/src/sunstone/public/js/plugins/groups-tab.js +++ b/src/sunstone/public/js/plugins/groups-tab.js @@ -142,8 +142,10 @@ var group_buttons = { var groups_tab = { title: tr("Groups"), content: groups_tab_content, - buttons: group_buttons -} + buttons: group_buttons, + tabClass: 'subTab', + parentTab: 'system_tab' +}; Sunstone.addActions(group_actions); Sunstone.addMainTab('groups_tab',groups_tab); @@ -224,6 +226,7 @@ function updateGroupsView(request, group_list){ updateView(group_list_array,dataTable_groups); updateGroupSelect(group_list); updateDashboard("groups",group_list); + updateSystemDashboard("groups",group_list); } //Prepares the dialog to create diff --git a/src/sunstone/public/js/plugins/hosts-tab.js b/src/sunstone/public/js/plugins/hosts-tab.js index d076b59573..0abc980a5b 100644 --- a/src/sunstone/public/js/plugins/hosts-tab.js +++ b/src/sunstone/public/js/plugins/hosts-tab.js @@ -43,6 +43,7 @@ var hosts_tab_content = ' + tr("All") + '\ ' + tr("id") + '\ ' + tr("Name") + '\ + ' + tr("Cluster") + '\ ' + tr("Running VMs") + '\ ' + tr("CPU Use") + '\ ' + tr("Memory use") + '\ @@ -93,19 +94,15 @@ var create_host_tmpl = \ \ \ -
    \ - \ - \ \
    \ \
    \
    \ -
    \ +
    \
    \
    \
    \ @@ -250,7 +247,20 @@ var host_actions = { notifyMessage(tr("Template updated correctly")); }, error: onError - } + }, + + "Host.addtocluster" : { + type: "multiple", + call: function(params){ + var cluster = params.data.extra_param; + var host = params.data.id; + Sunstone.runAction("Cluster.addhost",cluster,host); + }, + callback: null, + elements: hostElements, + notify:true, + }, + }; var host_buttons = { @@ -268,6 +278,12 @@ var host_buttons = { text: tr("Update a template"), alwaysActive: true }, + "Host.addtocluster" : { + type: "confirm_with_select", + text: tr("Select cluster"), + select: clusters_sel, + tip: tr("Select the destination cluster:"), + }, "Host.enable" : { type: "action", text: tr("Enable") @@ -302,8 +318,11 @@ var host_info_panel = { var hosts_tab = { title: tr("Hosts"), content: hosts_tab_content, - buttons: host_buttons -} + buttons: host_buttons, + tabClass: "subTab", + parentTab: "infra_tab", + showOnTopMenu: false, +}; Sunstone.addActions(host_actions); Sunstone.addMainTab('hosts_tab',hosts_tab); @@ -362,6 +381,7 @@ function hostElementArray(host_json){ '', host.ID, host.NAME, + host.CLUSTER.length ? host.CLUSTER : "-", host.HOST_SHARE.RUNNING_VMS, //rvm pb_cpu, pb_mem, @@ -389,7 +409,7 @@ function updateHostSelect(){ hosts_select = makeSelectOptions(dataTable_hosts, 1,//id_col 2,//name_col - [6,6],//status_cols + [7,7],//status_cols ["ERROR","OFF"]//bad_st ); } @@ -429,6 +449,7 @@ function updateHostsView (request,host_list){ updateHostSelect(); //dependency with the dashboard plugin updateDashboard("hosts",host_list); + updateInfraDashboard("hosts",host_list); } //Updates the host info panel tab's content and pops it up @@ -448,6 +469,14 @@ function updateHostInfo(request,host){ ' + tr("id") + '\ '+host_info.ID+'\ \ + \ + ' + tr("Name") + '\ + '+host_info.NAME+'\ + \ + \ + ' + tr("Cluster") + '\ + '+(host.CLUSTER.length ? 
host.CLUSTER : "-")+'\ + \ \ ' + tr("State") + '\ '+tr(OpenNebula.Helper.resource_state("host",host_info.STATE))+'\ @@ -553,15 +582,19 @@ function setupCreateHostDialog(){ notifyError(tr("Host name missing!")); return false; } + + var cluster_id = $('#host_cluster_id',this).val(); + if (!cluster_id) cluster_id = "-1"; + var host_json = { "host": { "name": $('#name',this).val(), - "tm_mad": $('#tm_mad :selected',this).val(), - "vm_mad": $('#vmm_mad :selected',this).val(), - "vnm_mad": $('#vnm_mad :selected',this).val(), - "im_mad": $('#im_mad :selected',this).val() + "vm_mad": $('#vmm_mad',this).val(), + "vnm_mad": $('#vnm_mad',this).val(), + "im_mad": $('#im_mad',this).val(), + "cluster_id": cluster_id } - } + }; //Create the OpenNebula.Host. //If it's successfull we refresh the list. @@ -573,6 +606,7 @@ function setupCreateHostDialog(){ //Open creation dialogs function popUpCreateHostDialog(){ + $('#host_cluster_id',$create_host_dialog).html(clusters_sel()); $create_host_dialog.dialog('open'); return false; } @@ -597,10 +631,6 @@ function hostMonitorError(req,error_json){ $('#host_monitoring_tab '+id).html('
    '+message+'
    '); } -function hosts_sel() { - return hosts_select; -} - //This is executed after the sunstone.js ready() is run. //Here we can basicly init the host datatable, preload it //and add specific listeners @@ -614,22 +644,22 @@ $(document).ready(function(){ "sPaginationType": "full_numbers", "aoColumnDefs": [ { "bSortable": false, "aTargets": ["check"] }, - { "sWidth": "60px", "aTargets": [0,3] }, + { "sWidth": "60px", "aTargets": [0,4] }, { "sWidth": "35px", "aTargets": [1] }, - { "sWidth": "100px", "aTargets": [6] }, - { "sWidth": "200px", "aTargets": [4,5] } + { "sWidth": "100px", "aTargets": [7,3] }, + { "sWidth": "200px", "aTargets": [5,6] } ], - "oLanguage": (datatable_lang != "") ? - { - sUrl: "locale/"+lang+"/"+datatable_lang - } : "" + "oLanguage": (datatable_lang != "") ? + { + sUrl: "locale/"+lang+"/"+datatable_lang + } : "" }); //preload it dataTable_hosts.fnClearTable(); addElement([ spinner, - '','','','','',''],dataTable_hosts); + '','','','','','',''],dataTable_hosts); Sunstone.runAction("Host.list"); setupCreateHostDialog(); @@ -639,4 +669,8 @@ $(document).ready(function(){ initCheckAllBoxes(dataTable_hosts); tableCheckboxesListener(dataTable_hosts); hostInfoListener(); + + $('div#menu li#li_hosts_tab').live('click',function(){ + dataTable_hosts.fnFilter('',3); + }); }); diff --git a/src/sunstone/public/js/plugins/images-tab.js b/src/sunstone/public/js/plugins/images-tab.js index 7834fed26a..52af42ef20 100644 --- a/src/sunstone/public/js/plugins/images-tab.js +++ b/src/sunstone/public/js/plugins/images-tab.js @@ -28,6 +28,7 @@ var images_tab_content = '+tr("Owner")+'\ '+tr("Group")+'\ '+tr("Name")+'\ + '+tr("Datastore")+'\ '+tr("Size")+'\ '+tr("Type")+'\ '+tr("Registration time")+'\ @@ -62,6 +63,12 @@ var create_image_tmpl = \
    '+tr("Human readable description of the image for other users.")+'
    \ \ +
    \ + \ + \ +
    '+tr("Select the datastore for this image")+'
    \ +
    \ \
    \
    \ @@ -316,7 +323,7 @@ var image_actions = { type: "single", call: OpenNebula.Image.update, callback: function() { - notifyMessage(tr("Template updated correctly")); + notifyMessage(tr("Image updated correctly")); }, error: onError }, @@ -378,7 +385,7 @@ var image_actions = { type: "multiple", call: OpenNebula.Image.chown, callback: function (req) { - Sunstone.runAction("Image.show",req.request.data[0]); + Sunstone.runAction("Image.show",req.request.data[0][0]); }, elements: imageElements, error: onError, @@ -389,7 +396,7 @@ var image_actions = { type: "multiple", call: OpenNebula.Image.chgrp, callback: function (req) { - Sunstone.runAction("Image.show",req.request.data[0]); + Sunstone.runAction("Image.show",req.request.data[0][0]); }, elements: imageElements, error: onError, @@ -489,7 +496,9 @@ var image_info_panel = { var images_tab = { title: tr("Images"), content: images_tab_content, - buttons: image_buttons + buttons: image_buttons, + tabClass: 'subTab', + parentTab: 'vres_tab' } Sunstone.addActions(image_actions); @@ -522,6 +531,7 @@ function imageElementArray(image_json){ image.UNAME, image.GNAME, image.NAME, + image.DATASTORE, image.SIZE, '', pretty_time(image.REGTIME), @@ -578,6 +588,7 @@ function updateImagesView(request, images_list){ updateView(image_list_array,dataTable_images); updateDashboard("images",images_list); + updateVResDashboard("images",images_list); } // Callback to update the information panel tabs and pop it up @@ -599,6 +610,10 @@ function updateImageInfo(request,img){ '+tr("Name")+'\ '+img_info.NAME+'\ \ + \ + '+tr("Datastore")+'\ + '+img_info.DATASTORE+'\ + \ \ '+tr("Owner")+'\ '+img_info.UNAME+'\ @@ -643,7 +658,7 @@ function updateImageInfo(request,img){ '+tr("Running #VMS")+'\ '+img_info.RUNNING_VMS+'\ \ - Permissions\ + '+tr("Permissions")+'\ \      '+tr("Owner")+'\ '+ownerPermStr(img_info)+'\ @@ -843,6 +858,13 @@ function setupCreateImageDialog(){ } }); if (exit) { return false; } + + var ds_id = $('#img_datastore',this).val(); 
+ if (!ds_id){ + notifyError(tr("Please select a datastore for this image")); + return false; + }; + var img_json = {}; var name = $('#img_name',this).val(); @@ -897,8 +919,8 @@ function setupCreateImageDialog(){ img_json[attr_name] = attr_value; }); - - img_obj = { "image" : img_json }; + img_obj = { "image" : img_json, + "ds_id" : ds_id}; if (upload){ uploader._onInputChange(file_input); @@ -922,6 +944,8 @@ function popUpCreateImageDialog(){ $('#file-uploader input',$create_image_dialog).removeAttr("style"); $('#file-uploader input',$create_image_dialog).attr('style','margin:0;width:256px!important'); + $('#img_datastore',$create_image_dialog).html(datastores_sel()); + $create_image_dialog.dialog('open'); } @@ -1094,10 +1118,10 @@ $(document).ready(function(){ "sPaginationType": "full_numbers", "aoColumnDefs": [ { "bSortable": false, "aTargets": ["check"] }, - { "sWidth": "60px", "aTargets": [0,2,3,8,9] }, - { "sWidth": "35px", "aTargets": [1,5,10] }, - { "sWidth": "100px", "aTargets": [6] }, - { "sWidth": "150px", "aTargets": [7] } + { "sWidth": "60px", "aTargets": [0,2,3,9,10] }, + { "sWidth": "35px", "aTargets": [1,6,11] }, + { "sWidth": "100px", "aTargets": [5,7] }, + { "sWidth": "150px", "aTargets": [8] } ], "oLanguage": (datatable_lang != "") ? 
{ @@ -1108,7 +1132,7 @@ $(document).ready(function(){ dataTable_images.fnClearTable(); addElement([ spinner, - '','','','','','','','','',''],dataTable_images); + '','','','','','','','','','',''],dataTable_images); Sunstone.runAction("Image.list"); setupCreateImageDialog(); diff --git a/src/sunstone/public/js/plugins/infra-tab.js b/src/sunstone/public/js/plugins/infra-tab.js new file mode 100644 index 0000000000..49e3782ed7 --- /dev/null +++ b/src/sunstone/public/js/plugins/infra-tab.js @@ -0,0 +1,121 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------- */ + +var infra_tab_content = +'\ +\ +\ +\ +
    \ +\ + \ + \ + \ + \ + \ + \ +
    \ +
    \ +

    ' + tr("Summary of infrastructure resources") + '

    \ +
    \ +\ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
    ' + tr("Clusters") + '
    ' + tr("Hosts") + '
    ' + tr("Datastores") + '
    ' + tr("Virtual Networks") + '
    \ +\ +
    \ +
    \ +
    \ + \ +
    \ +
    \ +\ + \ + \ + \ +
    \ +
    \ +

    ' + tr("Infrastructure resources") + '

    \ +
    \ +

    '+tr("The Infrastructure menu allows management of Hosts, Datastores, Virtual Networks and the Clusters they are placed in. The Clusters node can be expanded, and resources can be managed for each cluster.")+'

    \ +

    '+tr("You can find further information on the following links:")+'

    \ + \ +
    \ +
    \ +
    \ +
    '; + +var infra_tab = { + title: tr("Infrastructure"), + content: infra_tab_content +} + +Sunstone.addMainTab('infra_tab',infra_tab); + +function updateInfraDashboard(what,json_info){ + var db = $('#infra_tab',main_tabs_context); + switch (what){ + case "hosts": + var total_hosts=json_info.length; + $('#infra_total_hosts',db).html(total_hosts); + break; + case "vnets": + var total_vnets=json_info.length; + $('#infra_total_vnets',db).html(total_vnets); + break; + case "datastores": + var total_datastores=json_info.length; + $('#infra_total_datastores',db).html(total_datastores); + break; + case "clusters": + var total_clusters=json_info.length; + $('#infra_total_clusters',db).html(total_clusters); + break; + }; +}; + +$(document).ready(function(){ + +}); \ No newline at end of file diff --git a/src/sunstone/public/js/plugins/system-tab.js b/src/sunstone/public/js/plugins/system-tab.js new file mode 100644 index 0000000000..ac636eb503 --- /dev/null +++ b/src/sunstone/public/js/plugins/system-tab.js @@ -0,0 +1,113 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------- */ + +var system_tab_content = +'\ +\ +\ +\ +
    \ +\ + \ + \ + \ + \ + \ + \ +
    \ +
    \ +

    ' + tr("Summary of system resources") + '

    \ +
    \ +\ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + \ +
    ' + tr("Groups") + '
    ' + tr("Users")+'
    ' + tr("ACL Rules") + '
    \ +\ +
    \ +
    \ +
    \ +
    \ +

    ' + tr("Quickstart") + '

    \ + \ +
    \ +
    \ +
    \ +\ + \ + \ + \ +
    \ +
    \ +

    ' + tr("System Resources") + '

    \ +
    \ +

    '+tr("System resources management is only accesible to users of the oneadmin group. It comprises the operations regarding OpenNebula groups, users and ACLs.")+'

    \ +

    '+tr("You can find further information on the following links:")+'

    \ + \ +
    \ +
    \ +
    \ +
    '; + +var system_tab = { + title: tr("System"), + content: system_tab_content +} + +Sunstone.addMainTab('system_tab',system_tab); + +function updateSystemDashboard(what, json_info){ + var db = $('#system_tab',main_tabs_context); + switch (what){ + case "groups": + var total_groups=json_info.length; + $('#system_total_groups',db).html(total_groups); + break; + case "users": + var total_users=json_info.length; + $('#system_total_users',db).html(total_users); + break; + case "acls": + var total_acls=json_info.length; + $('#system_total_acls',db).html(total_acls); + break; + }; +} + +$(document).ready(function(){ + +}); \ No newline at end of file diff --git a/src/sunstone/public/js/plugins/templates-tab.js b/src/sunstone/public/js/plugins/templates-tab.js index c7b253496f..f7496b0892 100644 --- a/src/sunstone/public/js/plugins/templates-tab.js +++ b/src/sunstone/public/js/plugins/templates-tab.js @@ -599,21 +599,21 @@ var update_template_tmpl = '+tr("Admin")+'\ \ '+tr("Owner")+'\ - \ - \ - \ + \ + \ + \ \ \ '+tr("Group")+'\ - \ - \ - \ + \ + \ + \ \ \ '+tr("Other")+'\ - \ - \ - \ + \ + \ + \ \ \
    \ @@ -624,7 +624,7 @@ var update_template_tmpl =
    \
    \ \
    \
    \ @@ -818,7 +818,9 @@ var template_info_panel = { var templates_tab = { title: tr("Templates"), content: templates_tab_content, - buttons: template_buttons + buttons: template_buttons, + tabClass: 'subTab', + parentTab: 'vres_tab' } Sunstone.addActions(template_actions); @@ -910,7 +912,7 @@ function updateTemplatesView(request, templates_list){ updateView(template_list_array,dataTable_templates); updateTemplateSelect(); updateDashboard("templates",templates_list); - + updateVResDashboard("templates",templates_list); } // Callback to update the information panel tabs and pop it up @@ -944,7 +946,7 @@ function updateTemplateInfo(request,template){ '+tr("Register time")+'\ '+pretty_time(template_info.REGTIME)+'\ \ - Permissions\ + '+tr("Permissions")+'\ \      '+tr("Owner")+'\ '+ownerPermStr(template_info)+'\ @@ -2076,7 +2078,6 @@ function setupTemplateTemplateUpdateDialog(){ Sunstone.runAction("Template.update",id,new_template); $(this).parents('#template_template_update_dialog').dialog('close'); - dialog.dialog('close'); return false; }); }; diff --git a/src/sunstone/public/js/plugins/users-tab.js b/src/sunstone/public/js/plugins/users-tab.js index 19d48435e1..906004e77d 100644 --- a/src/sunstone/public/js/plugins/users-tab.js +++ b/src/sunstone/public/js/plugins/users-tab.js @@ -306,8 +306,10 @@ var user_info_panel = { var users_tab = { title: tr("Users"), content: users_tab_content, - buttons: user_buttons -} + buttons: user_buttons, + tabClass: 'subTab', + parentTab: 'system_tab' +}; Sunstone.addActions(user_actions); Sunstone.addMainTab('users_tab',users_tab); @@ -383,6 +385,7 @@ function updateUsersView(request,users_list){ }); updateView(user_list_array,dataTable_users); updateDashboard("users",users_list); + updateSystemDashboard("users",users_list); updateUserSelect(); }; diff --git a/src/sunstone/public/js/plugins/vms-tab.js b/src/sunstone/public/js/plugins/vms-tab.js index cfb477c9c7..ff5815c3fe 100644 --- a/src/sunstone/public/js/plugins/vms-tab.js 
+++ b/src/sunstone/public/js/plugins/vms-tab.js @@ -598,7 +598,9 @@ var vm_info_panel = { var vms_tab = { title: tr("Virtual Machines"), content: vms_tab_content, - buttons: vm_buttons + buttons: vm_buttons, + tabClass: 'subTab', + parentTab: 'vres_tab' } Sunstone.addActions(vm_actions); @@ -700,6 +702,7 @@ function updateVMachinesView(request, vmachine_list){ updateView(vmachine_list_array,dataTable_vMachines); updateDashboard("vms",vmachine_list); + updateVResDashboard("vms",vmachine_list); } @@ -1302,7 +1305,6 @@ $(document).ready(function(){ setupSaveasDialog(); setVMAutorefresh(); setupVNC(); - setupTips initCheckAllBoxes(dataTable_vMachines); tableCheckboxesListener(dataTable_vMachines); diff --git a/src/sunstone/public/js/plugins/vnets-tab.js b/src/sunstone/public/js/plugins/vnets-tab.js index daa3f60a3e..d65f672ebe 100644 --- a/src/sunstone/public/js/plugins/vnets-tab.js +++ b/src/sunstone/public/js/plugins/vnets-tab.js @@ -28,6 +28,7 @@ var vnets_tab_content = '+tr("Owner")+'\ '+tr("Group")+'\ '+tr("Name")+'\ + '+tr("Cluster")+'\ '+tr("Type")+'\ '+tr("Bridge")+'\ '+tr("Total Leases")+'\ @@ -49,6 +50,7 @@ var create_vn_tmpl =
    \ \
    \ +
    \
    \
    \ \ @@ -380,6 +382,18 @@ var vnet_actions = { error: onError }, + "Network.addtocluster" : { + type: "multiple", + call: function(params){ + var cluster = params.data.extra_param; + var vnet = params.data.id; + Sunstone.runAction("Cluster.addvnet",cluster,vnet); + }, + callback: null, + elements: vnElements, + notify:true, + }, + }; @@ -400,7 +414,13 @@ var vnet_buttons = { text: tr("Update properties"), alwaysActive: true }, - + "Network.addtocluster" : { + type: "confirm_with_select", + text: tr("Select cluster"), + select: clusters_sel, + tip: tr("Select the destination cluster:"), + condition: mustBeAdmin, + }, "Network.chown" : { type: "confirm_with_select", text: tr("Change owner"), @@ -437,7 +457,10 @@ var vnet_info_panel = { var vnets_tab = { title: tr("Virtual Networks"), content: vnets_tab_content, - buttons: vnet_buttons + buttons: vnet_buttons, + tabClass: "subTab", + parentTab: "infra_tab", + showOnTopMenu: false, } Sunstone.addActions(vnet_actions); @@ -463,6 +486,7 @@ function vNetworkElementArray(vn_json){ network.UNAME, network.GNAME, network.NAME, + network.CLUSTER.length ? network.CLUSTER : "-", parseInt(network.TYPE) ? "FIXED" : "RANGED", network.BRIDGE, network.TOTAL_LEASES ]; @@ -519,6 +543,7 @@ function updateVNetworksView(request, network_list){ updateView(network_list_array,dataTable_vNetworks); //dependency with dashboard updateDashboard("vnets",network_list); + updateInfraDashboard("vnets",network_list); } @@ -539,6 +564,10 @@ function updateVNetworkInfo(request,vn){ '+tr("Name")+'\ '+vn_info.NAME+'\ \ + \ + '+tr("Cluster")+'\ + '+(network.CLUSTER.length ? network.CLUSTER : "-")+'\ + \ \ '+tr("Owner")+'\ '+vn_info.UNAME+'\ @@ -563,7 +592,7 @@ function updateVNetworkInfo(request,vn){ '+tr("VLAN ID")+'\ '+ (typeof(vn_info.VLAN_ID) == "object" ? 
"--": vn_info.VLAN_ID) +'\ \ - Permissions\ + '+tr("Permissions")+'\ \      '+tr("Owner")+'\ '+ownerPermStr(vn_info)+'\ @@ -849,6 +878,7 @@ function setupCreateVNetDialog() { var phydev = $('#phydev',this).val(); var vlan = $('#vlan',this).val(); var vlan_id = $('#vlan_id',this).val(); + switch (network_mode) { case "default": if (!bridge && !phydev){ @@ -948,7 +978,9 @@ function setupCreateVNetDialog() { //Create the VNetwork. - network_json = {"vnet" : network_json}; + network_json = { + "vnet" : network_json, + }; Sunstone.runAction("Network.create",network_json); $create_vn_dialog.dialog('close'); @@ -1145,9 +1177,9 @@ $(document).ready(function(){ "sPaginationType": "full_numbers", "aoColumnDefs": [ { "bSortable": false, "aTargets": ["check"] }, - { "sWidth": "60px", "aTargets": [0,5,6,7] }, + { "sWidth": "60px", "aTargets": [0,6,7,8] }, { "sWidth": "35px", "aTargets": [1] }, - { "sWidth": "100px", "aTargets": [2,3] } + { "sWidth": "100px", "aTargets": [2,3,5] } ], "oLanguage": (datatable_lang != "") ? 
{ @@ -1158,7 +1190,7 @@ $(document).ready(function(){ dataTable_vNetworks.fnClearTable(); addElement([ spinner, - '','','','','','',''],dataTable_vNetworks); + '','','','','','','',''],dataTable_vNetworks); Sunstone.runAction("Network.list"); setupCreateVNetDialog(); @@ -1169,4 +1201,8 @@ $(document).ready(function(){ initCheckAllBoxes(dataTable_vNetworks); tableCheckboxesListener(dataTable_vNetworks); vNetworkInfoListener(); + + $('div#menu li#li_vnets_tab').live('click',function(){ + dataTable_vNetworks.fnFilter('',5); + }); }); diff --git a/src/sunstone/public/js/plugins/vresources-tab.js b/src/sunstone/public/js/plugins/vresources-tab.js new file mode 100644 index 0000000000..4e78534912 --- /dev/null +++ b/src/sunstone/public/js/plugins/vresources-tab.js @@ -0,0 +1,138 @@ +/* -------------------------------------------------------------------------- */ +/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); you may */ +/* not use this file except in compliance with the License. You may obtain */ +/* a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ +/* See the License for the specific language governing permissions and */ +/* limitations under the License. */ +/* -------------------------------------------------------------------------- */ + +var vres_tab_content = +'\ +\ +\ +\ +
    \ +\ + \ + \ + \ + \ + \ + \ +
    \ +
    \ +

    ' + tr("Summary of virtual resources") + '

    \ +
    \ +\ + \ + \ + \ + \ + \ + \ + \ + \ + \ +\ + \ + \ + \ + \ +
    ' + tr("VM Templates") + '
    ' + + tr("VM Instances")+ ' (' + + tr("total") + '/' + + tr("running") + '/' + + tr("failed") + ')
    ' + tr("Images") + '
    \ +\ +
    \ +
    \ +
    \ + \ +
    \ +
    \ +\ + \ + \ + \ +
    \ +
    \ +

    ' + tr("Virtual Resources") + '

    \ +
    \ +

    '+tr("The Virtual Resources menu allows management of Virtual Machine Templates, Instances and Images.")+'

    \ +

    '+tr("You can find further information on the following links:")+'

    \ + \ +
    \ +
    \ +
    \ +
    '; + +var vres_tab = { + title: tr("Virtual Resources"), + content: vres_tab_content +} + +Sunstone.addMainTab('vres_tab',vres_tab); + +function updateVResDashboard(what,json_info){ + var db = $('#vres_tab',main_tabs_context); + switch (what){ + case "vms": + var total_vms=json_info.length; + var running_vms=0; + failed_vms=0; + $.each(json_info,function(){ + vm_state = parseInt(this.VM.STATE); + if (vm_state == 3){ + running_vms++; + } + else if (vm_state == 7) { + failed_vms++; + } + }); + $('#vres_total_vms',db).html(total_vms+' / '); + $('#vres_running_vms',db).html(running_vms+' / '); + $('#vres_failed_vms',db).html(failed_vms); + break; + case "vnets": + var total_vnets=json_info.length; + $('#vres_total_vnets',db).html(total_vnets); + break; + case "images": + var total_images=json_info.length; + $('#vres_total_images',db).html(total_images); + break; + case "templates": + var total_templates=json_info.length; + $('#vres_total_templates',db).html(total_templates); + break; + }; +}; + +$(document).ready(function(){ + +}); \ No newline at end of file diff --git a/src/sunstone/public/js/sunstone-util.js b/src/sunstone/public/js/sunstone-util.js index 24a4cf81ae..22064569de 100644 --- a/src/sunstone/public/js/sunstone-util.js +++ b/src/sunstone/public/js/sunstone-util.js @@ -403,19 +403,54 @@ function waitingNodes(dataTable){ function getUserName(uid){ if (typeof(dataTable_users) != "undefined"){ - return getName(uid,dataTable_users); + return getName(uid,dataTable_users,2); } return uid; } function getGroupName(gid){ if (typeof(dataTable_groups) != "undefined"){ - return getName(gid,dataTable_groups); + return getName(gid,dataTable_groups,2); } return gid; } -function getName(id,dataTable){ +function getImageName(id){ + if (typeof(dataTable_images) != "undefined"){ + return getName(id,dataTable_images,4); + } + return id; +}; + +function getClusterName(id){ + if (typeof(dataTable_clusters) != "undefined"){ + return getName(id,dataTable_clusters,2); + } + 
return id; +}; + +function getDatastoreName(id){ + if (typeof(dataTable_datastores) != "undefined"){ + return getName(id,dataTable_datastores,4); + } + return id; +}; + +function getVNetName(id){ + if (typeof(dataTable_vNetworks) != "undefined"){ + return getName(id,dataTable_vNetworks,4); + } + return id; +}; + +function getHostName(id){ + if (typeof(dataTable_hosts) != "undefined"){ + return getName(id,dataTable_hosts,2); + } + return id; +}; + +function getName(id,dataTable,name_col){ var name = id; if (typeof(dataTable) == "undefined") { return name; @@ -424,7 +459,7 @@ function getName(id,dataTable){ $.each(nodes,function(){ if (id == this[1]) { - name = this[2]; + name = this[name_col]; return false; } }); @@ -751,6 +786,16 @@ function hosts_sel(){ return hosts_select; } +function clusters_sel() { + return clusters_select; +} + +function datastores_sel() { + return datastores_select; +} + + + function ownerUse(resource){ return parseInt(resource.PERMISSIONS.OWNER_U); }; diff --git a/src/sunstone/public/js/sunstone.js b/src/sunstone/public/js/sunstone.js index 73cb79fd84..df9659a96c 100644 --- a/src/sunstone/public/js/sunstone.js +++ b/src/sunstone/public/js/sunstone.js @@ -296,7 +296,7 @@ $(document).ready(function(){ //Insert the tabs in the DOM and their buttons. insertTabs(); - //hideSubTabs(); +// hideSubTabs(); insertButtons(); //Enhace the look of select buttons @@ -373,7 +373,7 @@ $(document).ready(function(){ }); //Start with the dashboard (supposing we have one). 
- showTab('#dashboard_tab'); + showTab('dashboard_tab'); }); @@ -452,26 +452,30 @@ function insertTabs(){ //adding the content to the proper div and by adding a list item //link to the navigation menu function insertTab(tab_name){ - var tab_info = SunstoneCfg["tabs"][tab_name]; - var condition = tab_info["condition"]; - var tabClass = tab_info["tabClass"]; - var parent = ""; - - if (!tabClass) { - tabClass="topTab"; - } else if (tabClass=="subTab") { - parent = tab_info["parentTab"]; - }; + var tab_info = SunstoneCfg['tabs'][tab_name]; + var condition = tab_info['condition']; + var tabClass = tab_info['tabClass'] ? tab_info['tabClass'] : 'topTab'; + var parent = tab_info['parentTab'] ? tab_info['parentTab'] : ''; + var showOnTop = tab_info['showOnTopMenu']; //skip this tab if we do not meet the condition if (condition && !condition()) {return;} - main_tabs_context.append('
    '); + main_tabs_context.append(''); $('div#'+tab_name,main_tabs_context).html(tab_info.content); - $('div#menu ul#navigation').append('
  • '+tab_info.title+'
  • '); -} + $('div#menu ul#navigation').append('
  • '+tab_info.title+'
  • '); + + if (parent){ //this is a subtab + $('div#menu li#li_'+tab_name).hide();//hide by default + $('div#menu li#li_'+parent+' span').css("display","inline-block"); + }; + + if (showOnTop){ + $('div#header ul#menutop_ul').append('
  • '+tab_info.title+'
  • '); + }; +}; function hideSubTabs(){ for (tab in SunstoneCfg["tabs"]){ diff --git a/src/sunstone/views/index.erb b/src/sunstone/views/index.erb index 45f277b48c..a2ad30582d 100644 --- a/src/sunstone/views/index.erb +++ b/src/sunstone/views/index.erb @@ -58,7 +58,7 @@ @@ -66,6 +66,15 @@ + + +
    Welcome  | Sign Out
    diff --git a/src/test/Nebula.cc b/src/test/Nebula.cc index 86229b0dbe..14684ebe4e 100644 --- a/src/test/Nebula.cc +++ b/src/test/Nebula.cc @@ -87,6 +87,16 @@ void Nebula::start() delete gpool; } + if ( dspool != 0) + { + delete dspool; + } + + if ( clpool != 0) + { + delete clpool; + } + if ( vmm != 0) { delete vmm; @@ -184,6 +194,8 @@ void Nebula::start() VMTemplatePool::bootstrap(db); GroupPool::bootstrap(db); AclManager::bootstrap(db); + DatastorePool::bootstrap(db); + ClusterPool::bootstrap(db); } catch (exception&) { @@ -199,6 +211,11 @@ void Nebula::start() string default_image_type = "OS"; string default_device_prefix = "hd"; + if (tester->need_cluster_pool) + { + clpool = tester->create_clpool(db); + } + if (tester->need_vm_pool) { vmpool = tester->create_vmpool(db,hook_location,var_location); @@ -235,6 +252,11 @@ void Nebula::start() { tpool = tester->create_tpool(db); } + + if (tester->need_datastore_pool) + { + dspool = tester->create_dspool(db); + } } catch (exception&) { diff --git a/src/test/NebulaTest.cc b/src/test/NebulaTest.cc index eda496815d..1527e57982 100644 --- a/src/test/NebulaTest.cc +++ b/src/test/NebulaTest.cc @@ -62,6 +62,16 @@ GroupPool* NebulaTest::create_gpool(SqlDB* db) return new GroupPool(db); } +DatastorePool* NebulaTest::create_dspool(SqlDB* db) +{ + return new DatastorePool(db); +} + +ClusterPool* NebulaTest::create_clpool(SqlDB* db) +{ + return new ClusterPool(db); +} + // ----------------------------------------------------------- // Managers // ----------------------------------------------------------- diff --git a/src/tm/TransferManager.cc b/src/tm/TransferManager.cc index 1b19f759d4..13af39a96b 100644 --- a/src/tm/TransferManager.cc +++ b/src/tm/TransferManager.cc @@ -22,6 +22,11 @@ /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +const char * TransferManager::transfer_driver_name = "transfer_exe"; + +/* 
-------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + extern "C" void * tm_action_loop(void *arg) { TransferManager * tm; @@ -209,6 +214,7 @@ void TransferManager::prolog_action(int vid) string files; string size; string format; + string tm_mad, system_tm_mad; VirtualMachine * vm; Nebula& nd = Nebula::instance(); @@ -224,6 +230,8 @@ void TransferManager::prolog_action(int vid) // Setup & Transfer script // ------------------------------------------------------------------------ + system_tm_mad = nd.get_system_ds_tm_mad(); + vm = vmpool->get(vid,true); if (vm == 0) @@ -236,7 +244,7 @@ void TransferManager::prolog_action(int vid) goto error_history; } - tm_md = get(vm->get_tm_mad()); + tm_md = get(); if ( tm_md == 0 ) { @@ -257,7 +265,7 @@ void TransferManager::prolog_action(int vid) num = vm->get_template_attribute("DISK",attrs); - for (int i=0; i < num ;i++,source="",type="",clon="") + for (int i=0; i < num ;i++, source="", type="", clon="", tm_mad="") { disk = dynamic_cast(attrs[i]); @@ -287,8 +295,12 @@ void TransferManager::prolog_action(int vid) continue; } - xfr << "MKSWAP " << size << " " << vm->get_hostname() << ":" - << vm->get_remote_dir() << "/disk." << i << endl; + //MKSWAP tm_mad size host:remote_system_dir/disk.i + xfr << "MKSWAP " + << system_tm_mad << " " + << size << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." << i << endl; } else if ( type == "FS" ) { @@ -304,13 +316,23 @@ void TransferManager::prolog_action(int vid) " skipping"); continue; } - - xfr << "MKIMAGE " << size << " " << format << " " - << vm->get_hostname() << ":" << vm->get_remote_dir() - << "/disk." << i << endl; + //MKIMAGE tm_mad size format host:remote_system_dir/disk.i + xfr << "MKIMAGE " + << system_tm_mad << " " + << size << " " + << format << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." 
<< i << endl; } else { + tm_mad = disk->vector_value("TM_MAD"); + + if ( tm_mad.empty() ) + { + goto error_tm_mad; + } + // ----------------------------------------------------------------- // CLONE or LINK disk images // ----------------------------------------------------------------- @@ -327,6 +349,7 @@ void TransferManager::prolog_action(int vid) (int(*)(int))toupper); } + // tm_mad fe:SOURCE host:remote_system_ds/disk.i size if (clon == "YES") { xfr << "CLONE "; @@ -336,6 +359,8 @@ void TransferManager::prolog_action(int vid) xfr << "LN "; } + xfr << tm_mad << " "; + // ----------------------------------------------------------------- // Get the disk image, and set source URL // ----------------------------------------------------------------- @@ -354,9 +379,9 @@ void TransferManager::prolog_action(int vid) { xfr << source << " "; } - - xfr << vm->get_hostname() << ":" << vm->get_remote_dir() - << "/disk." << i; + + xfr << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." << i; if (!size.empty()) //Add size for dev based disks { @@ -380,15 +405,19 @@ void TransferManager::prolog_action(int vid) if ( context_result ) { - xfr << "CONTEXT " << vm->get_context_file() << " "; + //CONTEXT tm_mad files hostname:remote_system_dir/disk.i + xfr << "CONTEXT " + << system_tm_mad << " " + << vm->get_context_file() << " "; if (!files.empty()) { xfr << files << " "; } - xfr << vm->get_hostname() << ":" << vm->get_remote_dir() - << "/disk." << num << endl; + xfr << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." 
<< num + << endl; } xfr.close(); @@ -416,9 +445,14 @@ error_file: error_driver: os.str(""); - os << "prolog, error getting driver " << vm->get_tm_mad(); + os << "prolog, error getting Transfer Manager driver."; goto error_common; +error_tm_mad: + os.str(""); + os << "prolog, undefined TM_MAD for disk image in VM template"; + xfr.close(); + error_empty_disk: os.str(""); os << "prolog, undefined source disk image in VM template"; @@ -441,16 +475,24 @@ void TransferManager::prolog_migr_action(int vid) ostringstream os; string xfr_name; + const VectorAttribute * disk; + string tm_mad; + string system_tm_mad; + + vector attrs; + int num; + VirtualMachine * vm; Nebula& nd = Nebula::instance(); const TransferManagerDriver * tm_md; - // ------------------------------------------------------------------------ // Setup & Transfer script // ------------------------------------------------------------------------ + system_tm_mad = nd.get_system_ds_tm_mad(); + vm = vmpool->get(vid,true); if (vm == 0) @@ -463,7 +505,7 @@ void TransferManager::prolog_migr_action(int vid) goto error_history; } - tm_md = get(vm->get_tm_mad()); + tm_md = get(); if ( tm_md == 0 ) { @@ -479,12 +521,43 @@ void TransferManager::prolog_migr_action(int vid) } // ------------------------------------------------------------------------ - // Move image directory + // Move system directory and disks // ------------------------------------------------------------------------ - xfr << "MV "; - xfr << vm->get_previous_hostname() << ":" << vm->get_remote_dir() << " "; - xfr << vm->get_hostname() << ":" << vm->get_remote_dir() << endl; + num = vm->get_template_attribute("DISK",attrs); + + for (int i=0 ; i < num ; i++, tm_mad="") + { + disk = dynamic_cast(attrs[i]); + + if ( disk == 0 ) + { + continue; + } + + tm_mad = disk->vector_value("TM_MAD"); + + if ( tm_mad.empty() ) + { + continue; + } + + //MV tm_mad prev_host:remote_system_dir/disk.i host:remote_system_dir/disk.i + xfr << "MV " + << tm_mad << " " + << 
vm->get_previous_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." << i << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." << i << endl; + } + + //MV tm_mad prev_host:remote_system_dir host:remote_system_dir + xfr << "MV " + << system_tm_mad << " " + << vm->get_previous_hostname() << ":" + << vm->get_remote_system_dir() << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << endl; xfr.close(); @@ -506,7 +579,7 @@ error_file: error_driver: os.str(""); - os << "prolog_migr, error getting driver " << vm->get_tm_mad(); + os << "prolog_migr, error getting Transfer Manager driver."; error_common: (nd.get_lcm())->trigger(LifeCycleManager::PROLOG_FAILURE,vid); @@ -525,16 +598,24 @@ void TransferManager::prolog_resume_action(int vid) ostringstream os; string xfr_name; + const VectorAttribute * disk; + string tm_mad; + string system_tm_mad; + + vector attrs; + int num; + VirtualMachine * vm; Nebula& nd = Nebula::instance(); const TransferManagerDriver * tm_md; - // ------------------------------------------------------------------------ // Setup & Transfer script // ------------------------------------------------------------------------ + system_tm_mad = nd.get_system_ds_tm_mad(); + vm = vmpool->get(vid,true); if (vm == 0) @@ -547,7 +628,7 @@ void TransferManager::prolog_resume_action(int vid) goto error_history; } - tm_md = get(vm->get_tm_mad()); + tm_md = get(); if ( tm_md == 0 ) { @@ -563,13 +644,41 @@ void TransferManager::prolog_resume_action(int vid) } // ------------------------------------------------------------------------ - // Move image directory + // Move system directory and disks // ------------------------------------------------------------------------ + num = vm->get_template_attribute("DISK",attrs); - xfr << "MV "; - xfr << nd.get_nebula_hostname() << ":" << vm->get_local_dir() << "/images "; - xfr << vm->get_hostname() << ":" << vm->get_remote_dir() << endl; + for (int i=0 ; i < num ; i++, 
tm_mad="") + { + disk = dynamic_cast(attrs[i]); + if ( disk == 0 ) + { + continue; + } + + tm_mad = disk->vector_value("TM_MAD"); + + if ( tm_mad.empty() ) + { + continue; + } + + //MV tm_mad fe:system_dir/disk.i host:remote_system_dir/disk.i + xfr << "MV " + << tm_mad << " " + << nd.get_nebula_hostname() << ":" + << vm->get_system_dir() << "/disk." << i << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." << i << endl; + } + + //MV tm_mad fe:system_dir host:remote_system_dir + xfr << "MV " + << system_tm_mad << " " + << nd.get_nebula_hostname() << ":"<< vm->get_system_dir() << " " + << vm->get_hostname() << ":" << vm->get_remote_system_dir() << endl; + xfr.close(); tm_md->transfer(vid,xfr_name); @@ -590,7 +699,7 @@ error_file: error_driver: os.str(""); - os << "prolog_resume, error getting driver " << vm->get_tm_mad(); + os << "prolog_resume, error getting Transfer Manager driver."; error_common: (nd.get_lcm())->trigger(LifeCycleManager::PROLOG_FAILURE,vid); @@ -600,7 +709,6 @@ error_common: return; } - /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -609,6 +717,8 @@ void TransferManager::epilog_action(int vid) ofstream xfr; ostringstream os; string xfr_name; + string system_tm_mad; + string tm_mad; const VectorAttribute * disk; string save; @@ -625,6 +735,8 @@ void TransferManager::epilog_action(int vid) // Setup & Transfer script // ------------------------------------------------------------------------ + system_tm_mad = nd.get_system_ds_tm_mad(); + vm = vmpool->get(vid,true); if (vm == 0) @@ -637,7 +749,7 @@ void TransferManager::epilog_action(int vid) goto error_history; } - tm_md = get(vm->get_tm_mad()); + tm_md = get(); if ( tm_md == 0 ) { @@ -676,17 +788,49 @@ void TransferManager::epilog_action(int vid) transform(save.begin(),save.end(),save.begin(),(int(*)(int))toupper); - if ( save == "YES" ) + tm_mad = 
disk->vector_value("TM_MAD"); + + if ( save == "YES" ) //TODO SAVE_SOURCE { - xfr << "MV " << vm->get_hostname() << ":" << vm->get_remote_dir() - << "/disk." << i << " " - << nd.get_nebula_hostname() << ":" << vm->get_local_dir() - << "/disk." << i << endl; + string source; + string save_source; + + source = disk->vector_value("SOURCE"); + save_source = disk->vector_value("SAVE_AS_SOURCE"); + + if ( source.empty() && save_source.empty() ) + { + vm->log("TM", Log::ERROR, "No SOURCE to save disk image"); + continue; + } + + if (!save_source.empty()) //Use the save as source instead + { + source = save_source; + } + + //MVDS tm_mad hostname:remote_system_dir/disk.0 + xfr << "MVDS " + << tm_mad << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." << i << " " + << source << endl; + } + else if ( !tm_mad.empty() ) //No saving disk and no system_ds disk + { + //DELETE tm_mad hostname:remote_system_dir/disk.i + xfr << "DELETE " + << tm_mad << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." 
<< i << endl; } } - xfr << "DELETE " << vm->get_hostname() <<":"<< vm->get_remote_dir() << endl; - + //DELETE system_tm_mad hostname:remote_system_dir + xfr << "DELETE " + << system_tm_mad << " " + << vm->get_hostname() << ":" << vm->get_remote_system_dir() << endl; + xfr.close(); tm_md->transfer(vid,xfr_name); @@ -707,7 +851,7 @@ error_file: error_driver: os.str(""); - os << "epilog, error getting driver " << vm->get_vmm_mad(); + os << "epilog, error getting Transfer Manager driver."; error_common: (nd.get_lcm())->trigger(LifeCycleManager::EPILOG_FAILURE,vid); @@ -722,19 +866,25 @@ error_common: void TransferManager::epilog_stop_action(int vid) { - ofstream xfr; - ostringstream os; - string xfr_name; + ofstream xfr; + ostringstream os; + string xfr_name; + string tm_mad; + string system_tm_mad; - VirtualMachine * vm; - Nebula& nd = Nebula::instance(); + VirtualMachine * vm; + Nebula& nd = Nebula::instance(); const TransferManagerDriver * tm_md; + vector attrs; + const VectorAttribute * disk; + int num; // ------------------------------------------------------------------------ // Setup & Transfer script // ------------------------------------------------------------------------ + system_tm_mad = nd.get_system_ds_tm_mad(); vm = vmpool->get(vid,true); @@ -748,7 +898,7 @@ void TransferManager::epilog_stop_action(int vid) goto error_history; } - tm_md = get(vm->get_tm_mad()); + tm_md = get(); if ( tm_md == 0 ) { @@ -764,12 +914,40 @@ void TransferManager::epilog_stop_action(int vid) } // ------------------------------------------------------------------------ - // Move image directory + // Move system directory and disks // ------------------------------------------------------------------------ + num = vm->get_template_attribute("DISK",attrs); - xfr << "MV "; - xfr << vm->get_hostname() << ":" << vm->get_remote_dir() << " "; - xfr << nd.get_nebula_hostname() << ":" << vm->get_local_dir() << endl; + for (int i=0 ; i < num ; i++, tm_mad="") + { + disk = 
dynamic_cast(attrs[i]); + + if ( disk == 0 ) + { + continue; + } + + tm_mad = disk->vector_value("TM_MAD"); + + if ( tm_mad.empty() ) + { + continue; + } + + //MV tm_mad host:remote_system_dir/disk.i fe:system_dir/disk.i + xfr << "MV " + << tm_mad << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." << i << " " + << nd.get_nebula_hostname() << ":" + << vm->get_system_dir() << "/disk." << i << endl; + } + + //MV system_tm_mad hostname:remote_system_dir fe:system_dir + xfr << "MV " + << system_tm_mad << " " + << vm->get_hostname() << ":" << vm->get_remote_system_dir() << " " + << nd.get_nebula_hostname() << ":" << vm->get_system_dir() << endl; xfr.close(); @@ -791,7 +969,7 @@ error_file: error_driver: os.str(""); - os << "epilog_stop, error getting driver " << vm->get_tm_mad(); + os << "epilog_stop, error getting Transfer Manager driver."; error_common: (nd.get_lcm())->trigger(LifeCycleManager::EPILOG_FAILURE,vid); @@ -807,17 +985,25 @@ error_common: void TransferManager::epilog_delete_action(int vid) { - ofstream xfr; - ostringstream os; - string xfr_name; + ofstream xfr; + ostringstream os; + string xfr_name; + string system_tm_mad; + string tm_mad; - VirtualMachine * vm; + VirtualMachine * vm; + Nebula& nd = Nebula::instance(); const TransferManagerDriver * tm_md; + const VectorAttribute * disk; + vector attrs; + int num; + // ------------------------------------------------------------------------ // Setup & Transfer script // ------------------------------------------------------------------------ + system_tm_mad = nd.get_system_ds_tm_mad(); vm = vmpool->get(vid,true); @@ -831,7 +1017,7 @@ void TransferManager::epilog_delete_action(int vid) goto error_history; } - tm_md = get(vm->get_tm_mad()); + tm_md = get(); if ( tm_md == 0 ) { @@ -846,11 +1032,38 @@ void TransferManager::epilog_delete_action(int vid) goto error_file; } - // ------------------------------------------------------------------------ - // Delete the remote VM 
Directory - // ------------------------------------------------------------------------ - - xfr << "DELETE " << vm->get_hostname() <<":"<< vm->get_remote_dir() << endl; + // ------------------------------------------------------------------------- + // Delete disk images and the remote system Directory + // ------------------------------------------------------------------------- + num = vm->get_template_attribute("DISK",attrs); + + for (int i=0 ; i < num ; i++, tm_mad="") + { + disk = dynamic_cast(attrs[i]); + + if ( disk == 0 ) + { + continue; + } + + tm_mad = disk->vector_value("TM_MAD"); + + if ( tm_mad.empty() ) + { + continue; + } + + //DELETE tm_mad host:remote_system_dir/disk.i + xfr << "DELETE " + << tm_mad << " " + << vm->get_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." << i << endl; + } + + //DELETE system_tm_mad hostname:remote_system_dir + xfr << "DELETE " + << system_tm_mad << " " + << vm->get_hostname() <<":"<< vm->get_remote_system_dir() << endl; xfr.close(); @@ -869,14 +1082,14 @@ error_file: os.str(""); os << "epilog_delete, could not open file: " << xfr_name; os << ". You may need to manually clean " << vm->get_hostname() - << ":" << vm->get_remote_dir(); + << ":" << vm->get_remote_system_dir(); goto error_common; error_driver: os.str(""); - os << "epilog_delete, error getting driver " << vm->get_vmm_mad(); + os << "epilog_delete, error getting driver Transfer Manager driver."; os << ". 
You may need to manually clean " << vm->get_hostname() - << ":" << vm->get_remote_dir(); + << ":" << vm->get_remote_system_dir(); error_common: vm->log("TM", Log::ERROR, os); @@ -890,18 +1103,27 @@ error_common: void TransferManager::epilog_delete_previous_action(int vid) { - ofstream xfr; - ostringstream os; - string xfr_name; + ofstream xfr; + ostringstream os; + string xfr_name; + string system_tm_mad; + string tm_mad; - VirtualMachine * vm; + VirtualMachine * vm; + Nebula& nd = Nebula::instance(); const TransferManagerDriver * tm_md; + const VectorAttribute * disk; + vector attrs; + int num; + // ------------------------------------------------------------------------ // Setup & Transfer script // ------------------------------------------------------------------------ + system_tm_mad = nd.get_system_ds_tm_mad(); + vm = vmpool->get(vid,true); if (vm == 0) @@ -914,7 +1136,7 @@ void TransferManager::epilog_delete_previous_action(int vid) goto error_history; } - tm_md = get(vm->get_previous_tm_mad()); + tm_md = get(); if ( tm_md == 0 ) { @@ -932,10 +1154,37 @@ void TransferManager::epilog_delete_previous_action(int vid) // ------------------------------------------------------------------------ // Delete the remote VM Directory // ------------------------------------------------------------------------ - - xfr << "DELETE " << vm->get_previous_hostname() <<":"<< vm->get_remote_dir() - << endl; + num = vm->get_template_attribute("DISK",attrs); + for (int i=0 ; i < num ; i++, tm_mad="") + { + disk = dynamic_cast(attrs[i]); + + if ( disk == 0 ) + { + continue; + } + + tm_mad = disk->vector_value("TM_MAD"); + + if ( tm_mad.empty() ) + { + continue; + } + + //DELETE tm_mad prev_host:remote_system_dir/disk.i + xfr << "DELETE " + << tm_mad << " " + << vm->get_previous_hostname() << ":" + << vm->get_remote_system_dir() << "/disk." 
<< i << endl; + } + + //DELTE system_tm_mad prev_host:remote_system_dir + xfr << "DELETE " + << system_tm_mad << " " + << vm->get_previous_hostname() <<":"<< vm->get_remote_system_dir() + << endl; + xfr.close(); tm_md->transfer(vid,xfr_name); @@ -953,14 +1202,14 @@ error_file: os.str(""); os << "epilog_delete, could not open file: " << xfr_name; os << ". You may need to manually clean " << vm->get_previous_hostname() - << ":" << vm->get_remote_dir(); + << ":" << vm->get_remote_system_dir(); goto error_common; error_driver: os.str(""); - os << "epilog_delete, error getting driver " << vm->get_vmm_mad(); + os << "epilog_delete, error getting driver Transfer Manager driver."; os << ". You may need to manually clean " << vm->get_previous_hostname() - << ":" << vm->get_remote_dir(); + << ":" << vm->get_remote_system_dir(); error_common: vm->log("TM", Log::ERROR, os); @@ -993,12 +1242,7 @@ void TransferManager::driver_cancel_action(int vid) return; } - if (!vm->hasHistory()) - { - goto error_history; - } - - tm_md = get(vm->get_tm_mad()); + tm_md = get(); if ( tm_md == 0 ) { @@ -1015,16 +1259,10 @@ void TransferManager::driver_cancel_action(int vid) return; -error_history: - os.str(""); - os << "driver_cancel, VM " << vid << " has no history"; - goto error_common; - error_driver: os.str(""); - os << "driver_cancel, error getting driver " << vm->get_vmm_mad(); + os << "driver_cancel, error getting driver Transfer Manager driver."; -error_common: vm->log("TM", Log::ERROR, os); vm->unlock(); @@ -1045,43 +1283,44 @@ void TransferManager::checkpoint_action(int vid) void TransferManager::load_mads(int uid) { - unsigned int i; - ostringstream oss; - const VectorAttribute * vattr; - int rc; - string name; - TransferManagerDriver * tm_driver = 0; + ostringstream oss; - oss << "Loading Transfer Manager drivers."; + int rc; + string name; + + const VectorAttribute * vattr = 0; + TransferManagerDriver * tm_driver = 0; + + oss << "Loading Transfer Manager driver."; 
NebulaLog::log("TM",Log::INFO,oss); - for(i=0,oss.str("");i 0 ) { - vattr = static_cast(mad_conf[i]); + vattr = static_cast(mad_conf[0]); + } - name = vattr->vector_value("NAME"); + if ( vattr == 0 ) + { + NebulaLog::log("TM",Log::ERROR,"Failed to load Transfer Manager driver."); + return; + } - oss << "\tLoading driver: " << name; - NebulaLog::log("VMM", Log::INFO, oss); + VectorAttribute tm_conf("TM_MAD",vattr->value()); - tm_driver = new TransferManagerDriver( - uid, - vattr->value(), - (uid != 0), - vmpool); + tm_conf.replace("NAME",transfer_driver_name); - if ( tm_driver == 0 ) - continue; + tm_driver = new TransferManagerDriver(uid, + tm_conf.value(), + (uid != 0), + vmpool); + rc = add(tm_driver); - rc = add(tm_driver); + if ( rc == 0 ) + { + oss.str(""); + oss << "\tTransfer manager driver loaded"; - if ( rc == 0 ) - { - oss.str(""); - oss << "\tDriver " << name << " loaded."; - - NebulaLog::log("TM",Log::INFO,oss); - } + NebulaLog::log("TM",Log::INFO,oss); } } diff --git a/src/tm/TransferManagerDriver.cc b/src/tm/TransferManagerDriver.cc index 6eb1e2b8c6..f50388ec7b 100644 --- a/src/tm/TransferManagerDriver.cc +++ b/src/tm/TransferManagerDriver.cc @@ -142,7 +142,7 @@ void TransferManagerDriver::protocol( getline(is,info); os.str(""); - os << "Error excuting image transfer script"; + os << "Error executing image transfer script"; if (!info.empty() && info[0] != '-') { diff --git a/src/tm_mad/TMScript.rb b/src/tm_mad/TMScript.rb deleted file mode 100644 index 538982bf2e..0000000000 --- a/src/tm_mad/TMScript.rb +++ /dev/null @@ -1,217 +0,0 @@ - -# -------------------------------------------------------------------------- # -# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this file except in compliance with the License. 
You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -#--------------------------------------------------------------------------- # - -require 'pp' -require 'open3' -require 'CommandManager' - -=begin rdoc - -TMPlugin holds the name of the scripts that will be used for each -TransferManager script command. It is basically a hash where keys -are the names of the commands (uppercase) and contain the path of -the script that will be executed for that command. - -It also contains some methods to execute the scripts, get the output -of the script (success/failure, error and log messages). - -=end -class TMPlugin < Hash - # If a +scripts_file+ is supplied commands are loaded from it. - def initialize(scripts_file=nil) - # Pass nil default value to hash initialization or it will use - # scripts file as the default value if the key does not exist - super(nil) - load_scripts(scripts_file) if scripts_file - end - - # Executes the script associated with the +command+ using - # specified arguments. +logger+ is a proc that takes a message - # as its unique argument. - # - # Returns: - # * It will return +nil+ if the +command+ is not defined. - # * LocalCommand object (exit code and - # error message in case of failure) - # - # Note: the exit code will be written like this: - # ExitCode: 0 - def execute(logger, command, *args) - # Command is not defined - return nil if !self[command] - - # Generates the line to call the script with all the - # arguments provided. 
- cmd=[self[command], *args].join(" ") - - local_command = LocalCommand.run(cmd, logger) - - logger.call(local_command.stdout) if logger - - local_command - end - - private - - # Loads definitions of commands from the configuration file - def load_scripts(scripts_file) - scripts_text="" - - if File.exist?(scripts_file) - scripts_text = File.read(scripts_file) - else - STDERR.puts("Can not open #{scripts_file}") - STDERR.flush - return - end - - one_location=ENV['ONE_LOCATION'] - - if one_location == nil - tm_commands_location = "/usr/lib/one/tm_commands/" - else - tm_commands_location = one_location + "/lib/tm_commands/" - end - - scripts_text.each_line {|line| - case line - when /^\s*(#.*)?$/ - # skip empty or commented lines - next - when /^\s*(\w+)\s*=\s*(.*)\s*$/ - command = $1.strip.upcase - path = $2.strip - - # Prepend default location for tm commands if the path does not - # start with / - path = tm_commands_location+path if path[0]!=?/ - - self[command] = path - else - STDERR.puts("Can not parse line: #{line}") - end - } - end -end - -# This class will parse and execute TransferManager scripts. -class TMScript - attr_accessor :lines - - # +script_text+ contains the script to be executed. - # +logger+ is a lambda that receives a message and sends it - # to OpenNebula server - def initialize(script_text, logger=nil) - @lines = Array.new - @logger = logger - - parse_script(script_text) - end - - # Executes the script using the TMPlugin specified by +plugin+. - # Returns an array where first element tells if succeded and the - # second one is the error message in case of failure. - def execute(plugin) - return [true,""] if @lines.empty? 
- - result = @lines.each {|line| - res = plugin.execute(@logger, *line) - - if !res - @logger.call("COMMAND not found: #{line.join(" ")}.") if @logger - - res = [false, "COMMAND not found: #{line.join(" ")}."] - else - if res.code == 0 - res = [true, ""] - else - res = [false, res.get_error_message] - end - end - - # do not continue if command failed - break res if !res[0] - } - - result - end - - private - - # Gets commands from the script and populates +@lines+ - def parse_script(script_text) - script_text.each_line {|line| - # skip if the line is commented - next if line.match(/^\s*#/) - # skip if the line is empty - next if line.match(/^\s*$/) - - command=line.split(" ") - command[0].upcase! - @lines<< command - } - end -end - - -if $0 == __FILE__ - -=begin - require 'one_log' - - logger=ONELog.new - - log_proc=lambda{|message| - logger.log("TRANSFER", "0", message) - } - - log_proc.call(<<-EOT) - Multiple - lines log - - thingy - EOT - -=end - - log_proc=lambda{|message| - puts message - } - - script_text=" - - CLONE localhost:/tmp/source.img ursa:/tmp/one_jfontan/0/hda.img - - - CLONE localhost:/tmp/source.img ursa:/tmp/one_jfontan/1/hda.img - - WRONG the program for WRONG does not exist - ERROR a command not in plugin - - " - - plugin=TMPlugin.new - plugin["OTHER"]="./tm_clone.sh" - plugin["CLONE"]="echo" - plugin["WRONG"]="it_does_not_exist" - - scr=TMScript.new(script_text, log_proc) - pp scr.lines - - - scr.execute(plugin) -end diff --git a/src/tm_mad/common/context b/src/tm_mad/common/context new file mode 100755 index 0000000000..d519110140 --- /dev/null +++ b/src/tm_mad/common/context @@ -0,0 +1,81 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. 
You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# context context.sh file1 file2 ... fileN host:remote_system_ds/disk.i +# - context.sh file are the contents of the context ISO +# - host is the target host to deploy the VM +# - remote_system_ds is the path for the system datastore in the host + +while (( "$#" )); do + if [ "$#" == "1" ]; then + DST=$1 + else + SRC="$SRC $1" + fi + shift +done + +if [ -z "${ONE_LOCATION}" ]; then + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh +else + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh +fi + +. 
$TMCOMMON + +#------------------------------------------------------------------------------- +# Set dst path and dirs +#------------------------------------------------------------------------------- +DST_PATH=`arg_path $DST` +DST_HOST=`arg_host $DST` +DST_DIR=`dirname $DST_PATH` + +ssh_make_path $DST_HOST $DST_DIR + +#------------------------------------------------------------------------------- +# Build the Context Block device (locally) and copy it remotely +#------------------------------------------------------------------------------- +log "Generating context block device at $DST" + +VM_ID=`basename $DST_DIR` +ISO_DIR="$DS_DIR/.isofiles/$VM_ID" +ISO_FILE="$ISO_DIR/$VM_ID.iso" + +exec_and_log "mkdir -p $ISO_DIR" "Could not create tmp dir to make context dev" + +for f in $SRC; do + case $f in + http://*) + exec_and_log "$WGET -P $ISO_DIR $f" "Error downloading $f" + ;; + *) + exec_and_log "cp -R $f $ISO_DIR" "Error copying $f to $ISO_DIR" + ;; + esac +done + +exec_and_log "$MKISOFS -o $ISO_FILE -J -R $ISO_DIR" "Error creating iso fs" + +exec_and_log "$SCP $ISO_FILE $DST" "Error copying context ISO to $DST" + +# Creates symbolic link to add a .iso suffix, needed for VMware CDROMs +ssh_exec_and_log $DST_HOST "$LN -s $DST_PATH $DST_PATH.iso" "Error creating ISO symbolic link" + +rm -rf $ISO_DIR > /dev/null 2>&1 + +exit 0 diff --git a/src/tm_mad/ssh/tm_mv.sh b/src/tm_mad/common/delete similarity index 64% rename from src/tm_mad/ssh/tm_mv.sh rename to src/tm_mad/common/delete index e7c7e2d668..f0c68244a6 100755 --- a/src/tm_mad/ssh/tm_mv.sh +++ b/src/tm_mad/common/delete @@ -16,33 +16,32 @@ # limitations under the License. 
# #--------------------------------------------------------------------------- # -SRC=$1 -DST=$2 +# DELETE +# - host is the target host to deploy the VM +# - remote_system_ds is the path for the system datastore in the host + +DST=$1 if [ -z "${ONE_LOCATION}" ]; then - TMCOMMON=/usr/lib/one/mads/tm_common.sh + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh else - TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh fi . $TMCOMMON -SRC_PATH=`arg_path $SRC` +#------------------------------------------------------------------------------- +# Return if deleting a disk, we will delete them when removing the +# remote_system_ds directory for the VM (remotely) +#------------------------------------------------------------------------------- DST_PATH=`arg_path $DST` - -SRC_HOST=`arg_host $SRC` DST_HOST=`arg_host $DST` -DST_DIR=`dirname $DST_PATH` - -if full_src_and_dst_equal; then - log "Not moving $SRC to $DST, they are the same path" -else - log "Moving $SRC_PATH" - exec_and_log "$SSH $DST_HOST mkdir -p $DST_DIR" \ - "Unable to create directory $DST_DIR" - exec_and_log "$SCP -r $SRC $DST" \ - "Could not copy $SRC to $DST" - exec_and_log "$SSH $SRC_HOST rm -rf $SRC_PATH" +if [ `is_disk $DST_PATH` -eq 1 ]; then + exit 0 fi +log "Deleting $DST_PATH" +ssh_exec_and_log $DST_HOST "rm -rf $DST_PATH" "Error deleting $DST_PATH" + +exit 0 diff --git a/src/tm_mad/shared/tm_sharedrc b/src/tm_mad/common/dummy.sh old mode 100644 new mode 100755 similarity index 98% rename from src/tm_mad/shared/tm_sharedrc rename to src/tm_mad/common/dummy.sh index 1e12168116..6c1cd9257d --- a/src/tm_mad/shared/tm_sharedrc +++ b/src/tm_mad/common/dummy.sh @@ -1,3 +1,5 @@ +#!/bin/sh + # -------------------------------------------------------------------------- # # Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # # # @@ -13,3 +15,5 @@ # See the License for the specific language governing permissions and # # limitations under the License. 
# #--------------------------------------------------------------------------- # + +exit 0 diff --git a/src/tm_mad/ssh/tm_mkimage.sh b/src/tm_mad/common/mkimage similarity index 58% rename from src/tm_mad/ssh/tm_mkimage.sh rename to src/tm_mad/common/mkimage index ace8017ca8..3471e81410 100755 --- a/src/tm_mad/ssh/tm_mkimage.sh +++ b/src/tm_mad/common/mkimage @@ -16,28 +16,46 @@ # limitations under the License. # #--------------------------------------------------------------------------- # -if [ -z "${ONE_LOCATION}" ]; then - TMCOMMON=/usr/lib/one/mads/tm_common.sh -else - TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh -fi - -. $TMCOMMON +# mkimage size format host:remote_system_ds/disk.i size +# - size in MB of the image +# - format for the image +# - host is the target host to deploy the VM +# - remote_system_ds is the path for the system datastore in the host SIZE=$1 FSTYPE=$2 DST=$3 +if [ -z "${ONE_LOCATION}" ]; then + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh +else + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh +fi + +. 
$TMCOMMON + +#------------------------------------------------------------------------------- +# Set dst path and dir +#------------------------------------------------------------------------------- DST_PATH=`arg_path $DST` DST_HOST=`arg_host $DST` DST_DIR=`dirname $DST_PATH` +ssh_make_path $DST_HOST $DST_DIR + +#------------------------------------------------------------------------------- +# Make the new image (file-based) +#------------------------------------------------------------------------------- MKFS_CMD=`mkfs_command $DST_PATH $FSTYPE` -exec_and_log "$SSH $DST_HOST mkdir -p $DST_DIR" \ - "Error creating directory $DST_DIR" -exec_and_log "$SSH $DST_HOST $DD if=/dev/zero of=$DST_PATH bs=1 count=1 seek=${SIZE}M" \ - "Could not create image $DST_PATH" -exec_and_log "$SSH $DST_HOST $MKFS_CMD" \ - "Unable to create filesystem $FSTYPE in $DST_PATH" -exec_and_log "$SSH $DST_HOST chmod a+rw $DST_PATH" +MKSCRIPT=$(cat < +# - host is the target host to deploy the VM +# - remote_system_ds is the path for the system datastore in the host + +DST=$1 if [ -z "${ONE_LOCATION}" ]; then - TMCOMMON=/usr/lib/one/mads/tm_common.sh + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh else - TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh fi . 
$TMCOMMON +#------------------------------------------------------------------------------- +# Return if deleting a disk, we will delete them when removing the +# remote_system_ds directory for the VM (remotely) +#------------------------------------------------------------------------------- DST_PATH=`arg_path $DST` DST_HOST=`arg_host $DST` -DST_DIR=`dirname $DST_PATH` - -log "Creating ${SIZE}Mb image in $DST_PATH" -exec_and_log "$SSH $DST_HOST mkdir -p $DST_DIR" -exec_and_log "$SSH $DST_HOST $DD if=/dev/zero of=$DST_PATH bs=1 count=1 seek=${SIZE}M" \ - "Could not create image file $DST_PATH" - -log "Initializing swap space" -exec_and_log "$SSH $DST_HOST $MKSWAP $DST_PATH" \ - "Could not create swap on $DST_PATH" - -exec_and_log "$SSH $DST_HOST chmod a+w $DST_PATH" +if [ `is_disk $DST_PATH` -eq 1 ]; then + # Disk + LOGOUT_CMD=$(cat < +# +# - hostX is the target host to deploy the VM +# - system_ds is the path for the system datastore in the host + +SRC=$1 +DST=$2 + + +if [ -z "${ONE_LOCATION}" ]; then + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh +else + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh +fi + +. 
$TMCOMMON + +DRIVER_PATH=$(dirname $0) + +source ${DRIVER_PATH}/../../datastore/iscsi/iscsi.conf + +# {:path=> +# "/var/lib/one/remotes/tm/iscsi/mv o202:/var/lib/one//datastores/0/3/disk.0 rama:/var/lib/one/datastores/0/3/disk.0", +# :result=>"SUCCESS", +# :info=>"-"} + +# {:path=> +# "/var/lib/one/remotes/tm/shared/mv o202:/var/lib/one//datastores/0/3 rama:/var/lib/one/datastores/0/3", +# :result=>"SUCCESS", +# :info=>"-"} + +#------------------------------------------------------------------------------- +# Return if moving a disk, we will move them when moving the whole system_ds +# directory for the VM +#------------------------------------------------------------------------------- +SRC_PATH=`arg_path $SRC` +DST_PATH=`arg_path $DST` + +SRC_HOST=`arg_host $SRC` +DST_HOST=`arg_host $DST` + +DST_DIR=`dirname $DST_PATH` + +if [ `is_disk $SRC_PATH` -eq 0 ]; then + ssh_make_path $DST_HOST $DST_DIR + + log "Moving $SRC to $DST" + + exec_and_log "$SCP -r $SRC $DST" "Could not copy $SRC to $DST" + + ssh_exec_and_log "$SRC_HOST" "rm -rf $SRC_PATH" \ + "Could not remove $SRC_HOST:$SRC_PATH" + + exit 0 +fi + +if [ "$SRC" == "$DST" ]; then + log "Not moving $SRC to $DST, they are the same path" + exit 0 +fi + +if is_iscsi "$SRC_HOST"; then + log "Logging out of $IQN in $SRC_HOST" + + LOGOUT_CMD=$(cat < true + :concurrency => 15, + :threaded => true, + :retries => 0 }.merge!(options) - super('', @options) + super('tm/', @options) - @plugin=plugin + if tm_type == nil + @types = Dir["#{@local_scripts_path}/*/"].map do |d| + d.split('/')[-1] + end + elsif tm_type.class == String + @types = [tm_type] + else + @types = tm_type + end # register actions register_action(:TRANSFER, method("action_transfer")) end - def action_transfer(number, script_file) - script_text="" + # Driver Action: TRANSFER id script_file + # Executes a transfer script + def action_transfer(id, script_file) - if File.exist?(script_file) - open(script_file) {|f| - script_text=f.read - } + script = 
parse_script(script_file) - script=TMScript.new(script_text, log_method(number)) - res=script.execute(@plugin) - - if res[0] - send_message("TRANSFER", RESULT[:success], number) - else - send_message("TRANSFER", RESULT[:failure], number, res[1]) - end - else - send_message("TRANSFER", RESULT[:failure], number, - "Transfer file not found: #{script_file}") + if script.nil? + send_message("TRANSFER", + RESULT[:failure], + id, + "Transfer file '#{script_file}' does not exist") + return end + + script.each { |command| + result, info = do_transfer_action(id, command) + + if result == RESULT[:failure] + send_message("TRANSFER", result, id, info) + return + end + } + + send_message("TRANSFER", RESULT[:success], id) end + private + + # Parse a script file + # @param sfile [String] path to the transfer script + # @return lines [Array] with the commands of the script. Each command is an + # array itself. + def parse_script(sfile) + return nil if !File.exist?(sfile) + + stext = File.read(sfile) + lines = Array.new + + stext.each_line {|line| + next if line.match(/^\s*#/) # skip if the line is commented + next if line.match(/^\s*$/) # skip if the line is empty + + command = line.split(" ") + + lines << command + } + + return lines + end + + # Executes a single transfer action (command), as returned by the parse + # method + # @param id [String] with the OpenNebula ID for the TRANSFER action + # @param command [Array] + def do_transfer_action(id, command) + cmd = command[0].downcase + tm = command[1] + args = command[2..-1].join(" ") + + if not @types.include?(tm) + return RESULT[:failure], "Transfer Driver '#{tm}' not available" + end + + path = File.join(@local_scripts_path, tm, cmd) + path << " " << args + + rc = LocalCommand.run(path, log_method(id)) + + result, info = get_info_from_execution(rc) + + return result, info + end end -tm_conf=ARGV[0] +################################################################################ 
+################################################################################ +# TransferManager Driver Main program +################################################################################ +################################################################################ -if !tm_conf - puts "You need to specify config file." +opts = GetoptLong.new( + [ '--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT ], + [ '--tm-types', '-d', GetoptLong::OPTIONAL_ARGUMENT ] +) + +tm_type = nil +threads = 15 + +begin + opts.each do |opt, arg| + case opt + when '--threads' + threads = arg.to_i + when '--tm-types' + tm_type = arg.split(',').map {|a| a.strip } + end + end +rescue Exception => e exit(-1) end -tm_conf=ETC_LOCATION+tm_conf if tm_conf[0] != ?/ - -plugin=TMPlugin.new(tm_conf) - -tm=TransferManager.new(plugin, - :concurrency => 15) - -tm.start_driver - - - - +tm_driver = TransferManagerDriver.new(tm_type, :concurrency => threads) +tm_driver.start_driver diff --git a/src/tm_mad/shared/clone b/src/tm_mad/shared/clone new file mode 100755 index 0000000000..c5126bbeeb --- /dev/null +++ b/src/tm_mad/shared/clone @@ -0,0 +1,67 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. 
# +#--------------------------------------------------------------------------- # + +# clone fe:SOURCE host:remote_system_ds/disk.i size +# - fe is the front-end hostname +# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk +# - host is the target host to deploy the VM +# - remote_system_ds is the path for the system datastore in the host + +SRC=$1 +DST=$2 + +if [ -z "${ONE_LOCATION}" ]; then + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh +else + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh +fi + +. $TMCOMMON + +#------------------------------------------------------------------------------- +# Set dst path and dir +#------------------------------------------------------------------------------- +SRC_PATH=`arg_path $SRC` +SRC_PATH="../../${SRC_PATH##"$DS_DIR/"}" + +DST_PATH=`arg_path $DST` +DST_HOST=`arg_host $DST` +DST_DIR=`dirname $DST_PATH` + +ssh_make_path $DST_HOST $DST_DIR + +#------------------------------------------------------------------------------- +# Clone (cp) SRC into DST +#------------------------------------------------------------------------------- +case $SRC in +http://*) + log "Downloading $SRC into $DST_PATH" + ssh_exec_and_log $DST_HOST \ + "$WGET -O $DST_PATH $SRC" \ + "Error downloading $SRC" + ;; + +*) + log "Cloning $SRC_PATH in $DST" + ssh_exec_and_log $DST_HOST \ + "cd $DST_DIR; cp -r $SRC_PATH $DST_PATH" \ + "Error copying $SRC to $DST" + ;; +esac + +exit 0 diff --git a/src/tm_mad/shared/context b/src/tm_mad/shared/context new file mode 120000 index 0000000000..921312e950 --- /dev/null +++ b/src/tm_mad/shared/context @@ -0,0 +1 @@ +../common/context \ No newline at end of file diff --git a/src/tm_mad/shared/delete b/src/tm_mad/shared/delete new file mode 120000 index 0000000000..230e56774f --- /dev/null +++ b/src/tm_mad/shared/delete @@ -0,0 +1 @@ +../common/delete \ No newline at end of file diff --git a/src/tm_mad/shared/ln b/src/tm_mad/shared/ln new file mode 100755 index 0000000000..2a9395d484 --- 
/dev/null +++ b/src/tm_mad/shared/ln @@ -0,0 +1,83 @@ +#!/bin/bash + +# -------------------------------------------------------------------------- # +# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); you may # +# not use this file except in compliance with the License. You may obtain # +# a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +#--------------------------------------------------------------------------- # + +# ln fe:SOURCE host:remote_system_ds/disk.i +# - fe is the front-end hostname +# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk +# - host is the target host to deploy the VM +# - remote_system_ds is the path for the system datastore in the host + +SRC=$1 +DST=$2 + +if [ -z "${ONE_LOCATION}" ]; then + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh +else + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh +fi + +. $TMCOMMON + +#------------------------------------------------------------------------------- +# Set dst path and dir +#------------------------------------------------------------------------------- +SRC_PATH=`arg_path $SRC` +SRC_PATH="../../${SRC_PATH##"$DS_DIR/"}" + +DST_PATH=`arg_path $DST` +DST_HOST=`arg_host $DST` +DST_DIR=`dirname $DST_PATH` + +#------------------------------------------------------------------------------- +# Link (ln) SRC into DST +#------------------------------------------------------------------------------- + +# Is it a file or a folder (VMware)? 
+if [ -d `arg_path $SRC` ]; then + ssh_make_path $DST_HOST $DST_PATH + + # It's a folder, make links for all elements + SRC_FOLDER_NAME=`basename $SRC_PATH` + SRC_WITH_NO_FOLDER=`dirname $SRC_PATH` + SRC_DS_NAME=`basename $SRC_WITH_NO_FOLDER` + REL_SRC_PATH="../../../$SRC_DS_NAME/$SRC_FOLDER_NAME" + + log "Link all files in $SRC_PATH to $DST_PATH" + +LINK_SCRIPT=$(cat < +# +# - hostX is the target host to deploy the VM +# - system_ds is the path for the system datastore in the host + +SRC=$1 +DST=$2 + +if [ -z "${ONE_LOCATION}" ]; then + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh +else + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh +fi + +. $TMCOMMON + +#------------------------------------------------------------------------------- +# Return if moving a disk, we will move them when moving the whole system_ds +# directory for the VM +#------------------------------------------------------------------------------- +SRC_PATH=`arg_path $SRC` +DST_PATH=`arg_path $DST` + +SRC_HOST=`arg_host $SRC` +DST_HOST=`arg_host $DST` + +DST_DIR=`dirname $DST_PATH` + +SRC_DS_DIR=`dirname $SRC_PATH` +SRC_VM_DIR=`basename $SRC_PATH` + +if [ `is_disk $DST_PATH` -eq 1 ]; then + exit 0 +fi + +if [ "$SRC" == "$DST" ]; then + log "Not moving $SRC to $DST, they are the same path" + exit 0 +fi + +ssh_make_path "$DST_HOST" "$DST_DIR" + +log "Moving $SRC to $DST" + +ssh_exec_and_log "$DST_HOST" "rm -rf '$DST_PATH'" \ + "Error removing target path to prevent overwrite errors" + +TAR_COPY="$SSH $SRC_HOST '$TAR -C $SRC_DS_DIR -cf - $SRC_VM_DIR'" +TAR_COPY="$TAR_COPY | $SSH $DST_HOST '$TAR -C $DST_DIR -xf -'" + +exec_and_log "eval $TAR_COPY" "Error copying disk directory to target host" + +exec_and_log "$SSH $SRC_HOST rm -rf $SRC_PATH" + +exit 0 diff --git a/src/tm_mad/shared/tm_clone.sh b/src/tm_mad/ssh/mvds similarity index 57% rename from src/tm_mad/shared/tm_clone.sh rename to src/tm_mad/ssh/mvds index 5cc373b86e..47c66b6b41 100755 --- a/src/tm_mad/shared/tm_clone.sh +++ 
b/src/tm_mad/ssh/mvds @@ -16,46 +16,39 @@ # limitations under the License. # #--------------------------------------------------------------------------- # +# mvds host:remote_system_ds/disk.i fe:SOURCE +# - fe is the front-end hostname +# - SOURCE is the path of the disk image in the form DS_BASE_PATH/disk +# - host is the target host to deploy the VM +# - remote_system_ds is the path for the system datastore in the host + SRC=$1 DST=$2 if [ -z "${ONE_LOCATION}" ]; then - TMCOMMON=/usr/lib/one/mads/tm_common.sh + TMCOMMON=/var/lib/one/remotes/tm/tm_common.sh else - TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh + TMCOMMON=$ONE_LOCATION/var/remotes/tm/tm_common.sh fi . $TMCOMMON -get_vmdir +set_ds_location +#------------------------------------------------------------------------------- +# Set dst path and dir +#------------------------------------------------------------------------------- SRC_PATH=`arg_path $SRC` DST_PATH=`arg_path $DST` -fix_paths +DST_PATH="$RMT_DS_DIR/${DST_PATH##"$DS_DIR/"}" -log_debug "$1 $2" -log_debug "DST: $DST_PATH" +SRC_HOST=`arg_host $SRC` -DST_DIR=`dirname $DST_PATH` - -log "Creating directory $DST_DIR" -exec_and_log "mkdir -p $DST_DIR" -exec_and_log "chmod a+w $DST_DIR" - -case $SRC in -http://*) - log "Downloading $SRC" - exec_and_log "$WGET -O $DST_PATH $SRC" \ - "Error downloading $SRC" - ;; - -*) - log "Cloning $SRC_PATH" - exec_and_log "cp -r $SRC_PATH $DST_PATH" \ - "Error copying $SRC to $DST" - ;; -esac - -exec_and_log "chmod a+rw $DST_PATH" +#------------------------------------------------------------------------------- +# Move the image back to the datastore +#------------------------------------------------------------------------------- +log "Moving $SRC_PATH to datastore as $DST_PATH" +exec_and_log "$SCP -r $SRC $DST" "Error copying $SRC to $DST" +exit 0 diff --git a/src/tm_mad/ssh/tm_context.sh b/src/tm_mad/ssh/tm_context.sh deleted file mode 100755 index 0827fdef9f..0000000000 --- a/src/tm_mad/ssh/tm_context.sh +++ 
/dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -# -------------------------------------------------------------------------- # -# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this file except in compliance with the License. You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -#--------------------------------------------------------------------------- # - -while (( "$#" )); do - if [ "$#" == "1" ]; then - DST=$1 - else - SRC="$SRC $1" - fi - shift -done - - -if [ -z "${ONE_LOCATION}" ]; then - TMCOMMON=/usr/lib/one/mads/tm_common.sh -else - TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh -fi - -. 
$TMCOMMON - - -DST_PATH=`arg_path $DST` -DST_DIR=`dirname $DST_PATH` -DST_FILE=`basename $DST_PATH` -DST_HASH=`echo -n $DST | $MD5SUM | $AWK '{print $1}'` -if [ -z "$ONE_LOCATION" ]; then - TMP_DIR="/var/lib/one/$DST_HASH" -else - TMP_DIR="$ONE_LOCATION/var/$DST_HASH" -fi -ISO_DIR="$TMP_DIR/isofiles" - - -exec_and_log "mkdir -p $ISO_DIR" \ - "Error creating directory $ISO_DIR" - -for f in $SRC; do - case $f in - http://*) - exec_and_log "$WGET -P $ISO_DIR $f" \ - "Error downloading $f" - ;; - - *) - exec_and_log "cp -R $f $ISO_DIR" \ - "Error copying $f to $ISO_DIR" - ;; - esac -done - -exec_and_log "$MKISOFS -o $TMP_DIR/$DST_FILE -J -R $ISO_DIR" \ - "Error creating iso fs" -exec_and_log "$SCP $TMP_DIR/$DST_FILE $DST" \ - "Error copying $TMP_DIR/$DST_FILE to $DST" -exec_and_log "rm -rf $TMP_DIR" \ - "Error deleting $TMP_DIR" diff --git a/src/tm_mad/ssh/tm_delete.sh b/src/tm_mad/ssh/tm_delete.sh deleted file mode 100755 index 9046c19414..0000000000 --- a/src/tm_mad/ssh/tm_delete.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# -------------------------------------------------------------------------- # -# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this file except in compliance with the License. You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. 
# -#--------------------------------------------------------------------------- # - -SRC=$1 -DST=$2 - -if [ -z "${ONE_LOCATION}" ]; then - TMCOMMON=/usr/lib/one/mads/tm_common.sh -else - TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh -fi - -. $TMCOMMON - -SRC_PATH=`arg_path $SRC` -SRC_HOST=`arg_host $SRC` - -log "Deleting $SRC_PATH" -exec_and_log "$SSH $SRC_HOST rm -rf $SRC_PATH" \ - "Error deleting $SRC_PATH" diff --git a/src/tm_mad/ssh/tm_ln.sh b/src/tm_mad/ssh/tm_ln.sh deleted file mode 100755 index 40d8e6f2e5..0000000000 --- a/src/tm_mad/ssh/tm_ln.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# -------------------------------------------------------------------------- # -# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this file except in compliance with the License. You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -#--------------------------------------------------------------------------- # - -SRC=$1 -DST=$2 - -if [ -z "${ONE_LOCATION}" ]; then - TMCOMMON=/usr/lib/one/mads/tm_common.sh - TM_COMMANDS_LOCATION=/usr/lib/one/tm_commands/ -else - TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh - TM_COMMANDS_LOCATION=$ONE_LOCATION/lib/tm_commands/ -fi - -. 
$TMCOMMON - -log "Link $SRC_PATH (non shared dir, will clone)" -#exec_and_log "ln -s $SRC_PATH $DST_PATH" -exec $TM_COMMANDS_LOCATION/ssh/tm_clone.sh $SRC $DST diff --git a/src/tm_mad/tm_common.sh b/src/tm_mad/tm_common.sh index b5d204afe8..4aeeb7ad84 100644 --- a/src/tm_mad/tm_common.sh +++ b/src/tm_mad/tm_common.sh @@ -16,84 +16,26 @@ export LANG=C +# ------------------------------------------------------------------------------ +# Set enviroment for the tm drivers (bash-based) +# ------------------------------------------------------------------------------ if [ -z "$ONE_LOCATION" ]; then ONE_LOCAL_VAR=/var/lib/one ONE_LIB=/usr/lib/one + DS_DIR=/var/lib/one/datastores else ONE_LOCAL_VAR=$ONE_LOCATION/var ONE_LIB=$ONE_LOCATION/lib + DS_DIR=$ONE_LOCATION/var/datastores fi ONE_SH=$ONE_LIB/sh . $ONE_SH/scripts_common.sh - - -if [ "x$(uname -s)" = "xLinux" ]; then - SED="$SED -r" -else - SED="/usr/bin/sed -E" -fi - -function get_vmdir -{ - VMDIR=`grep '^VM_DIR=' $ONE_LOCAL_VAR/config | cut -d= -f2` - fix_var_slashes -} - -# Takes out uneeded slashes. 
Repeated and final directory slashes: -# /some//path///somewhere/ -> /some/path/somewhere -function fix_dir_slashes -{ - dirname "$1/file" | $SED 's/\/+/\//g' -} - -function get_compare_target -{ - echo "$1" | $SED 's/\/+/\//g' | $SED 's/\/images$//' -} - -function full_src_and_dst_equal -{ - s=`get_compare_target "$SRC"` - d=`get_compare_target "$DST"` - - [ "$s" == "$d" ] - -} - -function fix_var_slashes -{ - ONE_LOCAL_VAR=`fix_dir_slashes "$ONE_LOCAL_VAR"` - VMDIR=`fix_dir_slashes "$VMDIR"` -} - -function fix_paths -{ - if [ "x$ONE_LOCAL_VAR" != "x$VMDIR" ]; then - SRC_PATH=`fix_dir_slashes "$SRC_PATH"` - SRC_PATH=${SRC_PATH/$VMDIR/$ONE_LOCAL_VAR} - DST_PATH=`fix_dir_slashes "$DST_PATH"` - DST_PATH=${DST_PATH/$VMDIR/$ONE_LOCAL_VAR} - fi -} - -function fix_src_path -{ - if [ "x$ONE_LOCAL_VAR" != "x$VMDIR" ]; then - SRC_PATH=`fix_dir_slashes "$SRC_PATH"` - SRC_PATH=${SRC_PATH/$VMDIR/$ONE_LOCAL_VAR} - fi -} - -function fix_dst_path -{ - if [ "x$ONE_LOCAL_VAR" != "x$VMDIR" ]; then - DST_PATH=`fix_dir_slashes "$DST_PATH"` - DST_PATH=${DST_PATH/$VMDIR/$ONE_LOCAL_VAR} - fi -} +# ------------------------------------------------------------------------------ +# Function to get hosts and paths from arguments +# ------------------------------------------------------------------------------ # Gets the host from an argument function arg_host @@ -104,8 +46,42 @@ function arg_host # Gets the path from an argument function arg_path { - echo $1 | $SED 's/^[^:]*:(.*)$/\1/' + ARG_PATH=`echo $1 | $SED 's/^[^:]*:(.*)$/\1/'` + fix_dir_slashes "$ARG_PATH" } +#Return the DATASTORE_LOCATION from OpenNebula configuration +function set_ds_location +{ + RMT_DS_DIR=`$GREP '^DATASTORE_LOCATION=' $ONE_LOCAL_VAR/config | cut -d= -f2` + RMT_DS_DIR=`fix_dir_slashes $RMT_DS_DIR` + export RMT_DS_DIR +} +#Return 1 if the first argument is a disk +function is_disk +{ + echo "$1" | $GREP '/disk\.[0-9]\+' > /dev/null 2>&1 + + if [ $? 
-eq 0 ]; then + echo "1" + else + echo "0" + fi +} + +#Makes path src ($1) relative to dst ($2) +function make_relative { + src=$1 + dst=$2 + + common=$dst + + while [ -z "`echo $src | grep -E "^$common"`" ]; do + common=`dirname $common` + dots="../$dots" + done + + echo $dots${src#$common/} +} diff --git a/src/tm_mad/vmware/tm_clone.sh b/src/tm_mad/vmware/clone similarity index 100% rename from src/tm_mad/vmware/tm_clone.sh rename to src/tm_mad/vmware/clone diff --git a/src/tm_mad/vmware/tm_context.sh b/src/tm_mad/vmware/context similarity index 100% rename from src/tm_mad/vmware/tm_context.sh rename to src/tm_mad/vmware/context diff --git a/src/tm_mad/vmware/functions.sh b/src/tm_mad/vmware/functions.sh index d55effbd78..e66d89ee44 100644 --- a/src/tm_mad/vmware/functions.sh +++ b/src/tm_mad/vmware/functions.sh @@ -24,7 +24,7 @@ function fix_iso { if [ $? -eq 0 ]; then bname=`basename $dst_path` exec_and_log "ln -s $bname $dst_path/$bname.iso" \ - "Can not link ISO file." + "Cannot link ISO file." fi fi } diff --git a/src/tm_mad/vmware/tm_ln.sh b/src/tm_mad/vmware/ln similarity index 100% rename from src/tm_mad/vmware/tm_ln.sh rename to src/tm_mad/vmware/ln diff --git a/src/tm_mad/vmware/tm_mv.sh b/src/tm_mad/vmware/mv similarity index 100% rename from src/tm_mad/vmware/tm_mv.sh rename to src/tm_mad/vmware/mv diff --git a/src/tm_mad/vmware/tm_vmware.conf b/src/tm_mad/vmware/tm_vmware.conf deleted file mode 100644 index 0de9eeb95e..0000000000 --- a/src/tm_mad/vmware/tm_vmware.conf +++ /dev/null @@ -1,23 +0,0 @@ -# ---------------------------------------------------------------------------- # -# Copyright 2010-2011, C12G Labs S.L # -# # -# Licensed under the Apache License, Version 2.0 (the "License"); you may # -# not use this file except in compliance with the License. 
You may obtain # -# a copy of the License at # -# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# # -# Unless required by applicable law or agreed to in writing, software # -# distributed under the License is distributed on an "AS IS" BASIS, # -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and # -# limitations under the License. # -# ---------------------------------------------------------------------------- # - -CLONE = vmware/tm_clone.sh -LN = vmware/tm_ln.sh -MKSWAP = dummy/tm_dummy.sh -MKIMAGE = dummy/tm_dummy.sh -DELETE = shared/tm_delete.sh -MV = vmware/tm_mv.sh -CONTEXT = vmware/tm_context.sh diff --git a/src/um/UserPool.cc b/src/um/UserPool.cc index 49598ed499..d06e6d90d6 100644 --- a/src/um/UserPool.cc +++ b/src/um/UserPool.cc @@ -40,11 +40,15 @@ const char * UserPool::DEFAULT_AUTH = "default"; const char * UserPool::SERVER_NAME = "serveradmin"; +const int UserPool::ONEADMIN_ID = 0; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ time_t UserPool::_session_expiration_time; +string UserPool::oneadmin_name; + /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ @@ -74,8 +78,13 @@ UserPool::UserPool(SqlDB * db, _session_expiration_time = __session_expiration_time; - if (get(0,false) != 0) + User * oneadmin_user = get(0, true); + + if (oneadmin_user != 0) { + oneadmin_name = oneadmin_user->get_name(); + oneadmin_user->unlock(); + return; } @@ -122,6 +131,8 @@ UserPool::UserPool(SqlDB * db, goto error_token; } + oneadmin_name = one_name; + if ( one_name == SERVER_NAME ) { goto error_one_name; diff --git a/src/um/test/SConstruct b/src/um/test/SConstruct index e35ea02570..5d0ff663e0 100644 --- a/src/um/test/SConstruct +++ 
b/src/um/test/SConstruct @@ -23,6 +23,7 @@ env.Prepend(LIBS=[ 'nebula_im', 'nebula_hm', 'nebula_rm', + 'nebula_datastore', 'nebula_dm', 'nebula_tm', 'nebula_um', @@ -33,6 +34,7 @@ env.Prepend(LIBS=[ 'nebula_template', 'nebula_image', 'nebula_pool', + 'nebula_cluster', 'nebula_host', 'nebula_vnm', 'nebula_vm', diff --git a/src/um/test/UserPoolTest.cc b/src/um/test/UserPoolTest.cc index fd111fcd4a..8ed333ebfd 100644 --- a/src/um/test/UserPoolTest.cc +++ b/src/um/test/UserPoolTest.cc @@ -380,7 +380,7 @@ public: { User *user_oid, *user_name; int oid_0; - int uid_0; + //int uid_0; string name_0; oid_0 = allocate(0); @@ -393,7 +393,7 @@ public: CPPUNIT_ASSERT(user_oid != 0); name_0 = user_oid->get_name(); - uid_0 = user_oid->get_uid(); + //uid_0 = user_oid->get_uid(); user_oid->unlock(); diff --git a/src/vm/History.cc b/src/vm/History.cc index 520f527fcc..7d581a1937 100644 --- a/src/vm/History.cc +++ b/src/vm/History.cc @@ -40,11 +40,9 @@ History::History( oid(_oid), seq(_seq), hostname(""), - vm_dir(""), hid(-1), vmm_mad_name(""), vnm_mad_name(""), - tm_mad_name(""), stime(0), etime(0), prolog_stime(0), @@ -62,18 +60,14 @@ History::History( int _seq, int _hid, const string& _hostname, - const string& _vm_dir, const string& _vmm, - const string& _vnm, - const string& _tm): + const string& _vnm): oid(_oid), seq(_seq), hostname(_hostname), - vm_dir(_vm_dir), hid(_hid), vmm_mad_name(_vmm), vnm_mad_name(_vnm), - tm_mad_name(_tm), stime(0), etime(0), prolog_stime(0), @@ -92,8 +86,13 @@ History::History( void History::non_persistent_data() { - ostringstream os; - Nebula& nd = Nebula::instance(); + ostringstream os; + + string vm_lhome; + string vm_rhome; + string ds_location; + + Nebula& nd = Nebula::instance(); // ----------- Local Locations ------------ os.str(""); @@ -116,13 +115,15 @@ void History::non_persistent_data() context_file = os.str(); // ----------- Remote Locations ------------ + os.str(""); - os << vm_dir << "/" << oid << "/images"; + + 
nd.get_configuration_attribute("DATASTORE_LOCATION", ds_location); + os << ds_location << "/" << DatastorePool::SYSTEM_DS_ID << "/" << oid; vm_rhome = os.str(); os << "/checkpoint"; - checkpoint_file = os.str(); os.str(""); @@ -263,22 +264,20 @@ string& History::to_xml(string& xml) const oss << "" << - "" << seq << "" << - ""<< hostname << ""<< - "" << vm_dir << ""<< - "" << hid << "" << - "" << stime << "" << - "" << etime << "" << - "" << vmm_mad_name << ""<< - "" << vnm_mad_name << ""<< - "" << tm_mad_name << "" << - "" << prolog_stime << ""<< - "" << prolog_etime << ""<< - "" << running_stime << ""<< - "" << running_etime << ""<< - "" << epilog_stime << ""<< - "" << epilog_etime << ""<< - "" << reason << ""<< + "" << seq << "" << + "" << hostname << ""<< + "" << hid << "" << + "" << stime << "" << + "" << etime << "" << + "" << vmm_mad_name << ""<< + "" << vnm_mad_name << ""<< + "" << prolog_stime << ""<< + "" << prolog_etime << ""<< + "" << running_stime << ""<< + "" << running_etime << ""<< + "" << epilog_stime << ""<< + "" << epilog_etime << ""<< + "" << reason << ""<< ""; xml = oss.str(); @@ -294,22 +293,20 @@ int History::rebuild_attributes() int int_reason; int rc = 0; - rc += xpath(seq , "/HISTORY/SEQ", -1); - rc += xpath(hostname , "/HISTORY/HOSTNAME", "not_found"); - rc += xpath(vm_dir , "/HISTORY/VM_DIR", "not_found"); - rc += xpath(hid , "/HISTORY/HID", -1); - rc += xpath(stime , "/HISTORY/STIME", 0); - rc += xpath(etime , "/HISTORY/ETIME", 0); - rc += xpath(vmm_mad_name , "/HISTORY/VMMMAD", "not_found"); - xpath(vnm_mad_name , "/HISTORY/VNMMAD", "dummy"); - rc += xpath(tm_mad_name , "/HISTORY/TMMAD", "not_found"); - rc += xpath(prolog_stime , "/HISTORY/PSTIME", 0); - rc += xpath(prolog_etime , "/HISTORY/PETIME", 0); - rc += xpath(running_stime, "/HISTORY/RSTIME", 0); - rc += xpath(running_etime, "/HISTORY/RETIME", 0); - rc += xpath(epilog_stime , "/HISTORY/ESTIME", 0); - rc += xpath(epilog_etime , "/HISTORY/EETIME", 0); - rc += xpath(int_reason , 
"/HISTORY/REASON", 0); + rc += xpath(seq , "/HISTORY/SEQ", -1); + rc += xpath(hostname , "/HISTORY/HOSTNAME", "not_found"); + rc += xpath(hid , "/HISTORY/HID", -1); + rc += xpath(stime , "/HISTORY/STIME", 0); + rc += xpath(etime , "/HISTORY/ETIME", 0); + rc += xpath(vmm_mad_name , "/HISTORY/VMMMAD", "not_found"); + xpath(vnm_mad_name , "/HISTORY/VNMMAD", "dummy"); + rc += xpath(prolog_stime , "/HISTORY/PSTIME", 0); + rc += xpath(prolog_etime , "/HISTORY/PETIME", 0); + rc += xpath(running_stime , "/HISTORY/RSTIME", 0); + rc += xpath(running_etime , "/HISTORY/RETIME", 0); + rc += xpath(epilog_stime , "/HISTORY/ESTIME", 0); + rc += xpath(epilog_etime , "/HISTORY/EETIME", 0); + rc += xpath(int_reason , "/HISTORY/REASON", 0); reason = static_cast(int_reason); diff --git a/src/vm/VirtualMachine.cc b/src/vm/VirtualMachine.cc index 49b2575054..87e6a19943 100644 --- a/src/vm/VirtualMachine.cc +++ b/src/vm/VirtualMachine.cc @@ -109,10 +109,11 @@ int VirtualMachine::select(SqlDB * db) ostringstream oss; ostringstream ose; - int rc; - int last_seq; + string system_dir; + int rc; + int last_seq; - Nebula& nd = Nebula::instance(); + Nebula& nd = Nebula::instance(); // Rebuild the VirtualMachine object rc = PoolObjectSQL::select(db); @@ -148,14 +149,25 @@ int VirtualMachine::select(SqlDB * db) } } - //Create support directory for this VM + if ( state == DONE ) //Do not recreate dirs. 
They may be deleted + { + _log = 0; + + return 0; + } + + //-------------------------------------------------------------------------- + //Create support directories for this VM + //-------------------------------------------------------------------------- oss.str(""); oss << nd.get_var_location() << oid; - mkdir(oss.str().c_str(), 0777); - chmod(oss.str().c_str(), 0777); - + mkdir(oss.str().c_str(), 0700); + chmod(oss.str().c_str(), 0700); + + //-------------------------------------------------------------------------- //Create Log support for this VM + //-------------------------------------------------------------------------- try { _log = new FileLog(nd.get_vm_log_filename(oid),Log::DEBUG); @@ -171,7 +183,7 @@ int VirtualMachine::select(SqlDB * db) return 0; error_previous_history: - ose << "Can not get previous history record (seq:" << history->seq + ose << "Cannot get previous history record (seq:" << history->seq << ") for VM id: " << oid; log("ONE", Log::ERROR, ose); @@ -187,26 +199,9 @@ int VirtualMachine::insert(SqlDB * db, string& error_str) string name; SingleAttribute * attr; - string aname; string value; ostringstream oss; - - // ------------------------------------------------------------------------ - // Check template for restricted attributes - // ------------------------------------------------------------------------ - - if ( uid != 0 && gid != GroupPool::ONEADMIN_ID ) - { - VirtualMachineTemplate *vt = - static_cast(obj_template); - - if (vt->check(aname)) - { - goto error_restricted; - } - } - // ------------------------------------------------------------------------ // Set a name if the VM has not got one and VM_ID // ------------------------------------------------------------------------ @@ -277,6 +272,13 @@ int VirtualMachine::insert(SqlDB * db, string& error_str) goto error_requirements; } + rc = automatic_requirements(error_str); + + if ( rc != 0 ) + { + goto error_requirements; + } + parse_graphics(); // 
------------------------------------------------------------------------ @@ -308,11 +310,6 @@ error_leases_rollback: release_network_leases(); goto error_common; -error_restricted: - oss << "VM Template includes a restricted attribute " << aname << "."; - error_str = oss.str(); - goto error_common; - error_name_length: oss << "NAME is too long; max length is 128 chars."; error_str = oss.str(); @@ -361,7 +358,7 @@ int VirtualMachine::parse_context(string& error_str) if (str == 0) { - NebulaLog::log("ONE",Log::ERROR, "Can not marshall CONTEXT"); + NebulaLog::log("ONE",Log::ERROR, "Cannot marshall CONTEXT"); return -1; } @@ -507,6 +504,148 @@ int VirtualMachine::parse_requirements(string& error_str) /* ------------------------------------------------------------------------ */ /* ------------------------------------------------------------------------ */ +int VirtualMachine::automatic_requirements(string& error_str) +{ + int num_vatts; + vector v_attributes; + VectorAttribute * vatt; + + ostringstream oss; + string requirements; + string cluster_id = ""; + string vatt_cluster_id; + + // Get cluster id from all DISK vector attributes + + num_vatts = obj_template->get("DISK",v_attributes); + + for(int i=0; i(v_attributes[i]); + + if ( vatt == 0 ) + { + continue; + } + + vatt_cluster_id = vatt->vector_value("CLUSTER_ID"); + + if ( !vatt_cluster_id.empty() ) + { + if ( !cluster_id.empty() && cluster_id != vatt_cluster_id ) + { + goto error; + } + + cluster_id = vatt_cluster_id; + } + } + + // Get cluster id from all NIC vector attributes + + v_attributes.clear(); + num_vatts = obj_template->get("NIC",v_attributes); + + for(int i=0; i(v_attributes[i]); + + if ( vatt == 0 ) + { + continue; + } + + vatt_cluster_id = vatt->vector_value("CLUSTER_ID"); + + if ( !vatt_cluster_id.empty() ) + { + if ( !cluster_id.empty() && cluster_id != vatt_cluster_id ) + { + goto error; + } + + cluster_id = vatt_cluster_id; + } + } + + if ( !cluster_id.empty() ) + { + oss.str(""); + oss << 
"CLUSTER_ID = " << cluster_id; + + obj_template->get("REQUIREMENTS", requirements); + + if ( !requirements.empty() ) + { + oss << " & ( " << requirements << " )"; + } + + replace_template_attribute("REQUIREMENTS", oss.str()); + } + + return 0; + +error: + + oss << "Incompatible cluster IDs."; + + // Get cluster id from all DISK vector attributes + + v_attributes.clear(); + num_vatts = obj_template->get("DISK",v_attributes); + + for(int i=0; i(v_attributes[i]); + + if ( vatt == 0 ) + { + continue; + } + + vatt_cluster_id = vatt->vector_value("CLUSTER_ID"); + + if ( !vatt_cluster_id.empty() ) + { + oss << endl << "DISK [" << i << "]: IMAGE [" + << vatt->vector_value("IMAGE_ID") << "] from DATASTORE [" + << vatt->vector_value("DATASTORE_ID") << "] requires CLUSTER [" + << vatt_cluster_id << "]"; + } + } + + // Get cluster id from all NIC vector attributes + + v_attributes.clear(); + num_vatts = obj_template->get("NIC",v_attributes); + + for(int i=0; i(v_attributes[i]); + + if ( vatt == 0 ) + { + continue; + } + + vatt_cluster_id = vatt->vector_value("CLUSTER_ID"); + + if ( !vatt_cluster_id.empty() ) + { + oss << endl << "NIC [" << i << "]: NETWORK [" + << vatt->vector_value("NETWORK_ID") << "] requires CLUSTER [" + << vatt_cluster_id << "]"; + } + } + + error_str = oss.str(); + + return -1; +} + +/* ------------------------------------------------------------------------ */ +/* ------------------------------------------------------------------------ */ + int VirtualMachine::insert_replace(SqlDB *db, bool replace, string& error_str) { ostringstream oss; @@ -603,10 +742,8 @@ error_common: void VirtualMachine::add_history( int hid, const string& hostname, - const string& vm_dir, const string& vmm_mad, - const string& vnm_mad, - const string& tm_mad) + const string& vnm_mad) { ostringstream os; int seq; @@ -622,7 +759,12 @@ void VirtualMachine::add_history( previous_history = history; } - history = new History(oid,seq,hid,hostname,vm_dir,vmm_mad,vnm_mad,tm_mad); + history 
= new History(oid, + seq, + hid, + hostname, + vmm_mad, + vnm_mad); history_records.push_back(history); }; @@ -643,11 +785,8 @@ void VirtualMachine::cp_history() history->seq + 1, history->hid, history->hostname, - history->vm_dir, history->vmm_mad_name, - history->vnm_mad_name, - history->tm_mad_name); - + history->vnm_mad_name); previous_history = history; history = htmp; @@ -671,10 +810,8 @@ void VirtualMachine::cp_previous_history() history->seq + 1, previous_history->hid, previous_history->hostname, - previous_history->vm_dir, previous_history->vmm_mad_name, - previous_history->vnm_mad_name, - previous_history->tm_mad_name); + previous_history->vnm_mad_name); previous_history = history; history = htmp; @@ -746,8 +883,13 @@ int VirtualMachine::get_disk_images(string& error_str) continue; } - rc = ipool->disk_attribute(disk, i, &index, &img_type, uid, image_id); - + rc = ipool->disk_attribute(disk, + i, + &index, + &img_type, + uid, + image_id, + error_str); if (rc == 0 ) { acquired_images.push_back(image_id); @@ -784,26 +926,22 @@ int VirtualMachine::get_disk_images(string& error_str) } else if ( rc == -1 ) { - goto error_image; + goto error_common; } } return 0; error_max_os: - error_str = "VM can not use more than one OS image."; + error_str = "VM cannot use more than one OS image."; goto error_common; error_max_cd: - error_str = "VM can not use more than one CDROM image."; + error_str = "VM cannot use more than one CDROM image."; goto error_common; error_max_db: - error_str = "VM can not use more than 10 DATABLOCK images."; - goto error_common; - -error_image: - error_str = "Could not get disk image for VM."; + error_str = "VM cannot use more than 10 DATABLOCK images."; goto error_common; error_common: @@ -813,9 +951,7 @@ error_common: for ( it=acquired_images.begin() ; it < acquired_images.end(); it++ ) { - // Set disk_path and save_id to empty string, this way the image manager - // won't try to move any files - imagem->release_image(*it,"",-1,""); + 
imagem->release_image(*it, false); } return -1; @@ -826,8 +962,9 @@ error_common: void VirtualMachine::release_disk_images() { - string iid; - string saveas; + int iid; + int save_as_id; + int rc; int num_disks; vector disks; @@ -838,12 +975,7 @@ void VirtualMachine::release_disk_images() Nebula& nd = Nebula::instance(); imagem = nd.get_imagem(); - num_disks = get_template_attribute("DISK",disks); - - if (hasHistory() != 0) - { - disk_base_path = get_local_dir(); - } + num_disks = get_template_attribute("DISK",disks); for(int i=0; ivector_value("IMAGE_ID"); - saveas = disk->vector_value("SAVE_AS"); + rc = disk->vector_value("IMAGE_ID", iid); - if ( iid.empty() ) + if ( rc == 0 ) { - if (!saveas.empty()) - { - imagem->disk_to_image(disk_base_path,i,saveas); - } + imagem->release_image(iid, (state == FAILED)); } - else + + rc = disk->vector_value("SAVE_AS", save_as_id); + + if ( rc == 0 ) { - imagem->release_image(iid,disk_base_path,i,saveas); + imagem->release_image(save_as_id, (state == FAILED)); } } } @@ -896,19 +1027,15 @@ int VirtualMachine::get_network_leases(string& estr) continue; } - rc = vnpool->nic_attribute(nic, uid, oid); + rc = vnpool->nic_attribute(nic, uid, oid, estr); if (rc == -1) { - goto error_vnet; + return -1; } } return 0; - -error_vnet: - estr = "Could not get virtual network for VM."; - return -1; } /* -------------------------------------------------------------------------- */ @@ -1025,29 +1152,28 @@ int VirtualMachine::generate_context(string &files) return 1; } -/* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ -int VirtualMachine::save_disk(int disk_id, int img_id, string& error_str) +int VirtualMachine::get_image_from_disk(int disk_id, string& error_str) { - int num_disks; + int num_disks; + int tid; + int iid = -1; + int rc; + vector disks; VectorAttribute * disk; - string disk_id_str; - int tmp_disk_id; - ostringstream oss; 
- istringstream iss; + + num_disks = obj_template->get("DISK",disks); if ( state == DONE || state == FAILED ) { goto error_state; } - num_disks = obj_template->get("DISK",disks); - - for(int i=0; i(disks[i]); @@ -1056,12 +1182,14 @@ int VirtualMachine::save_disk(int disk_id, int img_id, string& error_str) continue; } - disk_id_str = disk->vector_value("DISK_ID"); + rc = disk->vector_value("DISK_ID", tid); - iss.str(disk_id_str); - iss >> tmp_disk_id; + if ( rc != 0 ) + { + continue; + } - if ( tmp_disk_id == disk_id ) + if ( disk_id == tid ) { if(!((disk->vector_value("SAVE_AS")).empty())) { @@ -1073,12 +1201,16 @@ int VirtualMachine::save_disk(int disk_id, int img_id, string& error_str) goto error_persistent; } + rc = disk->vector_value("IMAGE_ID", iid); + + if ( rc != 0 ) + { + goto error_image_id; + } + disk->replace("SAVE", "YES"); - oss << (img_id); - disk->replace("SAVE_AS", oss.str()); - - return 0; + return iid; } } @@ -1096,6 +1228,10 @@ error_saved: oss << "The DISK " << disk_id << " is already going to be saved."; goto error_common; +error_image_id: + oss << "The DISK " << disk_id << " does not have a valid IMAGE_ID."; + goto error_common; + error_not_found: oss << "The DISK " << disk_id << " does not exist for VM " << oid << "."; @@ -1108,6 +1244,53 @@ error_common: /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ +int VirtualMachine::save_disk(const string& disk_id, + const string& source, + int img_id) +{ + vector disks; + VectorAttribute * disk; + + int num_disks; + string tdisk_id; + + ostringstream oss; + + if ( state == DONE || state == FAILED ) + { + return -1; + } + + num_disks = obj_template->get("DISK",disks); + + for(int i=0; i(disks[i]); + + if ( disk == 0 ) + { + continue; + } + + tdisk_id = disk->vector_value("DISK_ID"); + + if ( tdisk_id == disk_id ) + { + disk->replace("SAVE_AS_SOURCE", source); + + oss << (img_id); + 
disk->replace("SAVE_AS", oss.str()); + + break; + } + } + + return 0; +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + void VirtualMachine::set_auth_request(int uid, AuthRequest& ar, VirtualMachineTemplate *tmpl) @@ -1378,3 +1561,29 @@ int VirtualMachine::from_xml(const string &xml_str) /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ + +string VirtualMachine::get_remote_system_dir() const +{ + ostringstream oss; + + string ds_location; + Nebula& nd = Nebula::instance(); + + nd.get_configuration_attribute("DATASTORE_LOCATION", ds_location); + oss << ds_location << "/" << DatastorePool::SYSTEM_DS_ID << "/" << oid; + + return oss.str(); +} + +/* -------------------------------------------------------------------------- */ +/* -------------------------------------------------------------------------- */ + +string VirtualMachine::get_system_dir() const +{ + ostringstream oss; + Nebula& nd = Nebula::instance(); + + oss << nd.get_ds_location() << DatastorePool::SYSTEM_DS_ID << "/"<< oid; + + return oss.str(); +}; diff --git a/src/vm/VirtualMachinePool.cc b/src/vm/VirtualMachinePool.cc index 65beced047..206d2b4cb4 100644 --- a/src/vm/VirtualMachinePool.cc +++ b/src/vm/VirtualMachinePool.cc @@ -170,7 +170,7 @@ VirtualMachinePool::VirtualMachinePool(SqlDB * db, { ostringstream oss; - oss << "Unkown VM_HOOK " << on << ". Hook not registered!"; + oss << "Unknown VM_HOOK " << on << ". 
Hook not registered!"; NebulaLog::log("VM",Log::WARNING,oss); } } @@ -216,7 +216,7 @@ int VirtualMachinePool::allocate ( { vm->state = VirtualMachine::PENDING; } - + // ------------------------------------------------------------------------ // Insert the Object in the pool // ------------------------------------------------------------------------ diff --git a/src/vm/test/SConstruct b/src/vm/test/SConstruct index 9fc71af828..98fd9ad244 100644 --- a/src/vm/test/SConstruct +++ b/src/vm/test/SConstruct @@ -28,6 +28,9 @@ env.Prepend(LIBS=[ 'nebula_pool', 'nebula_xml', 'nebula_image', + 'nebula_datastore', + 'nebula_cluster', + 'nebula_um', 'nebula_mad', 'nebula_common', 'nebula_log', diff --git a/src/vm/test/VirtualMachinePoolTest.cc b/src/vm/test/VirtualMachinePoolTest.cc index d74ecfbc3d..3a0ac90c90 100644 --- a/src/vm/test/VirtualMachinePoolTest.cc +++ b/src/vm/test/VirtualMachinePoolTest.cc @@ -66,7 +66,7 @@ const string xml_dump_where = "011the_userusersVM one110000000010000000000000000"; const string xml_history_dump = - "001the_userusersVM one110000000010000000000000000101the_userusersSecond VM1100000000200000000000000000A_hostnameA_vm_dir000A_vmm_madA_vnm_madA_tm_mad0000000201the_userusersVM one1100000000200000000000000001C_hostnameC_vm_dir200C_vmm_madC_vnm_madC_tm_mad0000000311the_userusersVM one110000000060000000000000000"; + "001the_userusersVM one110000000010000000000000000101the_userusersSecond VM1100000000200000000000000000A_hostnameA_vm_dir000A_vmm_madA_vnm_mad0000000201the_userusersVM one1100000000200000000000000001C_hostnameC_vm_dir200C_vmm_madC_vnm_mad0000000311the_userusersVM one110000000060000000000000000"; /* ************************************************************************* */ /* ************************************************************************* */ @@ -194,7 +194,6 @@ public: string hostname = "hostname"; string vm_dir = "vm_dir"; string vmm_mad = "vm_mad"; - string tm_mad = "tm_mad"; // Allocate two VMs oid = allocate(0); @@ 
-299,10 +298,8 @@ public: VirtualMachine* vm; string hostnames[] = {"A_hostname", "B_hostname", "C_hostname"}; - string vm_dirs[] = {"A_vm_dir", "B_vm_dir", "C_vm_dir"}; string vmm_mads[] = {"A_vmm_mad", "B_vmm_mad", "C_vmm_mad"}; string vnm_mads[] = {"A_vnm_mad", "B_vnm_mad", "C_vnm_mad"}; - string tm_mads[] = {"A_tm_mad", "B_tm_mad", "C_tm_mad"}; int oid, rc; ostringstream oss; @@ -324,7 +321,7 @@ public: CPPUNIT_ASSERT( vm != 0 ); // Add a history item - vm->add_history(0, hostnames[0], vm_dirs[0], vmm_mads[0], vnm_mads[0], tm_mads[0]); + vm->add_history(0, hostnames[0], vmm_mads[0], vnm_mads[0]); rc = vmp->update(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -342,7 +339,7 @@ public: CPPUNIT_ASSERT( vm != 0 ); // Add a history item - vm->add_history(1, hostnames[1], vm_dirs[1], vmm_mads[1], vnm_mads[1], tm_mads[1]); + vm->add_history(1, hostnames[1], vmm_mads[1], vnm_mads[1]); rc = vmp->update(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -351,7 +348,7 @@ public: CPPUNIT_ASSERT( rc == 0 ); // Add another history item - vm->add_history(2, hostnames[2], vm_dirs[2], vmm_mads[2], vnm_mads[2], tm_mads[2]); + vm->add_history(2, hostnames[2], vmm_mads[2], vnm_mads[2]); rc = vmp->update(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -406,10 +403,8 @@ public: string hostname = "hostname"; string new_hostname = "new_hostname"; - string vm_dir = "vm_dir"; string vmm_mad = "vm_mad"; string vnm_mad = "vn_mad"; - string tm_mad = "tm_mad"; // Allocate a VM oid = allocate(0); @@ -419,7 +414,7 @@ public: CPPUNIT_ASSERT( vm != 0 ); // Add a history item - vm->add_history(0, hostname, vm_dir, vmm_mad, vnm_mad, tm_mad); + vm->add_history(0, hostname, vmm_mad, vnm_mad); rc = vmp->update(vm); CPPUNIT_ASSERT( rc == 0 ); @@ -427,7 +422,7 @@ public: rc = vmp->update_history(vm); CPPUNIT_ASSERT( rc == 0 ); - vm->add_history(0, new_hostname, vm_dir, vmm_mad, vnm_mad, tm_mad); + vm->add_history(0, new_hostname, vmm_mad, vnm_mad); rc = vmp->update(vm); CPPUNIT_ASSERT( rc == 0 ); diff --git a/src/vm_template/test/SConstruct 
b/src/vm_template/test/SConstruct index 20e5d89e58..b87e48c44f 100644 --- a/src/vm_template/test/SConstruct +++ b/src/vm_template/test/SConstruct @@ -22,6 +22,8 @@ env.Prepend(LIBS=[ 'nebula_vm', 'nebula_vmtemplate', 'nebula_hm', + 'nebula_cluster', + 'nebula_datastore', 'nebula_vnm', 'nebula_authm', 'nebula_acl', diff --git a/src/vmm/LibVirtDriverKVM.cc b/src/vmm/LibVirtDriverKVM.cc index 29c1b5eb1f..4952f733f6 100644 --- a/src/vmm/LibVirtDriverKVM.cc +++ b/src/vmm/LibVirtDriverKVM.cc @@ -339,20 +339,20 @@ int LibVirtDriver::deployment_description_kvm( if ( type == "BLOCK" ) { file << "\t\t" << endl - << "\t\t\t" << endl; + << "\t\t\t" << endl; } else if ( type == "CDROM" ) { file << "\t\t" << endl - << "\t\t\t" << endl; + << "\t\t\t" << endl; } else { file << "\t\t" << endl - << "\t\t\t" << endl; + << "\t\t\t" << endl; } // ---- target device to map the disk ---- @@ -419,8 +419,10 @@ int LibVirtDriver::deployment_description_kvm( if ( !target.empty() ) { file << "\t\t" << endl; - file << "\t\t\t" << endl; + + file << "\t\t\t" << endl; + file << "\t\t\t" << endl; file << "\t\t\t" << endl; diff --git a/src/vmm/LibVirtDriverVMware.cc b/src/vmm/LibVirtDriverVMware.cc index 856ef5131f..076c864ed8 100644 --- a/src/vmm/LibVirtDriverVMware.cc +++ b/src/vmm/LibVirtDriverVMware.cc @@ -222,19 +222,19 @@ int LibVirtDriver::deployment_description_vmware( { file << "\t\t" << endl; file << "\t\t\tget_oid() - << "/images/disk." << i << "'/>" << endl; + << "/disk." << i << "'/>" << endl; } else if ( type == "CDROM" ) { file << "\t\t" << endl; file << "\t\t\tget_oid() - << "/images/disk." << i << ".iso'/>" << endl; + << "/disk." << i << ".iso'/>" << endl; } else { file << "\t\t" << endl << "\t\t\t" << endl; + << "/disk." << i << "/disk.vmdk'/>" << endl; } file << "\t\t\tget_remote_dir() << "/disk." << i << "," + file << vm->get_remote_system_dir() << "/disk." 
<< i << "," << target << "," << mode << "'," << endl; @@ -312,7 +312,7 @@ int XenDriver::deployment_description( file << default_driver; } - file << vm->get_remote_dir() << "/disk." << num <<","<< target <<"," + file << vm->get_remote_system_dir() << "/disk." << num <<","<< target <<"," << "r'," << endl; } else diff --git a/src/vmm_mad/ec2/one_vmm_ec2.rb b/src/vmm_mad/ec2/one_vmm_ec2.rb index a4ce862b05..dbcd250245 100755 --- a/src/vmm_mad/ec2/one_vmm_ec2.rb +++ b/src/vmm_mad/ec2/one_vmm_ec2.rb @@ -185,7 +185,7 @@ class EC2Driver < VirtualMachineDriver return unless ec2_info if !ec2_value(ec2_info, 'AMI') - msg = "Can not find AMI in deployment file" + msg = "Cannot find AMI in deployment file" send_message(ACTION[:deploy], RESULT[:failure], id, msg) return end @@ -294,7 +294,7 @@ private if !local_dfile send_message(ACTION[:deploy],RESULT[:failure],id, - "Can not open deployment file #{local_dfile}") + "Cannot open deployment file #{local_dfile}") return end @@ -322,7 +322,7 @@ private ec2 = all_ec2_elements[0] else send_message(ACTION[:deploy],RESULT[:failure],id, - "Can not find EC2 element in deployment file "<< + "Cannot find EC2 element in deployment file "<< "#{local_dfile} or couldn't find any EC2 site matching "<< "one of the template.") return diff --git a/src/vmm_mad/exec/one_vmm_exec.rb b/src/vmm_mad/exec/one_vmm_exec.rb index 3570203cef..bd53cfe49e 100755 --- a/src/vmm_mad/exec/one_vmm_exec.rb +++ b/src/vmm_mad/exec/one_vmm_exec.rb @@ -259,7 +259,7 @@ class ExecDriver < VirtualMachineDriver if !local_dfile || File.zero?(local_dfile) send_message(ACTION[:deploy],RESULT[:failure],id, - "Can not open deployment file #{local_dfile}") + "Cannot open deployment file #{local_dfile}") return end diff --git a/src/vmm_mad/exec/vmm_exec_vmware.conf b/src/vmm_mad/exec/vmm_exec_vmware.conf index 3075ff7625..75f9556ff9 100644 --- a/src/vmm_mad/exec/vmm_exec_vmware.conf +++ b/src/vmm_mad/exec/vmm_exec_vmware.conf @@ -28,8 +28,9 @@ MEMORY = 256 OS = [ ARCH = i686 ] 
DISK = [ DRIVER = file ] -# Name of the datastore in the remote VMware hypervisors -# mounting $ONE_LOCATION/var exported as a nfs share -# by the OpenNebula front-end +# Name of the system datastore in the remote VMware hypervisors +# mounting DATASTORE_LOCATION/var/datastore/0 exported as a nfs share +# by the OpenNebula front-end. This would need to be changed +# *only* with custom TM drivers -DATASTORE = images +DATASTORE = 0 diff --git a/src/vmm_mad/remotes/vmware/vmware_driver.rb b/src/vmm_mad/remotes/vmware/vmware_driver.rb index 570b7b0b81..9ce7c9abd7 100644 --- a/src/vmm_mad/remotes/vmware/vmware_driver.rb +++ b/src/vmm_mad/remotes/vmware/vmware_driver.rb @@ -165,9 +165,10 @@ class VMwareDriver def restore(checkpoint) begin # Define the VM - dfile = File.dirname(File.dirname(checkpoint)) + "/deployment.0" + dfile = VAR_LOCATION + "/" + + File.basename(File.dirname(checkpoint)) + "/deployment.0" rescue => e - OpenNebula.log_error("Can not open checkpoint #{e.message}") + OpenNebula.log_error("Cannot open checkpoint #{e.message}") exit -1 end diff --git a/src/vmm_mad/remotes/xen/shutdown b/src/vmm_mad/remotes/xen/shutdown index b217235577..d7ed0a16c6 100755 --- a/src/vmm_mad/remotes/xen/shutdown +++ b/src/vmm_mad/remotes/xen/shutdown @@ -30,7 +30,7 @@ exec_and_log "$XM_SHUTDOWN $deploy_id" \ OUT=$(gdm) -while [ -n "$OUT" -a $(echo $OUT | awk '{print $5}') != "---s--" ]; do +while [ -n "$OUT" -a "$(echo $OUT | awk '{print $5}')" != "---s--" ]; do sleep 1 OUT=$(gdm) done diff --git a/src/vnm/VirtualNetwork.cc b/src/vnm/VirtualNetwork.cc index d8083944a9..ae6282be7f 100644 --- a/src/vnm/VirtualNetwork.cc +++ b/src/vnm/VirtualNetwork.cc @@ -24,6 +24,7 @@ #include "FixedLeases.h" #include "AuthManager.h" +#include "ClusterPool.h" #define TO_UPPER(S) transform(S.begin(),S.end(),S.begin(),(int(*)(int))toupper) @@ -35,8 +36,11 @@ VirtualNetwork::VirtualNetwork(int _uid, int _gid, const string& _uname, const string& _gname, + int _cluster_id, + const string& 
_cluster_name, VirtualNetworkTemplate * _vn_template): PoolObjectSQL(-1,NET,"",_uid,_gid,_uname,_gname,table), + Clusterable(_cluster_id, _cluster_name), bridge(""), type(UNINITIALIZED), leases(0) @@ -474,16 +478,18 @@ string& VirtualNetwork::to_xml_extended(string& xml, bool extended) const os << "" << - "" << oid << "" << - "" << uid << "" << - "" << gid << "" << - "" << uname << "" << - "" << gname << "" << - "" << name << "" << - perms_to_xml(perm_str) << - "" << type << "" << - "" << bridge << ""<< - "" << vlan << ""; + "" << oid << "" << + "" << uid << "" << + "" << gid << "" << + "" << uname << "" << + "" << gname << "" << + "" << name << "" << + perms_to_xml(perm_str) << + "" << cluster_id << "" << + "" << cluster << "" << + "" << type << "" << + "" << bridge << ""<< + "" << vlan << ""; if (!phydev.empty()) { @@ -566,6 +572,9 @@ int VirtualNetwork::from_xml(const string &xml_str) rc += xpath(bridge, "/VNET/BRIDGE", "not_found"); rc += xpath(vlan, "/VNET/VLAN", 0); + rc += xpath(cluster_id, "/VNET/CLUSTER_ID", -1); + rc += xpath(cluster, "/VNET/CLUSTER", "not_found"); + // Permissions rc += perms_from_xml(); @@ -618,10 +627,10 @@ int VirtualNetwork::nic_attribute(VectorAttribute *nic, int vid) string ip; string mac; - ostringstream vnid; + ostringstream oss; - ip = nic->vector_value("IP"); - vnid << oid; + ip = nic->vector_value("IP"); + oss << oid; //-------------------------------------------------------------------------- // GET NETWORK LEASE @@ -646,7 +655,7 @@ int VirtualNetwork::nic_attribute(VectorAttribute *nic, int vid) //-------------------------------------------------------------------------- nic->replace("NETWORK" ,name); - nic->replace("NETWORK_ID",vnid.str()); + nic->replace("NETWORK_ID",oss.str()); nic->replace("BRIDGE" ,bridge); nic->replace("MAC" ,mac); nic->replace("IP" ,ip); @@ -670,6 +679,14 @@ int VirtualNetwork::nic_attribute(VectorAttribute *nic, int vid) nic->replace("VLAN_ID", vlan_id); } + if ( get_cluster_id() != 
ClusterPool::NONE_CLUSTER_ID ) + { + oss.str(""); + oss << get_cluster_id(); + + nic->replace("CLUSTER_ID", oss.str()); + } + return 0; } diff --git a/src/vnm/VirtualNetworkPool.cc b/src/vnm/VirtualNetworkPool.cc index 47b05a2e47..348863b813 100644 --- a/src/vnm/VirtualNetworkPool.cc +++ b/src/vnm/VirtualNetworkPool.cc @@ -78,6 +78,8 @@ int VirtualNetworkPool::allocate ( const string& gname, VirtualNetworkTemplate * vn_template, int * oid, + int cluster_id, + const string& cluster_name, string& error_str) { VirtualNetwork * vn; @@ -85,7 +87,8 @@ int VirtualNetworkPool::allocate ( string name; ostringstream oss; - vn = new VirtualNetwork(uid, gid, uname, gname, vn_template); + vn = new VirtualNetwork(uid, gid, uname, gname, + cluster_id, cluster_name, vn_template); // Check name vn->get_template_attribute("NAME", name); @@ -139,7 +142,8 @@ error_common: VirtualNetwork * VirtualNetworkPool::get_nic_by_name(VectorAttribute * nic, const string& name, - int _uid) + int _uid, + string& error) { istringstream is; @@ -147,6 +151,8 @@ VirtualNetwork * VirtualNetworkPool::get_nic_by_name(VectorAttribute * nic, string uname; int uid; + VirtualNetwork * vnet; + if (!(uid_s = nic->vector_value("NETWORK_UID")).empty()) { is.str(uid_s); @@ -154,6 +160,7 @@ VirtualNetwork * VirtualNetworkPool::get_nic_by_name(VectorAttribute * nic, if( is.fail() ) { + error = "Cannot get user in NETWORK_UID"; return 0; } } @@ -167,6 +174,7 @@ VirtualNetwork * VirtualNetworkPool::get_nic_by_name(VectorAttribute * nic, if ( user == 0 ) { + error = "User set in NETWORK_UNAME does not exist"; return 0; } @@ -179,39 +187,63 @@ VirtualNetwork * VirtualNetworkPool::get_nic_by_name(VectorAttribute * nic, uid = _uid; } - return get(name,uid,true); + vnet = get(name,uid,true); + + if (vnet == 0) + { + ostringstream oss; + oss << "Virtual network " << name << " does not exist for user " << uid; + + error = oss.str(); + } + + return vnet; } /* 
-------------------------------------------------------------------------- */ -VirtualNetwork * VirtualNetworkPool::get_nic_by_id(const string& id_s) +VirtualNetwork * VirtualNetworkPool::get_nic_by_id(const string& id_s, + string& error) { istringstream is; int id; + VirtualNetwork * vnet = 0; + is.str(id_s); is >> id; - if( is.fail() ) + if( !is.fail() ) { - return 0; + vnet = get(id,true); } - return get(id,true); + if (vnet == 0) + { + ostringstream oss; + oss << "Virtual network with ID: " << id_s << " does not exist"; + + error = oss.str(); + } + + return vnet; } -int VirtualNetworkPool::nic_attribute(VectorAttribute * nic, int uid, int vid) +int VirtualNetworkPool::nic_attribute(VectorAttribute * nic, + int uid, + int vid, + string& error) { string network; VirtualNetwork * vnet = 0; if (!(network = nic->vector_value("NETWORK")).empty()) { - vnet = get_nic_by_name (nic, network, uid); + vnet = get_nic_by_name (nic, network, uid, error); } else if (!(network = nic->vector_value("NETWORK_ID")).empty()) { - vnet = get_nic_by_id(network); + vnet = get_nic_by_id(network, error); } else //Not using a pre-defined network { @@ -229,6 +261,10 @@ int VirtualNetworkPool::nic_attribute(VectorAttribute * nic, int uid, int vid) { update(vnet); } + else + { + error = "Cannot get IP/MAC lease from virtual network."; + } vnet->unlock(); @@ -245,14 +281,15 @@ void VirtualNetworkPool::authorize_nic(VectorAttribute * nic, string network; VirtualNetwork * vnet = 0; PoolObjectAuth perm; + string error; if (!(network = nic->vector_value("NETWORK")).empty()) { - vnet = get_nic_by_name (nic, network, uid); + vnet = get_nic_by_name (nic, network, uid, error); } else if (!(network = nic->vector_value("NETWORK_ID")).empty()) { - vnet = get_nic_by_id(network); + vnet = get_nic_by_id(network, error); } else //Not using a pre-defined network { diff --git a/src/vnm/test/SConstruct b/src/vnm/test/SConstruct index 8aac83869b..1bb4837eb8 100644 --- a/src/vnm/test/SConstruct +++ 
b/src/vnm/test/SConstruct @@ -23,6 +23,8 @@ env.Prepend(LIBS=[ 'nebula_im', 'nebula_hm', 'nebula_rm', + 'nebula_datastore', + 'nebula_cluster', 'nebula_dm', 'nebula_tm', 'nebula_um', diff --git a/src/vnm/test/VirtualNetworkPoolTest.cc b/src/vnm/test/VirtualNetworkPoolTest.cc index 5e57f71373..a620f9cdaa 100644 --- a/src/vnm/test/VirtualNetworkPoolTest.cc +++ b/src/vnm/test/VirtualNetworkPoolTest.cc @@ -125,7 +125,9 @@ public: if( rc == 0 ) { - return VirtualNetworkPool::allocate(uid, 0,"the_user","oneadmin", vn_template, oid, err); + return VirtualNetworkPool::allocate(uid, 0,"the_user","oneadmin", + vn_template, oid, ClusterPool::NONE_CLUSTER_ID, + ClusterPool::NONE_CLUSTER_NAME, err); } else { @@ -1067,7 +1069,7 @@ public: VectorAttribute * disk; int oid_0, oid_1; - string value; + string value, error; // --------------------------------------------------------------------- // Allocate 2 vnets @@ -1092,9 +1094,9 @@ public: disk = new VectorAttribute("DISK"); disk->replace("NETWORK", "Net 0"); - ((VirtualNetworkPool*)vnp)->nic_attribute(disk, 0, 0); + ((VirtualNetworkPool*)vnp)->nic_attribute(disk, 0, 0, error); - ((VirtualNetworkPool*)vnp)->nic_attribute(disk, 0, 0); + ((VirtualNetworkPool*)vnp)->nic_attribute(disk, 0, 0, error); value = ""; value = disk->vector_value("NETWORK"); @@ -1123,7 +1125,7 @@ public: disk = new VectorAttribute("DISK"); disk->replace("NETWORK_ID", "1"); - ((VirtualNetworkPool*)vnp)->nic_attribute(disk,0, 0); + ((VirtualNetworkPool*)vnp)->nic_attribute(disk,0, 0, error); value = ""; value = disk->vector_value("NETWORK");