Mirror of https://github.com/OpenNebula/one.git (synced 2025-03-16 22:50:10 +03:00)

commit 97550f806c
Merge remote-tracking branch 'upstream/master'
@ -57,6 +57,8 @@ main_env.Append(LIBPATH=[
|
||||
cwd+'/src/log',
|
||||
cwd+'/src/sql',
|
||||
cwd+'/src/host',
|
||||
cwd+'/src/cluster',
|
||||
cwd+'/src/datastore',
|
||||
cwd+'/src/group',
|
||||
cwd+'/src/mad',
|
||||
cwd+'/src/nebula',
|
||||
@ -187,6 +189,8 @@ build_scripts=[
|
||||
'src/common/SConstruct',
|
||||
'src/template/SConstruct',
|
||||
'src/host/SConstruct',
|
||||
'src/cluster/SConstruct',
|
||||
'src/datastore/SConstruct',
|
||||
'src/group/SConstruct',
|
||||
'src/mad/SConstruct',
|
||||
'src/mad/utils/SConstruct',
|
||||
@ -236,6 +240,8 @@ if testing=='yes':
|
||||
'src/authm/test/SConstruct',
|
||||
'src/common/test/SConstruct',
|
||||
'src/host/test/SConstruct',
|
||||
'src/cluster/test/SConstruct',
|
||||
'src/datastore/test/SConstruct',
|
||||
'src/group/test/SConstruct',
|
||||
'src/image/test/SConstruct',
|
||||
'src/lcm/test/SConstruct',
|
||||
|
@ -243,6 +243,16 @@ public:
|
||||
*/
|
||||
string vector_value(const char *name) const;
|
||||
|
||||
/**
|
||||
* Returns the integer value
|
||||
*
|
||||
* @param name Name of the attribute
|
||||
* @param value Integer value
|
||||
*
|
||||
* @return 0 on success, -1 otherwise
|
||||
*/
|
||||
int vector_value(const char *name, int & value) const;
|
||||
|
||||
/**
|
||||
* Marshall the attribute in a single string. The string MUST be freed
|
||||
* by the calling function. The string is in the form:
|
||||
|
264
include/Cluster.h
Normal file
@ -0,0 +1,264 @@
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------*/
|
||||
|
||||
#ifndef CLUSTER_H_
|
||||
#define CLUSTER_H_
|
||||
|
||||
#include "PoolSQL.h"
|
||||
#include "ObjectCollection.h"
|
||||
#include "DatastorePool.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
/**
|
||||
* The Cluster class.
|
||||
*/
|
||||
class Cluster : public PoolObjectSQL
|
||||
{
|
||||
public:
|
||||
|
||||
// *************************************************************************
|
||||
// Object Collections (Public)
|
||||
// *************************************************************************
|
||||
|
||||
/**
|
||||
* Adds this host ID to the set.
|
||||
* @param id to be added to the cluster
|
||||
* @param error_msg Error message, if any
|
||||
* @return 0 on success
|
||||
*/
|
||||
int add_host(int id, string& error_msg)
|
||||
{
|
||||
int rc = hosts.add_collection_id(id);
|
||||
|
||||
if ( rc < 0 )
|
||||
{
|
||||
error_msg = "Host ID is already in the cluster set.";
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes this host ID from the set.
|
||||
* @param id to be deleted from the cluster
|
||||
* @param error_msg Error message, if any
|
||||
* @return 0 on success
|
||||
*/
|
||||
int del_host(int id, string& error_msg)
|
||||
{
|
||||
int rc = hosts.del_collection_id(id);
|
||||
|
||||
if ( rc < 0 )
|
||||
{
|
||||
error_msg = "Host ID is not part of the cluster set.";
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds this datastore ID to the set.
|
||||
* @param id to be added to the cluster
|
||||
* @param error_msg Error message, if any
|
||||
* @return 0 on success
|
||||
*/
|
||||
int add_datastore(int id, string& error_msg)
|
||||
{
|
||||
if ( id == DatastorePool::SYSTEM_DS_ID )
|
||||
{
|
||||
ostringstream oss;
|
||||
oss << "Datastore '"<< DatastorePool::SYSTEM_DS_NAME
|
||||
<< "' cannot be added to any cluster.";
|
||||
|
||||
error_msg = oss.str();
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
int rc = datastores.add_collection_id(id);
|
||||
|
||||
if ( rc < 0 )
|
||||
{
|
||||
error_msg = "Datastore ID is already in the cluster set.";
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes this datastore ID from the set.
|
||||
* @param id to be deleted from the cluster
|
||||
* @param error_msg Error message, if any
|
||||
* @return 0 on success
|
||||
*/
|
||||
int del_datastore(int id, string& error_msg)
|
||||
{
|
||||
int rc = datastores.del_collection_id(id);
|
||||
|
||||
if ( rc < 0 )
|
||||
{
|
||||
error_msg = "Datastore ID is not part of the cluster set.";
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds this vnet ID to the set.
|
||||
* @param id to be added to the cluster
|
||||
* @param error_msg Error message, if any
|
||||
* @return 0 on success
|
||||
*/
|
||||
int add_vnet(int id, string& error_msg)
|
||||
{
|
||||
int rc = vnets.add_collection_id(id);
|
||||
|
||||
if ( rc < 0 )
|
||||
{
|
||||
error_msg = "Network ID is already in the cluster set.";
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes this vnet ID from the set.
|
||||
* @param id to be deleted from the cluster
|
||||
* @param error_msg Error message, if any
|
||||
* @return 0 on success
|
||||
*/
|
||||
int del_vnet(int id, string& error_msg)
|
||||
{
|
||||
int rc = vnets.del_collection_id(id);
|
||||
|
||||
if ( rc < 0 )
|
||||
{
|
||||
error_msg = "Network ID is not part of the cluster set.";
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
// *************************************************************************
|
||||
// DataBase implementation (Public)
|
||||
// *************************************************************************
|
||||
|
||||
/**
|
||||
* Function to print the Cluster object into a string in XML format
|
||||
* @param xml the resulting XML string
|
||||
* @return a reference to the generated string
|
||||
*/
|
||||
string& to_xml(string& xml) const;
|
||||
|
||||
/**
|
||||
* Rebuilds the object from an xml formatted string
|
||||
* @param xml_str The xml-formatted string
|
||||
*
|
||||
* @return 0 on success, -1 otherwise
|
||||
*/
|
||||
int from_xml(const string &xml_str);
|
||||
|
||||
private:
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Friends
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
friend class ClusterPool;
|
||||
|
||||
// *************************************************************************
|
||||
// Constructor
|
||||
// *************************************************************************
|
||||
|
||||
Cluster(int id, const string& name):
|
||||
PoolObjectSQL(id,CLUSTER,name,-1,-1,"","",table),
|
||||
hosts("HOSTS"),
|
||||
datastores("DATASTORES"),
|
||||
vnets("VNETS"){};
|
||||
|
||||
virtual ~Cluster(){};
|
||||
|
||||
// *************************************************************************
|
||||
// Object Collections (Private)
|
||||
// *************************************************************************
|
||||
|
||||
ObjectCollection hosts;
|
||||
ObjectCollection datastores;
|
||||
ObjectCollection vnets;
|
||||
|
||||
// *************************************************************************
|
||||
// DataBase implementation (Private)
|
||||
// *************************************************************************
|
||||
|
||||
static const char * db_names;
|
||||
|
||||
static const char * db_bootstrap;
|
||||
|
||||
static const char * table;
|
||||
|
||||
/**
|
||||
* Execute an INSERT or REPLACE Sql query.
|
||||
* @param db The SQL DB
|
||||
* @param replace Execute an INSERT or a REPLACE
|
||||
* @param error_str Returns the error reason, if any
|
||||
* @return 0 on success
|
||||
*/
|
||||
int insert_replace(SqlDB *db, bool replace, string& error_str);
|
||||
|
||||
/**
|
||||
* Bootstraps the database table(s) associated to the Cluster
|
||||
* @return 0 on success
|
||||
*/
|
||||
static int bootstrap(SqlDB * db)
|
||||
{
|
||||
ostringstream oss(Cluster::db_bootstrap);
|
||||
|
||||
return db->exec(oss);
|
||||
};
|
||||
|
||||
/**
|
||||
* Writes the Cluster in the database.
|
||||
* @param db pointer to the db
|
||||
* @return 0 on success
|
||||
*/
|
||||
int insert(SqlDB *db, string& error_str)
|
||||
{
|
||||
return insert_replace(db, false, error_str);
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes/updates the Cluster's data fields in the database.
|
||||
* @param db pointer to the db
|
||||
* @return 0 on success
|
||||
*/
|
||||
int update(SqlDB *db)
|
||||
{
|
||||
string error_str;
|
||||
return insert_replace(db, true, error_str);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if all the collections are empty, and therefore this cluster
|
||||
* can be dropped.
|
||||
*
|
||||
* @param error_msg Error message, if any.
|
||||
* @return 0 if cluster can be dropped, -1 otherwise
|
||||
*/
|
||||
int check_drop(string& error_msg);
|
||||
};
|
||||
|
||||
#endif /*CLUSTER_H_*/
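A minimal usage sketch for the collection helpers above (the Cluster pointer is assumed to come from ClusterPool::get() with the lock taken, and the caller is expected to update and unlock it afterwards; names are illustrative):

    // cluster: a locked Cluster * obtained through ClusterPool
    string error;

    if ( cluster->add_host(host_id, error) != 0 )
    {
        // error == "Host ID is already in the cluster set."
    }

    // add_datastore() additionally refuses the system datastore:
    if ( cluster->add_datastore(DatastorePool::SYSTEM_DS_ID, error) != 0 )
    {
        // error explains that the system datastore cannot be clustered
    }

The same pattern applies to del_host(), add_vnet() and del_vnet().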
|
158
include/ClusterPool.h
Normal file
@ -0,0 +1,158 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#ifndef CLUSTER_POOL_H_
|
||||
#define CLUSTER_POOL_H_
|
||||
|
||||
#include "Cluster.h"
|
||||
#include "SqlDB.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
||||
class ClusterPool : public PoolSQL
|
||||
{
|
||||
public:
|
||||
ClusterPool(SqlDB * db);
|
||||
|
||||
~ClusterPool(){};
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/* Constants for DB management */
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* Name for the "none" cluster
|
||||
*/
|
||||
static const string NONE_CLUSTER_NAME;
|
||||
|
||||
/**
|
||||
* Identifier for the "none" cluster
|
||||
*/
|
||||
static const int NONE_CLUSTER_ID;
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/* Methods for DB management */
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* Allocates a new cluster, writing it in the pool database. No memory is
|
||||
* allocated for the object.
|
||||
* @param name Cluster name
|
||||
* @param oid the id assigned to the Cluster
|
||||
* @param error_str Returns the error reason, if any
|
||||
*
|
||||
* @return the oid assigned to the object, -1 in case of failure
|
||||
*/
|
||||
int allocate(string name,
|
||||
int * oid,
|
||||
string& error_str);
|
||||
|
||||
/**
|
||||
* Function to get a cluster from the pool, if the object is not in memory
|
||||
* it is loaded from the DB
|
||||
* @param oid cluster unique id
|
||||
* @param lock locks the cluster mutex
|
||||
* @return a pointer to the cluster, 0 if the cluster could not be loaded
|
||||
*/
|
||||
Cluster * get(int oid, bool lock)
|
||||
{
|
||||
return static_cast<Cluster *>(PoolSQL::get(oid,lock));
|
||||
};
|
||||
|
||||
/**
|
||||
* Gets an object from the pool (if needed the object is loaded from the
|
||||
* database).
|
||||
* @param name of the object
|
||||
* @param lock locks the object if true
|
||||
*
|
||||
* @return a pointer to the object, 0 in case of failure
|
||||
*/
|
||||
Cluster * get(const string& name, bool lock)
|
||||
{
|
||||
// The owner is set to -1, because it is not used in the key() method
|
||||
return static_cast<Cluster *>(PoolSQL::get(name,-1,lock));
|
||||
};
|
||||
|
||||
/**
|
||||
* Generate an index key for the object
|
||||
* @param name of the object
|
||||
* @param uid owner of the object, only used if needed
|
||||
*
|
||||
* @return the key, a string
|
||||
*/
|
||||
string key(const string& name, int uid)
|
||||
{
|
||||
// Name is enough key because Clusters can't repeat names.
|
||||
return name;
|
||||
};
|
||||
|
||||
/** Update a particular Cluster
|
||||
* @param user pointer to Cluster
|
||||
* @return 0 on success
|
||||
*/
|
||||
int update(Cluster * cluster)
|
||||
{
|
||||
return cluster->update(db);
|
||||
};
|
||||
|
||||
/**
|
||||
* Drops the Cluster from the data base. The object mutex SHOULD be
|
||||
* locked.
|
||||
* @param objsql a pointer to a Cluster object
|
||||
* @param error_msg Error reason, if any
|
||||
* @return 0 on success,
|
||||
* -1 DB error,
|
||||
* -2 object is a system cluster (ID < 100)
|
||||
* -3 Cluster's User IDs set is not empty
|
||||
*/
|
||||
int drop(PoolObjectSQL * objsql, string& error_msg);
|
||||
|
||||
/**
|
||||
* Bootstraps the database table(s) associated to the Cluster pool
|
||||
* @return 0 on success
|
||||
*/
|
||||
static int bootstrap(SqlDB * _db)
|
||||
{
|
||||
return Cluster::bootstrap(_db);
|
||||
};
|
||||
|
||||
/**
|
||||
* Dumps the Cluster pool in XML format. A filter can be also added to the
|
||||
* query
|
||||
* @param oss the output stream to dump the pool contents
|
||||
* @param where filter for the objects, defaults to all
|
||||
*
|
||||
* @return 0 on success
|
||||
*/
|
||||
int dump(ostringstream& oss, const string& where)
|
||||
{
|
||||
return PoolSQL::dump(oss, "CLUSTER_POOL", Cluster::table, where);
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
/**
|
||||
* Factory method to produce objects
|
||||
* @return a pointer to the new object
|
||||
*/
|
||||
PoolObjectSQL * create()
|
||||
{
|
||||
return new Cluster(-1,"");
|
||||
};
|
||||
};
|
||||
|
||||
#endif /*CLUSTER_POOL_H_*/
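A hedged sketch of the allocate/get/update cycle the pool is designed for (error handling trimmed; get(oid, true) returns the object locked, so the caller must unlock it):

    int    oid;
    string error;

    if ( clpool->allocate("production", &oid, error) < 0 )
    {
        // name already taken or DB error
    }

    Cluster * cluster = clpool->get(oid, true);      // locked

    if ( cluster != 0 )
    {
        cluster->add_host(7, error);                 // modify the object
        clpool->update(cluster);                     // persist the change
        cluster->unlock();
    }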
|
78
include/Clusterable.h
Normal file
@ -0,0 +1,78 @@
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------*/
|
||||
|
||||
#ifndef CLUSTERABLE_H_
|
||||
#define CLUSTERABLE_H_
|
||||
|
||||
using namespace std;
|
||||
|
||||
class Clusterable
|
||||
{
|
||||
public:
|
||||
|
||||
/**
|
||||
* Changes the cluster this object belongs to
|
||||
*
|
||||
* @param _cluster_id Id of the new cluster
|
||||
* @param _cluster Name of the new cluster
|
||||
*/
|
||||
void set_cluster(int _cluster_id, const string& _cluster)
|
||||
{
|
||||
cluster_id = _cluster_id;
|
||||
cluster = _cluster;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the cluster ID
|
||||
*
|
||||
* @return The cluster ID
|
||||
*/
|
||||
int get_cluster_id() const
|
||||
{
|
||||
return cluster_id;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the cluster name
|
||||
*
|
||||
* @return The cluster name
|
||||
*/
|
||||
const string& get_cluster_name() const
|
||||
{
|
||||
return cluster;
|
||||
};
|
||||
|
||||
|
||||
protected:
|
||||
|
||||
Clusterable(int _cluster_id, const string& _cluster):
|
||||
cluster_id(_cluster_id),
|
||||
cluster(_cluster){};
|
||||
|
||||
~Clusterable(){};
|
||||
|
||||
/**
|
||||
* ID of the cluster this object belongs to.
|
||||
*/
|
||||
int cluster_id;
|
||||
|
||||
/**
|
||||
* Name of the cluster this object belongs to.
|
||||
*/
|
||||
string cluster;
|
||||
};
|
||||
|
||||
#endif /*CLUSTERABLE_H_*/
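Clusterable is a mix-in rather than a standalone object: Host and Datastore (modified later in this same commit) inherit it next to PoolObjectSQL. A small illustrative sketch of what that enables, assuming a locked Host pointer:

    // host: a locked Host * ; Host derives from Clusterable in this commit
    host->set_cluster(new_cluster_id, new_cluster_name);

    int           cid   = host->get_cluster_id();
    const string& cname = host->get_cluster_name();

    // Note: the reverse mapping (Cluster::add_host/del_host) still has to be
    // maintained separately through ClusterPool.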
|
200
include/Datastore.h
Normal file
@ -0,0 +1,200 @@
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------*/
|
||||
|
||||
#ifndef DATASTORE_H_
|
||||
#define DATASTORE_H_
|
||||
|
||||
#include "PoolSQL.h"
|
||||
#include "ObjectCollection.h"
|
||||
#include "DatastoreTemplate.h"
|
||||
#include "Clusterable.h"
|
||||
|
||||
/**
|
||||
* The Datastore class.
|
||||
*/
|
||||
class Datastore : public PoolObjectSQL, ObjectCollection, public Clusterable
|
||||
{
|
||||
public:
|
||||
|
||||
/**
|
||||
* Function to print the Datastore object into a string in XML format
|
||||
* @param xml the resulting XML string
|
||||
* @return a reference to the generated string
|
||||
*/
|
||||
string& to_xml(string& xml) const;
|
||||
|
||||
/**
|
||||
* Rebuilds the object from an xml formatted string
|
||||
* @param xml_str The xml-formatted string
|
||||
*
|
||||
* @return 0 on success, -1 otherwise
|
||||
*/
|
||||
int from_xml(const string &xml_str);
|
||||
|
||||
/**
|
||||
* Adds this image's ID to the set.
|
||||
* @param id of the image to be added to the Datastore
|
||||
* @return 0 on success
|
||||
*/
|
||||
int add_image(int id)
|
||||
{
|
||||
return add_collection_id(id);
|
||||
};
|
||||
|
||||
/**
|
||||
* Deletes this image's ID from the set.
|
||||
* @param id of the image to be deleted from the Datastore
|
||||
* @return 0 on success
|
||||
*/
|
||||
int del_image(int id)
|
||||
{
|
||||
return del_collection_id(id);
|
||||
};
|
||||
|
||||
/**
|
||||
* Retrieves TM mad name
|
||||
* @return string tm mad name
|
||||
*/
|
||||
const string& get_tm_mad() const
|
||||
{
|
||||
return tm_mad;
|
||||
};
|
||||
|
||||
/**
|
||||
* Retrieves the base path
|
||||
* @return base path string
|
||||
*/
|
||||
const string& get_base_path() const
|
||||
{
|
||||
return base_path;
|
||||
};
|
||||
|
||||
/**
|
||||
* Modifies the given VM disk attribute adding the relevant datastore
|
||||
* attributes
|
||||
*
|
||||
* @param disk
|
||||
* @return 0 on success
|
||||
*/
|
||||
int disk_attribute(VectorAttribute * disk);
|
||||
|
||||
/**
|
||||
* Replace template for this object. Object should be updated
|
||||
* after calling this method
|
||||
* @param tmpl string representation of the template
|
||||
*/
|
||||
int replace_template(const string& tmpl_str, string& error);
|
||||
|
||||
private:
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Friends
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
friend class DatastorePool;
|
||||
|
||||
// *************************************************************************
|
||||
// Datastore Private Attributes
|
||||
// *************************************************************************
|
||||
|
||||
/**
|
||||
* Name of the datastore driver used to register new images
|
||||
*/
|
||||
string ds_mad;
|
||||
|
||||
/**
|
||||
* Name of the TM driver used to transfer file to and from the hosts
|
||||
*/
|
||||
string tm_mad;
|
||||
|
||||
/**
|
||||
* Base path for the storage
|
||||
*/
|
||||
string base_path;
|
||||
|
||||
// *************************************************************************
|
||||
// Constructor
|
||||
// *************************************************************************
|
||||
|
||||
Datastore(
|
||||
int uid,
|
||||
int gid,
|
||||
const string& uname,
|
||||
const string& gname,
|
||||
DatastoreTemplate* ds_template,
|
||||
int cluster_id,
|
||||
const string& cluster_name);
|
||||
|
||||
virtual ~Datastore(){};
|
||||
|
||||
// *************************************************************************
|
||||
// DataBase implementation (Private)
|
||||
// *************************************************************************
|
||||
|
||||
static const char * db_names;
|
||||
|
||||
static const char * db_bootstrap;
|
||||
|
||||
static const char * table;
|
||||
|
||||
/**
|
||||
* Execute an INSERT or REPLACE Sql query.
|
||||
* @param db The SQL DB
|
||||
* @param replace Execute an INSERT or a REPLACE
|
||||
* @param error_str Returns the error reason, if any
|
||||
* @return 0 on success
|
||||
*/
|
||||
int insert_replace(SqlDB *db, bool replace, string& error_str);
|
||||
|
||||
/**
|
||||
* Bootstraps the database table(s) associated to the Datastore
|
||||
* @return 0 on success
|
||||
*/
|
||||
static int bootstrap(SqlDB * db)
|
||||
{
|
||||
ostringstream oss(Datastore::db_bootstrap);
|
||||
|
||||
return db->exec(oss);
|
||||
};
|
||||
|
||||
/**
|
||||
* Writes the Datastore in the database.
|
||||
* @param db pointer to the db
|
||||
* @return 0 on success
|
||||
*/
|
||||
int insert(SqlDB *db, string& error_str);
|
||||
|
||||
/**
|
||||
* Writes/updates the Datastore's data fields in the database.
|
||||
* @param db pointer to the db
|
||||
* @return 0 on success
|
||||
*/
|
||||
int update(SqlDB *db)
|
||||
{
|
||||
string error_str;
|
||||
return insert_replace(db, true, error_str);
|
||||
}
|
||||
|
||||
/**
|
||||
* Factory method for virtual network templates
|
||||
*/
|
||||
Template * get_new_template() const
|
||||
{
|
||||
return new DatastoreTemplate;
|
||||
}
|
||||
};
|
||||
|
||||
#endif /*DATASTORE_H_*/
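A short sketch of the intended use of the image bookkeeping and driver accessors (the Datastore pointer is assumed to be obtained, locked, from DatastorePool; variable names are illustrative):

    // ds: a locked Datastore *
    if ( ds->add_image(image_id) != 0 )
    {
        // image id was already registered in this datastore
    }

    const string& tm_mad    = ds->get_tm_mad();     // transfer driver name
    const string& base_path = ds->get_base_path();  // storage base directory

    // disk_attribute() would then be used to decorate a VM DISK vector
    // attribute with these datastore settings.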
|
179
include/DatastorePool.h
Normal file
@ -0,0 +1,179 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#ifndef DATASTORE_POOL_H_
|
||||
#define DATASTORE_POOL_H_
|
||||
|
||||
#include "Datastore.h"
|
||||
#include "SqlDB.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
||||
class DatastorePool : public PoolSQL
|
||||
{
|
||||
public:
|
||||
DatastorePool(SqlDB * db);
|
||||
|
||||
~DatastorePool(){};
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/* Constants for DB management */
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* Name for the system datastore
|
||||
*/
|
||||
static const string SYSTEM_DS_NAME;
|
||||
|
||||
/**
|
||||
* Identifier for the system datastore
|
||||
*/
|
||||
static const int SYSTEM_DS_ID;
|
||||
|
||||
/**
|
||||
* Name for the default datastore
|
||||
*/
|
||||
static const string DEFAULT_DS_NAME;
|
||||
|
||||
/**
|
||||
* Identifier for the default datastore
|
||||
*/
|
||||
static const int DEFAULT_DS_ID;
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/* Methods for DB management */
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* Allocates a new Datastore, writing it in the pool database. No memory is
|
||||
* allocated for the object.
|
||||
* @param uid the user id of the Datastore owner
|
||||
* @param gid the id of the group this object is assigned to
|
||||
* @param uname name of the user
|
||||
* @param gname name of the group
|
||||
* @param ds_template Datastore definition template
|
||||
* @param oid the id assigned to the Datastore
|
||||
* @param cluster_id the id of the cluster this Datastore will belong to
|
||||
* @param cluster_name the name of the cluster this Datastore will belong to
|
||||
* @param error_str Returns the error reason, if any
|
||||
*
|
||||
* @return the oid assigned to the object, -1 in case of failure
|
||||
*/
|
||||
int allocate(
|
||||
int uid,
|
||||
int gid,
|
||||
const string& uname,
|
||||
const string& gname,
|
||||
DatastoreTemplate * ds_template,
|
||||
int * oid,
|
||||
int cluster_id,
|
||||
const string& cluster_name,
|
||||
string& error_str);
|
||||
|
||||
/**
|
||||
* Function to get a Datastore from the pool, if the object is not in memory
|
||||
* it is loaded from the DB
|
||||
* @param oid Datastore unique id
|
||||
* @param lock locks the Datastore mutex
|
||||
* @return a pointer to the Datastore, 0 if the Datastore could not be loaded
|
||||
*/
|
||||
Datastore * get(int oid, bool lock)
|
||||
{
|
||||
return static_cast<Datastore *>(PoolSQL::get(oid,lock));
|
||||
};
|
||||
|
||||
/**
|
||||
* Gets an object from the pool (if needed the object is loaded from the
|
||||
* database).
|
||||
* @param name of the object
|
||||
* @param lock locks the object if true
|
||||
*
|
||||
* @return a pointer to the object, 0 in case of failure
|
||||
*/
|
||||
Datastore * get(const string& name, bool lock)
|
||||
{
|
||||
// The owner is set to -1, because it is not used in the key() method
|
||||
return static_cast<Datastore *>(PoolSQL::get(name,-1,lock));
|
||||
};
|
||||
|
||||
/**
|
||||
* Generate an index key for the object
|
||||
* @param name of the object
|
||||
* @param uid owner of the object, only used if needed
|
||||
*
|
||||
* @return the key, a string
|
||||
*/
|
||||
string key(const string& name, int uid)
|
||||
{
|
||||
// Name is enough key because Datastores can't repeat names.
|
||||
return name;
|
||||
};
|
||||
|
||||
/** Update a particular Datastore
|
||||
* @param user pointer to Datastore
|
||||
* @return 0 on success
|
||||
*/
|
||||
int update(Datastore * datastore)
|
||||
{
|
||||
return datastore->update(db);
|
||||
};
|
||||
|
||||
/**
|
||||
* Drops the Datastore data in the data base. The object mutex SHOULD be
|
||||
* locked.
|
||||
* @param objsql a pointer to the Datastore object
|
||||
* @param error_msg Error reason, if any
|
||||
* @return 0 on success, -1 DB error
|
||||
* -3 Datastore's Image IDs set is not empty
|
||||
*/
|
||||
int drop(PoolObjectSQL * objsql, string& error_msg);
|
||||
|
||||
/**
|
||||
* Bootstraps the database table(s) associated to the Datastore pool
|
||||
* @return 0 on success
|
||||
*/
|
||||
static int bootstrap(SqlDB * _db)
|
||||
{
|
||||
return Datastore::bootstrap(_db);
|
||||
};
|
||||
|
||||
/**
|
||||
* Dumps the Datastore pool in XML format. A filter can be also added to the
|
||||
* query
|
||||
* @param oss the output stream to dump the pool contents
|
||||
* @param where filter for the objects, defaults to all
|
||||
*
|
||||
* @return 0 on success
|
||||
*/
|
||||
int dump(ostringstream& oss, const string& where)
|
||||
{
|
||||
return PoolSQL::dump(oss, "DATASTORE_POOL", Datastore::table, where);
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
/**
|
||||
* Factory method to produce objects
|
||||
* @return a pointer to the new object
|
||||
*/
|
||||
PoolObjectSQL * create()
|
||||
{
|
||||
return new Datastore(-1,-1,"","", 0, -1, "");
|
||||
};
|
||||
};
|
||||
|
||||
#endif /*DATASTORE_POOL_H_*/
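A hedged sketch of allocating a datastore through the new pool. The uid/gid/cluster values are illustrative only; the template is assumed to have been built or parsed beforehand:

    int    oid;
    string error;

    int rc = dspool->allocate(0, 0, "oneadmin", "oneadmin",
                              ds_template,                    // DatastoreTemplate *
                              &oid,
                              ClusterPool::NONE_CLUSTER_ID,
                              ClusterPool::NONE_CLUSTER_NAME,
                              error);

    if ( rc < 0 )
    {
        // error holds the reason; otherwise rc == oid of the new datastore
    }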
|
39
include/DatastoreTemplate.h
Normal file
@ -0,0 +1,39 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#ifndef DATASTORE_TEMPLATE_H_
|
||||
#define DATASTORE_TEMPLATE_H_
|
||||
|
||||
#include "Template.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
/**
|
||||
* Datastore Template class
|
||||
*/
|
||||
class DatastoreTemplate : public Template
|
||||
{
|
||||
public:
|
||||
DatastoreTemplate():
|
||||
Template(false,'=',"TEMPLATE"){};
|
||||
|
||||
~DatastoreTemplate(){};
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#endif /*DATASTORE_TEMPLATE_H_*/
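DatastoreTemplate is a thin Template subclass configured for '='-separated attributes under a TEMPLATE root element. A minimal sketch of building one by hand, assuming the generic Template::set() call with SingleAttribute (the same calls ImageTemplate uses later in this commit) is accessible here; the attribute names are illustrative:

    DatastoreTemplate * tmpl = new DatastoreTemplate;

    // Illustrative attributes; normally these come from the user's template.
    tmpl->set(new SingleAttribute("DS_MAD", "fs"));
    tmpl->set(new SingleAttribute("TM_MAD", "shared"));

    // tmpl is then handed to DatastorePool::allocate(), which is expected to
    // take ownership of it.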
|
@ -31,7 +31,7 @@ public:
|
||||
~GroupPool(){};
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/* Constants r DB management */
|
||||
/* Constants for DB management */
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
|
@ -44,10 +44,8 @@ public:
|
||||
int seq,
|
||||
int hid,
|
||||
const string& hostname,
|
||||
const string& vm_dir,
|
||||
const string& vmm,
|
||||
const string& vnm,
|
||||
const string& tm);
|
||||
const string& vnm);
|
||||
|
||||
~History(){};
|
||||
|
||||
@ -87,13 +85,10 @@ private:
|
||||
int seq;
|
||||
|
||||
string hostname;
|
||||
string vm_dir;
|
||||
|
||||
int hid;
|
||||
|
||||
string vmm_mad_name;
|
||||
string vnm_mad_name;
|
||||
string tm_mad_name;
|
||||
|
||||
time_t stime;
|
||||
time_t etime;
|
||||
@ -109,13 +104,15 @@ private:
|
||||
|
||||
MigrationReason reason;
|
||||
|
||||
//Non-persistent history fields
|
||||
string vm_lhome;
|
||||
// -------------------------------------------------------------------------
|
||||
// Non-persistent history fields
|
||||
// -------------------------------------------------------------------------
|
||||
// Local paths
|
||||
string transfer_file;
|
||||
string deployment_file;
|
||||
string context_file;
|
||||
|
||||
string vm_rhome;
|
||||
// Remote paths
|
||||
string checkpoint_file;
|
||||
string rdeployment_file;
|
||||
|
||||
|
@ -20,13 +20,14 @@
|
||||
#include "PoolSQL.h"
|
||||
#include "HostTemplate.h"
|
||||
#include "HostShare.h"
|
||||
#include "Clusterable.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
/**
|
||||
* The Host class.
|
||||
*/
|
||||
class Host : public PoolObjectSQL
|
||||
class Host : public PoolObjectSQL, public Clusterable
|
||||
{
|
||||
public:
|
||||
|
||||
@ -139,15 +140,6 @@ public:
|
||||
return vnm_mad_name;
|
||||
};
|
||||
|
||||
/**
|
||||
* Retrives TM mad name
|
||||
* @return string tm mad name
|
||||
*/
|
||||
const string& get_tm_mad() const
|
||||
{
|
||||
return tm_mad_name;
|
||||
};
|
||||
|
||||
/**
|
||||
* Retrives IM mad name
|
||||
* @return string im mad name
|
||||
@ -289,7 +281,7 @@ public:
|
||||
/**
|
||||
* Factory method for host templates
|
||||
*/
|
||||
Template * get_new_template()
|
||||
Template * get_new_template() const
|
||||
{
|
||||
return new HostTemplate;
|
||||
}
|
||||
@ -325,11 +317,6 @@ private:
|
||||
*/
|
||||
string vnm_mad_name;
|
||||
|
||||
/**
|
||||
* Name of the TM driver used to transfer file to and from this host
|
||||
*/
|
||||
string tm_mad_name;
|
||||
|
||||
/**
|
||||
* If Host State= MONITORED last time it got fully monitored or 1 Jan 1970
|
||||
* Host State = MONITORING last time it got a signal to be monitored
|
||||
@ -348,12 +335,13 @@ private:
|
||||
// Constructor
|
||||
// *************************************************************************
|
||||
|
||||
Host(int id=-1,
|
||||
const string& hostname="",
|
||||
const string& im_mad_name="",
|
||||
const string& vmm_mad_name="",
|
||||
const string& vnm_mad_name="",
|
||||
const string& tm_mad_name="");
|
||||
Host(int id,
|
||||
const string& hostname,
|
||||
const string& im_mad_name,
|
||||
const string& vmm_mad_name,
|
||||
const string& vnm_mad_name,
|
||||
int cluster_id,
|
||||
const string& cluster_name);
|
||||
|
||||
virtual ~Host();
|
||||
|
||||
|
@ -53,7 +53,8 @@ public:
|
||||
const string& im_mad_name,
|
||||
const string& vmm_mad_name,
|
||||
const string& vnm_mad_name,
|
||||
const string& tm_mad_name,
|
||||
int cluster_id,
|
||||
const string& cluster_name,
|
||||
string& error_str);
|
||||
|
||||
/**
|
||||
@ -203,7 +204,7 @@ private:
|
||||
*/
|
||||
PoolObjectSQL * create()
|
||||
{
|
||||
return new Host;
|
||||
return new Host(-1,"","","","",-1,"");
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -39,6 +39,22 @@ public:
|
||||
DATABLOCK = 2 /** < User persistent data device */
|
||||
};
|
||||
|
||||
/**
|
||||
* Return the string representation of an ImageType
|
||||
* @param ob the type
|
||||
* @return the string
|
||||
*/
|
||||
static string type_to_str(ImageType ob)
|
||||
{
|
||||
switch (ob)
|
||||
{
|
||||
case OS: return "OS" ; break;
|
||||
case CDROM: return "CDROM" ; break;
|
||||
case DATABLOCK: return "DATABLOCK" ; break;
|
||||
default: return "";
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Image State
|
||||
*/
|
||||
@ -199,6 +215,17 @@ public:
|
||||
return (group_u == 1 || other_u == 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the image is being used as the target of a save_as operation
|
||||
* @return true if the image will be used to save an existing image.
|
||||
*/
|
||||
bool isSaving()
|
||||
{
|
||||
ImageTemplate * it = static_cast<ImageTemplate *>(obj_template);
|
||||
|
||||
return it->is_saving();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set permissions for the Image. Extends the PoolSQLObject method
|
||||
* by checking the persistent state of the image.
|
||||
@ -291,11 +318,27 @@ public:
|
||||
/**
|
||||
* Factory method for image templates
|
||||
*/
|
||||
Template * get_new_template()
|
||||
Template * get_new_template() const
|
||||
{
|
||||
return new ImageTemplate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the Datastore ID
|
||||
*/
|
||||
int get_ds_id() const
|
||||
{
|
||||
return ds_id;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the Datastore ID
|
||||
*/
|
||||
const string& get_ds_name() const
|
||||
{
|
||||
return ds_name;
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
@ -353,6 +396,16 @@ private:
|
||||
*/
|
||||
int running_vms;
|
||||
|
||||
/**
|
||||
* Datastore ID
|
||||
*/
|
||||
int ds_id;
|
||||
|
||||
/**
|
||||
* Datastore name
|
||||
*/
|
||||
string ds_name;
|
||||
|
||||
// *************************************************************************
|
||||
// DataBase implementation (Private)
|
||||
// *************************************************************************
|
||||
|
@ -82,60 +82,26 @@ public:
|
||||
/**
|
||||
* Try to acquire an image from the repository for a VM.
|
||||
* @param image_id id of image
|
||||
* @param error string describing the error
|
||||
* @return pointer to the image or 0 if could not be acquired
|
||||
*/
|
||||
Image * acquire_image(int image_id);
|
||||
Image * acquire_image(int image_id, string& error);
|
||||
|
||||
/**
|
||||
* Try to acquire an image from the repository for a VM.
|
||||
* @param name of the image
|
||||
* @param id of owner
|
||||
* @param error string describing the error
|
||||
* @return pointer to the image or 0 if could not be acquired
|
||||
*/
|
||||
Image * acquire_image(const string& name, int uid);
|
||||
Image * acquire_image(const string& name, int uid, string& error);
|
||||
|
||||
/**
|
||||
* Releases an image and triggers any needed operations in the repo
|
||||
* @param iid image id of the image to be released
|
||||
* @param disk_path base path for disk location
|
||||
* @param disk number for this image in the VM
|
||||
* @param saveid id of image to save the current image
|
||||
* @param failed the associated VM releasing the images is FAILED
|
||||
*/
|
||||
void release_image(const string& iid,
|
||||
const string& disk_path,
|
||||
int disk_num,
|
||||
const string& saveid)
|
||||
{
|
||||
int image_id;
|
||||
istringstream iss;
|
||||
|
||||
iss.str(iid);
|
||||
iss >> image_id;
|
||||
|
||||
release_image(image_id, disk_path, disk_num, saveid);
|
||||
};
|
||||
|
||||
/**
|
||||
* Releases an image and triggers any needed operations in the repo
|
||||
* @param iid image id of the image to be released
|
||||
* @param disk_path base path for disk location
|
||||
* @param disk number for this image in the VM
|
||||
* @param saveid id of image to save the current image
|
||||
*/
|
||||
void release_image(int iid,
|
||||
const string& disk_path,
|
||||
int disk_num,
|
||||
const string& saveid);
|
||||
|
||||
/**
|
||||
* Moves a VM disk to the Image Repository
|
||||
* @param disk_path base path for disk location
|
||||
* @param disk number for this image in the VM
|
||||
* @param saveid id of image to save the current image
|
||||
*/
|
||||
void disk_to_image(const string& disk_path,
|
||||
int disk_num,
|
||||
const string& save_id);
|
||||
void release_image(int iid, bool failed);
|
||||
|
||||
/**
|
||||
* Enables the image
|
||||
@ -146,17 +112,18 @@ public:
|
||||
|
||||
/**
|
||||
* Adds a new image to the repository copying or creating it as needed
|
||||
* @param iid id of image
|
||||
* @param img pointer to the image
|
||||
* @param ds_data data of the associated datastore in XML format
|
||||
* @return 0 on success
|
||||
*/
|
||||
int register_image(int iid);
|
||||
int register_image(int iid, const string& ds_data);
|
||||
|
||||
/**
|
||||
* Deletes an image from the repository and the DB
|
||||
* @param iid id of image
|
||||
* @return 0 on success
|
||||
*/
|
||||
int delete_image(int iid);
|
||||
int delete_image(int iid, const string& ds_data);
|
||||
|
||||
private:
|
||||
/**
|
||||
@ -202,16 +169,14 @@ private:
|
||||
* @param action the name of the action
|
||||
* @param arg arguments for the action function
|
||||
*/
|
||||
void do_action(
|
||||
const string & action,
|
||||
void * arg);
|
||||
void do_action(const string& action, void * arg);
|
||||
|
||||
/**
|
||||
* Acquires an image updating its state.
|
||||
* @param image pointer to image, it should be locked
|
||||
* @return 0 on success
|
||||
*/
|
||||
int acquire_image(Image *img);
|
||||
int acquire_image(Image *img, string& error);
|
||||
|
||||
/**
|
||||
* Moves a file to an image in the repository
|
||||
@ -219,6 +184,15 @@ private:
|
||||
* @param source path of the disk file
|
||||
*/
|
||||
void move_image(Image *img, const string& source);
|
||||
|
||||
/**
|
||||
* Formats an XML message for the MAD
|
||||
*
|
||||
* @param img_data Image XML representation
|
||||
* @param ds_data Datastore XML representation
|
||||
* @return the XML message
|
||||
*/
|
||||
string * format_message(const string& img_data, const string& ds_data);
|
||||
};
|
||||
|
||||
#endif /*IMAGE_MANAGER_H*/
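Because register_image() and delete_image() now receive the datastore description as an XML string, the caller is expected to serialize the datastore first. A hedged sketch of such a call site (dspool and imagem assumed to come from Nebula::instance()):

    string      ds_data;
    Datastore * ds = dspool->get(ds_id, true);

    if ( ds == 0 )
    {
        return -1;                         // datastore no longer exists
    }

    ds->to_xml(ds_data);                   // XML passed down to the DS driver
    ds->unlock();

    return imagem->register_image(image_id, ds_data);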
|
||||
|
@ -70,7 +70,12 @@ private:
|
||||
*/
|
||||
//Template driver_conf;
|
||||
|
||||
void cp(int oid, const string& source) const;
|
||||
/**
|
||||
* Sends a copy request to the MAD
|
||||
* @param oid the image id.
|
||||
* @param drv_msg xml data for the mad operation.
|
||||
*/
|
||||
void cp(int oid, const string& drv_msg) const;
|
||||
|
||||
/**
|
||||
* Sends a move request to the MAD: "MV IMAGE_ID SRC_PATH DST_PATH"
|
||||
@ -84,18 +89,15 @@ private:
|
||||
/**
|
||||
* Sends a make filesystem request to the MAD: "MKFS IMAGE_ID PATH SIZE_MB"
|
||||
* @param oid the image id.
|
||||
* @param fs type
|
||||
* @param size_mb of the image to be created
|
||||
* @param drv_msg xml data for the mad operation.
|
||||
*/
|
||||
void mkfs(int oid,
|
||||
const string& fs,
|
||||
int size_mb) const;
|
||||
void mkfs(int oid, const string& drv_msg) const;
|
||||
/**
|
||||
* Sends a delete request to the MAD: "DELETE IMAGE_ID PATH"
|
||||
* @param oid the image id.
|
||||
* @param destination is the path to the image to be removed
|
||||
* @param drv_msg xml data for the mad operation.
|
||||
*/
|
||||
void rm(int oid, const string& destination) const;
|
||||
void rm(int oid, const string& drv_msg) const;
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -52,6 +52,9 @@ public:
|
||||
* @param uname name of the user
|
||||
* @param gname name of the group
|
||||
* @param img_template template associated with the image
|
||||
* @param ds_id the id of the datastore
|
||||
* @param ds_name the name of the datastore
|
||||
* @param ds_data the datastore data
|
||||
* @param oid the id assigned to the Image
|
||||
* @param error_str Returns the error reason, if any
|
||||
* @return the oid assigned to the object,
|
||||
@ -64,6 +67,9 @@ public:
|
||||
const string& uname,
|
||||
const string& gname,
|
||||
ImageTemplate * img_template,
|
||||
int ds_id,
|
||||
const string& ds_name,
|
||||
const string& ds_data,
|
||||
int * oid,
|
||||
string& error_str);
|
||||
|
||||
@ -133,6 +139,7 @@ public:
|
||||
* @param img_type will be set to the used image's type
|
||||
* @param uid of VM owner (to look for the image id within its images)
|
||||
* @param image_id on success returns the acquired image id
|
||||
* @param error_str string describing the error
|
||||
* @return 0 on success,
|
||||
* -1 error,
|
||||
* -2 not using the pool,
|
||||
@ -142,7 +149,8 @@ public:
|
||||
int * index,
|
||||
Image::ImageType * img_type,
|
||||
int uid,
|
||||
int& image_id);
|
||||
int& image_id,
|
||||
string& error_str);
|
||||
/**
|
||||
* Generates an Authorization token for the DISK attribute
|
||||
* @param disk the disk to be authorized
|
||||
|
@ -41,11 +41,36 @@ public:
|
||||
return Template::check(rs_attr, restricted_attributes);
|
||||
};
|
||||
|
||||
bool is_saving()
|
||||
{
|
||||
string saving;
|
||||
|
||||
get(saving_attribute, saving);
|
||||
|
||||
return (saving.empty() == false);
|
||||
}
|
||||
|
||||
void set_saving()
|
||||
{
|
||||
SingleAttribute * attr= new SingleAttribute(saving_attribute, "YES");
|
||||
|
||||
erase(saving_attribute);
|
||||
|
||||
set(attr);
|
||||
}
|
||||
|
||||
void unset_saving()
|
||||
{
|
||||
erase(saving_attribute);
|
||||
}
|
||||
|
||||
private:
|
||||
friend class ImagePool;
|
||||
|
||||
static vector<string> restricted_attributes;
|
||||
|
||||
static string saving_attribute;
|
||||
|
||||
/**
|
||||
* Stores the attributes as restricted, these attributes will be used in
|
||||
* ImageTemplate::check
|
||||
|
@ -27,6 +27,8 @@
|
||||
#include "UserPool.h"
|
||||
#include "VMTemplatePool.h"
|
||||
#include "GroupPool.h"
|
||||
#include "DatastorePool.h"
|
||||
#include "ClusterPool.h"
|
||||
|
||||
#include "VirtualMachineManager.h"
|
||||
#include "LifeCycleManager.h"
|
||||
@ -91,6 +93,16 @@ public:
|
||||
return tpool;
|
||||
};
|
||||
|
||||
DatastorePool * get_dspool()
|
||||
{
|
||||
return dspool;
|
||||
};
|
||||
|
||||
ClusterPool * get_clpool()
|
||||
{
|
||||
return clpool;
|
||||
};
|
||||
|
||||
// --------------------------------------------------------------
|
||||
// Manager Accessors
|
||||
// --------------------------------------------------------------
|
||||
@ -187,9 +199,8 @@ public:
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the path where the OpenNebula DB and the VM local directories
|
||||
* are stored. When ONE_LOCATION is defined this path points to
|
||||
* $ONE_LOCATION/var, otherwise it is /var/lib/one.
|
||||
* Returns the default var location. When ONE_LOCATION is defined this path
|
||||
* points to $ONE_LOCATION/var, otherwise it is /var/lib/one.
|
||||
* @return the log location.
|
||||
*/
|
||||
const string& get_var_location()
|
||||
@ -197,6 +208,40 @@ public:
|
||||
return var_location;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the default datastores location. When ONE_LOCATION is defined this
|
||||
* path points to $ONE_LOCATION/var/datastores, otherwise it is /var/lib/one/datastores.
|
||||
* @return the datastores location.
|
||||
*/
|
||||
const string& get_ds_location()
|
||||
{
|
||||
return ds_location;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the Transfer Manager for the system datastore
|
||||
* @return the tm name.
|
||||
*/
|
||||
string get_system_ds_tm_mad()
|
||||
{
|
||||
Datastore * ds;
|
||||
string tm_mad = "";
|
||||
|
||||
ds = dspool->get(DatastorePool::SYSTEM_DS_ID, true);
|
||||
|
||||
if ( ds == 0 )
|
||||
{
|
||||
NebulaLog::log("DaS", Log::ERROR, "Can not get system datastore");
|
||||
return tm_mad;
|
||||
}
|
||||
|
||||
tm_mad = ds->get_tm_mad();
|
||||
|
||||
ds->unlock();
|
||||
|
||||
return tm_mad;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the path of the log file for a VM, depending where OpenNebula is
|
||||
* installed,
|
||||
@ -254,8 +299,8 @@ private:
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
Nebula():nebula_configuration(0),db(0),vmpool(0),hpool(0),vnpool(0),
|
||||
upool(0),ipool(0),gpool(0),tpool(0),lcm(0),vmm(0),im(0),tm(0),
|
||||
dm(0),rm(0),hm(0),authm(0),aclm(0),imagem(0)
|
||||
upool(0),ipool(0),gpool(0),tpool(0),dspool(0),clpool(0),
|
||||
lcm(0),vmm(0),im(0),tm(0),dm(0),rm(0),hm(0),authm(0),aclm(0),imagem(0)
|
||||
{
|
||||
const char * nl = getenv("ONE_LOCATION");
|
||||
|
||||
@ -268,6 +313,7 @@ private:
|
||||
log_location = "/var/log/one/";
|
||||
var_location = "/var/lib/one/";
|
||||
remotes_location = "/var/lib/one/remotes/";
|
||||
ds_location = "/var/lib/one/datastores/";
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -283,6 +329,7 @@ private:
|
||||
log_location = nebula_location + "var/";
|
||||
var_location = nebula_location + "var/";
|
||||
remotes_location = nebula_location + "var/remotes/";
|
||||
ds_location = nebula_location + "var/datastores/";
|
||||
}
|
||||
};
|
||||
|
||||
@ -323,6 +370,16 @@ private:
|
||||
delete tpool;
|
||||
}
|
||||
|
||||
if ( dspool != 0)
|
||||
{
|
||||
delete dspool;
|
||||
}
|
||||
|
||||
if ( clpool != 0)
|
||||
{
|
||||
delete clpool;
|
||||
}
|
||||
|
||||
if ( vmm != 0)
|
||||
{
|
||||
delete vmm;
|
||||
@ -400,6 +457,7 @@ private:
|
||||
string var_location;
|
||||
string hook_location;
|
||||
string remotes_location;
|
||||
string ds_location;
|
||||
|
||||
string hostname;
|
||||
|
||||
@ -421,6 +479,8 @@ private:
|
||||
ImagePool * ipool;
|
||||
GroupPool * gpool;
|
||||
VMTemplatePool * tpool;
|
||||
DatastorePool * dspool;
|
||||
ClusterPool * clpool;
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Nebula Managers
|
||||
|
@ -28,7 +28,7 @@ using namespace std;
|
||||
*/
|
||||
class ObjectCollection
|
||||
{
|
||||
protected:
|
||||
public:
|
||||
|
||||
ObjectCollection(const string& _collection_name)
|
||||
:collection_name(_collection_name){};
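The constructor is promoted from protected to public so that ObjectCollection can now be used by composition as well as by inheritance. Both patterns appear in this commit; a condensed illustration:

    // Composition (Cluster): three independent ID sets as plain members
    ObjectCollection hosts("HOSTS");
    ObjectCollection datastores("DATASTORES");
    ObjectCollection vnets("VNETS");

    // Inheritance (Datastore): the object itself is the image ID set
    // class Datastore : public PoolObjectSQL, ObjectCollection, public Clusterable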
|
||||
|
@ -49,29 +49,33 @@ public:
|
||||
*/
|
||||
enum ObjectType
|
||||
{
|
||||
VM = 0x0000001000000000LL,
|
||||
HOST = 0x0000002000000000LL,
|
||||
NET = 0x0000004000000000LL,
|
||||
IMAGE = 0x0000008000000000LL,
|
||||
USER = 0x0000010000000000LL,
|
||||
TEMPLATE = 0x0000020000000000LL,
|
||||
GROUP = 0x0000040000000000LL,
|
||||
ACL = 0x0000080000000000LL
|
||||
VM = 0x0000001000000000LL,
|
||||
HOST = 0x0000002000000000LL,
|
||||
NET = 0x0000004000000000LL,
|
||||
IMAGE = 0x0000008000000000LL,
|
||||
USER = 0x0000010000000000LL,
|
||||
TEMPLATE = 0x0000020000000000LL,
|
||||
GROUP = 0x0000040000000000LL,
|
||||
ACL = 0x0000080000000000LL,
|
||||
DATASTORE = 0x0000100000000000LL,
|
||||
CLUSTER = 0x0000200000000000LL
|
||||
};
|
||||
|
||||
static string type_to_str(ObjectType ob)
|
||||
{
|
||||
switch (ob)
|
||||
{
|
||||
case VM: return "VM" ; break;
|
||||
case HOST: return "HOST" ; break;
|
||||
case NET: return "NET" ; break;
|
||||
case IMAGE: return "IMAGE" ; break;
|
||||
case USER: return "USER" ; break;
|
||||
case TEMPLATE: return "TEMPLATE" ; break;
|
||||
case GROUP: return "GROUP" ; break;
|
||||
case ACL: return "ACL" ; break;
|
||||
default: return "";
|
||||
case VM: return "VM" ; break;
|
||||
case HOST: return "HOST" ; break;
|
||||
case NET: return "NET" ; break;
|
||||
case IMAGE: return "IMAGE" ; break;
|
||||
case USER: return "USER" ; break;
|
||||
case TEMPLATE: return "TEMPLATE" ; break;
|
||||
case GROUP: return "GROUP" ; break;
|
||||
case ACL: return "ACL" ; break;
|
||||
case DATASTORE: return "DATASTORE" ; break;
|
||||
case CLUSTER: return "CLUSTER" ; break;
|
||||
default: return "";
|
||||
}
|
||||
};
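Each ObjectType value occupies a distinct bit in the upper half of a 64-bit word, so the new DATASTORE and CLUSTER entries can be combined into the same bit masks used for the existing types (for instance by ACL rules). A small illustration:

    long long mask = PoolObjectSQL::DATASTORE | PoolObjectSQL::CLUSTER;

    bool has_ds = ( mask & PoolObjectSQL::DATASTORE ) != 0;              // true

    string name = PoolObjectSQL::type_to_str(PoolObjectSQL::DATASTORE);  // "DATASTORE"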
|
||||
|
||||
@ -359,7 +363,7 @@ public:
|
||||
* by classes that uses templates
|
||||
* @return a new template
|
||||
*/
|
||||
virtual Template * get_new_template()
|
||||
virtual Template * get_new_template() const
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@ -369,7 +373,7 @@ public:
|
||||
* after calling this method
|
||||
* @param tmpl string representation of the template
|
||||
*/
|
||||
int replace_template(const string& tmpl_str, string& error);
|
||||
virtual int replace_template(const string& tmpl_str, string& error);
|
||||
|
||||
|
||||
/**
|
||||
|
@ -219,6 +219,26 @@ protected:
|
||||
* @return string for logging
|
||||
*/
|
||||
string allocate_error (PoolObjectSQL::ObjectType obj, const string& error);
|
||||
|
||||
/**
|
||||
* Locks the requested object, gets information, and unlocks it
|
||||
*
|
||||
* @param pool object pool
|
||||
* @param id of the object
|
||||
* @param type of the object
|
||||
* @param att the specific request attributes
|
||||
*
|
||||
* @param perms returns the object's permissions
|
||||
* @param name returns the object's name
|
||||
*
|
||||
* @return 0 on success, -1 otherwise
|
||||
*/
|
||||
int get_info (PoolSQL * pool,
|
||||
int id,
|
||||
PoolObjectSQL::ObjectType type,
|
||||
RequestAttributes& att,
|
||||
PoolObjectAuth& perms,
|
||||
string& name);
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include "VirtualNetworkTemplate.h"
|
||||
#include "ImageTemplate.h"
|
||||
#include "VirtualMachineTemplate.h"
|
||||
#include "DatastoreTemplate.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
@ -40,17 +41,21 @@ protected:
|
||||
:Request(method_name,xml_args,help), do_template(dt)
|
||||
{
|
||||
auth_op = AuthRequest::CREATE;
|
||||
|
||||
Nebula& nd = Nebula::instance();
|
||||
clpool = nd.get_clpool();
|
||||
};
|
||||
|
||||
~RequestManagerAllocate(){};
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
void request_execute(xmlrpc_c::paramList const& _paramList,
|
||||
virtual void request_execute(xmlrpc_c::paramList const& _paramList,
|
||||
RequestAttributes& att);
|
||||
|
||||
virtual bool allocate_authorization(Template * obj_template,
|
||||
RequestAttributes& att);
|
||||
virtual bool allocate_authorization(Template * obj_template,
|
||||
RequestAttributes& att,
|
||||
PoolObjectAuth * cluster_perms);
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
@ -60,7 +65,35 @@ protected:
|
||||
Template * tmpl,
|
||||
int& id,
|
||||
string& error_str,
|
||||
RequestAttributes& att) = 0;
|
||||
RequestAttributes& att)
|
||||
{
|
||||
return -1;
|
||||
};
|
||||
|
||||
virtual int pool_allocate(xmlrpc_c::paramList const& _paramList,
|
||||
Template * tmpl,
|
||||
int& id,
|
||||
string& error_str,
|
||||
RequestAttributes& att,
|
||||
int cluster_id,
|
||||
const string& cluster_name)
|
||||
{
|
||||
return pool_allocate(_paramList, tmpl, id, error_str, att);
|
||||
};
|
||||
|
||||
virtual int get_cluster_id(xmlrpc_c::paramList const& paramList)
|
||||
{
|
||||
return ClusterPool::NONE_CLUSTER_ID;
|
||||
};
|
||||
|
||||
virtual int add_to_cluster(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return -1;
|
||||
};
|
||||
|
||||
protected:
|
||||
ClusterPool * clpool;
|
||||
|
||||
private:
|
||||
|
||||
bool do_template;
|
||||
@ -85,6 +118,7 @@ public:
|
||||
};
|
||||
|
||||
~VirtualMachineAllocate(){};
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
||||
Template * get_object_template()
|
||||
@ -98,8 +132,9 @@ public:
|
||||
string& error_str,
|
||||
RequestAttributes& att);
|
||||
|
||||
bool allocate_authorization(Template * obj_template,
|
||||
RequestAttributes& att);
|
||||
bool allocate_authorization(Template * obj_template,
|
||||
RequestAttributes& att,
|
||||
PoolObjectAuth * cluster_perms);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
@ -111,7 +146,7 @@ public:
|
||||
VirtualNetworkAllocate():
|
||||
RequestManagerAllocate("VirtualNetworkAllocate",
|
||||
"Allocates a new virtual network",
|
||||
"A:ss",
|
||||
"A:ssi",
|
||||
true)
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
@ -128,11 +163,23 @@ public:
|
||||
return new VirtualNetworkTemplate;
|
||||
};
|
||||
|
||||
int pool_allocate(xmlrpc_c::paramList const& _paramList,
|
||||
int pool_allocate(xmlrpc_c::paramList const& _paramList,
|
||||
Template * tmpl,
|
||||
int& id,
|
||||
int& id,
|
||||
string& error_str,
|
||||
RequestAttributes& att);
|
||||
RequestAttributes& att,
|
||||
int cluster_id,
|
||||
const string& cluster_name);
|
||||
|
||||
int get_cluster_id(xmlrpc_c::paramList const& paramList)
|
||||
{
|
||||
return xmlrpc_c::value_int(paramList.getInt(2));
|
||||
};
|
||||
|
||||
int add_to_cluster(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->add_vnet(id, error_msg);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
@ -144,9 +191,9 @@ public:
|
||||
ImageAllocate():
|
||||
RequestManagerAllocate("ImageAllocate",
|
||||
"Allocates a new image",
|
||||
"A:ss",
|
||||
"A:ssi",
|
||||
true)
|
||||
{
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_ipool();
|
||||
auth_object = PoolObjectSQL::IMAGE;
|
||||
@ -156,16 +203,12 @@ public:
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
||||
Template * get_object_template()
|
||||
{
|
||||
return new ImageTemplate;
|
||||
};
|
||||
void request_execute(xmlrpc_c::paramList const& _paramList,
|
||||
RequestAttributes& att);
|
||||
|
||||
int pool_allocate(xmlrpc_c::paramList const& _paramList,
|
||||
Template * tmpl,
|
||||
int& id,
|
||||
string& error_str,
|
||||
RequestAttributes& att);
|
||||
bool allocate_authorization(Template * obj_template,
|
||||
RequestAttributes& att,
|
||||
PoolObjectAuth * cluster_perms);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
@ -210,7 +253,7 @@ public:
|
||||
HostAllocate():
|
||||
RequestManagerAllocate("HostAllocate",
|
||||
"Allocates a new host",
|
||||
"A:ssssss",
|
||||
"A:sssssi",
|
||||
false)
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
@ -220,11 +263,25 @@ public:
|
||||
|
||||
~HostAllocate(){};
|
||||
|
||||
int pool_allocate(xmlrpc_c::paramList const& _paramList,
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
||||
int pool_allocate(xmlrpc_c::paramList const& _paramList,
|
||||
Template * tmpl,
|
||||
int& id,
|
||||
int& id,
|
||||
string& error_str,
|
||||
RequestAttributes& att);
|
||||
RequestAttributes& att,
|
||||
int cluster_id,
|
||||
const string& cluster_name);
|
||||
|
||||
int get_cluster_id(xmlrpc_c::paramList const& paramList)
|
||||
{
|
||||
return xmlrpc_c::value_int(paramList.getInt(5));
|
||||
};
|
||||
|
||||
int add_to_cluster(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->add_host(id, error_msg);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
@ -279,6 +336,77 @@ public:
|
||||
RequestAttributes& att);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class DatastoreAllocate: public RequestManagerAllocate
|
||||
{
|
||||
public:
|
||||
DatastoreAllocate():
|
||||
RequestManagerAllocate("DatastoreAllocate",
|
||||
"Allocates a new Datastore",
|
||||
"A:ssi",
|
||||
true)
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_dspool();
|
||||
auth_object = PoolObjectSQL::DATASTORE;
|
||||
};
|
||||
|
||||
~DatastoreAllocate(){};
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
Template * get_object_template()
|
||||
{
|
||||
return new DatastoreTemplate;
|
||||
};
|
||||
|
||||
int pool_allocate(xmlrpc_c::paramList const& _paramList,
|
||||
Template * tmpl,
|
||||
int& id,
|
||||
string& error_str,
|
||||
RequestAttributes& att,
|
||||
int cluster_id,
|
||||
const string& cluster_name);
|
||||
|
||||
int get_cluster_id(xmlrpc_c::paramList const& paramList)
|
||||
{
|
||||
return xmlrpc_c::value_int(paramList.getInt(2));
|
||||
};
|
||||
|
||||
int add_to_cluster(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->add_datastore(id, error_msg);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterAllocate: public RequestManagerAllocate
|
||||
{
|
||||
public:
|
||||
ClusterAllocate():
|
||||
RequestManagerAllocate("ClusterAllocate",
|
||||
"Allocates a new cluster",
|
||||
"A:ss",
|
||||
false)
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_clpool();
|
||||
auth_object = PoolObjectSQL::CLUSTER;
|
||||
};
|
||||
|
||||
~ClusterAllocate(){};
|
||||
|
||||
int pool_allocate(xmlrpc_c::paramList const& _paramList,
|
||||
Template * tmpl,
|
||||
int& id,
|
||||
string& error_str,
|
||||
RequestAttributes& att);
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -81,7 +81,6 @@ public:
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
|
||||
class VirtualNetworkChmod: public RequestManagerChmod
|
||||
{
|
||||
public:
|
||||
@ -117,6 +116,25 @@ public:
|
||||
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class DatastoreChmod: public RequestManagerChmod
|
||||
{
|
||||
public:
|
||||
DatastoreChmod():
|
||||
RequestManagerChmod("DatastoreChmod",
|
||||
"Changes permission bits of a datastore")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_dspool();
|
||||
auth_object = PoolObjectSQL::DATASTORE;
|
||||
};
|
||||
|
||||
~DatastoreChmod(){};
|
||||
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -52,13 +52,6 @@ protected:
|
||||
|
||||
virtual void request_execute(xmlrpc_c::paramList const& _paramList,
|
||||
RequestAttributes& att);
|
||||
|
||||
int get_info (PoolSQL * pool,
|
||||
int id,
|
||||
PoolObjectSQL::ObjectType type,
|
||||
RequestAttributes& att,
|
||||
PoolObjectAuth& perms,
|
||||
string& name);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
@ -160,6 +153,25 @@ public:
|
||||
RequestAttributes& att);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class DatastoreChown: public RequestManagerChown
|
||||
{
|
||||
public:
|
||||
DatastoreChown():
|
||||
RequestManagerChown("Datastore",
|
||||
"Changes ownership of a datastore")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_dspool();
|
||||
auth_object = PoolObjectSQL::DATASTORE;
|
||||
};
|
||||
|
||||
~DatastoreChown(){};
|
||||
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
329
include/RequestManagerCluster.h
Normal file
@ -0,0 +1,329 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#ifndef REQUEST_MANAGER_CLUSTER_H
#define REQUEST_MANAGER_CLUSTER_H

#include "Request.h"
#include "Nebula.h"

using namespace std;

/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */

class RequestManagerCluster: public Request
{
protected:
    RequestManagerCluster(const string& method_name,
                          const string& help,
                          const string& params)
        :Request(method_name,params,help)
    {
        Nebula& nd = Nebula::instance();
        clpool = nd.get_clpool();
        hpool  = nd.get_hpool();
        dspool = nd.get_dspool();
        vnpool = nd.get_vnpool();

        auth_object = PoolObjectSQL::CLUSTER;
        auth_op     = AuthRequest::ADMIN;
    };

    ~RequestManagerCluster(){};

    /* --------------------------------------------------------------------- */

    ClusterPool *        clpool;
    HostPool *           hpool;
    DatastorePool *      dspool;
    VirtualNetworkPool * vnpool;

    /* --------------------------------------------------------------------- */

    virtual void request_execute(xmlrpc_c::paramList const& paramList,
                                 RequestAttributes& att) = 0;

    void add_generic(
            int                       cluster_id,
            int                       object_id,
            RequestAttributes&        att,
            PoolSQL *                 pool,
            PoolObjectSQL::ObjectType type);

    virtual int add_object(Cluster* cluster, int id, string& error_msg) = 0;

    virtual int del_object(Cluster* cluster, int id, string& error_msg) = 0;

    virtual void get(int oid, bool lock, PoolObjectSQL ** object, Clusterable ** cluster_obj) = 0;
};
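
The add_object()/del_object()/get() hooks let add_generic() move any clusterable object (host, datastore or virtual network) without knowing its concrete type. What follows is only a sketch of how such a move could look, assuming the pools declared above and a Clusterable setter (set_cluster()) that is not shown in this excerpt; the real add_generic() also performs authorization and detailed error reporting.

// Sketch only (hypothetical helper): move object_id into cluster_id using
// the virtual hooks declared above. Authorization and errors are omitted.
void example_add_generic(int cluster_id, int object_id, PoolSQL * pool)
{
    string error;

    PoolObjectSQL * object      = 0;
    Clusterable   * cluster_obj = 0;

    get(object_id, true, &object, &cluster_obj); // subclass resolves the type

    if ( object == 0 )
    {
        return;
    }

    int old_cluster_id = cluster_obj->get_cluster_id();

    cluster_obj->set_cluster(cluster_id, "new-cluster"); // assumed setter

    pool->update(object);
    object->unlock();

    Cluster * cluster;

    if ( (cluster = clpool->get(old_cluster_id, true)) != 0 )
    {
        del_object(cluster, object_id, error);  // remove from the old set
        clpool->update(cluster);
        cluster->unlock();
    }

    if ( (cluster = clpool->get(cluster_id, true)) != 0 )
    {
        add_object(cluster, object_id, error);  // add to the new set
        clpool->update(cluster);
        cluster->unlock();
    }
}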
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class RequestManagerClusterHost : public RequestManagerCluster
|
||||
{
|
||||
public:
|
||||
RequestManagerClusterHost(
|
||||
const string& method_name,
|
||||
const string& help,
|
||||
const string& params):
|
||||
RequestManagerCluster(method_name, help, params){};
|
||||
|
||||
~RequestManagerClusterHost(){};
|
||||
|
||||
virtual int add_object(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->add_host(id, error_msg);
|
||||
};
|
||||
|
||||
virtual int del_object(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->del_host(id, error_msg);
|
||||
};
|
||||
|
||||
virtual void get(int oid, bool lock, PoolObjectSQL ** object, Clusterable ** cluster_obj)
|
||||
{
|
||||
Host * host = hpool->get(oid, lock);
|
||||
|
||||
*object = static_cast<PoolObjectSQL *>(host);
|
||||
*cluster_obj = static_cast<Clusterable *>(host);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterAddHost : public RequestManagerClusterHost
|
||||
{
|
||||
public:
|
||||
ClusterAddHost():
|
||||
RequestManagerClusterHost("ClusterAddHost",
|
||||
"Adds a host to the cluster",
|
||||
"A:sii"){};
|
||||
|
||||
~ClusterAddHost(){};
|
||||
|
||||
void request_execute(xmlrpc_c::paramList const& paramList,
|
||||
RequestAttributes& att)
|
||||
{
|
||||
int cluster_id = xmlrpc_c::value_int(paramList.getInt(1));
|
||||
int object_id = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
|
||||
return add_generic(cluster_id, object_id, att,
|
||||
hpool, PoolObjectSQL::HOST);
|
||||
}
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterDelHost : public RequestManagerClusterHost
|
||||
{
|
||||
public:
|
||||
ClusterDelHost():
|
||||
RequestManagerClusterHost("ClusterDelHost",
|
||||
"Deletes a host from its cluster",
|
||||
"A:sii"){};
|
||||
|
||||
~ClusterDelHost(){};
|
||||
|
||||
void request_execute(xmlrpc_c::paramList const& paramList,
|
||||
RequestAttributes& att)
|
||||
{
|
||||
// First param is ignored, as objects can be assigned to only
|
||||
// one cluster
|
||||
int cluster_id = ClusterPool::NONE_CLUSTER_ID;
|
||||
int object_id = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
|
||||
return add_generic(cluster_id, object_id, att,
|
||||
hpool, PoolObjectSQL::HOST);
|
||||
}
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class RequestManagerClusterDatastore : public RequestManagerCluster
|
||||
{
|
||||
public:
|
||||
RequestManagerClusterDatastore(
|
||||
const string& method_name,
|
||||
const string& help,
|
||||
const string& params):
|
||||
RequestManagerCluster(method_name, help, params){};
|
||||
|
||||
~RequestManagerClusterDatastore(){};
|
||||
|
||||
virtual int add_object(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->add_datastore(id, error_msg);
|
||||
};
|
||||
|
||||
virtual int del_object(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->del_datastore(id, error_msg);
|
||||
};
|
||||
|
||||
virtual void get(int oid, bool lock, PoolObjectSQL ** object, Clusterable ** cluster_obj)
|
||||
{
|
||||
Datastore * ds = dspool->get(oid, lock);
|
||||
|
||||
*object = static_cast<PoolObjectSQL *>(ds);
|
||||
*cluster_obj = static_cast<Clusterable *>(ds);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterAddDatastore : public RequestManagerClusterDatastore
|
||||
{
|
||||
public:
|
||||
ClusterAddDatastore():
|
||||
RequestManagerClusterDatastore("ClusterAddDatastore",
|
||||
"Adds a datastore to the cluster",
|
||||
"A:sii"){};
|
||||
|
||||
~ClusterAddDatastore(){};
|
||||
|
||||
void request_execute(xmlrpc_c::paramList const& paramList,
|
||||
RequestAttributes& att)
|
||||
{
|
||||
int cluster_id = xmlrpc_c::value_int(paramList.getInt(1));
|
||||
int object_id = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
|
||||
return add_generic(cluster_id, object_id, att,
|
||||
dspool, PoolObjectSQL::DATASTORE);
|
||||
}
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterDelDatastore : public RequestManagerClusterDatastore
|
||||
{
|
||||
public:
|
||||
ClusterDelDatastore():
|
||||
RequestManagerClusterDatastore("ClusterDelDatastore",
|
||||
"Deletes a datastore from its cluster",
|
||||
"A:sii"){};
|
||||
|
||||
~ClusterDelDatastore(){};
|
||||
|
||||
void request_execute(xmlrpc_c::paramList const& paramList,
|
||||
RequestAttributes& att)
|
||||
{
|
||||
// First param is ignored, as objects can be assigned to only
|
||||
// one cluster
|
||||
int cluster_id = ClusterPool::NONE_CLUSTER_ID;
|
||||
int object_id = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
|
||||
return add_generic(cluster_id, object_id, att,
|
||||
dspool, PoolObjectSQL::DATASTORE);
|
||||
}
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class RequestManagerClusterVNet : public RequestManagerCluster
|
||||
{
|
||||
public:
|
||||
|
||||
RequestManagerClusterVNet(
|
||||
const string& method_name,
|
||||
const string& help,
|
||||
const string& params):
|
||||
RequestManagerCluster(method_name, help, params){};
|
||||
|
||||
~RequestManagerClusterVNet(){};
|
||||
|
||||
virtual int add_object(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->add_vnet(id, error_msg);
|
||||
};
|
||||
|
||||
virtual int del_object(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->del_vnet(id, error_msg);
|
||||
};
|
||||
|
||||
virtual void get(int oid, bool lock, PoolObjectSQL ** object, Clusterable ** cluster_obj)
|
||||
{
|
||||
VirtualNetwork * vnet = vnpool->get(oid, lock);
|
||||
|
||||
*object = static_cast<PoolObjectSQL *>(vnet);
|
||||
*cluster_obj = static_cast<Clusterable *>(vnet);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterAddVNet : public RequestManagerClusterVNet
|
||||
{
|
||||
public:
|
||||
ClusterAddVNet():
|
||||
RequestManagerClusterVNet("ClusterAddVNet",
|
||||
"Adds a virtual network to the cluster",
|
||||
"A:sii"){};
|
||||
|
||||
~ClusterAddVNet(){};
|
||||
|
||||
void request_execute(xmlrpc_c::paramList const& paramList,
|
||||
RequestAttributes& att)
|
||||
{
|
||||
int cluster_id = xmlrpc_c::value_int(paramList.getInt(1));
|
||||
int object_id = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
|
||||
return add_generic(cluster_id, object_id, att,
|
||||
vnpool, PoolObjectSQL::NET);
|
||||
}
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterDelVNet : public RequestManagerClusterVNet
|
||||
{
|
||||
public:
|
||||
ClusterDelVNet():
|
||||
RequestManagerClusterVNet("ClusterDelVNet",
|
||||
"Deletes a virtual network from its cluster",
|
||||
"A:sii"){};
|
||||
|
||||
~ClusterDelVNet(){};
|
||||
|
||||
void request_execute(xmlrpc_c::paramList const& paramList,
|
||||
RequestAttributes& att)
|
||||
{
|
||||
// First param is ignored, as objects can be assigned to only
|
||||
// one cluster
|
||||
int cluster_id = ClusterPool::NONE_CLUSTER_ID;
|
||||
int object_id = xmlrpc_c::value_int(paramList.getInt(2));
|
||||
|
||||
return add_generic(cluster_id, object_id, att,
|
||||
vnpool, PoolObjectSQL::NET);
|
||||
}
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#endif
|
@ -35,6 +35,9 @@ protected:
|
||||
:Request(method_name,"A:si",help)
|
||||
{
|
||||
auth_op = AuthRequest::MANAGE;
|
||||
|
||||
Nebula& nd = Nebula::instance();
|
||||
clpool = nd.get_clpool();
|
||||
};
|
||||
|
||||
~RequestManagerDelete(){};
|
||||
@ -49,15 +52,20 @@ protected:

    /* -------------------------------------------------------------------- */

    virtual int drop(int oid, PoolObjectSQL * object, string& error_msg)
    virtual int drop(int oid, PoolObjectSQL * object, string& error_msg);

    virtual int get_cluster_id(PoolObjectSQL * object)
    {
        int rc = pool->drop(object, error_msg);

        object->unlock();

        return rc;
        return ClusterPool::NONE_CLUSTER_ID;
    };

    virtual int del_from_cluster(Cluster* cluster, int id, string& error_msg)
    {
        return -1;
    };

private:
    ClusterPool * clpool;
};

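The inline drop() body on the old side is being moved out of the header, and the two new hooks (get_cluster_id() and del_from_cluster()) suggest what the relocated implementation presumably adds: after dropping the object from its pool, it is also removed from the cluster it belonged to. A hedged sketch of that combined flow (hypothetical helper name, not the actual RequestManagerDelete.cc code):

// Sketch only: likely shape of the relocated drop(), combining the old
// inline body with the new cluster hooks declared above.
int example_drop(int oid, PoolObjectSQL * object, string& error_msg)
{
    int cluster_id = get_cluster_id(object); // NONE_CLUSTER_ID if not clusterable

    int rc = pool->drop(object, error_msg);

    object->unlock();

    if ( rc == 0 && cluster_id != ClusterPool::NONE_CLUSTER_ID )
    {
        Cluster * cluster = clpool->get(cluster_id, true);

        if ( cluster != 0 )
        {
            del_from_cluster(cluster, oid, error_msg); // del_host / del_vnet / del_datastore

            clpool->update(cluster);
            cluster->unlock();
        }
    }

    return rc;
}
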
@ -95,6 +103,18 @@ public:
|
||||
};
|
||||
|
||||
~VirtualNetworkDelete(){};
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
int get_cluster_id(PoolObjectSQL * object)
|
||||
{
|
||||
return static_cast<VirtualNetwork*>(object)->get_cluster_id();
|
||||
};
|
||||
|
||||
int del_from_cluster(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->del_vnet(id, error_msg);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
@ -135,12 +155,23 @@ public:
|
||||
};
|
||||
|
||||
~HostDelete(){};
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
int get_cluster_id(PoolObjectSQL * object)
|
||||
{
|
||||
return static_cast<Host*>(object)->get_cluster_id();
|
||||
};
|
||||
|
||||
int del_from_cluster(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->del_host(id, error_msg);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
|
||||
class GroupDelete: public RequestManagerDelete
|
||||
{
|
||||
public:
|
||||
@ -178,6 +209,54 @@ public:
|
||||
int drop(int oid, PoolObjectSQL * object, string& error_msg);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class DatastoreDelete: public RequestManagerDelete
|
||||
{
|
||||
public:
|
||||
DatastoreDelete():
|
||||
RequestManagerDelete("DatastoreDelete", "Deletes a datastore")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_dspool();
|
||||
auth_object = PoolObjectSQL::DATASTORE;
|
||||
auth_op = AuthRequest::ADMIN;
|
||||
};
|
||||
|
||||
~DatastoreDelete(){};
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
int get_cluster_id(PoolObjectSQL * object)
|
||||
{
|
||||
return static_cast<Datastore*>(object)->get_cluster_id();
|
||||
};
|
||||
|
||||
int del_from_cluster(Cluster* cluster, int id, string& error_msg)
|
||||
{
|
||||
return cluster->del_datastore(id, error_msg);
|
||||
};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterDelete: public RequestManagerDelete
|
||||
{
|
||||
public:
|
||||
ClusterDelete():
|
||||
RequestManagerDelete("ClusterDelete", "Deletes a cluster")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_clpool();
|
||||
auth_object = PoolObjectSQL::CLUSTER;
|
||||
auth_op = AuthRequest::ADMIN;
|
||||
};
|
||||
|
||||
~ClusterDelete(){};
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -162,7 +162,6 @@ public:
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
|
||||
class GroupInfo: public RequestManagerInfo
|
||||
{
|
||||
public:
|
||||
@ -196,6 +195,42 @@ public:
|
||||
~UserInfo(){};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class DatastoreInfo: public RequestManagerInfo
|
||||
{
|
||||
public:
|
||||
DatastoreInfo():
|
||||
RequestManagerInfo("DatastoreInfo",
|
||||
"Returns datastore information")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_dspool();
|
||||
auth_object = PoolObjectSQL::DATASTORE;
|
||||
};
|
||||
|
||||
~DatastoreInfo(){};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterInfo: public RequestManagerInfo
|
||||
{
|
||||
public:
|
||||
ClusterInfo():
|
||||
RequestManagerInfo("ClusterInfo",
|
||||
"Returns cluster information")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_clpool();
|
||||
auth_object = PoolObjectSQL::CLUSTER;
|
||||
};
|
||||
|
||||
~ClusterInfo(){};
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -224,6 +224,54 @@ public:
|
||||
xmlrpc_c::paramList const& paramList, RequestAttributes& att);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class DatastorePoolInfo: public RequestManagerPoolInfoFilter
|
||||
{
|
||||
public:
|
||||
DatastorePoolInfo():
|
||||
RequestManagerPoolInfoFilter("DatastorePoolInfo",
|
||||
"Returns the datastore pool",
|
||||
"A:s")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_dspool();
|
||||
auth_object = PoolObjectSQL::DATASTORE;
|
||||
};
|
||||
|
||||
~DatastorePoolInfo(){};
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
void request_execute(
|
||||
xmlrpc_c::paramList const& paramList, RequestAttributes& att);
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class ClusterPoolInfo: public RequestManagerPoolInfoFilter
|
||||
{
|
||||
public:
|
||||
ClusterPoolInfo():
|
||||
RequestManagerPoolInfoFilter("ClusterPoolInfo",
|
||||
"Returns the cluster pool",
|
||||
"A:s")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_clpool();
|
||||
auth_object = PoolObjectSQL::CLUSTER;
|
||||
};
|
||||
|
||||
~ClusterPoolInfo(){};
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
||||
void request_execute(
|
||||
xmlrpc_c::paramList const& paramList, RequestAttributes& att);
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -134,6 +134,24 @@ public:
|
||||
~UserUpdateTemplate(){};
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
class DatastoreUpdateTemplate : public RequestManagerUpdateTemplate
|
||||
{
|
||||
public:
|
||||
DatastoreUpdateTemplate():
|
||||
RequestManagerUpdateTemplate("DatastoreUpdateTemplate",
|
||||
"Updates a datastore template")
|
||||
{
|
||||
Nebula& nd = Nebula::instance();
|
||||
pool = nd.get_dspool();
|
||||
auth_object = PoolObjectSQL::DATASTORE;
|
||||
};
|
||||
|
||||
~DatastoreUpdateTemplate(){};
|
||||
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
@ -48,18 +48,20 @@ protected:
|
||||
virtual void request_execute(xmlrpc_c::paramList const& _paramList,
|
||||
RequestAttributes& att) = 0;
|
||||
|
||||
bool vm_authorization(int id, ImageTemplate *tmpl,
|
||||
RequestAttributes& att, PoolObjectAuth* host_perms);
|
||||
bool vm_authorization(int id,
|
||||
ImageTemplate * tmpl,
|
||||
RequestAttributes& att,
|
||||
PoolObjectAuth * host_perms,
|
||||
PoolObjectAuth * ds_perm);
|
||||
|
||||
int get_host_information(int hid, string& name, string& vmm, string& vnm,
|
||||
string& tm, RequestAttributes& att, PoolObjectAuth& host_perms);
|
||||
RequestAttributes& att, PoolObjectAuth& host_perms);
|
||||
|
||||
int add_history(VirtualMachine * vm,
|
||||
int hid,
|
||||
const string& hostname,
|
||||
const string& vmm_mad,
|
||||
const string& vnm_mad,
|
||||
const string& tm_mad,
|
||||
RequestAttributes& att);
|
||||
|
||||
VirtualMachine * get_vm(int id, RequestAttributes& att);
|
||||
|
@ -115,6 +115,11 @@ private:
|
||||
*/
|
||||
ActionManager am;
|
||||
|
||||
/**
|
||||
* Generic name for the TransferManager driver
|
||||
*/
|
||||
static const char * transfer_driver_name;
|
||||
|
||||
/**
|
||||
* Returns a pointer to a Transfer Manager driver.
|
||||
* @param name of an attribute of the driver (e.g. its type)
|
||||
@ -145,6 +150,18 @@ private:
|
||||
(MadManager::get(0,_name,name));
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns a pointer to a Transfer Manager driver. The driver is
|
||||
* searched by its name.
|
||||
* @return the TM driver for the Transfer Manager
|
||||
*/
|
||||
const TransferManagerDriver * get()
|
||||
{
|
||||
string _name("NAME");
|
||||
return static_cast<const TransferManagerDriver *>
|
||||
(MadManager::get(0,_name,transfer_driver_name));
|
||||
};
|
||||
|
||||
/**
|
||||
* Function to execute the Manager action loop method within a new pthread
|
||||
* (requires C linkage)
|
||||
|
@ -162,7 +162,7 @@ public:
|
||||
/**
|
||||
* Factory method for image templates
|
||||
*/
|
||||
Template * get_new_template()
|
||||
Template * get_new_template() const
|
||||
{
|
||||
return new UserTemplate;
|
||||
}
|
||||
|
@ -176,6 +176,16 @@ public:
|
||||
*/
|
||||
static const char * SERVER_NAME;
|
||||
|
||||
/**
|
||||
* Name of the oneadmin user
|
||||
*/
|
||||
static string oneadmin_name;
|
||||
|
||||
/**
|
||||
* Identifier for the oneadmin user
|
||||
*/
|
||||
static const int ONEADMIN_ID;
|
||||
|
||||
private:
|
||||
//--------------------------------------------------------------------------
|
||||
// Configuration Attributes for Users
|
||||
|
@ -44,7 +44,7 @@ public:
|
||||
/**
|
||||
* Factory method for virtual machine templates
|
||||
*/
|
||||
Template * get_new_template()
|
||||
Template * get_new_template() const
|
||||
{
|
||||
return new VirtualMachineTemplate;
|
||||
}
|
||||
|
@ -206,6 +206,24 @@ public:
|
||||
etime = et;
|
||||
};
|
||||
|
||||
    // ------------------------------------------------------------------------
    // Access to VM locations
    // ------------------------------------------------------------------------
    /**
     *  Returns the remote VM directory. The VM remote dir is in the form:
     *  $DATASTORE_LOCATION/$SYSTEM_DS_ID/$VM_ID. The remote system_dir stores
     *  disks for a running VM in the target host.
     *  @return the remote system directory for the VM
     */
    string get_remote_system_dir() const;

    /**
     *  Returns the local VM directory. The VM local dir is in the form:
     *  $SYSTEM_DS_BASE_PATH/$VM_ID. It temporarily stores the VM disks.
     *  @return the system directory for the VM
     */
    string get_system_dir() const;

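A minimal, self-contained sketch of how these two locations compose from the documented patterns; the variable names are assumptions and the real code in VirtualMachine.cc may differ.

// Sketch under stated assumptions: compose the documented path patterns.
#include <sstream>
#include <string>

// $DATASTORE_LOCATION/$SYSTEM_DS_ID/$VM_ID  (remote, on the target host)
std::string example_remote_system_dir(const std::string& ds_location,
                                      int system_ds_id,
                                      int vm_id)
{
    std::ostringstream oss;
    oss << ds_location << "/" << system_ds_id << "/" << vm_id;
    return oss.str();
}

// $SYSTEM_DS_BASE_PATH/$VM_ID  (local to the front-end)
std::string example_system_dir(const std::string& system_ds_base_path, int vm_id)
{
    std::ostringstream oss;
    oss << system_ds_base_path << "/" << vm_id;
    return oss.str();
}
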
// ------------------------------------------------------------------------
|
||||
// History
|
||||
// ------------------------------------------------------------------------
|
||||
@ -215,10 +233,8 @@ public:
|
||||
void add_history(
|
||||
int hid,
|
||||
const string& hostname,
|
||||
const string& vm_dir,
|
||||
const string& vmm_mad,
|
||||
const string& vnm_mad,
|
||||
const string& tm_mad);
|
||||
const string& vnm_mad);
|
||||
|
||||
/**
|
||||
* Duplicates the last history record. Only the host related fields are
|
||||
@ -294,26 +310,6 @@ public:
|
||||
return previous_history->vnm_mad_name;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the TM driver name for the current host. The hasHistory()
|
||||
* function MUST be called before this one.
|
||||
* @return the TM mad name
|
||||
*/
|
||||
const string & get_tm_mad() const
|
||||
{
|
||||
return history->tm_mad_name;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the TM driver name for the previous host. The
|
||||
* hasPreviousHistory() function MUST be called before this one.
|
||||
* @return the TM mad name
|
||||
*/
|
||||
const string & get_previous_tm_mad() const
|
||||
{
|
||||
return previous_history->tm_mad_name;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the transfer filename. The transfer file is in the form:
|
||||
* $ONE_LOCATION/var/$VM_ID/transfer.$SEQ
|
||||
@ -376,30 +372,6 @@ public:
|
||||
return history->checkpoint_file;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the remote VM directory. The VM remote dir is in the form:
|
||||
* $VM_DIR/$VM_ID/
|
||||
* or, in case that OpenNebula is installed in root
|
||||
* /var/lib/one/$VM_ID/
|
||||
* The hasHistory() function MUST be called before this one.
|
||||
* @return the remote directory
|
||||
*/
|
||||
const string & get_remote_dir() const
|
||||
{
|
||||
return history->vm_rhome;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the local VM directory. The VM local dir is in the form:
|
||||
* $ONE_LOCATION/var/$VM_ID/
|
||||
* The hasHistory() function MUST be called before this one.
|
||||
* @return the remote directory
|
||||
*/
|
||||
const string & get_local_dir() const
|
||||
{
|
||||
return history->vm_lhome;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the hostname for the current host. The hasHistory()
|
||||
* function MUST be called before this one.
|
||||
@ -570,7 +542,7 @@ public:
|
||||
/**
|
||||
* Factory method for virtual machine templates
|
||||
*/
|
||||
Template * get_new_template()
|
||||
Template * get_new_template() const
|
||||
{
|
||||
return new VirtualMachineTemplate;
|
||||
}
|
||||
@ -681,15 +653,27 @@ public:
|
||||
int generate_context(string &files);
|
||||
|
||||
    // ------------------------------------------------------------------------
    // Image repository related functions
    // Datastore related functions
    // ------------------------------------------------------------------------
    /**
     *  Set the SAVE_AS attribute for the "disk_id"th disk.
     *  @param disk_id Index of the disk to save
     *  @param img_id ID of the image this disk will be saved to.
     *  @param source to save the disk (SAVE_AS_SOURCE)
     *  @param img_id ID of the image this disk will be saved to (SAVE_AS).
     *  @return 0 if success
     */
    int save_disk(int disk_id, int img_id, string& error_str);
    int save_disk(const string& disk_id,
                  const string& source,
                  int img_id);

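The new signature no longer resolves the image itself; it only records where the disk must be saved. Below is a hedged sketch of how the SAVE_AS / SAVE_AS_SOURCE values might be written into the matching DISK vector attribute, written as if it were a VirtualMachine member and using the Template/VectorAttribute accessors shown earlier in this changeset; the actual traversal in VirtualMachine.cc may differ.

// Sketch only: mark the disk identified by disk_id so that, on shutdown,
// its SOURCE is saved to image img_id.
int example_save_disk(const string& disk_id,
                      const string& source,
                      int           img_id)
{
    vector<Attribute *> disks;
    int                 num = obj_template->get("DISK", disks);

    for (int i = 0; i < num; i++)
    {
        VectorAttribute * disk = dynamic_cast<VectorAttribute *>(disks[i]);

        if ( disk == 0 || disk->vector_value("DISK_ID") != disk_id )
        {
            continue;
        }

        ostringstream oss;
        oss << img_id;

        disk->replace("SAVE_AS",        oss.str()); // target image id
        disk->replace("SAVE_AS_SOURCE", source);    // where to store the disk

        return 0;
    }

    return -1; // no disk with that DISK_ID
}
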
/**
|
||||
* Get the original image id of the disk. It also checks that the disk can
|
||||
* be saved_as.
|
||||
* @param disk_id Index of the disk to save
|
||||
* @param error_str describes the error
|
||||
* @return -1 if failure
|
||||
*/
|
||||
int get_image_from_disk(int disk_id, string& error_str);
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// Authorization related functions
|
||||
@ -781,14 +765,13 @@ private:
|
||||
*/
|
||||
History * previous_history;
|
||||
|
||||
|
||||
/**
|
||||
* Complete set of history records for the VM
|
||||
*/
|
||||
vector<History *> history_records;
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Logging
|
||||
// Logging & Dirs
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
@ -797,7 +780,7 @@ private:
|
||||
* or, in case that OpenNebula is installed in root
|
||||
* /var/log/one/$VM_ID.log
|
||||
*/
|
||||
FileLog * _log;
|
||||
FileLog * _log;
|
||||
|
||||
// *************************************************************************
|
||||
// DataBase implementation (Private)
|
||||
@ -894,6 +877,14 @@ private:
|
||||
*/
|
||||
int parse_requirements(string& error_str);
|
||||
|
||||
    /**
     *  Adds automatic placement requirements: Datastore and Cluster
     *
     *  @param error_str Returns the error reason, if any
     *  @return 0 on success
     */
    int automatic_requirements(string& error_str);
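
A hedged sketch of the kind of expression automatic_requirements() presumably appends to the VM's requirements so the scheduler only considers hosts of the right cluster; the attribute name and the merge rule are assumptions here, and the real method derives the cluster from the VM's datastores and networks.

// Sketch only: build an automatic placement clause such as
//   CLUSTER_ID = 100
// and merge it with any user-provided REQUIREMENTS.
#include <sstream>
#include <string>

std::string example_automatic_requirements(int cluster_id,
                                           const std::string& user_requirements)
{
    std::ostringstream oss;

    oss << "CLUSTER_ID = " << cluster_id;

    if ( !user_requirements.empty() )
    {
        // Keep the user expression, AND-ed with the automatic one.
        oss << " & ( " << user_requirements << " )";
    }

    return oss.str();
}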
|
||||
/**
|
||||
* Parse the "GRAPHICS" attribute and generates a default PORT if not
|
||||
* defined
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "PoolSQL.h"
|
||||
#include "Leases.h"
|
||||
#include "VirtualNetworkTemplate.h"
|
||||
#include "Clusterable.h"
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
@ -39,7 +40,7 @@ using namespace std;
 *  leases. One lease is formed by one IP and one MAC address.
 *  MAC addresses are derived from IP addresses.
 */
class VirtualNetwork : public PoolObjectSQL
class VirtualNetwork : public PoolObjectSQL, public Clusterable
{
public:

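Clusterable itself is not part of this excerpt; judging from how it is used here (get_cluster_id() in the delete handlers and the casts in RequestManagerCluster::get()), it is presumably a small mixin holding the cluster id and name. A minimal sketch under that assumption; the real class in this changeset may carry more state or validation.

// Minimal sketch of the assumed Clusterable mixin.
#include <string>

class Clusterable
{
protected:
    Clusterable(int cluster_id, const std::string& cluster_name)
        : cluster_id(cluster_id), cluster_name(cluster_name) {};

    ~Clusterable() {};

public:
    int get_cluster_id() const
    {
        return cluster_id;
    };

    void set_cluster(int id, const std::string& name)
    {
        cluster_id   = id;
        cluster_name = name;
    };

private:
    int         cluster_id;   // id of the cluster this object belongs to
    std::string cluster_name; // cached cluster name for XML output
};
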
@ -61,7 +62,7 @@ public:
|
||||
/**
|
||||
* Factory method for virtual network templates
|
||||
*/
|
||||
Template * get_new_template()
|
||||
Template * get_new_template() const
|
||||
{
|
||||
return new VirtualNetworkTemplate;
|
||||
}
|
||||
@ -287,6 +288,8 @@ private:
|
||||
int gid,
|
||||
const string& _uname,
|
||||
const string& _gname,
|
||||
int _cluster_id,
|
||||
const string& _cluster_name,
|
||||
VirtualNetworkTemplate * _vn_template = 0);
|
||||
|
||||
~VirtualNetwork();
|
||||
|
@ -45,6 +45,8 @@ public:
     *  @param gid the id of the group this object is assigned to
     *  @param vn_template a VirtualNetworkTemplate describing the VNET
     *  @param oid the id assigned to the VNET (output)
     *  @param cluster_id the id of the cluster this VNET will belong to
     *  @param cluster_name the name of the cluster this VNET will belong to
     *  @param error_str Returns the error reason, if any
     *  @return oid on success, -1 error
     */
@ -55,6 +57,8 @@ public:
                 const string& gname,
                 VirtualNetworkTemplate * vn_template,
                 int * oid,
                 int cluster_id,
                 const string& cluster_name,
                 string& error_str);

/**
|
||||
@ -92,11 +96,12 @@ public:
|
||||
* metadata
|
||||
* @param nic the nic attribute to be generated
|
||||
* @param vid of the VM requesting the lease
|
||||
* @param error_str string describing the error
|
||||
* @return 0 on success,
|
||||
* -1 error,
|
||||
* -2 not using the pool
|
||||
*/
|
||||
int nic_attribute(VectorAttribute * nic, int uid, int vid);
|
||||
int nic_attribute(VectorAttribute * nic, int uid, int vid, string& error_str);
|
||||
|
||||
/**
|
||||
* Generates an Authorization token for a NIC attribute
|
||||
@ -162,7 +167,7 @@ private:
|
||||
*/
|
||||
PoolObjectSQL * create()
|
||||
{
|
||||
return new VirtualNetwork(-1,-1,"","",0);
|
||||
return new VirtualNetwork(-1,-1,"","",-1,"",0);
|
||||
};
|
||||
|
||||
/**
|
||||
@ -171,12 +176,13 @@ private:
|
||||
*/
|
||||
VirtualNetwork * get_nic_by_name(VectorAttribute * nic,
|
||||
const string& name,
|
||||
int _uid);
|
||||
int _uid,
|
||||
string& error);
|
||||
|
||||
/**
|
||||
* Function to get a VirtualNetwork by its id, as provided by a VM template
|
||||
*/
|
||||
VirtualNetwork * get_nic_by_id(const string& id_s);
|
||||
VirtualNetwork * get_nic_by_id(const string& id_s, string& error);
|
||||
};
|
||||
|
||||
#endif /*VIRTUAL_NETWORK_POOL_H_*/
|
||||
|
@ -26,6 +26,8 @@
|
||||
#include "HostPool.h"
|
||||
#include "UserPool.h"
|
||||
#include "VMTemplatePool.h"
|
||||
#include "DatastorePool.h"
|
||||
#include "ClusterPool.h"
|
||||
|
||||
#include "VirtualMachineManager.h"
|
||||
#include "LifeCycleManager.h"
|
||||
@ -45,7 +47,8 @@ protected:
|
||||
NebulaTest():mysql(false), need_host_pool(false), need_vm_pool(false),
|
||||
need_vnet_pool(false), need_image_pool(false),
|
||||
need_user_pool(false), need_template_pool(false),
|
||||
need_group_pool(false),
|
||||
need_group_pool(false), need_datastore_pool(false),
|
||||
need_cluster_pool(false),
|
||||
need_vmm(false),
|
||||
need_im(false), need_tm(false),
|
||||
need_lcm(false), need_dm(false),
|
||||
@ -67,6 +70,8 @@ public:
|
||||
bool need_user_pool;
|
||||
bool need_template_pool;
|
||||
bool need_group_pool;
|
||||
bool need_datastore_pool;
|
||||
bool need_cluster_pool;
|
||||
|
||||
bool need_vmm;
|
||||
bool need_im;
|
||||
@ -107,6 +112,10 @@ public:
|
||||
|
||||
virtual GroupPool* create_gpool(SqlDB* db);
|
||||
|
||||
virtual DatastorePool* create_dspool(SqlDB* db);
|
||||
|
||||
virtual ClusterPool* create_clpool(SqlDB* db);
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
// Managers
|
||||
// ------------------------------------------------------------------------
|
||||
|
249
install.sh
@ -99,7 +99,8 @@ if [ -z "$ROOT" ] ; then
|
||||
VAR_LOCATION="/var/lib/one"
|
||||
SUNSTONE_LOCATION="$LIB_LOCATION/sunstone"
|
||||
OZONES_LOCATION="$LIB_LOCATION/ozones"
|
||||
IMAGES_LOCATION="$VAR_LOCATION/images"
|
||||
SYSTEM_DS_LOCATION="$VAR_LOCATION/datastores/0"
|
||||
DEFAULT_DS_LOCATION="$VAR_LOCATION/datastores/1"
|
||||
RUN_LOCATION="/var/run/one"
|
||||
LOCK_LOCATION="/var/lock/one"
|
||||
INCLUDE_LOCATION="/usr/include"
|
||||
@ -130,7 +131,7 @@ if [ -z "$ROOT" ] ; then
|
||||
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \
|
||||
$INCLUDE_LOCATION $SHARE_LOCATION \
|
||||
$LOG_LOCATION $RUN_LOCATION $LOCK_LOCATION \
|
||||
$IMAGES_LOCATION $MAN_LOCATION"
|
||||
$SYSTEM_DS_LOCATION $DEFAULT_DS_LOCATION $MAN_LOCATION"
|
||||
|
||||
DELETE_DIRS="$LIB_LOCATION $ETC_LOCATION $LOG_LOCATION $VAR_LOCATION \
|
||||
$RUN_LOCATION $SHARE_DIRS"
|
||||
@ -145,7 +146,8 @@ else
|
||||
VAR_LOCATION="$ROOT/var"
|
||||
SUNSTONE_LOCATION="$LIB_LOCATION/sunstone"
|
||||
OZONES_LOCATION="$LIB_LOCATION/ozones"
|
||||
IMAGES_LOCATION="$VAR_LOCATION/images"
|
||||
SYSTEM_DS_LOCATION="$VAR_LOCATION/datastores/0"
|
||||
DEFAULT_DS_LOCATION="$VAR_LOCATION/datastores/1"
|
||||
INCLUDE_LOCATION="$ROOT/include"
|
||||
SHARE_LOCATION="$ROOT/share"
|
||||
MAN_LOCATION="$ROOT/share/man/man1"
|
||||
@ -166,8 +168,8 @@ else
|
||||
DELETE_DIRS="$MAKE_DIRS"
|
||||
else
|
||||
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \
|
||||
$INCLUDE_LOCATION $SHARE_LOCATION $IMAGES_LOCATION \
|
||||
$MAN_LOCATION $OZONES_LOCATION"
|
||||
$INCLUDE_LOCATION $SHARE_LOCATION $SYSTEM_DS_LOCATION \
|
||||
$DEFAULT_DS_LOCATION $MAN_LOCATION $OZONES_LOCATION"
|
||||
|
||||
DELETE_DIRS="$MAKE_DIRS"
|
||||
|
||||
@ -180,15 +182,10 @@ fi
|
||||
SHARE_DIRS="$SHARE_LOCATION/examples \
|
||||
$SHARE_LOCATION/examples/tm"
|
||||
|
||||
ETC_DIRS="$ETC_LOCATION/image \
|
||||
$ETC_LOCATION/im_ec2 \
|
||||
ETC_DIRS="$ETC_LOCATION/im_ec2 \
|
||||
$ETC_LOCATION/vmm_ec2 \
|
||||
$ETC_LOCATION/vmm_exec \
|
||||
$ETC_LOCATION/tm_shared \
|
||||
$ETC_LOCATION/tm_ssh \
|
||||
$ETC_LOCATION/tm_dummy \
|
||||
$ETC_LOCATION/tm_vmware \
|
||||
$ETC_LOCATION/tm_lvm \
|
||||
$ETC_LOCATION/tm \
|
||||
$ETC_LOCATION/hm \
|
||||
$ETC_LOCATION/auth \
|
||||
$ETC_LOCATION/auth/certificates \
|
||||
@ -205,12 +202,6 @@ LIB_DIRS="$LIB_LOCATION/ruby \
|
||||
$LIB_LOCATION/ruby/cloud/occi \
|
||||
$LIB_LOCATION/ruby/cloud/CloudAuth \
|
||||
$LIB_LOCATION/ruby/onedb \
|
||||
$LIB_LOCATION/tm_commands \
|
||||
$LIB_LOCATION/tm_commands/shared \
|
||||
$LIB_LOCATION/tm_commands/ssh \
|
||||
$LIB_LOCATION/tm_commands/dummy \
|
||||
$LIB_LOCATION/tm_commands/lvm \
|
||||
$LIB_LOCATION/tm_commands/vmware \
|
||||
$LIB_LOCATION/mads \
|
||||
$LIB_LOCATION/sh \
|
||||
$LIB_LOCATION/ruby/cli \
|
||||
@ -223,7 +214,10 @@ VAR_DIRS="$VAR_LOCATION/remotes \
|
||||
$VAR_LOCATION/remotes/im/xen.d \
|
||||
$VAR_LOCATION/remotes/im/vmware.d \
|
||||
$VAR_LOCATION/remotes/im/ganglia.d \
|
||||
$VAR_LOCATION/remotes/vmm \
|
||||
$VAR_LOCATION/remotes/vmm/kvm \
|
||||
$VAR_LOCATION/remotes/vmm/xen \
|
||||
$VAR_LOCATION/remotes/vmm/vmware \
|
||||
$VAR_LOCATION/remotes/vnm \
|
||||
$VAR_LOCATION/remotes/vnm/802.1Q \
|
||||
$VAR_LOCATION/remotes/vnm/dummy \
|
||||
@ -231,12 +225,21 @@ VAR_DIRS="$VAR_LOCATION/remotes \
|
||||
$VAR_LOCATION/remotes/vnm/fw \
|
||||
$VAR_LOCATION/remotes/vnm/ovswitch \
|
||||
$VAR_LOCATION/remotes/vnm/vmware \
|
||||
$VAR_LOCATION/remotes/vmm/xen \
|
||||
$VAR_LOCATION/remotes/vmm/vmware \
|
||||
$VAR_LOCATION/remotes/tm/ \
|
||||
$VAR_LOCATION/remotes/tm/dummy \
|
||||
$VAR_LOCATION/remotes/tm/lvm \
|
||||
$VAR_LOCATION/remotes/tm/shared \
|
||||
$VAR_LOCATION/remotes/tm/qcow2 \
|
||||
$VAR_LOCATION/remotes/tm/ssh \
|
||||
$VAR_LOCATION/remotes/tm/vmware \
|
||||
$VAR_LOCATION/remotes/tm/iscsi \
|
||||
$VAR_LOCATION/remotes/hooks \
|
||||
$VAR_LOCATION/remotes/hooks/ft \
|
||||
$VAR_LOCATION/remotes/image \
|
||||
$VAR_LOCATION/remotes/image/fs \
|
||||
$VAR_LOCATION/remotes/datastore \
|
||||
$VAR_LOCATION/remotes/datastore/dummy \
|
||||
$VAR_LOCATION/remotes/datastore/fs \
|
||||
$VAR_LOCATION/remotes/datastore/vmware \
|
||||
$VAR_LOCATION/remotes/datastore/iscsi \
|
||||
$VAR_LOCATION/remotes/auth \
|
||||
$VAR_LOCATION/remotes/auth/plain \
|
||||
$VAR_LOCATION/remotes/auth/ssh \
|
||||
@ -381,12 +384,19 @@ INSTALL_FILES=(
|
||||
VMM_EXEC_KVM_SCRIPTS:$VAR_LOCATION/remotes/vmm/kvm
|
||||
VMM_EXEC_XEN_SCRIPTS:$VAR_LOCATION/remotes/vmm/xen
|
||||
VMM_EXEC_VMWARE_SCRIPTS:$VAR_LOCATION/remotes/vmm/vmware
|
||||
SHARED_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/shared
|
||||
SSH_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/ssh
|
||||
VMWARE_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/vmware
|
||||
DUMMY_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/dummy
|
||||
LVM_TM_COMMANDS_LIB_FILES:$LIB_LOCATION/tm_commands/lvm
|
||||
IMAGE_DRIVER_FS_SCRIPTS:$VAR_LOCATION/remotes/image/fs
|
||||
TM_FILES:$VAR_LOCATION/remotes/tm
|
||||
TM_SHARED_FILES:$VAR_LOCATION/remotes/tm/shared
|
||||
TM_QCOW2_FILES:$VAR_LOCATION/remotes/tm/qcow2
|
||||
TM_SSH_FILES:$VAR_LOCATION/remotes/tm/ssh
|
||||
TM_VMWARE_FILES:$VAR_LOCATION/remotes/tm/vmware
|
||||
TM_ISCSI_FILES:$VAR_LOCATION/remotes/tm/iscsi
|
||||
TM_DUMMY_FILES:$VAR_LOCATION/remotes/tm/dummy
|
||||
TM_LVM_FILES:$VAR_LOCATION/remotes/tm/lvm
|
||||
DATASTORE_DRIVER_COMMON_SCRIPTS:$VAR_LOCATION/remotes/datastore/
|
||||
DATASTORE_DRIVER_DUMMY_SCRIPTS:$VAR_LOCATION/remotes/datastore/dummy
|
||||
DATASTORE_DRIVER_FS_SCRIPTS:$VAR_LOCATION/remotes/datastore/fs
|
||||
DATASTORE_DRIVER_VMWARE_SCRIPTS:$VAR_LOCATION/remotes/datastore/vmware
|
||||
DATASTORE_DRIVER_ISCSI_SCRIPTS:$VAR_LOCATION/remotes/datastore/iscsi
|
||||
NETWORK_FILES:$VAR_LOCATION/remotes/vnm
|
||||
NETWORK_8021Q_FILES:$VAR_LOCATION/remotes/vnm/802.1Q
|
||||
NETWORK_DUMMY_FILES:$VAR_LOCATION/remotes/vnm/dummy
|
||||
@ -526,13 +536,8 @@ INSTALL_ETC_FILES=(
|
||||
VMWARE_ETC_FILES:$ETC_LOCATION
|
||||
VMM_EC2_ETC_FILES:$ETC_LOCATION/vmm_ec2
|
||||
VMM_EXEC_ETC_FILES:$ETC_LOCATION/vmm_exec
|
||||
IMAGE_DRIVER_FS_ETC_FILES:$ETC_LOCATION/image/
|
||||
IM_EC2_ETC_FILES:$ETC_LOCATION/im_ec2
|
||||
TM_SHARED_ETC_FILES:$ETC_LOCATION/tm_shared
|
||||
TM_SSH_ETC_FILES:$ETC_LOCATION/tm_ssh
|
||||
TM_DUMMY_ETC_FILES:$ETC_LOCATION/tm_dummy
|
||||
TM_LVM_ETC_FILES:$ETC_LOCATION/tm_lvm
|
||||
TM_VMWARE_ETC_FILES:$ETC_LOCATION/tm_vmware
|
||||
TM_LVM_ETC_FILES:$ETC_LOCATION/tm/
|
||||
HM_ETC_FILES:$ETC_LOCATION/hm
|
||||
AUTH_ETC_FILES:$ETC_LOCATION/auth
|
||||
ECO_ETC_FILES:$ETC_LOCATION
|
||||
@ -558,6 +563,8 @@ BIN_FILES="src/nebula/oned \
|
||||
src/cli/onegroup \
|
||||
src/cli/onetemplate \
|
||||
src/cli/oneacl \
|
||||
src/cli/onedatastore \
|
||||
src/cli/onecluster \
|
||||
src/onedb/onedb \
|
||||
src/authm_mad/remotes/quota/onequota \
|
||||
src/mad/utils/tty_expect \
|
||||
@ -585,7 +592,6 @@ RUBY_LIB_FILES="src/mad/ruby/ActionManager.rb \
|
||||
src/vnm_mad/one_vnm.rb \
|
||||
src/mad/ruby/Ganglia.rb \
|
||||
src/oca/ruby/OpenNebula.rb \
|
||||
src/tm_mad/TMScript.rb \
|
||||
src/authm_mad/remotes/ssh/ssh_auth.rb \
|
||||
src/authm_mad/remotes/quota/quota.rb \
|
||||
src/authm_mad/remotes/server_x509/server_x509_auth.rb \
|
||||
@ -606,7 +612,6 @@ MAD_RUBY_LIB_FILES="src/mad/ruby/scripts_common.rb"
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
MADS_LIB_FILES="src/mad/sh/madcommon.sh \
|
||||
src/tm_mad/tm_common.sh \
|
||||
src/vmm_mad/exec/one_vmm_exec.rb \
|
||||
src/vmm_mad/exec/one_vmm_exec \
|
||||
src/vmm_mad/exec/one_vmm_sh \
|
||||
@ -629,8 +634,8 @@ MADS_LIB_FILES="src/mad/sh/madcommon.sh \
|
||||
src/hm_mad/one_hm \
|
||||
src/authm_mad/one_auth_mad.rb \
|
||||
src/authm_mad/one_auth_mad \
|
||||
src/image_mad/one_image.rb \
|
||||
src/image_mad/one_image"
|
||||
src/datastore_mad/one_datastore.rb \
|
||||
src/datastore_mad/one_datastore"
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# VMM SH Driver KVM scripts, to be installed under $REMOTES_LOCATION/vmm/kvm
|
||||
@ -754,60 +759,97 @@ NETWORK_VMWARE_FILES="src/vnm_mad/remotes/vmware/clean \
|
||||
src/vnm_mad/remotes/vmware/pre \
|
||||
src/vnm_mad/remotes/vmware/VMware.rb"
|
||||
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# Transfer Manager commands, to be installed under $LIB_LOCATION/tm_commands
|
||||
# - SHARED TM, $LIB_LOCATION/tm_commands/shared
|
||||
# - SSH TM, $LIB_LOCATION/tm_commands/ssh
|
||||
# - dummy TM, $LIB_LOCATION/tm_commands/dummy
|
||||
# - LVM TM, $LIB_LOCATION/tm_commands/lvm
|
||||
# - SHARED TM, $VAR_LOCATION/tm/shared
|
||||
# - QCOW2 TM, $VAR_LOCATION/tm/qcow2
|
||||
# - SSH TM, $VAR_LOCATION/tm/ssh
|
||||
# - dummy TM, $VAR_LOCATION/tm/dummy
|
||||
# - LVM TM, $VAR_LOCATION/tm/lvm
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
SHARED_TM_COMMANDS_LIB_FILES="src/tm_mad/shared/tm_clone.sh \
|
||||
src/tm_mad/shared/tm_delete.sh \
|
||||
src/tm_mad/shared/tm_ln.sh \
|
||||
src/tm_mad/shared/tm_mkswap.sh \
|
||||
src/tm_mad/shared/tm_mkimage.sh \
|
||||
src/tm_mad/shared/tm_mv.sh \
|
||||
src/tm_mad/shared/tm_context.sh"
|
||||
TM_FILES="src/tm_mad/tm_common.sh"
|
||||
|
||||
SSH_TM_COMMANDS_LIB_FILES="src/tm_mad/ssh/tm_clone.sh \
|
||||
src/tm_mad/ssh/tm_delete.sh \
|
||||
src/tm_mad/ssh/tm_ln.sh \
|
||||
src/tm_mad/ssh/tm_mkswap.sh \
|
||||
src/tm_mad/ssh/tm_mkimage.sh \
|
||||
src/tm_mad/ssh/tm_mv.sh \
|
||||
src/tm_mad/ssh/tm_context.sh"
|
||||
TM_SHARED_FILES="src/tm_mad/shared/clone \
|
||||
src/tm_mad/shared/delete \
|
||||
src/tm_mad/shared/ln \
|
||||
src/tm_mad/shared/mkswap \
|
||||
src/tm_mad/shared/mkimage \
|
||||
src/tm_mad/shared/mv \
|
||||
src/tm_mad/shared/context \
|
||||
src/tm_mad/shared/mvds"
|
||||
|
||||
DUMMY_TM_COMMANDS_LIB_FILES="src/tm_mad/dummy/tm_dummy.sh"
|
||||
TM_QCOW2_FILES="src/tm_mad/qcow2/clone \
|
||||
src/tm_mad/qcow2/delete \
|
||||
src/tm_mad/qcow2/ln \
|
||||
src/tm_mad/qcow2/mkswap \
|
||||
src/tm_mad/qcow2/mkimage \
|
||||
src/tm_mad/qcow2/mv \
|
||||
src/tm_mad/qcow2/context \
|
||||
src/tm_mad/qcow2/mvds"
|
||||
|
||||
LVM_TM_COMMANDS_LIB_FILES="src/tm_mad/lvm/tm_clone.sh \
|
||||
src/tm_mad/lvm/tm_delete.sh \
|
||||
src/tm_mad/lvm/tm_ln.sh \
|
||||
src/tm_mad/lvm/tm_mkswap.sh \
|
||||
src/tm_mad/lvm/tm_mkimage.sh \
|
||||
src/tm_mad/lvm/tm_mv.sh \
|
||||
src/tm_mad/lvm/tm_context.sh"
|
||||
TM_SSH_FILES="src/tm_mad/ssh/clone \
|
||||
src/tm_mad/ssh/delete \
|
||||
src/tm_mad/ssh/ln \
|
||||
src/tm_mad/ssh/mkswap \
|
||||
src/tm_mad/ssh/mkimage \
|
||||
src/tm_mad/ssh/mv \
|
||||
src/tm_mad/ssh/context \
|
||||
src/tm_mad/ssh/mvds"
|
||||
|
||||
VMWARE_TM_COMMANDS_LIB_FILES="src/tm_mad/vmware/tm_clone.sh \
|
||||
src/tm_mad/vmware/tm_ln.sh \
|
||||
src/tm_mad/vmware/tm_mv.sh \
|
||||
src/tm_mad/vmware/functions.sh \
|
||||
src/tm_mad/vmware/tm_context.sh"
|
||||
TM_DUMMY_FILES="src/tm_mad/dummy/clone \
|
||||
src/tm_mad/dummy/delete \
|
||||
src/tm_mad/dummy/ln \
|
||||
src/tm_mad/dummy/mkswap \
|
||||
src/tm_mad/dummy/mkimage \
|
||||
src/tm_mad/dummy/mv \
|
||||
src/tm_mad/dummy/context \
|
||||
src/tm_mad/dummy/mvds"
|
||||
|
||||
TM_LVM_FILES="src/tm_mad/lvm/clone \
|
||||
src/tm_mad/lvm/delete \
|
||||
src/tm_mad/lvm/ln \
|
||||
src/tm_mad/lvm/mkswap \
|
||||
src/tm_mad/lvm/mkimage \
|
||||
src/tm_mad/lvm/mv \
|
||||
src/tm_mad/lvm/context"
|
||||
|
||||
TM_VMWARE_FILES="src/tm_mad/vmware/clone \
|
||||
src/tm_mad/vmware/ln \
|
||||
src/tm_mad/vmware/mv \
|
||||
src/tm_mad/vmware/functions.sh \
|
||||
src/tm_mad/vmware/context"
|
||||
|
||||
TM_ISCSI_FILES="src/tm_mad/iscsi/clone \
|
||||
src/tm_mad/iscsi/ln \
|
||||
src/tm_mad/iscsi/mv \
|
||||
src/tm_mad/iscsi/mvds \
|
||||
src/tm_mad/iscsi/delete"
|
||||
#-------------------------------------------------------------------------------
|
||||
# Image Repository drivers, to be installed under $REMOTES_LOCATION/image
|
||||
# - FS based Image Repository, $REMOTES_LOCATION/image/fs
|
||||
# Datastore drivers, to be installed under $REMOTES_LOCATION/datastore
|
||||
# - FS based Image Repository, $REMOTES_LOCATION/datastore/fs
|
||||
# - VMware based Image Repository, $REMOTES_LOCATION/datastore/vmware
|
||||
#-------------------------------------------------------------------------------
|
||||
|
||||
IMAGE_DRIVER_FS_ETC_FILES="src/image_mad/remotes/fs/fs.conf"
|
||||
DATASTORE_DRIVER_COMMON_SCRIPTS="src/datastore_mad/remotes/xpath.rb \
|
||||
src/datastore_mad/remotes/libfs.sh"
|
||||
|
||||
IMAGE_DRIVER_FS_SCRIPTS="src/image_mad/remotes/fs/cp \
|
||||
src/image_mad/remotes/fs/mkfs \
|
||||
src/image_mad/remotes/fs/mv \
|
||||
src/image_mad/remotes/fs/fsrc \
|
||||
src/image_mad/remotes/fs/rm"
|
||||
DATASTORE_DRIVER_DUMMY_SCRIPTS="src/datastore_mad/remotes/dummy/cp \
|
||||
src/datastore_mad/remotes/dummy/mkfs \
|
||||
src/datastore_mad/remotes/dummy/rm"
|
||||
|
||||
DATASTORE_DRIVER_FS_SCRIPTS="src/datastore_mad/remotes/fs/cp \
|
||||
src/datastore_mad/remotes/fs/mkfs \
|
||||
src/datastore_mad/remotes/fs/rm"
|
||||
|
||||
DATASTORE_DRIVER_VMWARE_SCRIPTS="src/datastore_mad/remotes/vmware/cp \
|
||||
src/datastore_mad/remotes/vmware/mkfs \
|
src/datastore_mad/remotes/vmware/rm"

DATASTORE_DRIVER_ISCSI_SCRIPTS="src/datastore_mad/remotes/iscsi/cp \
src/datastore_mad/remotes/iscsi/mkfs \
src/datastore_mad/remotes/iscsi/rm \
src/datastore_mad/remotes/iscsi/iscsi.conf"

#-------------------------------------------------------------------------------
# Migration scripts for onedb command, to be installed under $LIB_LOCATION
@ -821,6 +863,7 @@ ONEDB_MIGRATOR_FILES="src/onedb/2.0_to_2.9.80.rb \
src/onedb/3.1.80_to_3.2.0.rb \
src/onedb/3.2.0_to_3.2.1.rb \
src/onedb/3.2.1_to_3.3.0.rb \
src/onedb/3.3.0_to_3.3.80.rb \
src/onedb/onedb.rb \
src/onedb/onedb_backend.rb"

@ -865,19 +908,7 @@ IM_EC2_ETC_FILES="src/im_mad/ec2/im_ec2rc \
# - lvm, $ETC_LOCATION/tm_lvm
#-------------------------------------------------------------------------------

TM_SHARED_ETC_FILES="src/tm_mad/shared/tm_shared.conf \
src/tm_mad/shared/tm_sharedrc"

TM_SSH_ETC_FILES="src/tm_mad/ssh/tm_ssh.conf \
src/tm_mad/ssh/tm_sshrc"

TM_DUMMY_ETC_FILES="src/tm_mad/dummy/tm_dummy.conf \
src/tm_mad/dummy/tm_dummyrc"

TM_LVM_ETC_FILES="src/tm_mad/lvm/tm_lvm.conf \
src/tm_mad/lvm/tm_lvmrc"

TM_VMWARE_ETC_FILES="src/tm_mad/vmware/tm_vmware.conf"
TM_LVM_ETC_FILES="src/tm_mad/lvm/lvm.conf"

#-------------------------------------------------------------------------------
# Hook Manager driver config. files, to be installed under $ETC_LOCATION/hm
@ -948,6 +979,10 @@ RUBY_OPENNEBULA_LIB_FILES="src/oca/ruby/OpenNebula/Host.rb \
src/oca/ruby/OpenNebula/GroupPool.rb \
src/oca/ruby/OpenNebula/Acl.rb \
src/oca/ruby/OpenNebula/AclPool.rb \
src/oca/ruby/OpenNebula/Datastore.rb \
src/oca/ruby/OpenNebula/DatastorePool.rb \
src/oca/ruby/OpenNebula/Cluster.rb \
src/oca/ruby/OpenNebula/ClusterPool.rb \
src/oca/ruby/OpenNebula/XMLUtils.rb"

#-------------------------------------------------------------------------------
@ -963,7 +998,8 @@ COMMON_CLOUD_CLIENT_LIB_FILES="src/cloud/common/CloudClient.rb"
CLOUD_AUTH_LIB_FILES="src/cloud/common/CloudAuth/OCCICloudAuth.rb \
src/cloud/common/CloudAuth/SunstoneCloudAuth.rb \
src/cloud/common/CloudAuth/EC2CloudAuth.rb \
src/cloud/common/CloudAuth/X509CloudAuth.rb"
src/cloud/common/CloudAuth/X509CloudAuth.rb \
src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb"

#-------------------------------------------------------------------------------
# EC2 Query for OpenNebula
@ -1053,7 +1089,9 @@ ONE_CLI_LIB_FILES="src/cli/one_helper/onegroup_helper.rb \
src/cli/one_helper/oneuser_helper.rb \
src/cli/one_helper/onevm_helper.rb \
src/cli/one_helper/onevnet_helper.rb \
src/cli/one_helper/oneacl_helper.rb"
src/cli/one_helper/oneacl_helper.rb \
src/cli/one_helper/onedatastore_helper.rb \
src/cli/one_helper/onecluster_helper.rb"

CLI_BIN_FILES="src/cli/onevm \
src/cli/onehost \
@ -1062,7 +1100,9 @@ CLI_BIN_FILES="src/cli/onevm \
src/cli/oneimage \
src/cli/onetemplate \
src/cli/onegroup \
src/cli/oneacl"
src/cli/oneacl \
src/cli/onedatastore \
src/cli/onecluster"

CLI_CONF_FILES="src/cli/etc/onegroup.yaml \
src/cli/etc/onehost.yaml \
@ -1071,7 +1111,9 @@ CLI_CONF_FILES="src/cli/etc/onegroup.yaml \
src/cli/etc/oneuser.yaml \
src/cli/etc/onevm.yaml \
src/cli/etc/onevnet.yaml \
src/cli/etc/oneacl.yaml"
src/cli/etc/oneacl.yaml \
src/cli/etc/onedatastore.yaml \
src/cli/etc/onecluster.yaml"

ETC_CLIENT_FILES="src/cli/etc/group.default"

@ -1100,6 +1142,8 @@ SUNSTONE_MODELS_JSON_FILES="src/sunstone/models/OpenNebulaJSON/HostJSON.rb \
src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb \
src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb \
src/sunstone/models/OpenNebulaJSON/AclJSON.rb \
src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb \
src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb \
src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb"

SUNSTONE_TEMPLATE_FILES="src/sunstone/templates/login.html \
@ -1118,6 +1162,11 @@ SUNSTONE_PUBLIC_JS_PLUGINS_FILES="\
src/sunstone/public/js/plugins/dashboard-tab.js \
src/sunstone/public/js/plugins/dashboard-users-tab.js \
src/sunstone/public/js/plugins/hosts-tab.js \
src/sunstone/public/js/plugins/clusters-tab.js \
src/sunstone/public/js/plugins/datastores-tab.js \
src/sunstone/public/js/plugins/system-tab.js \
src/sunstone/public/js/plugins/vresources-tab.js \
src/sunstone/public/js/plugins/infra-tab.js \
src/sunstone/public/js/plugins/groups-tab.js \
src/sunstone/public/js/plugins/images-tab.js \
src/sunstone/public/js/plugins/templates-tab.js \
@ -1404,6 +1453,8 @@ MAN_FILES="share/man/oneauth.1.gz \
share/man/onetemplate.1.gz \
share/man/onegroup.1.gz \
share/man/onedb.1.gz \
share/man/onedatastore.1.gz \
share/man/onecluster.1.gz \
share/man/econe-describe-images.1.gz \
share/man/econe-describe-instances.1.gz \
share/man/econe-register.1.gz \
@ -1430,7 +1481,7 @@ if [ "$UNINSTALL" = "no" ] ; then
done

# Remove old migrators
rm $LIB_LOCATION/ruby/onedb/*.rb
rm $LIB_LOCATION/ruby/onedb/*.rb &> /dev/null
fi

# --- Install/Uninstall files ---
@ -1504,12 +1555,6 @@ if [ "$UNINSTALL" = "no" ] ; then
for d in $CHOWN_DIRS; do
chown -R $ONEADMIN_USER:$ONEADMIN_GROUP $DESTDIR$d
done

# --- Set correct permissions for Image Repository ---

if [ -d "$DESTDIR$IMAGES_LOCATION" ]; then
chmod 3770 $DESTDIR$IMAGES_LOCATION
fi
else
for d in `echo $DELETE_DIRS | awk '{for (i=NF;i>=1;i--) printf $i" "}'`; do
rmdir $d
@ -16,11 +16,6 @@
# (use 0 to disable VM monitoring).
# VM_PER_INTERVAL: Number of VMs monitored in each interval.
#
# VM_DIR: Remote path to store the VM images, it should be shared between all
# the cluster nodes to perform live migrations. This variable is the default
# for all the hosts in the cluster. VM_DIR IS ONLY FOR THE NODES AND *NOT* THE
# FRONT-END
#
# SCRIPTS_REMOTE_DIR: Remote path to store the monitoring and VM management
# scripts.
#
@ -49,7 +44,6 @@ HOST_MONITORING_INTERVAL = 600
VM_POLLING_INTERVAL = 600
#VM_PER_INTERVAL = 5

#VM_DIR=/srv/cloud/one/var

SCRIPTS_REMOTE_DIR=/var/tmp/one

@ -84,8 +78,13 @@ NETWORK_SIZE = 254
MAC_PREFIX = "02:00"

#*******************************************************************************
# Image Repository Configuration
# DataStore Configuration
#*******************************************************************************
# DATASTORE_LOCATION: Path for Datastores in the hosts. It IS the same for all
# the hosts in the cluster. DATASTORE_LOCATION IS ONLY FOR THE HOSTS AND *NOT*
# THE FRONT-END. It defaults to /var/lib/one/datastores (or
# $ONE_LOCATION/var/datastores in self-contained mode)
#
# DEFAULT_IMAGE_TYPE: This can take values
#    OS      Image file holding an operating system
#    CDROM   Image file holding a CDROM
@ -97,6 +96,9 @@ MAC_PREFIX = "02:00"
#    xvd     XEN Virtual Disk
#    vd      KVM virtual disk
#*******************************************************************************

#DATASTORE_LOCATION = /var/lib/one/datastores

DEFAULT_IMAGE_TYPE    = "OS"
DEFAULT_DEVICE_PREFIX = "hd"

@ -199,7 +201,7 @@ IM_MAD = [
#  KVM Virtualization Driver Manager Configuration
#    -r number of retries when monitoring a host
#    -t number of threads, i.e. number of hosts monitored at the same time
#    -l <actions[=command_name]> actions executed locally, command can be
#    -l <actions[=command_name]> actions executed locally, command can be
#        overridden for each action.
#        Valid actions: deploy, shutdown, cancel, save, restore, migrate, poll
#        An example: "-l migrate,poll=poll_ganglia,save"
@ -216,7 +218,7 @@ VM_MAD = [
#  XEN Virtualization Driver Manager Configuration
#    -r number of retries when monitoring a host
#    -t number of threads, i.e. number of hosts monitored at the same time
#    -l <actions[=command_name]> actions executed locally, command can be
#    -l <actions[=command_name]> actions executed locally, command can be
#        overridden for each action.
#        Valid actions: deploy, shutdown, cancel, save, restore, migrate, poll
#        An example: "-l migrate,poll=poll_ganglia,save"
@ -271,75 +273,33 @@ VM_MAD = [
#   executable: path of the transfer driver executable, can be an
#               absolute path or relative to $ONE_LOCATION/lib/mads (or
#               /usr/lib/one/mads/ if OpenNebula was installed in /)
#
#   arguments : for the driver executable, usually a commands configuration file
#               , can be an absolute path or relative to $ONE_LOCATION/etc (or
#               /etc/one/ if OpenNebula was installed in /)
#   arguments :
#       -t: number of threads, i.e. number of transfers made at the same time
#       -d: list of transfer drivers separated by commas, if not defined all the
#           drivers available will be enabled
#*******************************************************************************

#-------------------------------------------------------------------------------
# SHARED Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
TM_MAD = [
    name       = "tm_shared",
    executable = "one_tm",
    arguments  = "tm_shared/tm_shared.conf" ]
#-------------------------------------------------------------------------------

#-------------------------------------------------------------------------------
# SSH Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
#TM_MAD = [
#    name       = "tm_ssh",
#    executable = "one_tm",
#    arguments  = "tm_ssh/tm_ssh.conf" ]
#-------------------------------------------------------------------------------

#-------------------------------------------------------------------------------
# Dummy Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
#TM_MAD = [
#    name       = "tm_dummy",
#    executable = "one_tm",
#    arguments  = "tm_dummy/tm_dummy.conf" ]
#-------------------------------------------------------------------------------

#-------------------------------------------------------------------------------
# LVM Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
#TM_MAD = [
#    name       = "tm_lvm",
#    executable = "one_tm",
#    arguments  = "tm_lvm/tm_lvm.conf" ]
#-------------------------------------------------------------------------------

#-------------------------------------------------------------------------------
# VMware DataStore Transfer Manager Driver Configuration
#-------------------------------------------------------------------------------
#TM_MAD = [
#    name       = "tm_vmware",
#    executable = "one_tm",
#    arguments  = "tm_vmware/tm_vmware.conf" ]
#-------------------------------------------------------------------------------
    arguments = "-t 15 -d dummy,lvm,shared,qcow2,ssh,vmware,iscsi" ]

#*******************************************************************************
# Image Manager Driver Configuration
# Datastore Driver Configuration
#*******************************************************************************
# Drivers to manage the image repository, specialized for the storage backend
# Drivers to manage the datastores, specialized for the storage backend
#   executable: path of the transfer driver executable, can be an
#               absolute path or relative to $ONE_LOCATION/lib/mads (or
#               /usr/lib/one/mads/ if OpenNebula was installed in /)
#
#   arguments : for the driver executable
#       -t number of threads, i.e. number of repo operations at the same time
#       -d datastore mads separated by commas
#*******************************************************************************
#-------------------------------------------------------------------------------
# FS based Image Manager Driver Configuration
#    -t number of threads, i.e. number of repo operations at the same time
#-------------------------------------------------------------------------------
IMAGE_MAD = [
    executable = "one_image",
    arguments  = "fs -t 15" ]
#-------------------------------------------------------------------------------

DATASTORE_MAD = [
    executable = "one_datastore",
    arguments  = "-t 15 -d fs,vmware,iscsi"
]

#*******************************************************************************
# Hook Manager Configuration
@ -376,7 +336,6 @@ IMAGE_MAD = [
#                 allocated
#            - NO, The hook is executed in the OpenNebula server (default)
#
#
# Host Hooks (HOST_HOOK) defined by:
#   name     : for the hook, useful to track the hook (OPTIONAL)
#   on       : when the hook should be executed,
@ -395,10 +354,8 @@ IMAGE_MAD = [
#               - YES, The hook is executed in the host
#               - NO, The hook is executed in the OpenNebula server (default)
#-------------------------------------------------------------------------------

HM_MAD = [
    executable = "one_hm" ]

#-------------------------------------------------------------------------------

#*******************************************************************************
@ -455,7 +412,7 @@ HM_MAD = [
#   --authz: authorization module
#
# SESSION_EXPIRATION_TIME: Time in seconds to keep an authenticated token as
#   valid. During this time, the driver is not used. Use 0 to disable session
#   valid. During this time, the driver is not used. Use 0 to disable session
#   caching
#
# ENABLE_OTHER_PERMISSIONS: Whether or not to enable the permissions for
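
The DATASTORE_MAD and TM_MAD blocks above only load the drivers; a datastore is then registered against one of the enabled DS_MAD/TM_MAD pairs. A minimal Ruby sketch of that registration using the OCA bindings added by this commit (the credentials, endpoint and attribute values are illustrative, and the NONE_CLUSTER_ID constant is used here exactly as the new onedatastore CLI uses it):

    #!/usr/bin/env ruby
    # Sketch only: register a datastore whose DS_MAD/TM_MAD pair must be among the
    # drivers enabled above (DATASTORE_MAD "-d fs,vmware,iscsi",
    # TM_MAD "-d dummy,lvm,shared,qcow2,ssh,vmware,iscsi").
    require 'OpenNebula'

    client = OpenNebula::Client.new("oneadmin:oneadmin", "http://localhost:2633/RPC2")

    template = "NAME   = development\n" \
               "DS_MAD = fs\n"          \
               "TM_MAD = shared\n"

    ds  = OpenNebula::Datastore.new(OpenNebula::Datastore.build_xml, client)
    cid = OpenNebula::ClusterPool::NONE_CLUSTER_ID   # no cluster selected
    rc  = ds.allocate(template, cid)

    puts OpenNebula.is_error?(rc) ? rc.message : "Datastore #{ds.id} created"
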
@ -151,7 +151,7 @@ module WatchHelper
String :name
String :im_mad
String :vm_mad
String :tm_mad
String :vn_mad
end

DB.create_table? :vm_timestamps do
@ -422,7 +422,7 @@ module WatchHelper
h.name = host['NAME']
h.im_mad = host['IM_MAD']
h.vm_mad = host['VM_MAD']
h.tm_mad = host['TM_MAD']
h.vn_mad = host['VN_MAD']
}
end

@ -166,7 +166,7 @@ bool AclRule::malformed(string& error_str) const
oss << "when using the ALL bit, [resource] ID must be 0";
}

if ( (resource & 0xFF000000000LL) == 0 )
if ( (resource & 0xFFF000000000LL) == 0 )
{
if ( error )
{
@ -177,7 +177,7 @@ bool AclRule::malformed(string& error_str) const
oss << "[resource] type is missing";
}

if ( (resource & 0xFFFFF00000000000LL) != 0 )
if ( (resource & 0xFFFF000000000000LL) != 0 )
{
if ( error )
{
@ -253,12 +253,14 @@ void AclRule::build_str()
PoolObjectSQL::IMAGE,
PoolObjectSQL::USER,
PoolObjectSQL::TEMPLATE,
PoolObjectSQL::GROUP
PoolObjectSQL::GROUP,
PoolObjectSQL::DATASTORE,
PoolObjectSQL::CLUSTER
};

bool prefix = false;

for ( int i = 0; i < 7; i++ )
for ( int i = 0; i < 9; i++ )
{
if ( (resource & objects[i]) != 0 )
{
@ -429,7 +429,7 @@ void AuthManager::notify_request(int auth_id,bool result,const string& message)
void AuthManager::load_mads(int uid)
{
ostringstream oss;
const VectorAttribute * vattr;
const VectorAttribute * vattr = 0;
int rc;
string name;
AuthManagerDriver * authm_driver = 0;
@ -438,7 +438,10 @@ void AuthManager::load_mads(int uid)

NebulaLog::log("AuM",Log::INFO,oss);

vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
if ( mad_conf.size() > 0 )
{
    vattr = static_cast<const VectorAttribute *>(mad_conf[0]);
}

if ( vattr == 0 )
{
@ -22,6 +22,7 @@ env.Prepend(LIBS=[
'nebula_im',
'nebula_hm',
'nebula_rm',
'nebula_datastore',
'nebula_dm',
'nebula_tm',
'nebula_um',
@ -32,6 +33,7 @@ env.Prepend(LIBS=[
'nebula_template',
'nebula_image',
'nebula_pool',
'nebula_cluster',
'nebula_host',
'nebula_vnm',
'nebula_vm',
@ -9,9 +9,9 @@
  :size: 8
  :right: true

:RES_VHNIUTG:
:RES_VHNIUTGDC:
  :desc: Which resource the rule applies to
  :size: 11
  :size: 13

:RID:
  :desc: Resource ID
@ -26,6 +26,6 @@
:default:
- :ID
- :USER
- :RES_VHNIUTG
- :RES_VHNIUTGDC
- :RID
- :OPE_UMAC
31
src/cli/etc/onecluster.yaml
Normal file
31
src/cli/etc/onecluster.yaml
Normal file
@ -0,0 +1,31 @@
---
:ID:
  :desc: ONE identifier for the Cluster
  :size: 4

:NAME:
  :desc: Name of the Cluster
  :size: 15
  :left: true

:HOSTS:
  :desc: Number of Hosts
  :size: 5
  :left: true

:NETS:
  :desc: Number of Networks
  :size: 5
  :left: true

:DATASTORES:
  :desc: Number of Datastores
  :size: 10
  :left: true

:default:
- :ID
- :NAME
- :HOSTS
- :NETS
- :DATASTORES
38
src/cli/etc/onedatastore.yaml
Normal file
38
src/cli/etc/onedatastore.yaml
Normal file
@ -0,0 +1,38 @@
---
:ID:
  :desc: ONE identifier for the Datastore
  :size: 4

:NAME:
  :desc: Name of the Datastore
  :size: 15
  :left: true

:CLUSTER:
  :desc: Name of the Cluster
  :size: 8
  :left: true

:IMAGES:
  :desc: Number of Images
  :size: 6
  :left: true

:TYPE:
  :desc: Datastore driver
  :size: 6
  :left: true

:TM:
  :desc: Transfer driver
  :size: 6
  :left: true

:default:
- :ID
- :NAME
- :CLUSTER
- :IMAGES
- :TYPE
- :TM
@ -5,7 +5,12 @@

:NAME:
  :desc: Name of the Host
  :size: 15
  :size: 12
  :left: true

:CLUSTER:
  :desc: Name of the Cluster
  :size: 8
  :left: true

:RVM:
@ -38,11 +43,12 @@

:STAT:
  :desc: Host status
  :size: 6
  :size: 4

:default:
- :ID
- :NAME
- :CLUSTER
- :RVM
- :TCPU
- :FCPU
@ -3,11 +3,6 @@
  :desc: ONE identifier for the Image
  :size: 4

:NAME:
  :desc: Name of the Image
  :size: 12
  :left: true

:USER:
  :desc: Username of the Virtual Machine owner
  :size: 8
@ -18,6 +13,16 @@
  :size: 8
  :left: true

:NAME:
  :desc: Name of the Image
  :size: 12
  :left: true

:DATASTORE:
  :desc: Name of the Datastore
  :size: 10
  :left: true

:SIZE:
  :desc: Size of the Image
  :size: 7
@ -47,9 +52,9 @@
- :USER
- :GROUP
- :NAME
- :DATASTORE
- :SIZE
- :TYPE
- :REGTIME
- :PERSISTENT
- :STAT
- :RVMS
@ -3,11 +3,6 @@
  :desc: ONE identifier for Virtual Network
  :size: 4

:NAME:
  :desc: Name of the Virtual Network
  :size: 15
  :left: true

:USER:
  :desc: Username of the Virtual Network owner
  :size: 8
@ -18,6 +13,16 @@
  :size: 8
  :left: true

:NAME:
  :desc: Name of the Virtual Network
  :size: 15
  :left: true

:CLUSTER:
  :desc: Name of the Cluster
  :size: 8
  :left: true

:TYPE:
  :desc: Type of Virtual Network
  :size: 6
@ -39,6 +44,7 @@
- :USER
- :GROUP
- :NAME
- :CLUSTER
- :TYPE
- :BRIDGE
- :LEASES
@ -178,7 +178,7 @@ EOT
# Formatters for arguments
########################################################################
def to_id(name)
return 0, name if name.match(/^[0123456789]+$/)
return 0, name.to_i if name.match(/^[0123456789]+$/)

rc = get_pool
return rc if rc.first != 0
@ -202,7 +202,7 @@ EOT

result = names.split(',').collect { |name|
if name.match(/^[0123456789]+$/)
name
name.to_i
else
rc = OneHelper.name_to_id(name, pool, poolname)

@ -288,11 +288,11 @@ EOT

def pool_to_array(pool)
if !pool.instance_of?(Hash)
phash = pool.to_hash
phash = pool.to_hash
else
phash = pool
end

rname = self.class.rname

if phash["#{rname}_POOL"] &&
@ -329,9 +329,15 @@ EOT
client = OpenNebula::Client.new

pool = case poolname
when "HOST" then OpenNebula::HostPool.new(client)
when "GROUP" then OpenNebula::GroupPool.new(client)
when "USER" then OpenNebula::UserPool.new(client)
when "HOST" then OpenNebula::HostPool.new(client)
when "GROUP" then OpenNebula::GroupPool.new(client)
when "USER" then OpenNebula::UserPool.new(client)
when "DATASTORE" then OpenNebula::DatastorePool.new(client)
when "CLUSTER" then OpenNebula::ClusterPool.new(client)
when "VNET" then OpenNebula::VirtualNetworkPool.new(client)
when "IMAGE" then OpenNebula::ImagePool.new(client)
when "VMTEMPLATE" then OpenNebula::TemplatePool.new(client)
when "VM" then OpenNebula::VirtualMachinePool.new(client)
end

rc = pool.info
@ -393,6 +399,18 @@ EOT
end
end

# If the cluster name is empty, returns a '-' char.
#
# @param str [String || Hash] Cluster name, or empty Hash (when <CLUSTER/>)
# @return [String] the same Cluster name, or '-' if it is empty
def OpenNebulaHelper.cluster_str(str)
if str != nil && !str.empty?
str
else
"-"
end
end

def OpenNebulaHelper.update_template(id, resource)
require 'tempfile'

@ -402,7 +420,7 @@ EOT
rc = resource.info

if OpenNebula.is_error?(rc)
puts rc.message
puts rc.message
exit -1
end
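
The new cluster_str helper is what the CLUSTER columns added to the host, network, image and datastore tables rely on: a resource that belongs to no cluster returns an empty name (or the empty Hash produced by an empty <CLUSTER/> element), and the column prints a dash. A standalone sketch of that behaviour, with the logic copied from the helper above:

    module OpenNebulaHelper
        # Same logic as the helper above: nil, empty string or the empty Hash
        # coming from <CLUSTER/> is rendered as '-'
        def self.cluster_str(str)
            (str != nil && !str.empty?) ? str : "-"
        end
    end

    puts OpenNebulaHelper.cluster_str("production")  # => production
    puts OpenNebulaHelper.cluster_str("")            # => -
    puts OpenNebulaHelper.cluster_str({})            # => -
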
@ -44,7 +44,7 @@ private
def self.resource_mask(str)
resource_type=str.split("/")[0]

mask = "-------"
mask = "---------"

resource_type.split("+").each{|type|
case type
@ -62,6 +62,10 @@ private
mask[5] = "T"
when "GROUP"
mask[6] = "G"
when "DATASTORE"
mask[7] = "D"
when "CLUSTER"
mask[8] = "C"
end
}
mask
@ -101,8 +105,8 @@ private
d['STRING'].split(" ")[0]
end

column :RES_VHNIUTG, "Resource to which the rule applies",
:size => 11 do |d|
column :RES_VHNIUTGDC, "Resource to which the rule applies",
:size => 13 do |d|
OneAclHelper::resource_mask d['STRING'].split(" ")[1]
end

@ -115,7 +119,7 @@ private
OneAclHelper::right_mask d['STRING'].split(" ")[2]
end

default :ID, :USER, :RES_VHNIUTG, :RID, :OPE_UMAC
default :ID, :USER, :RES_VHNIUTGDC, :RID, :OPE_UMAC
end

table
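
resource_mask turns the resource part of an ACL rule string into the fixed-width flag column; the two new positions map DATASTORE and CLUSTER to 'D' and 'C', which is why the column is renamed RES_VHNIUTGDC and widened to 13. A standalone sketch of the widened mapping (the first seven cases follow the V H N I U T G ordering implied by the column name):

    # Sketch mirroring OneAclHelper.resource_mask above (order: V H N I U T G D C).
    def resource_mask(str)
        mask = "---------"
        str.split("/")[0].split("+").each do |type|
            case type
            when "VM"        then mask[0] = "V"
            when "HOST"      then mask[1] = "H"
            when "NET"       then mask[2] = "N"
            when "IMAGE"     then mask[3] = "I"
            when "USER"      then mask[4] = "U"
            when "TEMPLATE"  then mask[5] = "T"
            when "GROUP"     then mask[6] = "G"
            when "DATASTORE" then mask[7] = "D"
            when "CLUSTER"   then mask[8] = "C"
            end
        end
        mask
    end

    puts resource_mask("DATASTORE+CLUSTER/*")  # => -------DC
    puts resource_mask("VM+HOST/#5")           # => VH-------
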
119
src/cli/one_helper/onecluster_helper.rb
Normal file
119
src/cli/one_helper/onecluster_helper.rb
Normal file
@ -0,0 +1,119 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

require 'one_helper'

class OneClusterHelper < OpenNebulaHelper::OneHelper

    CLUSTER = {
        :name        => "cluster",
        :short       => "-c id|name",
        :large       => "--cluster id|name" ,
        :description => "Selects the cluster",
        :format      => String,
        :proc        => lambda { |o, options|
            ch = OneClusterHelper.new
            rc, cid = ch.to_id(o)
            if rc == 0
                options[:cluster] = cid
            else
                puts cid
                puts "option cluster: Parsing error"
                exit -1
            end
        }
    }

    def self.rname
        "CLUSTER"
    end

    def self.conf_file
        "onecluster.yaml"
    end

    def format_pool(options)
        config_file = self.class.table_conf

        table = CLIHelper::ShowTable.new(config_file, self) do
            column :ID, "ONE identifier for the Cluster", :size=>4 do |d|
                d["ID"]
            end

            column :NAME, "Name of the Cluster", :left, :size=>15 do |d|
                d["NAME"]
            end

            column :HOSTS, "Number of Hosts", :left, :size=>5 do |d|
                d["HOSTS"].size
            end

            column :NETS, "Number of Networks", :left, :size=>5 do |d|
                d["VNETS"].size
            end

            column :DATASTORES, "Number of Datastores", :left, :size=>10 do |d|
                d["DATASTORES"].size
            end

            default :ID, :NAME, :HOSTS, :NETS, :DATASTORES
        end

        table
    end

    private

    def factory(id=nil)
        if id
            OpenNebula::Cluster.new_with_id(id, @client)
        else
            xml=OpenNebula::Cluster.build_xml
            OpenNebula::Cluster.new(xml, @client)
        end
    end

    def factory_pool(user_flag=-2)
        OpenNebula::ClusterPool.new(@client)
    end

    def format_resource(cluster)
        str="%-15s: %-20s"
        str_h1="%-80s"

        CLIHelper.print_header(str_h1 % "CLUSTER #{cluster['ID']} INFORMATION")
        puts str % ["ID", cluster.id.to_s]
        puts str % ["NAME", cluster.name]
        puts

        CLIHelper.print_header("%-15s" % ["HOSTS"])
        cluster.host_ids.each do |id|
            puts "%-15s" % [id]
        end

        puts
        CLIHelper.print_header("%-15s" % ["VNETS"])
        cluster.vnet_ids.each do |id|
            puts "%-15s" % [id]
        end

        puts
        CLIHelper.print_header("%-15s" % ["DATASTORES"])
        cluster.datastore_ids.each do |id|
            puts "%-15s" % [id]
        end
    end
end
139
src/cli/one_helper/onedatastore_helper.rb
Normal file
139
src/cli/one_helper/onedatastore_helper.rb
Normal file
@ -0,0 +1,139 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

require 'one_helper'

class OneDatastoreHelper < OpenNebulaHelper::OneHelper
    DATASTORE = {
        :name        => "datastore",
        :short       => "-d id|name",
        :large       => "--datastore id|name" ,
        :description => "Selects the datastore",
        :format      => String,
        :proc        => lambda { |o, options|
            ch = OneDatastoreHelper.new
            rc, dsid = ch.to_id(o)
            if rc == 0
                options[:datastore] = dsid
            else
                puts dsid
                puts "option datastore: Parsing error"
                exit -1
            end
        }
    }

    def self.rname
        "DATASTORE"
    end

    def self.conf_file
        "onedatastore.yaml"
    end

    def format_pool(options)
        config_file = self.class.table_conf

        table = CLIHelper::ShowTable.new(config_file, self) do
            column :ID, "ONE identifier for the Datastore", :size=>4 do |d|
                d["ID"]
            end

            column :NAME, "Name of the Datastore", :left, :size=>12 do |d|
                d["NAME"]
            end

            column :CLUSTER, "Name of the Cluster", :left, :size=>8 do |d|
                OpenNebulaHelper.cluster_str(d["CLUSTER"])
            end

            column :IMAGES, "Number of Images", :left, :size=>6 do |d|
                if d["IMAGES"]["ID"].nil?
                    "0"
                else
                    d["IMAGES"]["ID"].size
                end
            end

            column :TYPE, "Datastore driver", :left, :size=>6 do |d|
                d["DS_MAD"]
            end

            column :TM, "Transfer driver", :left, :size=>6 do |d|
                d["TM_MAD"]
            end

            default :ID, :CLUSTER, :NAME, :IMAGES, :TYPE, :TM
        end

        table
    end

    private

    def factory(id=nil)
        if id
            OpenNebula::Datastore.new_with_id(id, @client)
        else
            xml=OpenNebula::Datastore.build_xml
            OpenNebula::Datastore.new(xml, @client)
        end
    end

    def factory_pool(user_flag=-2)
        #TBD OpenNebula::UserPool.new(@client, user_flag)
        OpenNebula::DatastorePool.new(@client)
    end

    def format_resource(datastore)
        str="%-15s: %-20s"
        str_h1="%-80s"

        CLIHelper.print_header(str_h1 % "DATASTORE #{datastore['ID']} INFORMATION")
        puts str % ["ID", datastore.id.to_s]
        puts str % ["NAME", datastore.name]
        puts str % ["USER", datastore['UNAME']]
        puts str % ["GROUP", datastore['GNAME']]
        puts str % ["CLUSTER", OpenNebulaHelper.cluster_str(datastore['CLUSTER'])]

        puts str % ["DS_MAD", datastore['DS_MAD']]
        puts str % ["TM_MAD", datastore['TM_MAD']]
        puts str % ["BASE PATH",datastore['BASE_PATH']]
        puts

        CLIHelper.print_header(str_h1 % "PERMISSIONS",false)

        ["OWNER", "GROUP", "OTHER"].each { |e|
            mask = "---"
            mask[0] = "u" if datastore["PERMISSIONS/#{e}_U"] == "1"
            mask[1] = "m" if datastore["PERMISSIONS/#{e}_M"] == "1"
            mask[2] = "a" if datastore["PERMISSIONS/#{e}_A"] == "1"

            puts str % [e, mask]
        }
        puts

        CLIHelper.print_header(str_h1 % "DATASTORE TEMPLATE", false)
        puts datastore.template_str

        puts

        CLIHelper.print_header("%-15s" % "IMAGES")
        datastore.img_ids.each do |id|
            puts "%-15s" % [id]
        end
    end
end
@ -39,10 +39,14 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
d["ID"]
end

column :NAME, "Name of the Host", :left, :size=>15 do |d|
column :NAME, "Name of the Host", :left, :size=>12 do |d|
d["NAME"]
end

column :CLUSTER, "Name of the Cluster", :left, :size=>8 do |d|
OpenNebulaHelper.cluster_str(d["CLUSTER"])
end

column :RVM, "Number of Virtual Machines running", :size=>6 do |d|
d["HOST_SHARE"]["RUNNING_VMS"]
end
@ -82,11 +86,11 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
OpenNebulaHelper.unit_to_str(acpu,options)
end

column :STAT, "Host status", :size=>6 do |d|
column :STAT, "Host status", :size=>4 do |d|
OneHostHelper.state_to_str(d["STATE"])
end

default :ID, :NAME, :RVM, :TCPU, :FCPU, :ACPU, :TMEM, :FMEM,
default :ID, :NAME, :CLUSTER, :RVM, :TCPU, :FCPU, :ACPU, :TMEM, :FMEM,
:AMEM, :STAT
end

@ -118,11 +122,11 @@ class OneHostHelper < OpenNebulaHelper::OneHelper

puts str % ["ID", host.id.to_s]
puts str % ["NAME", host.name]
puts str % ["CLUSTER", OpenNebulaHelper.cluster_str(host['CLUSTER'])]
puts str % ["STATE", host.state_str]
puts str % ["IM_MAD", host['IM_MAD']]
puts str % ["VM_MAD", host['VM_MAD']]
puts str % ["VN_MAD", host['VN_MAD']]
puts str % ["TM_MAD", host['TM_MAD']]
puts str % ["LAST MONITORING TIME", host['LAST_MON_TIME']]
puts

@ -45,10 +45,6 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
d["ID"]
end

column :NAME, "Name of the Image", :left, :size=>12 do |d|
d["NAME"]
end

column :USER, "Username of the Virtual Machine owner", :left,
:size=>8 do |d|
helper.user_name(d, options)
@ -59,6 +55,14 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
helper.group_name(d, options)
end

column :NAME, "Name of the Image", :left, :size=>12 do |d|
d["NAME"]
end

column :DATASTORE, "Name of the Datastore", :left, :size=>10 do |d|
d["DATASTORE"]
end

column :TYPE, "Type of the Image", :size=>4 do |d,e|
OneImageHelper.type_to_str(d["TYPE"])
end
@ -67,7 +71,7 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
:size=>20 do |d|
OpenNebulaHelper.time_to_str(d["REGTIME"])
end

column :PERSISTENT, "Whether the Image is persistent or not",
:size=>3 do |d|
OpenNebulaHelper.boolean_to_str(d["PERSISTENT"])
@ -87,7 +91,7 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
OpenNebulaHelper.unit_to_str(d['SIZE'].to_i,options,"M")
end

default :ID, :USER, :GROUP, :NAME, :SIZE, :TYPE, :REGTIME,
default :ID, :USER, :GROUP, :NAME, :DATASTORE, :SIZE, :TYPE,
:PERSISTENT , :STAT, :RVMS
end

@ -118,6 +122,7 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
puts str % ["NAME", image.name]
puts str % ["USER", image['UNAME']]
puts str % ["GROUP",image['GNAME']]
puts str % ["DATASTORE",image['DATASTORE']]
puts str % ["TYPE", image.type_str]
puts str % ["REGISTER TIME",
OpenNebulaHelper.time_to_str(image['REGTIME'])]
@ -34,7 +34,7 @@ class OneUserHelper < OpenNebulaHelper::OneHelper
begin
password = File.read(arg).split("\n").first
rescue
return -1, "Can not read file: #{arg}"
return -1, "Cannot read file: #{arg}"
end
else
password = arg.dup
@ -39,11 +39,6 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper
d["ID"]
end

column :NAME, "Name of the Virtual Network", :left,
:size=>15 do |d|
d["NAME"]
end

column :USER, "Username of the Virtual Network owner", :left,
:size=>8 do |d|
helper.user_name(d, options)
@ -54,11 +49,20 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper
helper.group_name(d, options)
end

column :NAME, "Name of the Virtual Network", :left,
:size=>15 do |d|
d["NAME"]
end

column :CLUSTER, "Name of the Cluster", :left, :size=>8 do |d|
OpenNebulaHelper.cluster_str(d["CLUSTER"])
end

column :TYPE, "Type of Virtual Network", :size=>6 do |d|
OneVNetHelper.type_to_str(d["TYPE"])
end

column :SIZE, "Size of the Virtual Network", :size=>6 do |d|
column :SIZE, "Size of the Virtual Network", :size=>5 do |d|
d["SIZE"]
end

@ -68,7 +72,7 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper
end

column :LEASES, "Number of this Virtual Network's given leases",
:size=>7 do |d|
:size=>6 do |d|
d["TOTAL_LEASES"]
end

@ -103,6 +107,7 @@ class OneVNetHelper < OpenNebulaHelper::OneHelper
puts str % ["NAME", vn['NAME']]
puts str % ["USER", vn['UNAME']]
puts str % ["GROUP", vn['GNAME']]
puts str % ["CLUSTER", OpenNebulaHelper.cluster_str(vn['CLUSTER'])]
puts str % ["TYPE", vn.type_str]
puts str % ["BRIDGE", vn["BRIDGE"]]
puts str % ["VLAN", OpenNebulaHelper.boolean_to_str(vn['VLAN'])]
176
src/cli/onecluster
Executable file
176
src/cli/onecluster
Executable file
@ -0,0 +1,176 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

ONE_LOCATION=ENV["ONE_LOCATION"]

if !ONE_LOCATION
    RUBY_LIB_LOCATION="/usr/lib/one/ruby"
else
    RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby"
end

$: << RUBY_LIB_LOCATION
$: << RUBY_LIB_LOCATION+"/cli"

require 'command_parser'
require 'one_helper/onecluster_helper'

cmd=CommandParser::CmdParser.new(ARGV) do
    usage "`onecluster` <command> [<args>] [<options>]"
    version OpenNebulaHelper::ONE_VERSION

    helper = OneClusterHelper.new

    ########################################################################
    # Global Options
    ########################################################################
    set :option, CommandParser::OPTIONS

    list_options = CLIHelper::OPTIONS
    list_options << OpenNebulaHelper::XML
    list_options << OpenNebulaHelper::NUMERIC

    ########################################################################
    # Formatters for arguments
    ########################################################################
    set :format, :clusterid, OneClusterHelper.to_id_desc do |arg|
        helper.to_id(arg)
    end

    set :format, :clusterid_list, OneClusterHelper.list_to_id_desc do |arg|
        helper.list_to_id(arg)
    end

    set :format, :vnetid, OpenNebulaHelper.rname_to_id_desc("VNET") do |arg|
        OpenNebulaHelper.rname_to_id(arg, "VNET")
    end

    set :format, :hostid, OpenNebulaHelper.rname_to_id_desc("HOST") do |arg|
        OpenNebulaHelper.rname_to_id(arg, "HOST")
    end

    set :format, :datastoreid, OpenNebulaHelper.rname_to_id_desc("DATASTORE") do |arg|
        OpenNebulaHelper.rname_to_id(arg, "DATASTORE")
    end

    ########################################################################
    # Commands
    ########################################################################

    create_desc = <<-EOT.unindent
        Creates a new Cluster
    EOT

    command :create, create_desc, :name do
        helper.create_resource(options) do |cluster|
            cluster.allocate(args[0])
        end
    end

    delete_desc = <<-EOT.unindent
        Deletes the given Cluster
    EOT

    command :delete, delete_desc, [:range, :clusterid_list] do
        helper.perform_actions(args[0],options,"deleted") do |obj|
            obj.delete
        end
    end

    list_desc = <<-EOT.unindent
        Lists Clusters in the pool
    EOT

    command :list, list_desc, :options=>list_options do
        helper.list_pool(options)
    end

    show_desc = <<-EOT.unindent
        Shows information for the given Cluster
    EOT

    command :show, show_desc,:clusterid, :options=>OpenNebulaHelper::XML do
        helper.show_resource(args[0],options)
    end

    addhost_desc = <<-EOT.unindent
        Adds a Host to the given Cluster
    EOT

    # TODO: allow the second param to be [:range, :hostid_list]
    command :addhost, addhost_desc,:clusterid, :hostid do
        helper.perform_action(args[0],options,"updated") do |cluster|
            cluster.addhost(args[1].to_i)
        end
    end

    delhost_desc = <<-EOT.unindent
        Deletes a Host from the given Cluster
    EOT

    # TODO: allow the second param to be [:range, :hostid_list]
    command :delhost, delhost_desc, :clusterid, :hostid do
        helper.perform_action(args[0],options,"updated") do |cluster|
            cluster.delhost(args[1].to_i)
        end
    end

    adddatastore_desc = <<-EOT.unindent
        Adds a Datastore to the given Cluster
    EOT

    # TODO: allow the second param to be [:range, :datastoreid_list]
    command :adddatastore, adddatastore_desc,:clusterid, :datastoreid do
        helper.perform_action(args[0],options,"updated") do |cluster|
            cluster.adddatastore(args[1].to_i)
        end
    end

    deldatastore_desc = <<-EOT.unindent
        Deletes a Datastore from the given Cluster
    EOT

    # TODO: allow the second param to be [:range, :datastoreid_list]
    command :deldatastore, deldatastore_desc, :clusterid, :datastoreid do
        helper.perform_action(args[0],options,"updated") do |cluster|
            cluster.deldatastore(args[1].to_i)
        end
    end

    addvnet_desc = <<-EOT.unindent
        Adds a Virtual Network to the given Cluster
    EOT

    # TODO: allow the second param to be [:range, :vnetid_list]
    command :addvnet, addvnet_desc,:clusterid, :vnetid do
        helper.perform_action(args[0],options,"updated") do |cluster|
            cluster.addvnet(args[1].to_i)
        end
    end

    delvnet_desc = <<-EOT.unindent
        Deletes a Virtual Network from the given Cluster
    EOT

    # TODO: allow the second param to be [:range, :vnetid_list]
    command :delvnet, delvnet_desc,:clusterid, :vnetid do
        helper.perform_action(args[0],options,"updated") do |cluster|
            cluster.delvnet(args[1].to_i)
        end
    end
end
163
src/cli/onedatastore
Executable file
163
src/cli/onedatastore
Executable file
@ -0,0 +1,163 @@
#!/usr/bin/env ruby

# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

ONE_LOCATION=ENV["ONE_LOCATION"]

if !ONE_LOCATION
    RUBY_LIB_LOCATION="/usr/lib/one/ruby"
else
    RUBY_LIB_LOCATION=ONE_LOCATION+"/lib/ruby"
end

$: << RUBY_LIB_LOCATION
$: << RUBY_LIB_LOCATION+"/cli"

require 'command_parser'
require 'one_helper/onedatastore_helper'
require 'one_helper/onecluster_helper'

cmd=CommandParser::CmdParser.new(ARGV) do
    usage "`onedatastore` <command> [<args>] [<options>]"
    version OpenNebulaHelper::ONE_VERSION

    helper = OneDatastoreHelper.new

    ########################################################################
    # Global Options
    ########################################################################
    set :option, CommandParser::OPTIONS

    list_options = CLIHelper::OPTIONS
    list_options << OpenNebulaHelper::XML
    list_options << OpenNebulaHelper::NUMERIC

    ########################################################################
    # Formatters for arguments
    ########################################################################
    set :format, :datastoreid, OneDatastoreHelper.to_id_desc do |arg|
        helper.to_id(arg)
    end

    set :format, :datastoreid_list, OneDatastoreHelper.list_to_id_desc do |arg|
        helper.list_to_id(arg)
    end

    set :format, :clusterid, OpenNebulaHelper.rname_to_id_desc("CLUSTER") do |arg|
        OpenNebulaHelper.rname_to_id(arg, "CLUSTER")
    end

    set :format, :groupid, OpenNebulaHelper.rname_to_id_desc("GROUP") do |arg|
        OpenNebulaHelper.rname_to_id(arg, "GROUP")
    end

    set :format, :userid, OpenNebulaHelper.rname_to_id_desc("USER") do |arg|
        OpenNebulaHelper.rname_to_id(arg, "USER")
    end

    ########################################################################
    # Commands
    ########################################################################

    create_desc = <<-EOT.unindent
        Creates a new Datastore from the given template file
    EOT

    command :create, create_desc, :file, :options=>[OneClusterHelper::CLUSTER] do

        cid = options[:cluster] || ClusterPool::NONE_CLUSTER_ID

        helper.create_resource(options) do |datastore|
            begin
                template=File.read(args[0])
                datastore.allocate(template, cid)
            rescue =>e
                STDERR.puts e.message
                exit -1
            end
        end
    end

    delete_desc = <<-EOT.unindent
        Deletes the given Datastore
    EOT

    command :delete, delete_desc, [:range, :datastoreid_list] do
        helper.perform_actions(args[0],options,"deleted") do |obj|
            obj.delete
        end
    end

    chgrp_desc = <<-EOT.unindent
        Changes the Datastore group
    EOT

    command :chgrp, chgrp_desc,[:range, :datastoreid_list], :groupid do
        helper.perform_actions(args[0],options,"Group changed") do |obj|
            obj.chown(-1, args[1].to_i)
        end
    end

    chown_desc = <<-EOT.unindent
        Changes the Datastore owner and group
    EOT

    command :chown, chown_desc, [:range, :datastoreid_list], :userid,
            [:groupid,nil] do
        gid = args[2].nil? ? -1 : args[2].to_i
        helper.perform_actions(args[0],options,"Owner/Group changed") do |obj|
            obj.chown(args[1].to_i, gid)
        end
    end

    chmod_desc = <<-EOT.unindent
        Changes the Datastore permissions
    EOT

    command :chmod, chmod_desc, [:range, :datastoreid_list], :octet do
        helper.perform_actions(args[0],options, "Permissions changed") do |obj|
            obj.chmod_octet(args[1])
        end
    end

    list_desc = <<-EOT.unindent
        Lists Datastores in the pool
    EOT

    command :list, list_desc, :options=>list_options do
        helper.list_pool(options)
    end

    show_desc = <<-EOT.unindent
        Shows information for the given Datastore
    EOT

    command :show, show_desc, :datastoreid, :options=>OpenNebulaHelper::XML do
        helper.show_resource(args[0],options)
    end

    update_desc = <<-EOT.unindent
        Launches the system editor to modify and update the template contents
    EOT

    command :update, update_desc, :datastoreid do
        helper.perform_action(args[0],options,"modified") do |obj|
            str = OpenNebulaHelper.update_template(args[0], obj)
            obj.update(str)
        end
    end
end
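
Both new commands are thin wrappers over the Cluster and Datastore OCA classes installed by this commit. A hedged end-to-end sketch of the same calls the CLIs issue (allocate, adddatastore via the cluster id, addhost); the names, host id and credentials are illustrative:

    #!/usr/bin/env ruby
    # Sketch only: drives the same OCA calls used by onecluster/onedatastore above.
    require 'OpenNebula'

    client = OpenNebula::Client.new   # reads ONE_AUTH and the default endpoint

    cluster = OpenNebula::Cluster.new(OpenNebula::Cluster.build_xml, client)
    rc = cluster.allocate("production")              # as `onecluster create`
    raise rc.message if OpenNebula.is_error?(rc)

    ds_template = "NAME = nfs_images\nDS_MAD = fs\nTM_MAD = shared\n"
    datastore = OpenNebula::Datastore.new(OpenNebula::Datastore.build_xml, client)
    rc = datastore.allocate(ds_template, cluster.id) # created directly in the cluster
    raise rc.message if OpenNebula.is_error?(rc)

    rc = cluster.addhost(3)                          # as `onecluster addhost`; host id 3 is illustrative
    raise rc.message if OpenNebula.is_error?(rc)
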
@ -29,6 +29,7 @@ $: << RUBY_LIB_LOCATION+"/cli"

require 'command_parser'
require 'one_helper/onehost_helper'
require 'one_helper/onecluster_helper'

cmd=CommandParser::CmdParser.new(ARGV) do
usage "`onehost` <command> [<args>] [<options>]"
@ -41,9 +42,36 @@ cmd=CommandParser::CmdParser.new(ARGV) do
########################################################################
set :option, CommandParser::OPTIONS

IM = {
:name => "im",
:short => "-i im_mad",
:large => "--im im_mad" ,
:description => "Set the information driver for the host",
:format => String
}

VMM = {
:name => "vmm",
:short => "-v vmm_mad",
:large => "--vm vmm_mad" ,
:description => "Set the virtualization driver for the host",
:format => String
}

VNET = {
:name => "vnm",
:short => "-n vnet_mad",
:large => "--net vnet_mad" ,
:description => "Set the network driver for the host",
:format => String
}

CREAT_OPTIONS = [ IM, VMM, VNET, OneClusterHelper::CLUSTER ]

########################################################################
# Formatters for arguments
########################################################################

set :format, :hostid, OneHostHelper.to_id_desc do |arg|
helper.to_id(arg)
end
@ -60,10 +88,23 @@ cmd=CommandParser::CmdParser.new(ARGV) do
Creates a new Host
EOT

command :create, create_desc, :hostname, :im_mad, :vmm_mad,
:tm_mad, :vnm_mad do
command :create, create_desc, :hostname, :options=>CREAT_OPTIONS do
if options[:im].nil? or options[:vmm].nil? or options[:vnm].nil?
STDERR.puts "Drivers are mandatory to create a host:"
STDERR.puts "\t -i information driver"
STDERR.puts "\t -v hypervisor driver"
STDERR.puts "\t -n network driver"
exit -1
end

cid = options[:cluster] || ClusterPool::NONE_CLUSTER_ID

helper.create_resource(options) do |host|
host.allocate(args[0], args[1], args[2], args[4], args[3])
host.allocate(args[0],
options[:im],
options[:vmm],
options[:vnm],
cid)
end
end

@ -29,6 +29,7 @@ $: << RUBY_LIB_LOCATION+"/cli"

require 'command_parser'
require 'one_helper/oneimage_helper'
require 'one_helper/onedatastore_helper'

cmd=CommandParser::CmdParser.new(ARGV) do
usage "`oneimage` <command> [<args>] [<options>]"
@ -45,6 +46,8 @@ cmd=CommandParser::CmdParser.new(ARGV) do
list_options << OpenNebulaHelper::XML
list_options << OpenNebulaHelper::NUMERIC

CREATE_OPTIONS = [OneDatastoreHelper::DATASTORE]

########################################################################
# Formatters for arguments
########################################################################
@ -76,10 +79,20 @@ cmd=CommandParser::CmdParser.new(ARGV) do
Creates a new Image from the given template file
EOT

command :create, create_desc, :file do
command :create, create_desc, :file, :options=>CREATE_OPTIONS do
if options[:datastore].nil?
STDERR.puts "Datastore to save the image is mandatory: "
STDERR.puts "\t -d datastore_id"
exit -1
end
helper.create_resource(options) do |image|
template=File.read(args[0])
image.allocate(template)
begin
template=File.read(args[0])
image.allocate(template, options[:datastore] )
rescue => e
STDERR.puts e.message
exit -1
end
end
end

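
Image creation now requires a target datastore, so the underlying OCA call gains a second argument. A short sketch of that call (the template and datastore id are illustrative; datastore 1 is typically the default image datastore):

    # Sketch of the two-argument Image#allocate used by `oneimage create` above.
    require 'OpenNebula'

    client   = OpenNebula::Client.new
    image    = OpenNebula::Image.new(OpenNebula::Image.build_xml, client)
    template = "NAME = ttylinux\nPATH = /tmp/ttylinux.img\n"   # illustrative

    rc = image.allocate(template, 1)   # 1 = id of the target datastore
    puts OpenNebula.is_error?(rc) ? rc.message : "Image #{image.id} created"
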
@ -29,6 +29,7 @@ $: << RUBY_LIB_LOCATION+"/cli"

require 'command_parser'
require 'one_helper/onevnet_helper'
require 'one_helper/onecluster_helper'

cmd=CommandParser::CmdParser.new(ARGV) do
usage "`onevnet` <command> [<args>] [<options>]"
@ -41,6 +42,8 @@ cmd=CommandParser::CmdParser.new(ARGV) do
########################################################################
set :option, CommandParser::OPTIONS

CREATE_OPTIONS = [OneClusterHelper::CLUSTER]

########################################################################
# Formatters for arguments
########################################################################
@ -72,10 +75,17 @@ cmd=CommandParser::CmdParser.new(ARGV) do
Creates a new Virtual Network from the given template file
EOT

command :create, create_desc, :file do
command :create, create_desc, :file, :options=>CREATE_OPTIONS do
cid = options[:cluster] || ClusterPool::NONE_CLUSTER_ID

helper.create_resource(options) do |vn|
template=File.read(args[0])
vn.allocate(template)
begin
template=File.read(args[0])
vn.allocate(template, cid)
rescue => e
STDERR.puts e.message
exit -1
end
end
end

@ -14,13 +14,16 @@
# limitations under the License. #
#--------------------------------------------------------------------------- #

require 'thread'

class CloudAuth
# These are the authentication methods for the user requests
AUTH_MODULES = {
"occi" => 'OCCICloudAuth',
"sunstone" => 'SunstoneCloudAuth' ,
"ec2" => 'EC2CloudAuth',
"x509" => 'X509CloudAuth'
"occi" => 'OCCICloudAuth',
"sunstone" => 'SunstoneCloudAuth' ,
"ec2" => 'EC2CloudAuth',
"x509" => 'X509CloudAuth',
"opennebula" => 'OpenNebulaCloudAuth'
}

# These are the authentication modules for the OpenNebula requests
@ -43,9 +46,11 @@ class CloudAuth

# conf a hash with the configuration attributes as symbols
def initialize(conf, logger=nil)
@conf = conf
@conf = conf
@logger = logger

@lock = Mutex.new

@token_expiration_time = Time.now.to_i + EXPIRE_DELTA

if AUTH_MODULES.include?(@conf[:auth])
@ -55,11 +60,10 @@ class CloudAuth
raise "Auth module not specified"
end

if AUTH_CORE_MODULES.include?(@conf[:core_auth])
core_auth = AUTH_CORE_MODULES[@conf[:core_auth]]
else
core_auth =AUTH_CORE_MODULES["cipher"]
core_auth = AUTH_CORE_MODULES["cipher"]
end

begin
@ -68,6 +72,11 @@ class CloudAuth
rescue => e
raise e.message
end

@user_pool = OpenNebula::UserPool.new(client)

rc = @user_pool.info
raise rc.message if OpenNebula.is_error?(rc)
end

# Generate a new OpenNebula client for the target User, if the username
@ -75,19 +84,24 @@ class CloudAuth
# username:: _String_ Name of the User
# [return] _Client_
def client(username=nil)
expiration_time = @lock.synchronize {
time_now = Time.now.to_i

if time_now > @token_expiration_time - EXPIRE_MARGIN
@token_expiration_time = time_now + EXPIRE_DELTA
end

@token_expiration_time
}

token = @server_auth.login_token(expiration_time,username)

Client.new(token,@conf[:one_xmlrpc])
end

def update_userpool_cache
@user_pool = OpenNebula::UserPool.new(client)

rc = @user_pool.info
if OpenNebula.is_error?(rc)
raise rc.message
end
end

#
#
#
def auth(env, params={})
username = do_auth(env, params)

@ -101,36 +115,47 @@ class CloudAuth

protected

def expiration_time
time_now = Time.now.to_i

if time_now > @token_expiration_time - EXPIRE_MARGIN
@token_expiration_time = time_now + EXPIRE_DELTA
# Gets the password associated with a username
# username:: _String_ the username
# driver:: _String_ list of valid drivers for the user, | separated
# [return] _Hash_ with the username
def get_password(username, driver=nil)
xpath = "USER[NAME=\"#{username}\""
if driver
xpath << " and (AUTH_DRIVER=\""
xpath << driver.split('|').join("\" or AUTH_DRIVER=\"") << '")'
end
xpath << "]/PASSWORD"

@token_expiration_time
end

# If @user_pool is not defined it will retrieve it from OpenNebula
def get_userpool
update_userpool_cache if @user_pool.nil?
@user_pool
end

def get_password(username, non_public_user=false)
if non_public_user == true
xp="USER[NAME=\"#{username}\" and AUTH_DRIVER!=\"public\"]/PASSWORD"
else
xp="USER[NAME=\"#{username}\"]/PASSWORD"
end

return get_userpool[xp]
retrieve_from_userpool(xpath)
end

# Gets the username associated with a password
# password:: _String_ the password
# [return] _Hash_ with the username
def get_username(password)
return get_userpool["USER[contains(PASSWORD, \"#{password}\")]/NAME"]
xpath = "USER[contains(PASSWORD, \"#{password}\")]/NAME"

retrieve_from_userpool(xpath)
end

private

def retrieve_from_userpool(xpath)
@lock.synchronize {
@user_pool[xpath]
}
end

def update_userpool_cache
oneadmin_client = client

@lock.synchronize {
@user_pool = OpenNebula::UserPool.new(oneadmin_client)

rc = @user_pool.info
raise rc.message if OpenNebula.is_error?(rc)
}
end
end
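
get_password now takes a '|'-separated list of allowed AUTH_DRIVERs and builds a single XPath over the cached user pool, which is how each front-end restricts which kind of user may log in (see the 'core|public' and 'core' callers in the EC2, OCCI and Sunstone modules below). A standalone sketch of the XPath it produces; the user names are illustrative:

    # Standalone sketch: the XPath built by get_password above for a driver list.
    def password_xpath(username, driver=nil)
        xpath = "USER[NAME=\"#{username}\""
        if driver
            xpath << " and (AUTH_DRIVER=\""
            xpath << driver.split('|').join("\" or AUTH_DRIVER=\"") << '")'
        end
        xpath << "]/PASSWORD"
    end

    puts password_xpath("jsmith", 'core|public')
    # => USER[NAME="jsmith" and (AUTH_DRIVER="core" or AUTH_DRIVER="public")]/PASSWORD
    puts password_xpath("serveradmin", 'core')
    # => USER[NAME="serveradmin" and (AUTH_DRIVER="core")]/PASSWORD
    puts password_xpath("anyuser")
    # => USER[NAME="anyuser"]/PASSWORD
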
@ -17,7 +17,7 @@
module EC2CloudAuth
def do_auth(env, params={})
username = params['AWSAccessKeyId']
one_pass = get_password(username)
one_pass = get_password(username, 'core|public')
return nil unless one_pass

signature = case params['SignatureVersion']
@ -21,7 +21,7 @@ module OCCICloudAuth
|
||||
if auth.provided? && auth.basic?
|
||||
username, password = auth.credentials
|
||||
|
||||
one_pass = get_password(username)
|
||||
one_pass = get_password(username, 'core|public')
|
||||
|
||||
if one_pass && one_pass == password
|
||||
return username
|
||||
|
37
src/tm_mad/shared/tm_delete.sh → src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb
Executable file → Normal file
@ -1,5 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
@ -16,23 +14,26 @@
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
SRC=$1
|
||||
DST=$2
|
||||
module OpenNebulaCloudAuth
|
||||
def do_auth(env, params={})
|
||||
auth = Rack::Auth::Basic::Request.new(env)
|
||||
|
||||
if [ -z "${ONE_LOCATION}" ]; then
|
||||
TMCOMMON=/usr/lib/one/mads/tm_common.sh
|
||||
else
|
||||
TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh
|
||||
fi
|
||||
if auth.provided? && auth.basic?
|
||||
username, password = auth.credentials
|
||||
|
||||
. $TMCOMMON
|
||||
client = OpenNebula::Client.new("#{username}:#{password}")
|
||||
user = OpenNebula::User.new_with_id(OpenNebula::User::SELF, client)
|
||||
|
||||
get_vmdir
|
||||
rc = user.info
|
||||
if OpenNebula.is_error?(rc)
|
||||
logger.error { "User #{username} could not be authenticated" }
|
||||
logger.error { rc.message }
|
||||
return nil
|
||||
end
|
||||
|
||||
return username
|
||||
end
|
||||
|
||||
SRC_PATH=`arg_path $SRC`
|
||||
|
||||
fix_src_path
|
||||
|
||||
log "Deleting $SRC_PATH"
|
||||
exec_and_log "rm -rf $SRC_PATH" \
|
||||
"Error deleting $SRC_PATH"
|
||||
return nil
|
||||
end
|
||||
end
|
@ -21,7 +21,7 @@ module SunstoneCloudAuth
|
||||
if auth.provided? && auth.basic?
|
||||
username, password = auth.credentials
|
||||
|
||||
one_pass = get_password(username, true)
|
||||
one_pass = get_password(username, 'core')
|
||||
|
||||
if one_pass && one_pass == Digest::SHA1.hexdigest(password)
|
||||
return username
|
||||
|
@ -37,7 +37,7 @@ class CloudServer
|
||||
##########################################################################
|
||||
# Public attributes
|
||||
##########################################################################
|
||||
attr_reader :config, :logger
|
||||
attr_reader :config
|
||||
|
||||
# Initializes the Cloud server based on a config file
|
||||
# config_file:: _String_ for the server. MUST include the following
|
||||
@ -47,9 +47,18 @@ class CloudServer
|
||||
# XMLRPC
|
||||
def initialize(config, logger=nil)
|
||||
# --- Load the Cloud Server configuration file ---
|
||||
@config = config
|
||||
@logger = logger
|
||||
@config = config
|
||||
@@logger = logger
|
||||
end
|
||||
|
||||
def self.logger
|
||||
return @@logger
|
||||
end
|
||||
|
||||
def logger
|
||||
return @@logger
|
||||
end
|
||||
|
||||
#
|
||||
# Prints the configuration of the server
|
||||
#
|
||||
@ -101,15 +110,27 @@ module CloudLogger
|
||||
DATE_FORMAT = "%a %b %d %H:%M:%S %Y"
|
||||
|
||||
# Patch logger class to be compatible with Rack::CommonLogger
|
||||
class ::Logger
|
||||
class CloudLogger < Logger
|
||||
|
||||
def initialize(path)
|
||||
super(path)
|
||||
end
|
||||
|
||||
def write(msg)
|
||||
info msg.chop
|
||||
end
|
||||
|
||||
def add(severity, message = nil, progname = nil, &block)
|
||||
rc = super(severity, message, progname, &block)
|
||||
@logdev.dev.flush
|
||||
|
||||
rc
|
||||
end
|
||||
end
|
||||
|
||||
def enable_logging(path=nil, debug_level=3)
|
||||
path ||= $stdout
|
||||
logger = ::Logger.new(path)
|
||||
logger = CloudLogger.new(path)
|
||||
logger.level = DEBUG_LEVEL[debug_level]
|
||||
logger.formatter = proc do |severity, datetime, progname, msg|
|
||||
MSG_FORMAT % [
|
||||
|
@ -53,7 +53,7 @@ setup()
|
||||
start()
|
||||
{
|
||||
if [ ! -f "$ECONE_SERVER" ]; then
|
||||
echo "Can not find $ECONE_SERVER."
|
||||
echo "Cannot find $ECONE_SERVER."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
@ -37,6 +37,9 @@
|
||||
# 0 = ERROR, 1 = WARNING, 2 = INFO, 3 = DEBUG
|
||||
:debug_level: 3
|
||||
|
||||
:cluster_id:
|
||||
:datastore_id:
|
||||
|
||||
# VM types allowed and their template files (inside the templates directory)
|
||||
:instance_types:
|
||||
:m1.small:
|
||||
|
@ -79,7 +79,7 @@ class EC2QueryServer < CloudServer
|
||||
return OpenNebula::Error.new('Unsupported'), 400
|
||||
end
|
||||
|
||||
rc = image.allocate(template)
|
||||
rc = image.allocate(template, @config[:datastore_id]||1)
|
||||
if OpenNebula.is_error?(rc)
|
||||
return OpenNebula::Error.new('Unsupported'), 400
|
||||
end
|
||||
|
@ -35,7 +35,7 @@ EC2_AUTH = VAR_LOCATION + "/.one/ec2_auth"
|
||||
EC2_LOG = LOG_LOCATION + "/econe-server.log"
|
||||
CONFIGURATION_FILE = ETC_LOCATION + "/econe.conf"
|
||||
|
||||
TEMPLATE_LOCATION = ETC_LOCATION + "/econe_templates"
|
||||
TEMPLATE_LOCATION = ETC_LOCATION + "/ec2query_templates"
|
||||
VIEWS_LOCATION = RUBY_LIB_LOCATION + "/cloud/econe/views"
|
||||
|
||||
$: << RUBY_LIB_LOCATION
|
||||
|
@ -53,7 +53,7 @@ setup()
|
||||
start()
|
||||
{
|
||||
if [ ! -x "$OCCI_SERVER" ]; then
|
||||
echo "Can not find $OCCI_SERVER."
|
||||
echo "Cannot find $OCCI_SERVER."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
@ -27,6 +27,7 @@
|
||||
# Authentication driver for incoming requests
|
||||
# occi, for OpenNebula's user-password scheme
|
||||
# x509, for x509 certificates based authentication
|
||||
# opennebula, use the driver defined for the user in OpenNebula
|
||||
:auth: occi
|
||||
|
||||
# Authentication driver to communicate with OpenNebula core
|
||||
@ -37,6 +38,9 @@
|
||||
# 0 = ERROR, 1 = WARNING, 2 = INFO, 3 = DEBUG
|
||||
:debug_level: 3
|
||||
|
||||
:cluster_id:
|
||||
:datastore_id:
|
||||
|
||||
# VM types allowed and their template files (inside the templates directory)
|
||||
:instance_types:
|
||||
:small:
|
||||
|
@ -131,7 +131,7 @@ module OCCIClient
|
||||
file_path="/"+m[1]
|
||||
end
|
||||
elsif !image_info.elements['TYPE'] == "DATABLOCK"
|
||||
return CloudClient::Error.new("Can not find URL")
|
||||
return CloudClient::Error.new("Cannot find URL")
|
||||
end
|
||||
|
||||
if curb
|
||||
@ -316,7 +316,7 @@ module OCCIClient
|
||||
end
|
||||
|
||||
if info.elements['ID'] == nil
|
||||
return CloudClient::Error.new("Can not find RESOURCE ID")
|
||||
return CloudClient::Error.new("Cannot find RESOURCE ID")
|
||||
end
|
||||
|
||||
resource_id = info.elements['ID'].text
|
||||
|
@ -342,7 +342,7 @@ class OCCIServer < CloudServer
|
||||
template = network.to_one_template
|
||||
return template, 500 if OpenNebula.is_error?(template)
|
||||
|
||||
rc = network.allocate(template)
|
||||
rc = network.allocate(template, @config[:cluster_id]||ClusterPool::NONE_CLUSTER_ID)
|
||||
if OpenNebula.is_error?(rc)
|
||||
return rc, CloudServer::HTTP_ERROR_CODE[rc.errno]
|
||||
end
|
||||
@ -446,7 +446,7 @@ class OCCIServer < CloudServer
|
||||
template = image.to_one_template
|
||||
return template, 500 if OpenNebula.is_error?(template)
|
||||
|
||||
rc = image.allocate(template)
|
||||
rc = image.allocate(template, @config[:datastore_id]||1)
|
||||
if OpenNebula.is_error?(rc)
|
||||
return rc, CloudServer::HTTP_ERROR_CODE[rc.errno]
|
||||
end
|
||||
|
@ -125,14 +125,44 @@ background-image: -moz-linear-gradient(
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
#navigation li {
|
||||
.navigation li.topTab {
|
||||
line-height: 2em;
|
||||
text-align: right;
|
||||
padding-right: 10px;
|
||||
text-align: left;
|
||||
padding-left: 15px;
|
||||
}
|
||||
|
||||
#navigation li a {
|
||||
.navigation li.subTab {
|
||||
line-height: 1.8em;
|
||||
font-size: 12px;
|
||||
text-align: left;
|
||||
padding-left: 30px;
|
||||
}
|
||||
|
||||
.navigation li.subsubTab {
|
||||
line-height: 1.7em;
|
||||
font-size: 11px;
|
||||
text-align: left;
|
||||
padding-left: 40px;
|
||||
}
|
||||
|
||||
.navigation li.topTab span.plusIcon,
|
||||
.navigation li.subTab span.plusIcon {
|
||||
display : none;
|
||||
float: right;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
.navigation li.topTab span.plusIcon {
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
.navigation li.subTab span.plusIcon {
|
||||
margin-top: 3px;
|
||||
}
|
||||
|
||||
#navigation li {
|
||||
color: #ffffff;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
#navigation li:hover, .navigation-active-li {
|
||||
@ -163,10 +193,10 @@ background-image: -moz-linear-gradient(
|
||||
);
|
||||
*/
|
||||
}
|
||||
.navigation-active-li-a {
|
||||
.navigation-active-li {
|
||||
font-weight: bold;
|
||||
}
|
||||
#navigation li:hover a, .navigation-active-li-a {
|
||||
#navigation li:hover {
|
||||
color: #ffffff !important;
|
||||
}
|
||||
|
||||
@ -181,3 +211,48 @@ background-image: -moz-linear-gradient(
|
||||
width: 100px;
|
||||
height: 22px;
|
||||
}
|
||||
|
||||
/* top menu css */
|
||||
#menutop_container{
|
||||
margin:0px 171px;
|
||||
color:#FFFFFF;
|
||||
font-size:13px;
|
||||
font-weight:bold;
|
||||
}
|
||||
#menutop_navbar{
|
||||
float:left;
|
||||
height:25px;
|
||||
font-size:13px;
|
||||
}
|
||||
#menutop_navbar ul{
|
||||
float:left;
|
||||
height:25px;
|
||||
color:#000000;
|
||||
margin: 0 0;
|
||||
padding-left: 1px;
|
||||
}
|
||||
#menutop_navbar ul{
|
||||
background-color: #353735;
|
||||
}
|
||||
#menutop_navbar ul li{
|
||||
float:left;
|
||||
min-width:72px;
|
||||
margin:0px 0 0 0;
|
||||
height:22px;
|
||||
display: inline;
|
||||
text-align:center;
|
||||
padding-left:5px;
|
||||
padding-right: 5px;
|
||||
padding-top: 4px;
|
||||
padding-bottom: 4px;
|
||||
border-left:1px solid white;
|
||||
cursor:pointer;
|
||||
color: white;
|
||||
}
|
||||
|
||||
#menutop_navbar ul li:hover {
|
||||
background-color: #E69138;
|
||||
|
||||
}
|
||||
|
||||
/* end top menu css */
|
@ -14,16 +14,21 @@
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
//This file is mostly a copy of layout.js from Sunstone.
|
||||
//Instead of opening a south panel, it opens an east panel.
|
||||
//Apart from document.ready() modifications, the rest of the changed lines are
|
||||
//marked with MODIFIED
|
||||
|
||||
var activeTab;
|
||||
var outerLayout, innerLayout;
|
||||
|
||||
function hideDialog(){
|
||||
innerLayout.close("east");
|
||||
innerLayout.close("east");//MODIFIED
|
||||
}
|
||||
|
||||
function popDialog(content){
|
||||
$("#dialog").html(content);
|
||||
innerLayout.open("east");
|
||||
innerLayout.open("east");//MODIFIED
|
||||
}
|
||||
|
||||
function popDialogLoading(){
|
||||
@ -31,45 +36,98 @@ function popDialogLoading(){
|
||||
popDialog(loading);
|
||||
}
|
||||
|
||||
function showTab(tabname){
|
||||
activeTab = tabname;
|
||||
function showTab(tabname,highlight_tab){
|
||||
//Since menu items no longer have an <a> element
|
||||
//we no longer expect #tab_id here, but simply tab_id
|
||||
//So safety check - remove # from #tab_id if present to ensure compatibility
|
||||
if (tabname.indexOf('#') == 0)
|
||||
tabname = tabname.substring(1);
|
||||
if (highlight_tab && highlight_tab.indexOf('#') == 0)
|
||||
highlight_tab = highlight_tab.substring(1);
|
||||
|
||||
var activeTab = tabname;
|
||||
|
||||
if (!highlight_tab) highlight_tab = activeTab;
|
||||
|
||||
//clean selected menu
|
||||
$("#navigation li").removeClass("navigation-active-li");
|
||||
$("#navigation li a").removeClass("navigation-active-li-a");
|
||||
$("div#header ul#menutop_ul li").removeClass("navigation-active-li");
|
||||
|
||||
//select menu
|
||||
var li = $("#navigation li:has(a[href='"+activeTab+"'])")
|
||||
var li_a = $("#navigation li a[href='"+activeTab+"']")
|
||||
//select tab in left menu
|
||||
var li = $("#navigation li#li_"+highlight_tab)
|
||||
li.addClass("navigation-active-li");
|
||||
li_a.addClass("navigation-active-li-a");
|
||||
|
||||
//select tab in top menu
|
||||
var top_li = $("div#header ul#menutop_ul li#top_"+highlight_tab);
|
||||
top_li.addClass("navigation-active-li");
|
||||
|
||||
|
||||
//show tab
|
||||
$(".tab").hide();
|
||||
$(activeTab).show();
|
||||
//~ if (activeTab == '#dashboard') {
|
||||
//~ emptyDashboard();
|
||||
//~ preloadTables();
|
||||
//~ }
|
||||
innerLayout.close("south");
|
||||
}
|
||||
$('#'+activeTab).show();
|
||||
// innerLayout.close("south");//MODIFIED commented
|
||||
};
|
||||
|
||||
function setupTabs(){
|
||||
|
||||
var topTabs = $(".outer-west ul li.topTab");
|
||||
var subTabs = $(".outer-west ul li.subTab");
|
||||
|
||||
subTabs.live("click",function(){
|
||||
//leave floor to topTab listener in case of tabs with both classes
|
||||
if ($(this).hasClass('topTab')) return false;
|
||||
|
||||
var tab = $(this).attr('id').substring(3);
|
||||
showTab(tab);
|
||||
return false;
|
||||
});
|
||||
|
||||
topTabs.live("click",function(e){
|
||||
var tab = $(this).attr('id').substring(3);
|
||||
//Subtabs have a class with the name of this tab
|
||||
var subtabs = $('div#menu li.'+tab);
|
||||
|
||||
//toggle subtabs only when clicking on the icon or when clicking on an
|
||||
//already selected menu
|
||||
if ($(e.target).is('span') ||
|
||||
$(this).hasClass("navigation-active-li")){
|
||||
//for each subtab, we hide the subsubtabs
|
||||
subtabs.each(function(){
|
||||
//for each subtab, hide its subtabs
|
||||
var subsubtabs = $(this).attr('id').substr(3);
|
||||
//subsubtabs class
|
||||
subsubtabs = $('div#menu li.'+subsubtabs);
|
||||
subsubtabs.hide();
|
||||
});
|
||||
//hide subtabs and reset icon to + position, since all subsubtabs
|
||||
//are hidden
|
||||
subtabs.fadeToggle('fast');
|
||||
$('span',subtabs).removeClass('ui-icon-circle-minus');
|
||||
$('span',subtabs).addClass('ui-icon-circle-plus');
|
||||
//toggle icon on this tab
|
||||
$('span',this).toggleClass('ui-icon-circle-plus ui-icon-circle-minus');
|
||||
};
|
||||
//if we are clicking on the icon only, do not show the tab
|
||||
if ($(e.target).is('span')) return false;
|
||||
|
||||
showTab(tab);
|
||||
return false;
|
||||
});
|
||||
|
||||
};
|
||||
|
||||
function setupTopMenu(){
|
||||
$('div#header ul#menutop_ul li').live('click',function(){
|
||||
var tab = "#" + $(this).attr('id').substring(4);
|
||||
showTab(tab);
|
||||
});
|
||||
};
|
||||
|
||||
$(document).ready(function () {
|
||||
$(".tab").hide();
|
||||
|
||||
$(".outer-west ul li.subTab").live("click",function(){
|
||||
var tab = $('a',this).attr('href');
|
||||
showTab(tab);
|
||||
return false;
|
||||
});
|
||||
|
||||
$(".outer-west ul li.topTab").live("click",function(){
|
||||
var tab = $('a',this).attr('href');
|
||||
//toggle subtabs trick
|
||||
$('li.'+tab.substr(1)).toggle();
|
||||
showTab(tab);
|
||||
return false;
|
||||
});
|
||||
setupTabs();
|
||||
//setupTopMenu();
|
||||
|
||||
outerLayout = $('body').layout({
|
||||
applyDefaultStyles: false
|
||||
|
269
src/cluster/Cluster.cc
Normal file
@ -0,0 +1,269 @@
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
#include <limits.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
|
||||
#include "Cluster.h"
|
||||
#include "GroupPool.h"
|
||||
|
||||
const char * Cluster::table = "cluster_pool";
|
||||
|
||||
const char * Cluster::db_names =
|
||||
"oid, name, body, uid, gid, owner_u, group_u, other_u";
|
||||
|
||||
const char * Cluster::db_bootstrap = "CREATE TABLE IF NOT EXISTS cluster_pool ("
|
||||
"oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, "
|
||||
"gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, "
|
||||
"UNIQUE(name))";
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
int Cluster::check_drop(string& error_msg)
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
if ( hosts.get_collection_size() > 0 )
|
||||
{
|
||||
oss << "Cluster " << oid << " is not empty, it contains "
|
||||
<< hosts.get_collection_size() << " hosts.";
|
||||
|
||||
goto error_common;
|
||||
}
|
||||
|
||||
if ( datastores.get_collection_size() > 0 )
|
||||
{
|
||||
oss << "Cluster " << oid << " is not empty, it contains "
|
||||
<< datastores.get_collection_size() << " datastores.";
|
||||
|
||||
goto error_common;
|
||||
}
|
||||
|
||||
if ( vnets.get_collection_size() > 0 )
|
||||
{
|
||||
oss << "Cluster " << oid << " is not empty, it contains "
|
||||
<< vnets.get_collection_size() << " vnets.";
|
||||
|
||||
goto error_common;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error_common:
|
||||
error_msg = oss.str();
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* ************************************************************************ */
|
||||
/* Cluster :: Database Access Functions */
|
||||
/* ************************************************************************ */
|
||||
|
||||
int Cluster::insert_replace(SqlDB *db, bool replace, string& error_str)
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
int rc;
|
||||
string xml_body;
|
||||
|
||||
char * sql_name;
|
||||
char * sql_xml;
|
||||
|
||||
// Set the owner and group to oneadmin
|
||||
set_user(0, "");
|
||||
set_group(GroupPool::ONEADMIN_ID, GroupPool::ONEADMIN_NAME);
|
||||
|
||||
// Update the Cluster
|
||||
|
||||
sql_name = db->escape_str(name.c_str());
|
||||
|
||||
if ( sql_name == 0 )
|
||||
{
|
||||
goto error_name;
|
||||
}
|
||||
|
||||
sql_xml = db->escape_str(to_xml(xml_body).c_str());
|
||||
|
||||
if ( sql_xml == 0 )
|
||||
{
|
||||
goto error_body;
|
||||
}
|
||||
|
||||
if ( validate_xml(sql_xml) != 0 )
|
||||
{
|
||||
goto error_xml;
|
||||
}
|
||||
|
||||
if ( replace )
|
||||
{
|
||||
oss << "REPLACE";
|
||||
}
|
||||
else
|
||||
{
|
||||
oss << "INSERT";
|
||||
}
|
||||
|
||||
// Construct the SQL statement to Insert or Replace
|
||||
|
||||
oss <<" INTO "<<table <<" ("<< db_names <<") VALUES ("
|
||||
<< oid << ","
|
||||
<< "'" << sql_name << "',"
|
||||
<< "'" << sql_xml << "',"
|
||||
<< uid << ","
|
||||
<< gid << ","
|
||||
<< owner_u << ","
|
||||
<< group_u << ","
|
||||
<< other_u << ")";
|
||||
|
||||
|
||||
rc = db->exec(oss);
|
||||
|
||||
db->free_str(sql_name);
|
||||
db->free_str(sql_xml);
|
||||
|
||||
return rc;
|
||||
|
||||
error_xml:
|
||||
db->free_str(sql_name);
|
||||
db->free_str(sql_xml);
|
||||
|
||||
error_str = "Error transforming the Cluster to XML.";
|
||||
|
||||
goto error_common;
|
||||
|
||||
error_body:
|
||||
db->free_str(sql_name);
|
||||
goto error_generic;
|
||||
|
||||
error_name:
|
||||
goto error_generic;
|
||||
|
||||
error_generic:
|
||||
error_str = "Error inserting Cluster in DB.";
|
||||
error_common:
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
string& Cluster::to_xml(string& xml) const
|
||||
{
|
||||
ostringstream oss;
|
||||
string host_collection_xml;
|
||||
string ds_collection_xml;
|
||||
string vnet_collection_xml;
|
||||
|
||||
oss <<
|
||||
"<CLUSTER>" <<
|
||||
"<ID>" << oid << "</ID>" <<
|
||||
"<NAME>" << name << "</NAME>" <<
|
||||
|
||||
hosts.to_xml(host_collection_xml) <<
|
||||
datastores.to_xml(ds_collection_xml) <<
|
||||
vnets.to_xml(vnet_collection_xml) <<
|
||||
|
||||
"</CLUSTER>";
|
||||
|
||||
xml = oss.str();
|
||||
|
||||
return xml;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
int Cluster::from_xml(const string& xml)
|
||||
{
|
||||
int rc = 0;
|
||||
vector<xmlNodePtr> content;
|
||||
|
||||
// Initialize the internal XML object
|
||||
update_from_str(xml);
|
||||
|
||||
// Get class base attributes
|
||||
rc += xpath(oid, "/CLUSTER/ID", -1);
|
||||
rc += xpath(name,"/CLUSTER/NAME", "not_found");
|
||||
|
||||
// Set oneadmin as the owner
|
||||
set_user(0,"");
|
||||
|
||||
// Set the Cluster ID as the cluster it belongs to
|
||||
set_group(oid, name);
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Get associated hosts
|
||||
// -------------------------------------------------------------------------
|
||||
ObjectXML::get_nodes("/CLUSTER/HOSTS", content);
|
||||
|
||||
if (content.empty())
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Set of IDs
|
||||
rc += hosts.from_xml_node(content[0]);
|
||||
|
||||
ObjectXML::free_nodes(content);
|
||||
content.clear();
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Get associated datastores
|
||||
// -------------------------------------------------------------------------
|
||||
ObjectXML::get_nodes("/CLUSTER/DATASTORES", content);
|
||||
|
||||
if (content.empty())
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Set of IDs
|
||||
rc += datastores.from_xml_node(content[0]);
|
||||
|
||||
ObjectXML::free_nodes(content);
|
||||
content.clear();
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Get associated vnets
|
||||
// -------------------------------------------------------------------------
|
||||
ObjectXML::get_nodes("/CLUSTER/VNETS", content);
|
||||
|
||||
if (content.empty())
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Set of IDs
|
||||
rc += vnets.from_xml_node(content[0]);
|
||||
|
||||
ObjectXML::free_nodes(content);
|
||||
content.clear();
|
||||
|
||||
if (rc != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
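The to_xml / from_xml pair above round-trips the cluster together with its three ID collections (/CLUSTER/HOSTS, /CLUSTER/DATASTORES, /CLUSTER/VNETS). The standalone sketch below builds that layout with a plain ostringstream, the same way the member function does; the <ID> child elements and the collection_to_xml helper are assumptions made for the example, not the ObjectCollection implementation.

// Minimal sketch: serialize a cluster and its ID sets the way Cluster::to_xml
// lays them out. The <ID> child elements are an assumption for illustration.
#include <iostream>
#include <set>
#include <sstream>
#include <string>

// Hypothetical stand-in for ObjectCollection::to_xml over a set of IDs.
static std::string collection_to_xml(const std::string& tag, const std::set<int>& ids)
{
    std::ostringstream oss;

    oss << "<" << tag << ">";

    for (std::set<int>::const_iterator it = ids.begin(); it != ids.end(); ++it)
    {
        oss << "<ID>" << *it << "</ID>";
    }

    oss << "</" << tag << ">";

    return oss.str();
}

int main()
{
    std::set<int> hosts;      // host IDs added with add_host()
    std::set<int> datastores; // datastore IDs
    std::set<int> vnets;      // virtual network IDs

    hosts.insert(3);
    hosts.insert(7);
    datastores.insert(100);

    std::ostringstream oss;

    oss << "<CLUSTER>"
        << "<ID>"   << 100          << "</ID>"
        << "<NAME>" << "production" << "</NAME>"
        << collection_to_xml("HOSTS",      hosts)
        << collection_to_xml("DATASTORES", datastores)
        << collection_to_xml("VNETS",      vnets)
        << "</CLUSTER>";

    std::cout << oss.str() << std::endl;

    return 0;
}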
133
src/cluster/ClusterPool.cc
Normal file
@ -0,0 +1,133 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#include "ClusterPool.h"
|
||||
#include "Nebula.h"
|
||||
#include "NebulaLog.h"
|
||||
|
||||
#include <stdexcept>
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* There is a default cluster bootstrapped by the core: */
|
||||
/* The first 100 cluster IDs are reserved for system clusters. */
|
||||
/* Regular ones start from ID 100 */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
const string ClusterPool::NONE_CLUSTER_NAME = "";
|
||||
const int ClusterPool::NONE_CLUSTER_ID = -1;
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
ClusterPool::ClusterPool(SqlDB * db):PoolSQL(db, Cluster::table)
|
||||
{
|
||||
ostringstream oss;
|
||||
string error_str;
|
||||
|
||||
if (get_lastOID() == -1) //lastOID is set in PoolSQL::init_cb
|
||||
{
|
||||
set_update_lastOID(99);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int ClusterPool::allocate(string name, int * oid, string& error_str)
|
||||
{
|
||||
Cluster * cluster;
|
||||
ostringstream oss;
|
||||
|
||||
if ( name.empty() )
|
||||
{
|
||||
goto error_name;
|
||||
}
|
||||
|
||||
if ( name.length() > 128 )
|
||||
{
|
||||
goto error_name_length;
|
||||
}
|
||||
|
||||
// Check for duplicates
|
||||
cluster = get(name, false);
|
||||
|
||||
if( cluster != 0 )
|
||||
{
|
||||
goto error_duplicated;
|
||||
}
|
||||
|
||||
// Build a new Cluster object
|
||||
cluster = new Cluster(-1, name);
|
||||
|
||||
// Insert the Object in the pool
|
||||
*oid = PoolSQL::allocate(cluster, error_str);
|
||||
|
||||
return *oid;
|
||||
|
||||
error_name:
|
||||
oss << "NAME cannot be empty.";
|
||||
goto error_common;
|
||||
|
||||
error_name_length:
|
||||
oss << "NAME is too long; max length is 128 chars.";
|
||||
goto error_common;
|
||||
|
||||
error_duplicated:
|
||||
oss << "NAME is already taken by CLUSTER " << cluster->get_oid() << ".";
|
||||
|
||||
error_common:
|
||||
*oid = -1;
|
||||
error_str = oss.str();
|
||||
|
||||
return *oid;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int ClusterPool::drop(PoolObjectSQL * objsql, string& error_msg)
|
||||
{
|
||||
Cluster * cluster = static_cast<Cluster*>(objsql);
|
||||
|
||||
int rc;
|
||||
|
||||
// Return error if the cluster is a default one.
|
||||
if( cluster->get_oid() < 100 )
|
||||
{
|
||||
error_msg = "System Clusters (ID < 100) cannot be deleted.";
|
||||
NebulaLog::log("CLUSTER", Log::ERROR, error_msg);
|
||||
return -2;
|
||||
}
|
||||
|
||||
if ( cluster->check_drop(error_msg) < 0 )
|
||||
{
|
||||
NebulaLog::log("CLUSTER", Log::ERROR, error_msg);
|
||||
|
||||
return -3;
|
||||
}
|
||||
|
||||
rc = cluster->drop(db);
|
||||
|
||||
if( rc != 0 )
|
||||
{
|
||||
error_msg = "SQL DB error";
|
||||
rc = -1;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
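The drop() method above refuses to delete clusters in the reserved range, which the constructor sets up by bootstrapping lastOID to 99. The sketch below restates that policy outside of PoolSQL; the is_system_cluster helper is an illustration only.

// Sketch of the reserved-range policy behind ClusterPool: IDs below 100 are
// system clusters that drop() refuses to delete, user clusters start at 100
// (lastOID is bootstrapped to 99), and -1 means "no cluster".
#include <iostream>

static const int NONE_CLUSTER_ID    = -1;
static const int FIRST_USER_CLUSTER = 100;

// Hypothetical helper, for illustration only.
static bool is_system_cluster(int oid)
{
    return oid >= 0 && oid < FIRST_USER_CLUSTER;
}

int main()
{
    const int ids[] = { NONE_CLUSTER_ID, 0, 99, 100, 150 };

    for (int i = 0; i < 5; ++i)
    {
        if (ids[i] == NONE_CLUSTER_ID)
        {
            std::cout << ids[i] << ": no cluster assigned" << std::endl;
        }
        else if (is_system_cluster(ids[i]))
        {
            std::cout << ids[i] << ": system cluster, cannot be dropped" << std::endl;
        }
        else
        {
            std::cout << ids[i] << ": user cluster" << std::endl;
        }
    }

    return 0;
}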
@ -1,3 +1,5 @@
|
||||
# SConstruct for src/group
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
@ -14,10 +16,15 @@
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
CLONE = shared/tm_clone.sh
|
||||
LN = shared/tm_ln.sh
|
||||
MKSWAP = shared/tm_mkswap.sh
|
||||
MKIMAGE = shared/tm_mkimage.sh
|
||||
DELETE = shared/tm_delete.sh
|
||||
MV = shared/tm_mv.sh
|
||||
CONTEXT = shared/tm_context.sh
|
||||
Import('env')
|
||||
|
||||
lib_name='nebula_cluster'
|
||||
|
||||
# Sources to generate the library
|
||||
source_files=[
|
||||
'ClusterPool.cc',
|
||||
'Cluster.cc'
|
||||
]
|
||||
|
||||
# Build library
|
||||
env.StaticLibrary(lib_name, source_files)
|
@ -201,3 +201,27 @@ string VectorAttribute::vector_value(const char *name) const
|
||||
}
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int VectorAttribute::vector_value(const char *name, int & value) const
|
||||
{
|
||||
map<string,string>::const_iterator it;
|
||||
|
||||
it = attribute_value.find(name);
|
||||
|
||||
if ( it == attribute_value.end() )
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ( it->second.empty() )
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
istringstream iss(it->second);
|
||||
iss >> value;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
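The new overload above simply looks the attribute up in the underlying map and converts it with an istringstream, returning -1 when the key is missing or empty. A standalone sketch of the same conversion, without the VectorAttribute class itself, looks like this:

// Standalone sketch of the conversion done by the new
// VectorAttribute::vector_value(const char*, int&) overload.
#include <iostream>
#include <map>
#include <sstream>
#include <string>

static int vector_value_int(const std::map<std::string, std::string>& attrs,
                            const char * name, int& value)
{
    std::map<std::string, std::string>::const_iterator it = attrs.find(name);

    if (it == attrs.end() || it->second.empty()) // missing or empty attribute
    {
        return -1;
    }

    std::istringstream iss(it->second);
    iss >> value;

    return 0; // like the overload above, non-numeric text is not rejected here
}

int main()
{
    std::map<std::string, std::string> disk;

    disk["DATASTORE_ID"] = "1";

    int ds_id;

    if (vector_value_int(disk, "DATASTORE_ID", ds_id) == 0)
    {
        std::cout << "DATASTORE_ID = " << ds_id << std::endl;
    }

    return 0;
}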
373
src/datastore/Datastore.cc
Normal file
@ -0,0 +1,373 @@
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
#include "Datastore.h"
|
||||
#include "GroupPool.h"
|
||||
#include "NebulaLog.h"
|
||||
#include "Nebula.h"
|
||||
|
||||
const char * Datastore::table = "datastore_pool";
|
||||
|
||||
const char * Datastore::db_names =
|
||||
"oid, name, body, uid, gid, owner_u, group_u, other_u";
|
||||
|
||||
const char * Datastore::db_bootstrap =
|
||||
"CREATE TABLE IF NOT EXISTS datastore_pool ("
|
||||
"oid INTEGER PRIMARY KEY, name VARCHAR(128), body TEXT, uid INTEGER, "
|
||||
"gid INTEGER, owner_u INTEGER, group_u INTEGER, other_u INTEGER, "
|
||||
"UNIQUE(name))";
|
||||
|
||||
/* ************************************************************************ */
|
||||
/* Datastore :: Constructor/Destructor */
|
||||
/* ************************************************************************ */
|
||||
|
||||
Datastore::Datastore(
|
||||
int uid,
|
||||
int gid,
|
||||
const string& uname,
|
||||
const string& gname,
|
||||
DatastoreTemplate* ds_template,
|
||||
int cluster_id,
|
||||
const string& cluster_name):
|
||||
PoolObjectSQL(-1,DATASTORE,"",uid,gid,uname,gname,table),
|
||||
ObjectCollection("IMAGES"),
|
||||
Clusterable(cluster_id, cluster_name),
|
||||
ds_mad(""),
|
||||
tm_mad(""),
|
||||
base_path("")
|
||||
{
|
||||
group_u = 1;
|
||||
|
||||
if (ds_template != 0)
|
||||
{
|
||||
obj_template = ds_template;
|
||||
}
|
||||
else
|
||||
{
|
||||
obj_template = new DatastoreTemplate;
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
int Datastore::disk_attribute(VectorAttribute * disk)
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
oss << oid;
|
||||
|
||||
disk->replace("DATASTORE", get_name());
|
||||
disk->replace("DATASTORE_ID", oss.str());
|
||||
disk->replace("TM_MAD", get_tm_mad());
|
||||
|
||||
if ( get_cluster_id() != ClusterPool::NONE_CLUSTER_ID )
|
||||
{
|
||||
oss.str("");
|
||||
oss << get_cluster_id();
|
||||
|
||||
disk->replace("CLUSTER_ID", oss.str());
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
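disk_attribute() above tags a DISK vector attribute with the datastore name, ID and TM driver, plus CLUSTER_ID when the datastore belongs to a cluster. The following sketch shows the resulting key/value content using a plain map in place of VectorAttribute; the concrete values are invented for the example.

// Sketch of the keys Datastore::disk_attribute() injects into a DISK
// attribute. A std::map stands in for VectorAttribute here.
#include <iostream>
#include <map>
#include <sstream>
#include <string>

int main()
{
    std::map<std::string, std::string> disk;

    // What the VM template already had.
    disk["IMAGE_ID"] = "7";

    // Values the datastore would add (example data).
    int         ds_id      = 1;
    int         cluster_id = 100;        // skipped when NONE_CLUSTER_ID (-1)
    std::string ds_name    = "default";
    std::string tm_mad     = "shared";

    std::ostringstream oss;

    oss << ds_id;
    disk["DATASTORE"]    = ds_name;
    disk["DATASTORE_ID"] = oss.str();
    disk["TM_MAD"]       = tm_mad;

    if (cluster_id != -1)
    {
        oss.str("");
        oss << cluster_id;
        disk["CLUSTER_ID"] = oss.str();
    }

    for (std::map<std::string, std::string>::iterator it = disk.begin();
         it != disk.end(); ++it)
    {
        std::cout << it->first << " = " << it->second << std::endl;
    }

    return 0;
}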
/* ************************************************************************ */
|
||||
/* Datastore :: Database Access Functions */
|
||||
/* ************************************************************************ */
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
int Datastore::insert(SqlDB *db, string& error_str)
|
||||
{
|
||||
int rc;
|
||||
ostringstream oss;
|
||||
|
||||
Nebula& nd = Nebula::instance();
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
// Check default datastore attributes
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
erase_template_attribute("NAME", name);
|
||||
// NAME is checked in DatastorePool::allocate
|
||||
|
||||
get_template_attribute("DS_MAD", ds_mad);
|
||||
|
||||
if ( ds_mad.empty() == true )
|
||||
{
|
||||
goto error_ds;
|
||||
}
|
||||
|
||||
get_template_attribute("TM_MAD", tm_mad);
|
||||
|
||||
if ( tm_mad.empty() == true )
|
||||
{
|
||||
goto error_tm;
|
||||
}
|
||||
|
||||
oss << nd.get_ds_location() << oid;
|
||||
|
||||
base_path = oss.str();
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
// Insert the Datastore
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
rc = insert_replace(db, false, error_str);
|
||||
|
||||
return rc;
|
||||
|
||||
error_ds:
|
||||
error_str = "No DS_MAD in template.";
|
||||
goto error_common;
|
||||
|
||||
error_tm:
|
||||
error_str = "No TM_MAD in template.";
|
||||
goto error_common;
|
||||
|
||||
error_common:
|
||||
NebulaLog::log("DATASTORE", Log::ERROR, error_str);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
int Datastore::insert_replace(SqlDB *db, bool replace, string& error_str)
|
||||
{
|
||||
ostringstream oss;
|
||||
|
||||
int rc;
|
||||
string xml_body;
|
||||
|
||||
char * sql_name;
|
||||
char * sql_xml;
|
||||
|
||||
// Update the Datastore
|
||||
|
||||
sql_name = db->escape_str(name.c_str());
|
||||
|
||||
if ( sql_name == 0 )
|
||||
{
|
||||
goto error_name;
|
||||
}
|
||||
|
||||
sql_xml = db->escape_str(to_xml(xml_body).c_str());
|
||||
|
||||
if ( sql_xml == 0 )
|
||||
{
|
||||
goto error_body;
|
||||
}
|
||||
|
||||
if ( validate_xml(sql_xml) != 0 )
|
||||
{
|
||||
goto error_xml;
|
||||
}
|
||||
|
||||
if ( replace )
|
||||
{
|
||||
oss << "REPLACE";
|
||||
}
|
||||
else
|
||||
{
|
||||
oss << "INSERT";
|
||||
}
|
||||
|
||||
// Construct the SQL statement to Insert or Replace
|
||||
|
||||
oss <<" INTO "<<table <<" ("<< db_names <<") VALUES ("
|
||||
<< oid << ","
|
||||
<< "'" << sql_name << "',"
|
||||
<< "'" << sql_xml << "',"
|
||||
<< uid << ","
|
||||
<< gid << ","
|
||||
<< owner_u << ","
|
||||
<< group_u << ","
|
||||
<< other_u << ")";
|
||||
|
||||
|
||||
rc = db->exec(oss);
|
||||
|
||||
db->free_str(sql_name);
|
||||
db->free_str(sql_xml);
|
||||
|
||||
return rc;
|
||||
|
||||
error_xml:
|
||||
db->free_str(sql_name);
|
||||
db->free_str(sql_xml);
|
||||
|
||||
error_str = "Error transforming the Datastore to XML.";
|
||||
|
||||
goto error_common;
|
||||
|
||||
error_body:
|
||||
db->free_str(sql_name);
|
||||
goto error_generic;
|
||||
|
||||
error_name:
|
||||
goto error_generic;
|
||||
|
||||
error_generic:
|
||||
error_str = "Error inserting Datastore in DB.";
|
||||
error_common:
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
string& Datastore::to_xml(string& xml) const
|
||||
{
|
||||
ostringstream oss;
|
||||
string collection_xml;
|
||||
string template_xml;
|
||||
string perms_xml;
|
||||
|
||||
ObjectCollection::to_xml(collection_xml);
|
||||
|
||||
oss <<
|
||||
"<DATASTORE>" <<
|
||||
"<ID>" << oid << "</ID>" <<
|
||||
"<UID>" << uid << "</UID>" <<
|
||||
"<GID>" << gid << "</GID>" <<
|
||||
"<UNAME>" << uname << "</UNAME>" <<
|
||||
"<GNAME>" << gname << "</GNAME>" <<
|
||||
"<NAME>" << name << "</NAME>" <<
|
||||
perms_to_xml(perms_xml) <<
|
||||
"<DS_MAD>" << ds_mad << "</DS_MAD>" <<
|
||||
"<TM_MAD>" << tm_mad << "</TM_MAD>" <<
|
||||
"<BASE_PATH>" << base_path << "</BASE_PATH>" <<
|
||||
"<CLUSTER_ID>" << cluster_id << "</CLUSTER_ID>" <<
|
||||
"<CLUSTER>" << cluster << "</CLUSTER>" <<
|
||||
collection_xml <<
|
||||
obj_template->to_xml(template_xml) <<
|
||||
"</DATASTORE>";
|
||||
|
||||
xml = oss.str();
|
||||
|
||||
return xml;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
||||
int Datastore::from_xml(const string& xml)
|
||||
{
|
||||
int rc = 0;
|
||||
vector<xmlNodePtr> content;
|
||||
|
||||
// Initialize the internal XML object
|
||||
update_from_str(xml);
|
||||
|
||||
// Get class base attributes
|
||||
rc += xpath(oid, "/DATASTORE/ID", -1);
|
||||
rc += xpath(uid, "/DATASTORE/UID", -1);
|
||||
rc += xpath(gid, "/DATASTORE/GID", -1);
|
||||
rc += xpath(uname, "/DATASTORE/UNAME", "not_found");
|
||||
rc += xpath(gname, "/DATASTORE/GNAME", "not_found");
|
||||
rc += xpath(name, "/DATASTORE/NAME", "not_found");
|
||||
rc += xpath(ds_mad, "/DATASTORE/DS_MAD", "not_found");
|
||||
rc += xpath(tm_mad, "/DATASTORE/TM_MAD", "not_found");
|
||||
rc += xpath(base_path, "/DATASTORE/BASE_PATH", "not_found");
|
||||
|
||||
rc += xpath(cluster_id, "/DATASTORE/CLUSTER_ID", -1);
|
||||
rc += xpath(cluster, "/DATASTORE/CLUSTER", "not_found");
|
||||
|
||||
// Permissions
|
||||
rc += perms_from_xml();
|
||||
|
||||
// Get associated classes
|
||||
ObjectXML::get_nodes("/DATASTORE/IMAGES", content);
|
||||
|
||||
if (content.empty())
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Set of IDs
|
||||
rc += ObjectCollection::from_xml_node(content[0]);
|
||||
|
||||
ObjectXML::free_nodes(content);
|
||||
content.clear();
|
||||
|
||||
// Get associated classes
|
||||
ObjectXML::get_nodes("/DATASTORE/TEMPLATE", content);
|
||||
|
||||
if (content.empty())
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
rc += obj_template->from_xml_node(content[0]);
|
||||
|
||||
ObjectXML::free_nodes(content);
|
||||
|
||||
if (rc != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int Datastore::replace_template(const string& tmpl_str, string& error)
|
||||
{
|
||||
string new_ds_mad;
|
||||
string new_tm_mad;
|
||||
|
||||
int rc;
|
||||
|
||||
rc = PoolObjectSQL::replace_template(tmpl_str, error);
|
||||
|
||||
if ( rc != 0 )
|
||||
{
|
||||
return rc;
|
||||
}
|
||||
|
||||
get_template_attribute("DS_MAD", new_ds_mad);
|
||||
|
||||
if ( !new_ds_mad.empty() )
|
||||
{
|
||||
ds_mad = new_ds_mad;
|
||||
}
|
||||
else
|
||||
{
|
||||
replace_template_attribute("DS_MAD", ds_mad);
|
||||
}
|
||||
|
||||
get_template_attribute("TM_MAD", new_tm_mad);
|
||||
|
||||
if ( !new_tm_mad.empty() )
|
||||
{
|
||||
tm_mad = new_tm_mad;
|
||||
}
|
||||
else
|
||||
{
|
||||
replace_template_attribute("TM_MAD", tm_mad);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------ */
|
||||
/* ------------------------------------------------------------------------ */
|
||||
|
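replace_template() above keeps the previously stored DS_MAD / TM_MAD whenever a template update leaves them out, writing the old value back into the template. A minimal sketch of that fallback with plain strings, assuming a hypothetical update_driver helper:

// Sketch of the DS_MAD / TM_MAD fallback in Datastore::replace_template():
// an empty value in the new template keeps the previously stored driver.
#include <iostream>
#include <string>

// Hypothetical helper, for illustration only.
static void update_driver(std::string& stored, const std::string& from_template)
{
    if (!from_template.empty())
    {
        stored = from_template; // the template wins when it sets the attribute
    }
    // otherwise the stored value is kept (the real code also writes it back
    // with replace_template_attribute)
}

int main()
{
    std::string ds_mad = "fs";
    std::string tm_mad = "shared";

    // Simulated template update that only changes TM_MAD.
    update_driver(ds_mad, "");
    update_driver(tm_mad, "ssh");

    std::cout << "DS_MAD = " << ds_mad << ", TM_MAD = " << tm_mad << std::endl;

    return 0;
}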
245
src/datastore/DatastorePool.cc
Normal file
@ -0,0 +1,245 @@
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) */
|
||||
/* */
|
||||
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
/* not use this file except in compliance with the License. You may obtain */
|
||||
/* a copy of the License at */
|
||||
/* */
|
||||
/* http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
/* */
|
||||
/* Unless required by applicable law or agreed to in writing, software */
|
||||
/* distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
/* See the License for the specific language governing permissions and */
|
||||
/* limitations under the License. */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
#include "DatastorePool.h"
|
||||
#include "Nebula.h"
|
||||
#include "NebulaLog.h"
|
||||
|
||||
#include <stdexcept>
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* There is a default datastore bootstrapped by the core: */
|
||||
/* The first 100 IDs are reserved for system datastores. Regular ones start */
|
||||
/* from ID 100 */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
const string DatastorePool::SYSTEM_DS_NAME = "system";
|
||||
const int DatastorePool::SYSTEM_DS_ID = 0;
|
||||
|
||||
const string DatastorePool::DEFAULT_DS_NAME = "default";
|
||||
const int DatastorePool::DEFAULT_DS_ID = 1;
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
DatastorePool::DatastorePool(SqlDB * db):
|
||||
PoolSQL(db, Datastore::table)
|
||||
{
|
||||
ostringstream oss;
|
||||
string error_str;
|
||||
|
||||
if (get_lastOID() == -1) //lastOID is set in PoolSQL::init_cb
|
||||
{
|
||||
DatastoreTemplate * ds_tmpl;
|
||||
Datastore * ds;
|
||||
|
||||
int rc;
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
// Create the system datastore
|
||||
// ---------------------------------------------------------------------
|
||||
|
||||
oss << "NAME = " << SYSTEM_DS_NAME << endl
|
||||
<< "DS_MAD = -" << endl
|
||||
<< "TM_MAD = shared";
|
||||
|
||||
ds_tmpl = new DatastoreTemplate;
|
||||
rc = ds_tmpl->parse_str_or_xml(oss.str(), error_str);
|
||||
|
||||
if( rc < 0 )
|
||||
{
|
||||
goto error_bootstrap;
|
||||
}
|
||||
|
||||
allocate(UserPool::ONEADMIN_ID,
|
||||
GroupPool::ONEADMIN_ID,
|
||||
UserPool::oneadmin_name,
|
||||
GroupPool::ONEADMIN_NAME,
|
||||
ds_tmpl,
|
||||
&rc,
|
||||
ClusterPool::NONE_CLUSTER_ID,
|
||||
ClusterPool::NONE_CLUSTER_NAME,
|
||||
error_str);
|
||||
|
||||
if( rc < 0 )
|
||||
{
|
||||
goto error_bootstrap;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------
|
||||
// Create the default datastore
|
||||
// ---------------------------------------------------------------------
|
||||
oss.str("");
|
||||
|
||||
oss << "NAME = " << DEFAULT_DS_NAME << endl
|
||||
<< "DS_MAD = fs" << endl
|
||||
<< "TM_MAD = shared";
|
||||
|
||||
ds_tmpl = new DatastoreTemplate;
|
||||
rc = ds_tmpl->parse_str_or_xml(oss.str(), error_str);
|
||||
|
||||
if( rc < 0 )
|
||||
{
|
||||
goto error_bootstrap;
|
||||
}
|
||||
|
||||
allocate(UserPool::ONEADMIN_ID,
|
||||
GroupPool::ONEADMIN_ID,
|
||||
UserPool::oneadmin_name,
|
||||
GroupPool::ONEADMIN_NAME,
|
||||
ds_tmpl,
|
||||
&rc,
|
||||
ClusterPool::NONE_CLUSTER_ID,
|
||||
ClusterPool::NONE_CLUSTER_NAME,
|
||||
error_str);
|
||||
|
||||
if( rc < 0 )
|
||||
{
|
||||
goto error_bootstrap;
|
||||
}
|
||||
|
||||
ds = get(rc, true);
|
||||
|
||||
ds->set_permissions(
|
||||
-1,-1,-1,
|
||||
-1,-1,-1,
|
||||
1,-1,-1,
|
||||
error_str);
|
||||
|
||||
update(ds);
|
||||
|
||||
ds->unlock();
|
||||
|
||||
// User created datastores will start from ID 100
|
||||
set_update_lastOID(99);
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
error_bootstrap:
|
||||
oss.str("");
|
||||
oss << "Error trying to create default datastore: " << error_str;
|
||||
NebulaLog::log("DATASTORE",Log::ERROR,oss);
|
||||
|
||||
throw runtime_error(oss.str());
|
||||
}
|
||||
|
||||
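The constructor above bootstraps the pool with the two built-in datastores ("system" as ID 0 and "default" as ID 1) from short template strings, then reserves IDs below 100 for the system. The sketch below only reproduces those template bodies; in the real code they are handed to DatastoreTemplate::parse_str_or_xml and then to allocate().

// Sketch of the template text DatastorePool's constructor feeds to
// DatastoreTemplate::parse_str_or_xml when bootstrapping the pool.
#include <iostream>
#include <sstream>
#include <string>

int main()
{
    std::ostringstream system_ds, default_ds;

    // Datastore 0: "system", no DS_MAD, uses the shared TM driver.
    system_ds << "NAME = system" << std::endl
              << "DS_MAD = -"    << std::endl
              << "TM_MAD = shared";

    // Datastore 1: "default", fs DS driver, shared TM driver.
    default_ds << "NAME = default" << std::endl
               << "DS_MAD = fs"    << std::endl
               << "TM_MAD = shared";

    std::cout << system_ds.str()  << std::endl << std::endl
              << default_ds.str() << std::endl;

    return 0;
}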
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int DatastorePool::allocate(
|
||||
int uid,
|
||||
int gid,
|
||||
const string& uname,
|
||||
const string& gname,
|
||||
DatastoreTemplate * ds_template,
|
||||
int * oid,
|
||||
int cluster_id,
|
||||
const string& cluster_name,
|
||||
string& error_str)
|
||||
{
|
||||
Datastore * ds;
|
||||
Datastore * ds_aux = 0;
|
||||
string name;
|
||||
ostringstream oss;
|
||||
|
||||
ds = new Datastore(uid, gid, uname, gname,
|
||||
ds_template, cluster_id, cluster_name);
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Check name & duplicates
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
ds->get_template_attribute("NAME", name);
|
||||
|
||||
if ( name.empty() )
|
||||
{
|
||||
goto error_name;
|
||||
}
|
||||
|
||||
if ( name.length() > 128 )
|
||||
{
|
||||
goto error_name_length;
|
||||
}
|
||||
|
||||
ds_aux = get(name,false);
|
||||
|
||||
if( ds_aux != 0 )
|
||||
{
|
||||
goto error_duplicated;
|
||||
}
|
||||
|
||||
*oid = PoolSQL::allocate(ds, error_str);
|
||||
|
||||
return *oid;
|
||||
|
||||
error_name:
|
||||
oss << "NAME cannot be empty.";
|
||||
goto error_common;
|
||||
|
||||
error_name_length:
|
||||
oss << "NAME is too long; max length is 128 chars.";
|
||||
goto error_common;
|
||||
|
||||
error_duplicated:
|
||||
oss << "NAME is already taken by DATASTORE " << ds_aux->get_oid() << ".";
|
||||
|
||||
error_common:
|
||||
delete ds;
|
||||
|
||||
*oid = -1;
|
||||
error_str = oss.str();
|
||||
|
||||
return *oid;
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
int DatastorePool::drop(PoolObjectSQL * objsql, string& error_msg)
|
||||
{
|
||||
Datastore * datastore = static_cast<Datastore*>(objsql);
|
||||
|
||||
int rc;
|
||||
|
||||
// Return error if the datastore is a default one.
|
||||
if( datastore->get_oid() < 100 )
|
||||
{
|
||||
error_msg = "System Datastores (ID < 100) cannot be deleted.";
|
||||
NebulaLog::log("DATASTORE", Log::ERROR, error_msg);
|
||||
return -2;
|
||||
}
|
||||
|
||||
if( datastore->get_collection_size() > 0 )
|
||||
{
|
||||
ostringstream oss;
|
||||
oss << "Datastore " << datastore->get_oid() << " is not empty.";
|
||||
error_msg = oss.str();
|
||||
NebulaLog::log("DATASTORE", Log::ERROR, error_msg);
|
||||
|
||||
return -3;
|
||||
}
|
||||
|
||||
rc = datastore->drop(db);
|
||||
|
||||
if( rc != 0 )
|
||||
{
|
||||
error_msg = "SQL DB error";
|
||||
rc = -1;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
21
src/tm_mad/ssh/tm_ssh.conf → src/datastore/SConstruct
Executable file → Normal file
@ -1,3 +1,5 @@
|
||||
# SConstruct for src/datastore
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
@ -14,10 +16,15 @@
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
CLONE = ssh/tm_clone.sh
|
||||
LN = ssh/tm_ln.sh
|
||||
MKSWAP = ssh/tm_mkswap.sh
|
||||
MKIMAGE = ssh/tm_mkimage.sh
|
||||
DELETE = ssh/tm_delete.sh
|
||||
MV = ssh/tm_mv.sh
|
||||
CONTEXT = ssh/tm_context.sh
|
||||
Import('env')
|
||||
|
||||
lib_name='nebula_datastore'
|
||||
|
||||
# Sources to generate the library
|
||||
source_files=[
|
||||
'DatastorePool.cc',
|
||||
'Datastore.cc'
|
||||
]
|
||||
|
||||
# Build library
|
||||
env.StaticLibrary(lib_name, source_files)
|
@ -20,21 +20,15 @@
|
||||
DRIVER_NAME=`basename $0 | cut -d. -f1`
|
||||
|
||||
if [ -z "${ONE_LOCATION}" ]; then
|
||||
DRIVERRC=/etc/one/${DRIVER_NAME}/${DRIVER_NAME}rc
|
||||
MADCOMMON=/usr/lib/one/mads/madcommon.sh
|
||||
VAR_LOCATION=/var/lib/one
|
||||
else
|
||||
DRIVERRC=$ONE_LOCATION/etc/${DRIVER_NAME}/${DRIVER_NAME}rc
|
||||
MADCOMMON=$ONE_LOCATION/lib/mads/madcommon.sh
|
||||
VAR_LOCATION=$ONE_LOCATION/var
|
||||
fi
|
||||
|
||||
. $MADCOMMON
|
||||
|
||||
# Export the im_mad specific rc
|
||||
|
||||
export_rc_vars $DRIVERRC
|
||||
|
||||
# Go to var directory ONE_LOCATION/var or /var/lib/one
|
||||
cd $VAR_LOCATION
|
||||
|
177
src/datastore_mad/one_datastore.rb
Executable file
@ -0,0 +1,177 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
# -------------------------------------------------------------------------- */
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may */
|
||||
# not use this file except in compliance with the License. You may obtain */
|
||||
# a copy of the License at */
|
||||
# */
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 */
|
||||
# */
|
||||
# Unless required by applicable law or agreed to in writing, software */
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, */
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
|
||||
# See the License for the specific language governing permissions and */
|
||||
# limitations under the License. */
|
||||
# -------------------------------------------------------------------------- */
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Set up the environment for the driver
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
ONE_LOCATION = ENV["ONE_LOCATION"]
|
||||
|
||||
if !ONE_LOCATION
|
||||
RUBY_LIB_LOCATION = "/usr/lib/one/ruby"
|
||||
VAR_LOCATION = "/var/lib/one"
|
||||
else
|
||||
RUBY_LIB_LOCATION = ONE_LOCATION + "/lib/ruby"
|
||||
VAR_LOCATION = ONE_LOCATION + "/var"
|
||||
end
|
||||
|
||||
$: << RUBY_LIB_LOCATION
|
||||
|
||||
require "OpenNebulaDriver"
|
||||
require 'getoptlong'
|
||||
require 'base64'
|
||||
require 'rexml/document'
|
||||
|
||||
# This class provides basic messaging and logging functionality
|
||||
# to implement Datastore Drivers. A datastore driver
|
||||
# is a program (or a set of them) that specializes the OpenNebula behavior
|
||||
# by interfacing with specific infrastructure storage solutions.
|
||||
class DatastoreDriver < OpenNebulaDriver
|
||||
|
||||
# Image Driver Protocol constants
|
||||
ACTION = {
|
||||
:mv => "MV",
|
||||
:cp => "CP",
|
||||
:rm => "RM",
|
||||
:mkfs => "MKFS",
|
||||
:log => "LOG"
|
||||
}
|
||||
|
||||
# Register default actions for the protocol
|
||||
def initialize(ds_type, options={})
|
||||
@options={
|
||||
:concurrency => 10,
|
||||
:threaded => true,
|
||||
:retries => 0,
|
||||
:local_actions => {
|
||||
ACTION[:mv] => nil,
|
||||
ACTION[:cp] => nil,
|
||||
ACTION[:rm] => nil,
|
||||
ACTION[:mkfs] => nil
|
||||
}
|
||||
}.merge!(options)
|
||||
|
||||
super("datastore/", @options)
|
||||
|
||||
if ds_type == nil
|
||||
@types = Dir["#{@local_scripts_path}/*/"].map do |d|
|
||||
d.split('/')[-1]
|
||||
end
|
||||
elsif ds_type.class == String
|
||||
@types = [ds_type]
|
||||
else
|
||||
@types = ds_type
|
||||
end
|
||||
|
||||
# register_action(ACTION[:mv].to_sym, method("mv"))
|
||||
register_action(ACTION[:cp].to_sym, method("cp"))
|
||||
register_action(ACTION[:rm].to_sym, method("rm"))
|
||||
register_action(ACTION[:mkfs].to_sym, method("mkfs"))
|
||||
end
|
||||
|
||||
############################################################################
|
||||
# Image Manager Protocol Actions (generic implementation)
|
||||
############################################################################
|
||||
# TODO: Integrate this with TM
|
||||
# def mv(id, ds, src, dst)
|
||||
# do_image_action(id, ds, :mv, "'#{src}' '#{dst}' '#{id}'")
|
||||
# end
|
||||
|
||||
def cp(id, drv_message)
|
||||
ds = get_ds_type(drv_message)
|
||||
do_image_action(id, ds, :cp, "#{drv_message} #{id}")
|
||||
end
|
||||
|
||||
def rm(id, drv_message)
|
||||
ds = get_ds_type(drv_message)
|
||||
do_image_action(id, ds, :rm, "#{drv_message} #{id}")
|
||||
end
|
||||
|
||||
def mkfs(id, drv_message)
|
||||
ds = get_ds_type(drv_message)
|
||||
do_image_action(id, ds, :mkfs, "#{drv_message} #{id}")
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
def is_available?(ds, id, action)
|
||||
if @types.include?(ds)
|
||||
return true
|
||||
else
|
||||
send_message(ACTION[action], RESULT[:failure], id,
|
||||
"Datastore driver '#{ds}' not available")
|
||||
return false
|
||||
end
|
||||
end
|
||||
|
||||
def do_image_action(id, ds, action, arguments)
|
||||
return if not is_available?(ds,id,action)
|
||||
|
||||
path = File.join(@local_scripts_path, ds)
|
||||
cmd = File.join(path, ACTION[action].downcase)
|
||||
|
||||
cmd << " " << arguments
|
||||
|
||||
rc = LocalCommand.run(cmd, log_method(id))
|
||||
|
||||
result, info = get_info_from_execution(rc)
|
||||
|
||||
|
||||
PP.pp([ACTION[action], result, id, info],STDERR)
|
||||
send_message(ACTION[action], result, id, info)
|
||||
end
|
||||
|
||||
def get_ds_type(drv_message)
|
||||
message = Base64.decode64(drv_message)
|
||||
xml_doc = REXML::Document.new(message)
|
||||
|
||||
dsxml = xml_doc.root.elements['/DS_DRIVER_ACTION_DATA/DATASTORE/DS_MAD']
|
||||
dstxt = dsxml.text if dsxml
|
||||
|
||||
return dstxt
|
||||
end
|
||||
end
|
||||
|
||||
################################################################################
|
||||
################################################################################
|
||||
# DatastoreDriver Main program
|
||||
################################################################################
|
||||
################################################################################
|
||||
|
||||
opts = GetoptLong.new(
|
||||
[ '--threads', '-t', GetoptLong::OPTIONAL_ARGUMENT ],
|
||||
[ '--ds-types', '-d', GetoptLong::OPTIONAL_ARGUMENT ]
|
||||
)
|
||||
|
||||
ds_type = nil
|
||||
threads = 15
|
||||
|
||||
begin
|
||||
opts.each do |opt, arg|
|
||||
case opt
|
||||
when '--threads'
|
||||
threads = arg.to_i
|
||||
when '--ds-types'
|
||||
ds_type = arg.split(',').map {|a| a.strip }
|
||||
end
|
||||
end
|
||||
rescue Exception => e
|
||||
exit(-1)
|
||||
end
|
||||
|
||||
ds_driver = DatastoreDriver.new(ds_type, :concurrency => threads)
|
||||
ds_driver.start_driver
|
4
src/tm_mad/dummy/tm_dummyrc → src/datastore_mad/remotes/dummy/cp
Normal file → Executable file
@ -1,3 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
@ -13,3 +15,5 @@
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
echo "dummy_path 1024"
|
4
src/tm_mad/ssh/tm_sshrc → src/datastore_mad/remotes/dummy/mkfs
Normal file → Executable file
@ -1,3 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
# -------------------------------------------------------------------------- #
|
||||
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
|
||||
# #
|
||||
@ -13,3 +15,5 @@
|
||||
# See the License for the specific language governing permissions and #
|
||||
# limitations under the License. #
|
||||
#--------------------------------------------------------------------------- #
|
||||
|
||||
echo "dummy_path 1024"
|
91
src/datastore_mad/remotes/fs/cp
Executable file
@@ -0,0 +1,91 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

###############################################################################
# This script is used to copy a VM image (SRC) to the image repository as DST
# Several SRC types are supported
###############################################################################

# -------- Set up the environment to source common tools & conf ------------

if [ -z "${ONE_LOCATION}" ]; then
    LIB_LOCATION=/usr/lib/one
else
    LIB_LOCATION=$ONE_LOCATION/lib
fi

. $LIB_LOCATION/sh/scripts_common.sh

DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../libfs.sh

# -------- Get cp and datastore arguments from OpenNebula core ------------

DRV_ACTION=$1
ID=$2

XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"

unset i XPATH_ELEMENTS

while IFS= read -r -d '' element; do
    XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
                /DS_DRIVER_ACTION_DATA/IMAGE/PATH)

BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
SRC="${XPATH_ELEMENTS[4]}"

mkdir -p "$BASE_PATH"
set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"

DST=`generate_image_path`

# ------------ Copy the image to the repository -------------

case $SRC in
http://*)
    log "Downloading $SRC to the image repository"

    exec_and_log "$WGET -O $DST $SRC" "Error downloading $SRC"
    ;;

*)
    if [ `check_restricted $SRC` -eq 1 ]; then
        log_error "Not allowed to copy images from $RESTRICTED_DIRS"
        error_message "Not allowed to copy image file $SRC"
        exit -1
    fi

    log "Copying local image $SRC to the image repository"

    exec_and_log "cp -f $SRC $DST" "Error copying $SRC to $DST"
    ;;
esac

# ---------------- Get the size of the image ------------

SIZE=`fs_du $DST`

echo "$DST $SIZE"
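Each datastore action script reports its result on stdout as a single "<source> <size>" line, which do_image_action above captures and relays with send_message. For this fs cp, generate_image_path produces an MD5-named file under the datastore BASE_PATH and fs_du reports its size in MB, so a result line looks like the following (path and size are illustrative only):

    /var/lib/one/images/d41d8cd98f00b204e9800998ecf8427e 1024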
84  src/datastore_mad/remotes/fs/mkfs  Executable file
@@ -0,0 +1,84 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

###############################################################################
# This script is used to create a VM image (SRC) of size (SIZE) and formatted
# as (FS)
###############################################################################

# -------- Set up the environment to source common tools & conf ------------

if [ -z "${ONE_LOCATION}" ]; then
    LIB_LOCATION=/usr/lib/one
else
    LIB_LOCATION=$ONE_LOCATION/lib
fi

. $LIB_LOCATION/sh/scripts_common.sh

DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../libfs.sh

# -------- Get mkfs and datastore arguments from OpenNebula core ------------

DRV_ACTION=$1
ID=$2

XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"

unset i XPATH_ELEMENTS

while IFS= read -r -d '' element; do
    XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
                /DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE \
                /DS_DRIVER_ACTION_DATA/IMAGE/SIZE)


BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
FSTYPE="${XPATH_ELEMENTS[4]}"
SIZE="${XPATH_ELEMENTS[5]}"

mkdir -p "$BASE_PATH"
set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"

DST=`generate_image_path`

# ------------ Image to save_as disk, no need to create a FS ------------

if [ "$FSTYPE" = "save_as" ]; then
    echo "$DST $SIZE"
    exit 0
fi

# ------------ Create the image to the repository ------------

MKFS_CMD=`mkfs_command $DST $FSTYPE`

exec_and_log "$DD if=/dev/zero of=$DST bs=1 count=1 seek=${SIZE}M" \
    "Could not create image $DST"
exec_and_log "$MKFS_CMD" \
    "Unable to create filesystem $FSTYPE in $DST"

echo "$DST $SIZE"
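The dd invocation relies on sparse-file semantics: writing a single byte at an offset of SIZE MB gives the file the requested logical size without allocating all of its blocks, and the command returned by mkfs_command then formats it. A minimal sketch of what runs for a 1024 MB ext3 image (the exact mkfs_command expansion is an assumption):

    dd if=/dev/zero of=$DST bs=1 count=1 seek=1024M   # writes 1 byte at the 1024 MiB mark
    mkfs -t ext3 -F $DST                              # illustrative mkfs_command result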
@@ -17,10 +17,10 @@
#--------------------------------------------------------------------------- #

###############################################################################
# This script is used to remove a VM image (SRC) from the image repository
# This script is used to remove a VM image (SRC) from the image repository
###############################################################################

# ------------ Set up the environment to source common tools ------------
# ------------ Set up the environment to source common tools ------------

if [ -z "${ONE_LOCATION}" ]; then
    LIB_LOCATION=/usr/lib/one
@@ -29,11 +29,19 @@ else
fi

. $LIB_LOCATION/sh/scripts_common.sh
source $(dirname $0)/fsrc

# ------------ Remove the image to the repository ------------
DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../libfs.sh

SRC=$1
# -------- Get rm and datastore arguments from OpenNebula core ------------

DRV_ACTION=$1
ID=$2

XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"
SRC=`$XPATH /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE`

# ------------ Remove the image from the repository ------------

if [ -e $SRC ] ; then
    log "Removing $SRC from the image repository"
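The visible change replaces the old positional interface (rm <path>) with the driver protocol used by all the new scripts: $1 is the Base64-encoded DS_DRIVER_ACTION_DATA XML (the same blob get_ds_type decodes in the Ruby driver) and $2 is the image ID, with the actual source read back out of the XML via xpath.rb. A hypothetical direct call for testing (argument values are placeholders):

    ./rm "$DRV_ACTION" 42    # DRV_ACTION = base64-encoded <DS_DRIVER_ACTION_DATA> document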
113  src/datastore_mad/remotes/iscsi/cp  Executable file
@@ -0,0 +1,113 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

###############################################################################
# This script is used to copy a VM image (SRC) to the image repository as DST
# Several SRC types are supported
###############################################################################

# -------- Set up the environment to source common tools & conf ------------

if [ -z "${ONE_LOCATION}" ]; then
    LIB_LOCATION=/usr/lib/one
else
    LIB_LOCATION=$ONE_LOCATION/lib
fi

. $LIB_LOCATION/sh/scripts_common.sh

DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../libfs.sh
source ${DRIVER_PATH}/iscsi.conf

# -------- Get cp and datastore arguments from OpenNebula core ------------

DRV_ACTION=$1
ID=$2

XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"

unset i XPATH_ELEMENTS

while IFS= read -r -d '' element; do
    XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/HOST \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VG_NAME \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BASE_IQN \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BASE_TID \
                /DS_DRIVER_ACTION_DATA/IMAGE/PATH)


BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
DST_HOST="${XPATH_ELEMENTS[4]}"
VG_NAME="${XPATH_ELEMENTS[5]:-$VG_NAME}"
BASE_IQN="${XPATH_ELEMENTS[6]:-$BASE_IQN}"
BASE_TID="${XPATH_ELEMENTS[7]:-$BASE_TID}"
SRC="${XPATH_ELEMENTS[8]}"

set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"

SIZE=`fs_du $SRC`
LV_NAME="lv-one-${ID}"
IQN="$BASE_IQN:$DST_HOST.$VG_NAME.$LV_NAME"
DEV="/dev/$VG_NAME/$LV_NAME"

let TID=ID+BASE_TID

REGISTER_CMD=$(cat <<EOF
set -e
$SUDO $LVCREATE -L${SIZE}M ${VG_NAME} -n ${LV_NAME}
$SUDO $(tgtadm_target_new "$TID" "$IQN")
$SUDO $(tgtadm_target_bind_all "$TID")
$SUDO $(tgtadm_logicalunit_new "$TID" "$DEV")
EOF
)

case $SRC in
http://*)
    log "Downloading $SRC to the image repository"

    DUMP="$WGET $SRC -O-"
    ;;

*)
    if [ `check_restricted $SRC` -eq 1 ]; then
        log_error "Not allowed to copy images from $RESTRICTED_DIRS"
        error_message "Not allowed to copy image file $SRC"
        exit -1
    fi

    log "Copying local image $SRC to the image repository"

    DUMP="cat $SRC"
    ;;
esac

ssh_exec_and_log "$DST_HOST" "$REGISTER_CMD" "Error registering $DST_HOST:$DEV"

exec_and_log "eval $DUMP | $SSH $DST_HOST $SUDO $DD of=$DEV bs=64k" \
    "Error dumping $SRC to $DST_HOST:$DEV"

echo "$IQN $SIZE"
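The target naming is purely conventional: the logical volume name is derived from the image ID, and the IQN embeds host, volume group and LV so that other scripts can parse them back out. With ID=42, HOST=tgt-server and the iscsi.conf defaults shown below, the values would be:

    LV_NAME=lv-one-42
    DEV=/dev/vg-one/lv-one-42
    IQN=iqn.2012-02.org.opennebula:tgt-server.vg-one.lv-one-42
    TID=43    # image ID 42 + BASE_TID 1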
28  src/datastore_mad/remotes/iscsi/iscsi.conf  Normal file
@@ -0,0 +1,28 @@
# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

# Default IQN path
BASE_IQN=iqn.2012-02.org.opennebula

# Default volume group
VG_NAME=vg-one

# Starting TID for iSCSI targets
BASE_TID=1

# Lists of hosts (separated by spaces) for which no iscsiadm login or logout
# is performed.
NO_ISCSI="$HOSTNAME"
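These values are only fallbacks: the iscsi cp/mkfs scripts read HOST, VG_NAME, BASE_IQN and BASE_TID from the datastore template first and use the ${VAR:-default} form to fall back to this file. An illustrative datastore template fragment (attribute names match the XPATH queries above; the values are examples, not defaults taken from the code):

    DS_MAD   = "iscsi"
    HOST     = "tgt-server"
    VG_NAME  = "vg-one"
    BASE_IQN = "iqn.2012-02.org.opennebula"
    BASE_TID = "100"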
102  src/datastore_mad/remotes/iscsi/mkfs  Executable file
@@ -0,0 +1,102 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

###############################################################################
# This script is used to create a VM image (SRC) of size (SIZE) and formatted
# as (FS)
###############################################################################

# -------- Set up the environment to source common tools & conf ------------

if [ -z "${ONE_LOCATION}" ]; then
    LIB_LOCATION=/usr/lib/one
else
    LIB_LOCATION=$ONE_LOCATION/lib
fi

. $LIB_LOCATION/sh/scripts_common.sh

DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../libfs.sh
source ${DRIVER_PATH}/iscsi.conf

# -------- Get mkfs and datastore arguments from OpenNebula core ------------

DRV_ACTION=$1
ID=$2

XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"

unset i XPATH_ELEMENTS

while IFS= read -r -d '' element; do
    XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/HOST \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/VG_NAME \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BASE_IQN \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BASE_TID \
                /DS_DRIVER_ACTION_DATA/IMAGE/FSTYPE \
                /DS_DRIVER_ACTION_DATA/IMAGE/SIZE)


BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
DST_HOST="${XPATH_ELEMENTS[4]}"
VG_NAME="${XPATH_ELEMENTS[5]:-$VG_NAME}"
BASE_IQN="${XPATH_ELEMENTS[6]:-$BASE_IQN}"
BASE_TID="${XPATH_ELEMENTS[7]:-$BASE_TID}"
FSTYPE="${XPATH_ELEMENTS[8]}"
SIZE="${XPATH_ELEMENTS[9]:-0}"

set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"

LV_NAME="lv-one-${ID}"
IQN="$BASE_IQN:$DST_HOST.$VG_NAME.$LV_NAME"
DEV="/dev/$VG_NAME/$LV_NAME"

let TID=ID+BASE_TID

REGISTER_CMD=$(cat <<EOF
set -e
$SUDO $LVCREATE -L${SIZE}M ${VG_NAME} -n ${LV_NAME}
$SUDO $(tgtadm_target_new "$TID" "$IQN")
$SUDO $(tgtadm_target_bind_all "$TID")
$SUDO $(tgtadm_logicalunit_new "$TID" "$DEV")
$SUDO $(mkfs_command "$DEV" "$FSTYPE")
EOF
)

# ------------ Image to save_as disk, no need to create a FS ------------

if [ "$FSTYPE" = "save_as" ]; then
    echo "$DST $SIZE"
    exit 0
fi

# ------------ Create the image to the repository ------------

ssh_exec_and_log "$DST_HOST" "$REGISTER_CMD" \
    "Error registering $DST_HOST:$DEV"

echo "$IQN $SIZE"
@@ -17,12 +17,10 @@
#--------------------------------------------------------------------------- #

###############################################################################
# This script is used to move a VM image (SRC) to the image repository as DST
# Several SRC types are supported
# This script is used to remove a VM image (SRC) from the image repository
###############################################################################

# ------------ Set up the environment to source common tools ------------

# ------------ Set up the environment to source common tools ------------
if [ -z "${ONE_LOCATION}" ]; then
    LIB_LOCATION=/usr/lib/one
else
@@ -30,46 +28,47 @@ else
fi

. $LIB_LOCATION/sh/scripts_common.sh
source $(dirname $0)/fsrc

SRC=$1
DST=$2
ID=$3
DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../libfs.sh
source ${DRIVER_PATH}/iscsi.conf

# ------------ Generate a filename for the image ------------
# -------- Get rm and datastore arguments from OpenNebula core ------------

if [ "$DST" = "-" ] ; then
    DST=`generate_image_path`
fi
DRV_ACTION=$1
ID=$2

# ------------ Move the image to the repository ------------
XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"

case $SRC in
http://*)
    log "Downloading $SRC to the image repository"
    exec_and_log "$WGET -O $DST $SRC" \
        "Error downloading $SRC"
    ;;
unset i XPATH_ELEMENTS

*)
    log "Moving local image $SRC to the image repository"

    if [ \( -L $SRC \) -a \
         \( "`$READLINK -f $SRC`" = "`$READLINK -f $DST`" \) ] ; then
        log "Not moving files to image repo, they are the same"
    else
        exec_and_log "mv -f $SRC $DST" "Could not move $SRC to $DST"
    fi
    ;;
esac
while IFS= read -r -d '' element; do
    XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/IMAGE/SOURCE \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/HOST \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/BASE_TID)

if [ -d $DST ]; then
    exec_and_log "chmod 0770 $DST"
else
    exec_and_log "chmod 0660 $DST"
fi
SRC="${XPATH_ELEMENTS[0]}"
DST_HOST="${XPATH_ELEMENTS[1]}"
BASE_TID="${XPATH_ELEMENTS[2]:-$BASE_TID}"

# ---------------- Get the size of the image ------------
SIZE=`fs_du $DST`
BASE_IQN=`echo $SRC|$CUT -d: -f1`
TARGET=`echo $SRC|$CUT -d: -f2`
LV_NAME=`echo $TARGET|$AWK -F. '{print $(NF)}'`
VG_NAME=`echo $TARGET|$AWK -F. '{print $(NF-1)}'`
DEV="/dev/$VG_NAME/$LV_NAME"

echo "$DST $SIZE"
let TID=ID+BASE_TID

RM_COMMAND=$(cat <<EOF
$SUDO $(tgtadm_target_delete "$TID")
$SUDO $LVREMOVE -f $VG_NAME/$LV_NAME
EOF
)

log "Removing $DST_HOST:$DEV from the image repository"

ssh_exec_and_log "$DST_HOST" "$RM_COMMAND" \
    "Error removing $DST_HOST:$DEV"

exit 0
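The new rm no longer receives a path: the image SOURCE stored by the iscsi cp/mkfs scripts is the IQN itself, and the volume group and logical volume are recovered from it with cut/awk. With the IQN format built above:

    # SOURCE = iqn.2012-02.org.opennebula:tgt-server.vg-one.lv-one-42
    # cut -d: -f2               -> tgt-server.vg-one.lv-one-42
    # awk -F. '{print $(NF)}'   -> lv-one-42   (LV_NAME)
    # awk -F. '{print $(NF-1)}' -> vg-one      (VG_NAME)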
138  src/datastore_mad/remotes/libfs.sh  Normal file
@@ -0,0 +1,138 @@
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #

#------------------------------------------------------------------------------
# Set up environment variables
#   @param $1 - Datastore base_path
#   @param $2 - Restricted directories
#   @param $3 - Safe dirs
#   @param $4 - Umask for new file creation (default: 0007)
#   @return sets the following environment variables
#     - RESTRICTED_DIRS: Paths that cannot be used to register images
#     - SAFE_DIRS: Paths that are safe to specify image paths
#     - BASE_PATH: Path where the images will be stored
#------------------------------------------------------------------------------
function set_up_datastore {
    #
    # Load the default configuration for FS datastores
    #
    BASE_PATH="$1"
    RESTRICTED_DIRS="$2"
    SAFE_DIRS="$3"
    UMASK="$4"

    if [ -z "${ONE_LOCATION}" ]; then
        VAR_LOCATION=/var/lib/one/
        ETC_LOCATION=/etc/one/
    else
        VAR_LOCATION=$ONE_LOCATION/var/
        ETC_LOCATION=$ONE_LOCATION/etc/
    fi

    #
    # RESTRICTED AND SAFE DIRS (from default configuration)
    #
    RESTRICTED_DIRS="$VAR_LOCATION $ETC_LOCATION $HOME/ $RESTRICTED_DIRS"

    export BASE_PATH
    export RESTRICTED_DIRS
    export SAFE_DIRS

    if [ -n "$UMASK" ]; then
        umask $UMASK
    else
        umask 0007
    fi
}

#-------------------------------------------------------------------------------
# Generates an unique image path. Requires BASE_PATH to be set
#   @return path for the image (empty if error)
#-------------------------------------------------------------------------------
function generate_image_path {

    CANONICAL_STR="`$DATE +%s`:$ID"

    CANONICAL_MD5=$($MD5SUM - << EOF
$CANONICAL_STR
EOF
)
    echo "${BASE_PATH}/`echo $CANONICAL_MD5 | cut -d ' ' -f1`"
}

#-------------------------------------------------------------------------------
# Computes the size of an image
#   @param $1 - Path to the image
#   @return size of the image in Mb
#-------------------------------------------------------------------------------
function fs_du {
    if [ -d "$1" ]; then
        SIZE=`du -sb "$1" | cut -f1`
        error=$?
    else
        SIZE=`stat -c %s "$1"`
        error=$?
    fi

    if [ $error -ne 0 ]; then
        SIZE=0
    else
        SIZE=$((($SIZE+1048575)/1048576))
    fi

    echo "$SIZE"
}

#-------------------------------------------------------------------------------
# Computes the size of an image
#   @param $1 - Path to the image
#   @return size of the image in Mb
#-------------------------------------------------------------------------------
function qemu_size {
    DISK="$1"

    SIZE=`$QEMU_IMG info $DISK|grep "^virtual size:"|\
          sed 's/^.*(\([0-9]\+\) bytes.*$/\1/g'`

    SIZE=$((($SIZE+1048575)/1048576))

    echo "$SIZE"
}

#-------------------------------------------------------------------------------
# Checks if a path is safe for copying the image from
#   @param $1 - Path to the image
#   @return 0 if the path is safe, 1 otherwise
#-------------------------------------------------------------------------------
function check_restricted {
    for path in $SAFE_DIRS ; do
        if [ -n "`readlink -f $1 | grep -E "^$path"`" ] ; then
            echo 0
            return
        fi
    done

    for path in $RESTRICTED_DIRS ; do
        if [ -n "`readlink -f $1 | grep -E "^$path"`" ] ; then
            echo 1
            return
        fi
    done

    echo 0
}
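Two details worth noting when reading these helpers: fs_du rounds bytes up to whole megabytes, and check_restricted prints (rather than returns) its verdict, with SAFE_DIRS taking precedence over RESTRICTED_DIRS. A couple of worked values:

    # (SIZE + 1048575) / 1048576  -- integer division, rounds up to MB
    #   1 byte            -> 1
    #   1073741824 bytes  -> 1024
    check_restricted /etc/one/oned.conf   # echoes 1 when /etc/one is in RESTRICTED_DIRS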
@@ -21,7 +21,7 @@
# Several SRC types are supported
###############################################################################

# -------- Set up the environment to source common tools & conf ------------
# -------- Set up the environment to source common tools & conf ------------

if [ -z "${ONE_LOCATION}" ]; then
    LIB_LOCATION=/usr/lib/one
@@ -30,27 +30,42 @@ else
fi

. $LIB_LOCATION/sh/scripts_common.sh
source $(dirname $0)/fsrc

SRC=$1
DRIVER_PATH=$(dirname $0)
source ${DRIVER_PATH}/../libfs.sh

# -------- Get cp and datastore arguments from OpenNebula core ------------

DRV_ACTION=$1
ID=$2

XPATH="${DRIVER_PATH}/../xpath.rb -b $DRV_ACTION"

unset i XPATH_ELEMENTS

while IFS= read -r -d '' element; do
    XPATH_ELEMENTS[i++]="$element"
done < <($XPATH /DS_DRIVER_ACTION_DATA/DATASTORE/BASE_PATH \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/RESTRICTED_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/SAFE_DIRS \
                /DS_DRIVER_ACTION_DATA/DATASTORE/TEMPLATE/UMASK \
                /DS_DRIVER_ACTION_DATA/IMAGE/PATH)

BASE_PATH="${XPATH_ELEMENTS[0]}"
RESTRICTED_DIRS="${XPATH_ELEMENTS[1]}"
SAFE_DIRS="${XPATH_ELEMENTS[2]}"
UMASK="${XPATH_ELEMENTS[3]}"
SRC="${XPATH_ELEMENTS[4]}"

mkdir -p "$BASE_PATH"
set_up_datastore "$BASE_PATH" "$RESTRICTED_DIRS" "$SAFE_DIRS" "$UMASK"

DST=`generate_image_path`

# ------------ Copy the image to the repository -------------
# ------------ Copy the image to the repository -------------

case $SRC in
http://*)
    log "Downloading $SRC to the image repository"

    exec_and_log "$WGET -O $DST $SRC" \
        "Error downloading $SRC"

    exec_and_log "chmod 0660 $DST"
    ;;

vmware://*)
    SRC=`echo $SRC|sed 's/vmware:\/\///g'`

*)
    if [ `check_restricted $SRC` -eq 1 ]; then
        log_error "Not allowed to copy images from $RESTRICTED_DIRS"
        error_message "Not allowed to copy image file $SRC"
@@ -59,8 +74,7 @@ vmware://*)

    log "Copying local disk folder $SRC to the image repository"

    exec_and_log "cp -rf $SRC $DST" \
        "Error copying $SRC to $DST"
    exec_and_log "cp -rf $SRC $DST" "Error copying $SRC to $DST"

    if [ ! -f $DST/disk.vmdk ]; then
        BASE_DISK_FILE=`ls $DST | grep -v '.*-s[0-9]*\.vmdk'`
@@ -68,28 +82,10 @@ vmware://*)
        exec_and_log "mv -f $DST/$BASE_DISK_FILE $DST/disk.vmdk" \
            "Error renaming disk file $BASE_DISK_FILE to disk.vmdk"
    fi

    exec_and_log "chmod 0770 $DST"
    ;;

*)
    if [ `check_restricted $SRC` -eq 1 ]; then
        log_error "Not allowed to copy images from $RESTRICTED_DIRS"
        error_message "Not allowed to copy image file $SRC"
        exit -1
    fi

    log "Copying local image $SRC to the image repository"

    exec_and_log "cp -f $SRC $DST" \
        "Error copying $SRC to $DST"

    exec_and_log "chmod 0660 $DST"
    ;;
esac

# ---------------- Get the size of the image & fix perms ------------

# ---------------- Get the size of the image ------------

SIZE=`fs_du $DST`
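In the vmware://* branch the whole image folder is copied and the main descriptor is normalized to disk.vmdk; the grep -v filter drops VMware split-extent files so only the descriptor remains. An illustrative listing (file names are made up):

    # $DST contains: myvm.vmdk  myvm-s001.vmdk  myvm-s002.vmdk
    # ls $DST | grep -v '.*-s[0-9]*\.vmdk'  -> myvm.vmdk
    # myvm.vmdk is then renamed to $DST/disk.vmdk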